xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision 689990d5df106ae79275687b40b8144dd8fee6ff)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "targcfg.h"
21 #include "qdf_lock.h"
22 #include "qdf_status.h"
24 #include <qdf_atomic.h>         /* qdf_atomic_read */
25 #include <targaddrs.h>
26 #include "hif_io32.h"
27 #include <hif.h>
28 #include <target_type.h>
29 #include "regtable.h"
30 #define ATH_MODULE_NAME hif
31 #include <a_debug.h>
32 #include "hif_main.h"
33 #include "hif_hw_version.h"
34 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
35      defined(HIF_IPCI))
36 #include "ce_tasklet.h"
37 #include "ce_api.h"
38 #endif
39 #include "qdf_trace.h"
40 #include "qdf_status.h"
41 #include "hif_debug.h"
42 #include "mp_dev.h"
43 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
44 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
45 	defined(QCA_WIFI_QCA5332)
46 #include "hal_api.h"
47 #endif
48 #include "hif_napi.h"
49 #include "hif_unit_test_suspend_i.h"
50 #include "qdf_module.h"
51 #ifdef HIF_CE_LOG_INFO
52 #include <qdf_notifier.h>
53 #include <qdf_hang_event_notifier.h>
54 #endif
55 #include <linux/cpumask.h>
56 
57 #include <pld_common.h>
58 #include "ce_internal.h"
59 #include <qdf_tracepoint.h>
60 
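/**
 * hif_dump(): trigger a dump on the underlying bus layer
 * @hif_ctx: hif context
 * @cmd_id: dump command id, forwarded to hif_trigger_dump()
 * @start: start/stop flag, forwarded to hif_trigger_dump()
 *
 * Thin wrapper that forwards the request to hif_trigger_dump().
 *
 * Return: none
 */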
61 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
62 {
63 	hif_trigger_dump(hif_ctx, cmd_id, start);
64 }
65 
66 /**
67  * hif_get_target_id(): get the target id
68  * @scn: hif context
69  *
70  * Return the virtual memory base address (scn->mem) to the
71  * caller so it can be used as the target id for register
72  * accesses.
73  *
74  * Return: A_target_id_t
75  */
76 A_target_id_t hif_get_target_id(struct hif_softc *scn)
77 {
78 	return scn->mem;
79 }
80 
81 /**
82  * hif_get_targetdef(): hif_get_targetdef
83  * @hif_ctx: hif context
84  *
85  * Return: void *
86  */
87 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
88 {
89 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
90 
91 	return scn->targetdef;
92 }
93 
94 #ifdef FORCE_WAKE
95 #ifndef QCA_WIFI_WCN6450
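/**
 * hif_srng_init_phase(): mark the SRNG init phase in the HAL layer
 * @hif_ctx: hif context
 * @init_phase: true while SRNG initialization is in progress
 *
 * Forwards the init phase indication to hal_set_init_phase() for
 * SRNG-based copy engine targets.
 *
 * Return: none
 */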
96 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
97 			 bool init_phase)
98 {
99 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
100 
101 	if (ce_srng_based(scn))
102 		hal_set_init_phase(scn->hal_soc, init_phase);
103 }
104 #else
105 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
106 			 bool init_phase)
107 {
108 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
109 
110 	hal_set_init_phase(scn->hal_soc, init_phase);
111 }
112 #endif
113 #endif /* FORCE_WAKE */
114 
115 #ifdef HIF_IPCI
116 void hif_shutdown_notifier_cb(void *hif_ctx)
117 {
118 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
119 
120 	scn->recovery = true;
121 }
122 #endif
123 
124 /**
125  * hif_vote_link_down(): unvote for link up
126  * @hif_ctx: hif context
127  *
128  * Call hif_vote_link_down to release a previous request made using
129  * hif_vote_link_up. A hif_vote_link_down call should only be made
130  * after a corresponding hif_vote_link_up, otherwise you could be
131  * negating a vote from another source. When no votes are present
132  * hif will not guarantee the linkstate after hif_bus_suspend.
133  *
134  * SYNCHRONIZE WITH hif_vote_link_up by only calling in the MC thread
135  * and in initialization/deinitialization sequences.
136  *
137  * Return: n/a
138  */
139 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
140 {
141 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
142 
143 	QDF_BUG(scn);
144 	if (scn->linkstate_vote == 0)
145 		QDF_DEBUG_PANIC("linkstate_vote(%d) is already 0",
146 				scn->linkstate_vote);
147 
148 	scn->linkstate_vote--;
149 	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
150 	if (scn->linkstate_vote == 0)
151 		hif_bus_prevent_linkdown(scn, false);
152 }
153 
154 /**
155  * hif_vote_link_up(): vote to prevent bus from suspending
156  * @hif_ctx: hif context
157  *
158  * Makes hif guarantee that fw can message the host normally
159  * during suspend.
160  *
161  * SYNCHRONIZE WITH hif_vote_link_down by only calling in the MC thread
162  * and in initialization/deinitialization sequences.
163  *
164  * Return: n/a
165  */
166 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
167 {
168 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
169 
170 	QDF_BUG(scn);
171 	scn->linkstate_vote++;
172 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
173 	if (scn->linkstate_vote == 1)
174 		hif_bus_prevent_linkdown(scn, true);
175 }
176 
177 /**
178  * hif_can_suspend_link(): query if hif is permitted to suspend the link
179  * @hif_ctx: hif context
180  *
181  * Hif will ensure that the link won't be suspended if the upper layers
182  * don't want it to.
183  *
184  * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
185  * we don't need extra locking to ensure votes don't change while
186  * we are in the process of suspending or resuming.
187  *
188  * Return: false if hif will guarantee link up during suspend.
189  */
190 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
191 {
192 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
193 
194 	QDF_BUG(scn);
195 	return scn->linkstate_vote == 0;
196 }
197 
198 /**
199  * hif_hia_item_address(): get the host interest item address
200  * @target_type: target type
201  * @item_offset: item offset within the host interest area
202  *
203  * Return: host interest address for the item, or 0 if unsupported
204  */
205 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
206 {
207 	switch (target_type) {
208 	case TARGET_TYPE_AR6002:
209 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
210 	case TARGET_TYPE_AR6003:
211 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
212 	case TARGET_TYPE_AR6004:
213 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
214 	case TARGET_TYPE_AR6006:
215 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
216 	case TARGET_TYPE_AR9888:
217 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
218 	case TARGET_TYPE_AR6320:
219 	case TARGET_TYPE_AR6320V2:
220 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
221 	case TARGET_TYPE_ADRASTEA:
222 		/* ADRASTEA doesn't have a host interest address */
223 		ASSERT(0);
224 		return 0;
225 	case TARGET_TYPE_AR900B:
226 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
227 	case TARGET_TYPE_QCA9984:
228 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
229 	case TARGET_TYPE_QCA9888:
230 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
231 
232 	default:
233 		ASSERT(0);
234 		return 0;
235 	}
236 }
237 
238 /**
239  * hif_max_num_receives_reached() - check if max receives is reached
240  * @scn: HIF Context
241  * @count: number of received packets processed so far
242  *
243  * Check whether the receive count has exceeded the allowed maximum.
244  *
245  * Return: true if the maximum has been reached, false otherwise
246  */
247 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
248 {
249 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
250 		return count > 120;
251 	else
252 		return count > MAX_NUM_OF_RECEIVES;
253 }
254 
255 /**
256  * init_buffer_count() - get the initial buffer count
257  * @maxSize: maximum buffer count requested
258  *
259  * Routine to modify the initial buffer count to be allocated on a
260  * per-OS-platform basis. Platform owners may modify this as needed.
261  *
262  * Return: the buffer count to allocate
263  */
264 qdf_size_t init_buffer_count(qdf_size_t maxSize)
265 {
266 	return maxSize;
267 }
268 
269 /**
270  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
271  * @hif_ctx: hif context
272  * @htc_htt_tx_endpoint: htt_tx_endpoint
273  *
274  * Return: void
275  */
276 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
277 							int htc_htt_tx_endpoint)
278 {
279 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
280 
281 	if (!scn) {
282 		hif_err("scn is NULL!");
283 		return;
284 	}
285 
286 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
287 }
288 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
289 
290 static const struct qwlan_hw qwlan_hw_list[] = {
291 	{
292 		.id = AR6320_REV1_VERSION,
293 		.subid = 0,
294 		.name = "QCA6174_REV1",
295 	},
296 	{
297 		.id = AR6320_REV1_1_VERSION,
298 		.subid = 0x1,
299 		.name = "QCA6174_REV1_1",
300 	},
301 	{
302 		.id = AR6320_REV1_3_VERSION,
303 		.subid = 0x2,
304 		.name = "QCA6174_REV1_3",
305 	},
306 	{
307 		.id = AR6320_REV2_1_VERSION,
308 		.subid = 0x4,
309 		.name = "QCA6174_REV2_1",
310 	},
311 	{
312 		.id = AR6320_REV2_1_VERSION,
313 		.subid = 0x5,
314 		.name = "QCA6174_REV2_2",
315 	},
316 	{
317 		.id = AR6320_REV3_VERSION,
318 		.subid = 0x6,
319 		.name = "QCA6174_REV2.3",
320 	},
321 	{
322 		.id = AR6320_REV3_VERSION,
323 		.subid = 0x8,
324 		.name = "QCA6174_REV3",
325 	},
326 	{
327 		.id = AR6320_REV3_VERSION,
328 		.subid = 0x9,
329 		.name = "QCA6174_REV3_1",
330 	},
331 	{
332 		.id = AR6320_REV3_2_VERSION,
333 		.subid = 0xA,
334 		.name = "AR6320_REV3_2_VERSION",
335 	},
336 	{
337 		.id = QCA6390_V1,
338 		.subid = 0x0,
339 		.name = "QCA6390_V1",
340 	},
341 	{
342 		.id = QCA6490_V1,
343 		.subid = 0x0,
344 		.name = "QCA6490_V1",
345 	},
346 	{
347 		.id = WCN3990_v1,
348 		.subid = 0x0,
349 		.name = "WCN3990_V1",
350 	},
351 	{
352 		.id = WCN3990_v2,
353 		.subid = 0x0,
354 		.name = "WCN3990_V2",
355 	},
356 	{
357 		.id = WCN3990_v2_1,
358 		.subid = 0x0,
359 		.name = "WCN3990_V2.1",
360 	},
361 	{
362 		.id = WCN3998,
363 		.subid = 0x0,
364 		.name = "WCN3998",
365 	},
366 	{
367 		.id = QCA9379_REV1_VERSION,
368 		.subid = 0xC,
369 		.name = "QCA9379_REV1",
370 	},
371 	{
372 		.id = QCA9379_REV1_VERSION,
373 		.subid = 0xD,
374 		.name = "QCA9379_REV1_1",
375 	},
376 	{
377 		.id = MANGO_V1,
378 		.subid = 0xF,
379 		.name = "MANGO_V1",
380 	},
381 	{
382 		.id = PEACH_V1,
383 		.subid = 0,
384 		.name = "PEACH_V1",
385 	},
386 
387 	{
388 		.id = KIWI_V1,
389 		.subid = 0,
390 		.name = "KIWI_V1",
391 	},
392 	{
393 		.id = KIWI_V2,
394 		.subid = 0,
395 		.name = "KIWI_V2",
396 	},
397 	{
398 		.id = WCN6750_V1,
399 		.subid = 0,
400 		.name = "WCN6750_V1",
401 	},
402 	{
403 		.id = WCN6750_V2,
404 		.subid = 0,
405 		.name = "WCN6750_V2",
406 	},
407 	{
408 		.id = WCN6450_V1,
409 		.subid = 0,
410 		.name = "WCN6450_V1",
411 	},
412 	{
413 		.id = QCA6490_v2_1,
414 		.subid = 0,
415 		.name = "QCA6490",
416 	},
417 	{
418 		.id = QCA6490_v2,
419 		.subid = 0,
420 		.name = "QCA6490",
421 	},
422 	{
423 		.id = WCN3990_TALOS,
424 		.subid = 0,
425 		.name = "WCN3990",
426 	},
427 	{
428 		.id = WCN3990_MOOREA,
429 		.subid = 0,
430 		.name = "WCN3990",
431 	},
432 	{
433 		.id = WCN3990_SAIPAN,
434 		.subid = 0,
435 		.name = "WCN3990",
436 	},
437 	{
438 		.id = WCN3990_RENNELL,
439 		.subid = 0,
440 		.name = "WCN3990",
441 	},
442 	{
443 		.id = WCN3990_BITRA,
444 		.subid = 0,
445 		.name = "WCN3990",
446 	},
447 	{
448 		.id = WCN3990_DIVAR,
449 		.subid = 0,
450 		.name = "WCN3990",
451 	},
452 	{
453 		.id = WCN3990_ATHERTON,
454 		.subid = 0,
455 		.name = "WCN3990",
456 	},
457 	{
458 		.id = WCN3990_STRAIT,
459 		.subid = 0,
460 		.name = "WCN3990",
461 	},
462 	{
463 		.id = WCN3990_NETRANI,
464 		.subid = 0,
465 		.name = "WCN3990",
466 	},
467 	{
468 		.id = WCN3990_CLARENCE,
469 		.subid = 0,
470 		.name = "WCN3990",
471 	}
472 };
473 
474 /**
475  * hif_get_hw_name(): get a human readable name for the hardware
476  * @info: Target Info
477  *
478  * Return: human readable name for the underlying wifi hardware.
479  */
480 static const char *hif_get_hw_name(struct hif_target_info *info)
481 {
482 	int i;
483 
484 	hif_debug("target version = %d, target revision = %d",
485 		  info->target_version,
486 		  info->target_revision);
487 
488 	if (info->hw_name)
489 		return info->hw_name;
490 
491 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
492 		if (info->target_version == qwlan_hw_list[i].id &&
493 		    info->target_revision == qwlan_hw_list[i].subid) {
494 			return qwlan_hw_list[i].name;
495 		}
496 	}
497 
498 	info->hw_name = qdf_mem_malloc(64);
499 	if (!info->hw_name)
500 		return "Unknown Device (nomem)";
501 
502 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
503 			info->target_version);
504 	if (i < 0)
505 		return "Unknown Device (snprintf failure)";
506 	else
507 		return info->hw_name;
508 }
509 
510 /**
511  * hif_get_hw_info(): hif_get_hw_info
512  * @scn: scn
513  * @version: version
514  * @revision: revision
515  * @target_name: target name
516  *
517  * Return: n/a
518  */
519 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
520 			const char **target_name)
521 {
522 	struct hif_target_info *info = hif_get_target_info_handle(scn);
523 	struct hif_softc *sc = HIF_GET_SOFTC(scn);
524 
525 	if (sc->bus_type == QDF_BUS_TYPE_USB)
526 		hif_usb_get_hw_info(sc);
527 
528 	*version = info->target_version;
529 	*revision = info->target_revision;
530 	*target_name = hif_get_hw_name(info);
531 }
532 
533 /**
534  * hif_get_dev_ba(): API to get device base address.
535  * @hif_handle: hif handle
536  *
537  * Return: device base address
538  */
539 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
540 {
541 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
542 
543 	return scn->mem;
544 }
545 qdf_export_symbol(hif_get_dev_ba);
546 
547 /**
548  * hif_get_dev_ba_ce(): API to get device ce base address.
549  * @hif_handle: hif handle
550  *
551  * Return: dev mem base address for CE
552  */
553 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
554 {
555 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
556 
557 	return scn->mem_ce;
558 }
559 
560 qdf_export_symbol(hif_get_dev_ba_ce);
561 
562 /**
563  * hif_get_dev_ba_pmm(): API to get device pmm base address.
564  * @hif_handle: hif handle
565  *
566  * Return: dev mem base address for PMM
567  */
568 
569 void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle)
570 {
571 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
572 
573 	return scn->mem_pmm_base;
574 }
575 
576 qdf_export_symbol(hif_get_dev_ba_pmm);
577 
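/**
 * hif_get_soc_version(): get the SoC version of the target
 * @hif_handle: hif handle
 *
 * Return: target_info.soc_version stored in the hif context
 */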
578 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
579 {
580 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
581 
582 	return scn->target_info.soc_version;
583 }
584 
585 qdf_export_symbol(hif_get_soc_version);
586 
587 /**
588  * hif_get_dev_ba_cmem(): API to get device CMEM base address.
589  * @hif_handle: hif handle
590  *
591  * Return: dev mem base address for CMEM
592  */
593 void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
594 {
595 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
596 
597 	return scn->mem_cmem;
598 }
599 
600 qdf_export_symbol(hif_get_dev_ba_cmem);
601 
602 #ifdef FEATURE_RUNTIME_PM
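/**
 * hif_runtime_prevent_linkdown(): take or release a runtime PM vote
 * @scn: hif context
 * @is_get: true to take the prevent-linkdown runtime PM vote,
 *          false to release it
 *
 * Acquires or releases the prevent_linkdown runtime PM lock so the
 * link is kept up while the vote is held.
 *
 * Return: none
 */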
603 void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
604 {
605 	if (is_get)
606 		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
607 	else
608 		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
609 }
610 
611 static inline
612 void hif_rtpm_lock_init(struct hif_softc *scn)
613 {
614 	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
615 }
616 
617 static inline
618 void hif_rtpm_lock_deinit(struct hif_softc *scn)
619 {
620 	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
621 }
622 #else
623 static inline
624 void hif_rtpm_lock_init(struct hif_softc *scn)
625 {
626 }
627 
628 static inline
629 void hif_rtpm_lock_deinit(struct hif_softc *scn)
630 {
631 }
632 #endif
633 
634 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
635 /**
636  * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
637  * @scn: hif context
638  * @psoc: psoc objmgr handle
639  *
640  * Return: None
641  */
642 static inline
643 void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
644 					       struct wlan_objmgr_psoc *psoc)
645 {
646 	if (psoc) {
647 		scn->ini_cfg.ce_status_ring_timer_threshold =
648 			cfg_get(psoc,
649 				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
650 		scn->ini_cfg.ce_status_ring_batch_count_threshold =
651 			cfg_get(psoc,
652 				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
653 	}
654 }
655 #else
656 static inline
657 void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
658 					       struct wlan_objmgr_psoc *psoc)
659 {
660 }
661 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
662 
663 /**
664  * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
665  * @scn: hif context
666  * @psoc: psoc objmgr handle
667  *
668  * Return: None
669  */
670 static inline
671 void hif_get_cfg_from_psoc(struct hif_softc *scn,
672 			   struct wlan_objmgr_psoc *psoc)
673 {
674 	if (psoc) {
675 		scn->ini_cfg.disable_wake_irq =
676 			cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
677 		/*
678 		 * The wake IRQ can't share the same IRQ with the copy engines.
679 		 * In one-MSI mode we don't know whether the wake IRQ was
680 		 * triggered in the wake IRQ handler (known issue CR 2055359).
681 		 * To support the wake IRQ, allocate at least 2 MSI vectors:
682 		 * the first is for the wake IRQ while the others share the
683 		 * second vector.
684 		 */
685 		if (pld_is_one_msi(scn->qdf_dev->dev)) {
686 			hif_debug("Disabling wake IRQ in one MSI mode");
687 			scn->ini_cfg.disable_wake_irq = true;
688 		}
689 		hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
690 	}
691 }
692 
693 #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
694 /**
695  * hif_recovery_notifier_cb - Recovery notifier callback to log
696  *  hang event data
697  * @block: notifier block
698  * @state: state
699  * @data: notifier data
700  *
701  * Return: status
702  */
703 static
704 int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
705 			     void *data)
706 {
707 	struct qdf_notifer_data *notif_data = data;
708 	qdf_notif_block *notif_block;
709 	struct hif_softc *hif_handle;
710 	bool bus_id_invalid;
711 
712 	if (!data || !block)
713 		return -EINVAL;
714 
715 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
716 
717 	hif_handle = notif_block->priv_data;
718 	if (!hif_handle)
719 		return -EINVAL;
720 
721 	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
722 					  &notif_data->offset);
723 	if (bus_id_invalid)
724 		return NOTIFY_STOP_MASK;
725 
726 	hif_log_ce_info(hif_handle, notif_data->hang_data,
727 			&notif_data->offset);
728 
729 	return 0;
730 }
731 
732 /**
733  * hif_register_recovery_notifier - Register hif recovery notifier
734  * @hif_handle: hif handle
735  *
736  * Return: status
737  */
738 static
739 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
740 {
741 	qdf_notif_block *hif_notifier;
742 
743 	if (!hif_handle)
744 		return QDF_STATUS_E_FAILURE;
745 
746 	hif_notifier = &hif_handle->hif_recovery_notifier;
747 
748 	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
749 	hif_notifier->priv_data = hif_handle;
750 	return qdf_hang_event_register_notifier(hif_notifier);
751 }
752 
753 /**
754  * hif_unregister_recovery_notifier - Un-register hif recovery notifier
755  * @hif_handle: hif handle
756  *
757  * Return: status
758  */
759 static
760 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
761 {
762 	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
763 
764 	return qdf_hang_event_unregister_notifier(hif_notifier);
765 }
766 #else
767 static inline
768 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
769 {
770 	return QDF_STATUS_SUCCESS;
771 }
772 
773 static inline
774 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
775 {
776 	return QDF_STATUS_SUCCESS;
777 }
778 #endif
779 
780 #ifdef HIF_CPU_PERF_AFFINE_MASK
781 /**
782  * __hif_cpu_hotplug_notify() - CPU hotplug event handler
783  * @context: HIF context
784  * @cpu: CPU Id of the CPU generating the event
785  * @cpu_up: true if the CPU is online
786  *
787  * Return: None
788  */
789 static void __hif_cpu_hotplug_notify(void *context,
790 				     uint32_t cpu, bool cpu_up)
791 {
792 	struct hif_softc *scn = context;
793 
794 	if (!scn)
795 		return;
796 	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
797 		return;
798 
799 	if (cpu_up) {
800 		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
801 		hif_debug("Setting affinity for online CPU: %d", cpu);
802 	} else {
803 		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
804 	}
805 }
806 
807 /**
808  * hif_cpu_hotplug_notify - cpu core up/down notification
809  * handler
810  * @context: HIF context
811  * @cpu: CPU generating the event
812  * @cpu_up: true if the CPU is online
813  *
814  * Return: None
815  */
816 static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
817 {
818 	struct qdf_op_sync *op_sync;
819 
820 	if (qdf_op_protect(&op_sync))
821 		return;
822 
823 	__hif_cpu_hotplug_notify(context, cpu, cpu_up);
824 
825 	qdf_op_unprotect(op_sync);
826 }
827 
828 static void hif_cpu_online_cb(void *context, uint32_t cpu)
829 {
830 	hif_cpu_hotplug_notify(context, cpu, true);
831 }
832 
833 static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
834 {
835 	hif_cpu_hotplug_notify(context, cpu, false);
836 }
837 
838 static void hif_cpuhp_register(struct hif_softc *scn)
839 {
840 	if (!scn) {
841 		hif_info_high("cannot register hotplug notifiers");
842 		return;
843 	}
844 	qdf_cpuhp_register(&scn->cpuhp_event_handle,
845 			   scn,
846 			   hif_cpu_online_cb,
847 			   hif_cpu_before_offline_cb);
848 }
849 
850 static void hif_cpuhp_unregister(struct hif_softc *scn)
851 {
852 	if (!scn) {
853 		hif_info_high("cannot unregister hotplug notifiers");
854 		return;
855 	}
856 	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
857 }
858 
859 #else
860 static void hif_cpuhp_register(struct hif_softc *scn)
861 {
862 }
863 
864 static void hif_cpuhp_unregister(struct hif_softc *scn)
865 {
866 }
867 #endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
868 
869 #ifdef HIF_DETECTION_LATENCY_ENABLE
870 /*
871  * Bitmask to control enablement of latency detection for the tasklets,
872  * bit-X represents for tasklet of WLAN_CE_X.
873  */
874 #ifndef DETECTION_LATENCY_TASKLET_MASK
875 #define DETECTION_LATENCY_TASKLET_MASK (BIT(2) | BIT(7))
876 #endif
877 
878 static inline int
879 __hif_tasklet_latency(struct hif_softc *scn, bool from_timer, int idx)
880 {
881 	qdf_time_t sched_time =
882 		scn->latency_detect.tasklet_info[idx].sched_time;
883 	qdf_time_t exec_time =
884 		scn->latency_detect.tasklet_info[idx].exec_time;
885 	qdf_time_t curr_time = qdf_system_ticks();
886 	uint32_t threshold = scn->latency_detect.threshold;
887 	qdf_time_t expect_exec_time =
888 		sched_time + qdf_system_msecs_to_ticks(threshold);
889 
890 	/* 2 kinds of check here.
891 	 * from_timer==true:  check if the tasklet has stalled
892 	 * from_timer==false: check if tasklet execution came late
893 	 */
894 	if (from_timer ?
895 	    (qdf_system_time_after(sched_time, exec_time) &&
896 	     qdf_system_time_after(curr_time, expect_exec_time)) :
897 	    qdf_system_time_after(exec_time, expect_exec_time)) {
898 		hif_err("tasklet[%d] latency detected: from_timer %d, curr_time %lu, sched_time %lu, exec_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
899 			idx, from_timer, curr_time, sched_time,
900 			exec_time, threshold,
901 			scn->latency_detect.timeout,
902 			qdf_get_cpu(), (void *)_RET_IP_);
903 		qdf_trigger_self_recovery(NULL,
904 					  QDF_TASKLET_CREDIT_LATENCY_DETECT);
905 		return -ETIMEDOUT;
906 	}
907 
908 	return 0;
909 }
910 
911 /**
912  * hif_tasklet_latency_detect_enabled() - check whether latency detect
913  * is enabled for the tasklet which is specified by idx
914  * @scn: hif context
915  * @idx: CE id
916  *
917  * Return: true if latency detect is enabled for the specified tasklet,
918  * false otherwise.
919  */
920 static inline bool
921 hif_tasklet_latency_detect_enabled(struct hif_softc *scn, int idx)
922 {
923 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
924 		return false;
925 
926 	if (!scn->latency_detect.enable_detection)
927 		return false;
928 
929 	if (idx < 0 || idx >= HIF_TASKLET_IN_MONITOR ||
930 	    !qdf_test_bit(idx, scn->latency_detect.tasklet_bmap))
931 		return false;
932 
933 	return true;
934 }
935 
936 void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
937 {
938 	if (!hif_tasklet_latency_detect_enabled(scn, idx))
939 		return;
940 
941 	/*
942 	 * hif_set_enable_detection(true) might come between
943 	 * hif_tasklet_latency_record_sched() and
944 	 * hif_tasklet_latency_record_exec() during wlan startup, then the
945 	 * sched_time is 0 but exec_time is not, and hit the timeout case in
946 	 * __hif_tasklet_latency().
947 	 * To avoid such issue, skip exec_time recording if sched_time has not
948 	 * been recorded.
949 	 */
950 	if (!scn->latency_detect.tasklet_info[idx].sched_time)
951 		return;
952 
953 	scn->latency_detect.tasklet_info[idx].exec_time = qdf_system_ticks();
954 	__hif_tasklet_latency(scn, false, idx);
955 }
956 
957 void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
958 {
959 	if (!hif_tasklet_latency_detect_enabled(scn, idx))
960 		return;
961 
962 	scn->latency_detect.tasklet_info[idx].sched_cpuid = qdf_get_cpu();
963 	scn->latency_detect.tasklet_info[idx].sched_time = qdf_system_ticks();
964 }
965 
966 static inline void hif_credit_latency(struct hif_softc *scn, bool from_timer)
967 {
968 	qdf_time_t credit_request_time =
969 		scn->latency_detect.credit_request_time;
970 	qdf_time_t credit_report_time = scn->latency_detect.credit_report_time;
971 	qdf_time_t curr_jiffies = qdf_system_ticks();
972 	uint32_t threshold = scn->latency_detect.threshold;
973 	int cpu_id = qdf_get_cpu();
974 
975 	/* 2 kinds of check here.
976 	 * from_timer==true:  check if the credit report has stalled
977 	 * from_timer==false: check if the credit report came late
978 	 */
979 
980 	if ((from_timer ?
981 	     qdf_system_time_after(credit_request_time, credit_report_time) :
982 	     qdf_system_time_after(credit_report_time, credit_request_time)) &&
983 	    qdf_system_time_after(curr_jiffies,
984 				  credit_request_time +
985 				  qdf_system_msecs_to_ticks(threshold))) {
986 		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
987 			from_timer, curr_jiffies, credit_request_time,
988 			credit_report_time, threshold,
989 			scn->latency_detect.timeout,
990 			cpu_id, (void *)_RET_IP_);
991 		goto latency;
992 	}
993 	return;
994 
995 latency:
996 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
997 }
998 
999 static inline void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
1000 {
1001 	int i, ret;
1002 
1003 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1004 		if (!qdf_test_bit(i, scn->latency_detect.tasklet_bmap))
1005 			continue;
1006 
1007 		ret = __hif_tasklet_latency(scn, from_timer, i);
1008 		if (ret)
1009 			return;
1010 	}
1011 }
1012 
1013 /**
1014  * hif_check_detection_latency(): check for tasklet/credit latency
1015  *
1016  * @scn: hif context
1017  * @from_timer: if called from timer handler
1018  * @bitmap_type: bitmap indicating whether to check tasklet, credit or both
1019  *
1020  * Return: none
1021  */
1022 void hif_check_detection_latency(struct hif_softc *scn,
1023 				 bool from_timer,
1024 				 uint32_t bitmap_type)
1025 {
1026 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1027 		return;
1028 
1029 	if (!scn->latency_detect.enable_detection)
1030 		return;
1031 
1032 	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
1033 		hif_tasklet_latency(scn, from_timer);
1034 
1035 	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
1036 		hif_credit_latency(scn, from_timer);
1037 }
1038 
1039 static void hif_latency_detect_timeout_handler(void *arg)
1040 {
1041 	struct hif_softc *scn = (struct hif_softc *)arg;
1042 	int next_cpu, i;
1043 	qdf_cpu_mask cpu_mask = {0};
1044 	struct hif_latency_detect *detect = &scn->latency_detect;
1045 
1046 	hif_check_detection_latency(scn, true,
1047 				    BIT(HIF_DETECT_TASKLET) |
1048 				    BIT(HIF_DETECT_CREDIT));
1049 
1050 	/* The timer needs to be started on a different cpu so that it
1051 	 * can detect a tasklet schedule stall. There is still a chance
1052 	 * that, after the timer has been started, an irq/tasklet lands
1053 	 * on the same cpu; the tasklet then executes before the softirq
1054 	 * timer, and if that tasklet stalls, this timer can't detect
1055 	 * it. We accept this as a limitation: if the tasklet stalls,
1056 	 * some other place will still detect it, just a little
1057 	 * later.
1058 	 */
1059 	qdf_cpumask_copy(&cpu_mask, (const qdf_cpu_mask *)cpu_active_mask);
1060 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1061 		if (!qdf_test_bit(i, detect->tasklet_bmap))
1062 			continue;
1063 
1064 		qdf_cpumask_clear_cpu(detect->tasklet_info[i].sched_cpuid,
1065 				      &cpu_mask);
1066 	}
1067 
1068 	next_cpu = cpumask_first(&cpu_mask);
1069 	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
1070 		hif_debug("start timer on local");
1071 		/* no available cpu found, start on the local cpu */
1072 		qdf_timer_mod(&detect->timer, detect->timeout);
1073 	} else {
1074 		qdf_timer_start_on(&detect->timer, detect->timeout, next_cpu);
1075 	}
1076 }
1077 
1078 static void hif_latency_detect_timer_init(struct hif_softc *scn)
1079 {
1080 	scn->latency_detect.timeout =
1081 		DETECTION_TIMER_TIMEOUT;
1082 	scn->latency_detect.threshold =
1083 		DETECTION_LATENCY_THRESHOLD;
1084 
1085 	hif_info("timer timeout %u, latency threshold %u",
1086 		 scn->latency_detect.timeout,
1087 		 scn->latency_detect.threshold);
1088 
1089 	scn->latency_detect.is_timer_started = false;
1090 
1091 	qdf_timer_init(NULL,
1092 		       &scn->latency_detect.timer,
1093 		       &hif_latency_detect_timeout_handler,
1094 		       scn,
1095 		       QDF_TIMER_TYPE_SW_SPIN);
1096 }
1097 
1098 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
1099 {
1100 	hif_info("deinit timer");
1101 	qdf_timer_free(&scn->latency_detect.timer);
1102 }
1103 
1104 static void hif_latency_detect_init(struct hif_softc *scn)
1105 {
1106 	uint32_t tasklet_mask;
1107 	int i;
1108 
1109 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1110 		return;
1111 
1112 	tasklet_mask = DETECTION_LATENCY_TASKLET_MASK;
1113 	hif_info("tasklet mask is 0x%x", tasklet_mask);
1114 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1115 		if (BIT(i) & tasklet_mask)
1116 			qdf_set_bit(i, scn->latency_detect.tasklet_bmap);
1117 	}
1118 
1119 	hif_latency_detect_timer_init(scn);
1120 }
1121 
1122 static void hif_latency_detect_deinit(struct hif_softc *scn)
1123 {
1124 	int i;
1125 
1126 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1127 		return;
1128 
1129 	hif_latency_detect_timer_deinit(scn);
1130 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++)
1131 		qdf_clear_bit(i, scn->latency_detect.tasklet_bmap);
1132 }
1133 
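/**
 * hif_latency_detect_timer_start(): start the latency detection timer
 * @hif_ctx: hif context
 *
 * Starts the latency detection timer in mission mode if it is not
 * already running.
 *
 * Return: none
 */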
1134 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
1135 {
1136 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1137 
1138 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1139 		return;
1140 
1141 	hif_debug_rl("start timer");
1142 	if (scn->latency_detect.is_timer_started) {
1143 		hif_info("timer has been started");
1144 		return;
1145 	}
1146 
1147 	qdf_timer_start(&scn->latency_detect.timer,
1148 			scn->latency_detect.timeout);
1149 	scn->latency_detect.is_timer_started = true;
1150 }
1151 
1152 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
1153 {
1154 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1155 
1156 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1157 		return;
1158 
1159 	hif_debug_rl("stop timer");
1160 
1161 	qdf_timer_sync_cancel(&scn->latency_detect.timer);
1162 	scn->latency_detect.is_timer_started = false;
1163 }
1164 
1165 void hif_latency_detect_credit_record_time(
1166 	enum hif_credit_exchange_type type,
1167 	struct hif_opaque_softc *hif_ctx)
1168 {
1169 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1170 
1171 	if (!scn) {
1172 		hif_err("scn is null");
1173 		return;
1174 	}
1175 
1176 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1177 		return;
1178 
1179 	if (HIF_REQUEST_CREDIT == type)
1180 		scn->latency_detect.credit_request_time = qdf_system_ticks();
1181 	else if (HIF_PROCESS_CREDIT_REPORT == type)
1182 		scn->latency_detect.credit_report_time = qdf_system_ticks();
1183 
1184 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
1185 }
1186 
1187 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
1188 {
1189 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1190 
1191 	if (!scn) {
1192 		hif_err("scn is null");
1193 		return;
1194 	}
1195 
1196 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1197 		return;
1198 
1199 	scn->latency_detect.enable_detection = value;
1200 }
1201 #else
1202 static inline void hif_latency_detect_init(struct hif_softc *scn)
1203 {}
1204 
1205 static inline void hif_latency_detect_deinit(struct hif_softc *scn)
1206 {}
1207 #endif
1208 
1209 #ifdef WLAN_FEATURE_AFFINITY_MGR
1210 #define AFFINITY_THRESHOLD 5000000
1211 static inline void
1212 hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
1213 {
1214 	unsigned int cpus;
1215 	qdf_cpu_mask allowed_mask = {0};
1216 
1217 	scn->affinity_mgr_supported =
1218 		(cfg_get(psoc, CFG_IRQ_AFFINE_AUDIO_USE_CASE) &&
1219 		qdf_walt_get_cpus_taken_supported());
1220 
1221 	hif_info("Affinity Manager supported: %d", scn->affinity_mgr_supported);
1222 
1223 	if (!scn->affinity_mgr_supported)
1224 		return;
1225 
1226 	scn->time_threshold = AFFINITY_THRESHOLD;
1227 	qdf_for_each_possible_cpu(cpus)
1228 		if (qdf_topology_physical_package_id(cpus) ==
1229 			CPU_CLUSTER_TYPE_LITTLE)
1230 			qdf_cpumask_set_cpu(cpus, &allowed_mask);
1231 	qdf_cpumask_copy(&scn->allowed_mask, &allowed_mask);
1232 }
1233 #else
1234 static inline void
1235 hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
1236 {
1237 }
1238 #endif
1239 
1240 #ifdef FEATURE_DIRECT_LINK
1241 /**
1242  * hif_init_direct_link_rcv_pipe_num(): Initialize the direct link receive
1243  *  pipe number
1244  * @scn: hif context
1245  *
1246  * Return: None
1247  */
1248 static inline
1249 void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
1250 {
1251 	scn->dl_recv_pipe_num = INVALID_PIPE_NO;
1252 }
1253 #else
1254 static inline
1255 void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
1256 {
1257 }
1258 #endif
1259 
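/**
 * hif_open(): allocate and initialize a hif context
 * @qdf_ctx: qdf device context
 * @mode: driver mode (stored as hif_con_param)
 * @bus_type: underlying bus type
 * @cbk: driver state callbacks copied into the hif context
 * @psoc: psoc objmgr handle used to fetch ini configuration
 *
 * Allocates the bus-specific hif_softc, opens the bus, and initializes
 * the runtime PM lock, CPU hotplug handlers, latency detection and the
 * other hif subsystems set up in this function.
 *
 * Return: opaque hif handle on success, NULL on failure
 */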
1260 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1261 				  uint32_t mode,
1262 				  enum qdf_bus_type bus_type,
1263 				  struct hif_driver_state_callbacks *cbk,
1264 				  struct wlan_objmgr_psoc *psoc)
1265 {
1266 	struct hif_softc *scn;
1267 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1268 	int bus_context_size = hif_bus_get_context_size(bus_type);
1269 
1270 	if (bus_context_size == 0) {
1271 		hif_err("context size 0 not allowed");
1272 		return NULL;
1273 	}
1274 
1275 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
1276 	if (!scn)
1277 		return GET_HIF_OPAQUE_HDL(scn);
1278 
1279 	scn->qdf_dev = qdf_ctx;
1280 	scn->hif_con_param = mode;
1281 	qdf_atomic_init(&scn->active_tasklet_cnt);
1282 
1283 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
1284 	qdf_atomic_init(&scn->link_suspended);
1285 	qdf_atomic_init(&scn->tasklet_from_intr);
1286 	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
1287 	qdf_mem_copy(&scn->callbacks, cbk,
1288 		     sizeof(struct hif_driver_state_callbacks));
1289 	scn->bus_type  = bus_type;
1290 
1291 	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
1292 	hif_get_cfg_from_psoc(scn, psoc);
1293 
1294 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
1295 	status = hif_bus_open(scn, bus_type);
1296 	if (status != QDF_STATUS_SUCCESS) {
1297 		hif_err("hif_bus_open error = %d, bus_type = %d",
1298 			status, bus_type);
1299 		qdf_mem_free(scn);
1300 		scn = NULL;
1301 		goto out;
1302 	}
1303 
1304 	hif_rtpm_lock_init(scn);
1305 
1306 	hif_cpuhp_register(scn);
1307 	hif_latency_detect_init(scn);
1308 	hif_affinity_mgr_init(scn, psoc);
1309 	hif_init_direct_link_rcv_pipe_num(scn);
1310 	hif_ce_desc_history_log_register(scn);
1311 	hif_desc_history_log_register();
1312 
1313 out:
1314 	return GET_HIF_OPAQUE_HDL(scn);
1315 }
1316 
1317 #ifdef ADRASTEA_RRI_ON_DDR
1318 /**
1319  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
1320  * @scn: hif context
1321  *
1322  * Return: none
1323  */
1324 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
1325 {
1326 	if (scn->vaddr_rri_on_ddr)
1327 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1328 					RRI_ON_DDR_MEM_SIZE,
1329 					scn->vaddr_rri_on_ddr,
1330 					scn->paddr_rri_on_ddr, 0);
1331 	scn->vaddr_rri_on_ddr = NULL;
1332 }
1333 #endif
1334 
1335 /**
1336  * hif_close(): hif_close
1337  * @hif_ctx: hif_ctx
1338  *
1339  * Return: n/a
1340  */
1341 void hif_close(struct hif_opaque_softc *hif_ctx)
1342 {
1343 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1344 
1345 	if (!scn) {
1346 		hif_err("hif_opaque_softc is NULL");
1347 		return;
1348 	}
1349 
1350 	hif_desc_history_log_unregister();
1351 	hif_ce_desc_history_log_unregister();
1352 	hif_latency_detect_deinit(scn);
1353 
1354 	if (scn->athdiag_procfs_inited) {
1355 		athdiag_procfs_remove();
1356 		scn->athdiag_procfs_inited = false;
1357 	}
1358 
1359 	if (scn->target_info.hw_name) {
1360 		char *hw_name = scn->target_info.hw_name;
1361 
1362 		scn->target_info.hw_name = "ErrUnloading";
1363 		qdf_mem_free(hw_name);
1364 	}
1365 
1366 	hif_uninit_rri_on_ddr(scn);
1367 	hif_cleanup_static_buf_to_target(scn);
1368 	hif_cpuhp_unregister(scn);
1369 	hif_rtpm_lock_deinit(scn);
1370 
1371 	hif_bus_close(scn);
1372 
1373 	qdf_mem_free(scn);
1374 }
1375 
1376 /**
1377  * hif_get_num_active_grp_tasklets() - get the number of active
1378  *		datapath group tasklets pending to be completed.
1379  * @scn: HIF context
1380  *
1381  * Returns: the number of datapath group tasklets which are active
1382  */
1383 static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
1384 {
1385 	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
1386 }
1387 
1388 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1389 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1390 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1391 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1392 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1393 	defined(QCA_WIFI_QCN6432) || \
1394 	defined(QCA_WIFI_QCA9574)) || defined(QCA_WIFI_QCA5332)
1395 /**
1396  * hif_get_num_pending_work() - get the number of entries in
1397  *		the workqueue pending to be completed.
1398  * @scn: HIF context
1399  *
1400  * Returns: the number of pending register write work items
1401  */
1402 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1403 {
1404 	return hal_get_reg_write_pending_work(scn->hal_soc);
1405 }
1406 #elif defined(FEATURE_HIF_DELAYED_REG_WRITE)
1407 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1408 {
1409 	return qdf_atomic_read(&scn->active_work_cnt);
1410 }
1411 #else
1412 
1413 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1414 {
1415 	return 0;
1416 }
1417 #endif
1418 
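/**
 * hif_try_complete_tasks(): wait for pending hif tasks to drain
 * @scn: hif context
 *
 * Polls until all active tasklets, group tasklets and pending register
 * write work have completed, sleeping 10 ms between checks. Panics and
 * returns an error if the wait exceeds HIF_TASK_DRAIN_WAIT_CNT
 * iterations.
 *
 * Return: QDF_STATUS_SUCCESS once drained, QDF_STATUS_E_FAULT on timeout
 */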
1419 QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
1420 {
1421 	uint32_t task_drain_wait_cnt = 0;
1422 	int tasklet = 0, grp_tasklet = 0, work = 0;
1423 
1424 	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
1425 	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1426 	       (work = hif_get_num_pending_work(scn))) {
1427 		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1428 			hif_err("pending tasklets %d grp tasklets %d work %d",
1429 				tasklet, grp_tasklet, work);
1430 			QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: tasklets %d grp tasklets %d work %d",
1431 					HIF_TASK_DRAIN_WAIT_CNT * 10,
1432 					tasklet, grp_tasklet, work);
1433 			return QDF_STATUS_E_FAULT;
1434 		}
1435 		hif_info("waiting for tasklets %d grp tasklets %d work %d",
1436 			 tasklet, grp_tasklet, work);
1437 		msleep(10);
1438 	}
1439 
1440 	return QDF_STATUS_SUCCESS;
1441 }
1442 
1443 #ifdef HIF_HAL_REG_ACCESS_SUPPORT
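/**
 * hif_reg_window_write(): write a register through the HAL window
 * @scn: hif context
 * @offset: register offset
 * @value: value to write
 *
 * Wrapper around hal_write32_mb(); hif_reg_window_read() below is the
 * corresponding read wrapper.
 *
 * Return: none
 */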
1444 void hif_reg_window_write(struct hif_softc *scn, uint32_t offset,
1445 			  uint32_t value)
1446 {
1447 	hal_write32_mb(scn->hal_soc, offset, value);
1448 }
1449 
1450 uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset)
1451 {
1452 	return hal_read32_mb(scn->hal_soc, offset);
1453 }
1454 #endif
1455 
1456 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
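/**
 * hif_try_prevent_ep_vote_access(): disable EP vote access and drain work
 * @hif_ctx: hif opaque context
 *
 * Disables DP and normal EP vote access, waits for pending delayed
 * register write work to drain, and then waits for the PCI endpoint to
 * stop being awake, where the platform driver supports that query.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on timeout
 */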
1457 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1458 {
1459 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1460 	uint32_t work_drain_wait_cnt = 0;
1461 	uint32_t wait_cnt = 0;
1462 	int work = 0;
1463 
1464 	qdf_atomic_set(&scn->dp_ep_vote_access,
1465 		       HIF_EP_VOTE_ACCESS_DISABLE);
1466 	qdf_atomic_set(&scn->ep_vote_access,
1467 		       HIF_EP_VOTE_ACCESS_DISABLE);
1468 
1469 	while ((work = hif_get_num_pending_work(scn))) {
1470 		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1471 			qdf_atomic_set(&scn->dp_ep_vote_access,
1472 				       HIF_EP_VOTE_ACCESS_ENABLE);
1473 			qdf_atomic_set(&scn->ep_vote_access,
1474 				       HIF_EP_VOTE_ACCESS_ENABLE);
1475 			hif_err("timeout wait for pending work %d ", work);
1476 			return QDF_STATUS_E_FAULT;
1477 		}
1478 		qdf_sleep(10);
1479 	}
1480 
1481 	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
1482 		return QDF_STATUS_SUCCESS;
1483 
1484 	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1485 		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
1486 			hif_err("Release EP vote not processed by FW");
1487 			return QDF_STATUS_E_FAULT;
1488 		}
1489 		qdf_sleep(5);
1490 	}
1491 
1492 	return QDF_STATUS_SUCCESS;
1493 }
1494 
1495 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1496 {
1497 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1498 	uint8_t vote_access;
1499 
1500 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1501 
1502 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1503 		hif_info("EP vote changed from:%u to intermediate state",
1504 			 vote_access);
1505 
1506 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1507 		QDF_BUG(0);
1508 
1509 	qdf_atomic_set(&scn->ep_vote_access,
1510 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1511 }
1512 
1513 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1514 {
1515 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1516 
1517 	qdf_atomic_set(&scn->dp_ep_vote_access,
1518 		       HIF_EP_VOTE_ACCESS_ENABLE);
1519 	qdf_atomic_set(&scn->ep_vote_access,
1520 		       HIF_EP_VOTE_ACCESS_ENABLE);
1521 }
1522 
1523 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1524 			    uint8_t type, uint8_t access)
1525 {
1526 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1527 
1528 	if (type == HIF_EP_VOTE_DP_ACCESS)
1529 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1530 	else
1531 		qdf_atomic_set(&scn->ep_vote_access, access);
1532 }
1533 
1534 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1535 			       uint8_t type)
1536 {
1537 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1538 
1539 	if (type == HIF_EP_VOTE_DP_ACCESS)
1540 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1541 	else
1542 		return qdf_atomic_read(&scn->ep_vote_access);
1543 }
1544 #endif
1545 
1546 #ifdef FEATURE_HIF_DELAYED_REG_WRITE
1547 #ifdef MEMORY_DEBUG
1548 #define HIF_REG_WRITE_QUEUE_LEN 128
1549 #else
1550 #define HIF_REG_WRITE_QUEUE_LEN 32
1551 #endif
1552 
1553 /**
1554  * hif_print_reg_write_stats() - Print hif delayed reg write stats
1555  * @hif_ctx: hif opaque handle
1556  *
1557  * Return: None
1558  */
1559 void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
1560 {
1561 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1562 	struct CE_state *ce_state;
1563 	uint32_t *hist;
1564 	int i;
1565 
1566 	hist = scn->wstats.sched_delay;
1567 	hif_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
1568 		  qdf_atomic_read(&scn->wstats.enqueues),
1569 		  scn->wstats.dequeues,
1570 		  qdf_atomic_read(&scn->wstats.coalesces),
1571 		  qdf_atomic_read(&scn->wstats.direct),
1572 		  qdf_atomic_read(&scn->wstats.q_depth),
1573 		  scn->wstats.max_q_depth,
1574 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us],
1575 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us],
1576 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us],
1577 		  hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]);
1578 
1579 	for (i = 0; i < scn->ce_count; i++) {
1580 		ce_state = scn->ce_id_to_state[i];
1581 		if (!ce_state)
1582 			continue;
1583 
1584 		hif_debug("ce%d: enq %u deq %u coal %u direct %u",
1585 			  i, ce_state->wstats.enqueues,
1586 			  ce_state->wstats.dequeues,
1587 			  ce_state->wstats.coalesces,
1588 			  ce_state->wstats.direct);
1589 	}
1590 }
1591 
1592 /**
1593  * hif_is_reg_write_tput_level_high() - throughput level for delayed reg writes
1594  * @scn: hif_softc pointer
1595  *
1596  * Return: true if throughput is high, else false.
1597  */
1598 static inline bool hif_is_reg_write_tput_level_high(struct hif_softc *scn)
1599 {
1600 	int bw_level = hif_get_bandwidth_level(GET_HIF_OPAQUE_HDL(scn));
1601 
1602 	return (bw_level >= PLD_BUS_WIDTH_MEDIUM) ? true : false;
1603 }
1604 
1605 /**
1606  * hif_reg_write_fill_sched_delay_hist() - fill reg write delay histogram
1607  * @scn: hif_softc pointer
1608  * @delay_us: delay in us
1609  *
1610  * Return: None
1611  */
1612 static inline void hif_reg_write_fill_sched_delay_hist(struct hif_softc *scn,
1613 						       uint64_t delay_us)
1614 {
1615 	uint32_t *hist;
1616 
1617 	hist = scn->wstats.sched_delay;
1618 
1619 	if (delay_us < 100)
1620 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us]++;
1621 	else if (delay_us < 1000)
1622 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us]++;
1623 	else if (delay_us < 5000)
1624 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us]++;
1625 	else
1626 		hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]++;
1627 }
1628 
1629 /**
1630  * hif_process_reg_write_q_elem() - process a register write queue element
1631  * @scn: hif_softc pointer
1632  * @q_elem: pointer to hal register write queue element
1633  *
1634  * Return: The value which was written to the address
1635  */
1636 static int32_t
1637 hif_process_reg_write_q_elem(struct hif_softc *scn,
1638 			     struct hif_reg_write_q_elem *q_elem)
1639 {
1640 	struct CE_state *ce_state = q_elem->ce_state;
1641 	uint32_t write_val = -1;
1642 
1643 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
1644 
1645 	ce_state->reg_write_in_progress = false;
1646 	ce_state->wstats.dequeues++;
1647 
1648 	if (ce_state->src_ring) {
1649 		q_elem->dequeue_val = ce_state->src_ring->write_index;
1650 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
1651 			       ce_state->src_ring->write_index);
1652 		write_val = ce_state->src_ring->write_index;
1653 	} else if (ce_state->dest_ring) {
1654 		q_elem->dequeue_val = ce_state->dest_ring->write_index;
1655 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
1656 			       ce_state->dest_ring->write_index);
1657 		write_val = ce_state->dest_ring->write_index;
1658 	} else {
1659 		hif_debug("invalid reg write received");
1660 		qdf_assert(0);
1661 	}
1662 
1663 	q_elem->valid = 0;
1664 	ce_state->last_dequeue_time = q_elem->dequeue_time;
1665 
1666 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1667 
1668 	return write_val;
1669 }
1670 
1671 /**
1672  * hif_reg_write_work() - Worker to process delayed writes
1673  * @arg: hif_softc pointer
1674  *
1675  * Return: None
1676  */
1677 static void hif_reg_write_work(void *arg)
1678 {
1679 	struct hif_softc *scn = arg;
1680 	struct hif_reg_write_q_elem *q_elem;
1681 	uint32_t offset;
1682 	uint64_t delta_us;
1683 	int32_t q_depth, write_val;
1684 	uint32_t num_processed = 0;
1685 	int32_t ring_id;
1686 
1687 	q_elem = &scn->reg_write_queue[scn->read_idx];
1688 	q_elem->work_scheduled_time = qdf_get_log_timestamp();
1689 	q_elem->cpu_id = qdf_get_cpu();
1690 
1691 	/* Make sure q_elem is consistent in memory across cores */
1692 	qdf_rmb();
1693 	if (!q_elem->valid)
1694 		return;
1695 
1696 	q_depth = qdf_atomic_read(&scn->wstats.q_depth);
1697 	if (q_depth > scn->wstats.max_q_depth)
1698 		scn->wstats.max_q_depth =  q_depth;
1699 
1700 	if (hif_prevent_link_low_power_states(GET_HIF_OPAQUE_HDL(scn))) {
1701 		scn->wstats.prevent_l1_fails++;
1702 		return;
1703 	}
1704 
1705 	while (true) {
1706 		qdf_rmb();
1707 		if (!q_elem->valid)
1708 			break;
1709 
1710 		qdf_rmb();
1711 		q_elem->dequeue_time = qdf_get_log_timestamp();
1712 		ring_id = q_elem->ce_state->id;
1713 		offset = q_elem->offset;
1714 		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
1715 						      q_elem->enqueue_time);
1716 		hif_reg_write_fill_sched_delay_hist(scn, delta_us);
1717 
1718 		scn->wstats.dequeues++;
1719 		qdf_atomic_dec(&scn->wstats.q_depth);
1720 
1721 		write_val = hif_process_reg_write_q_elem(scn, q_elem);
1722 		hif_debug("read_idx %u ce_id %d offset 0x%x dequeue_val %d",
1723 			  scn->read_idx, ring_id, offset, write_val);
1724 
1725 		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
1726 					   q_elem->dequeue_val,
1727 					   q_elem->enqueue_time,
1728 					   q_elem->dequeue_time);
1729 		num_processed++;
1730 		scn->read_idx = (scn->read_idx + 1) &
1731 					(HIF_REG_WRITE_QUEUE_LEN - 1);
1732 		q_elem = &scn->reg_write_queue[scn->read_idx];
1733 	}
1734 
1735 	hif_allow_link_low_power_states(GET_HIF_OPAQUE_HDL(scn));
1736 
1737 	/*
1738 	 * Decrement active_work_cnt by the number of elements dequeued after
1739 	 * hif_allow_link_low_power_states.
1740 	 * This makes sure that hif_try_complete_tasks will wait till we make
1741 	 * the bus access in hif_allow_link_low_power_states. This will avoid
1742 	 * race condition between delayed register worker and bus suspend
1743 	 * (system suspend or runtime suspend).
1744 	 *
1745 	 * The following decrement should be done at the end!
1746 	 */
1747 	qdf_atomic_sub(num_processed, &scn->active_work_cnt);
1748 }
1749 
1750 /**
1751  * hif_delayed_reg_write_deinit() - De-Initialize delayed reg write processing
1752  * @scn: hif_softc pointer
1753  *
1754  * De-initialize main data structures to process register writes in a delayed
1755  * workqueue.
1756  *
1757  * Return: None
1758  */
1759 static void hif_delayed_reg_write_deinit(struct hif_softc *scn)
1760 {
1761 	qdf_flush_work(&scn->reg_write_work);
1762 	qdf_disable_work(&scn->reg_write_work);
1763 	qdf_flush_workqueue(0, scn->reg_write_wq);
1764 	qdf_destroy_workqueue(0, scn->reg_write_wq);
1765 	qdf_mem_free(scn->reg_write_queue);
1766 }
1767 
1768 /**
1769  * hif_delayed_reg_write_init() - Initialization function for delayed reg writes
1770  * @scn: hif_softc pointer
1771  *
1772  * Initialize main data structures to process register writes in a delayed
1773  * workqueue.
1774  */
1775 
1776 static QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
1777 {
1778 	qdf_atomic_init(&scn->active_work_cnt);
1779 	scn->reg_write_wq =
1780 		qdf_alloc_high_prior_ordered_workqueue("hif_register_write_wq");
1781 	qdf_create_work(0, &scn->reg_write_work, hif_reg_write_work, scn);
1782 	scn->reg_write_queue = qdf_mem_malloc(HIF_REG_WRITE_QUEUE_LEN *
1783 					      sizeof(*scn->reg_write_queue));
1784 	if (!scn->reg_write_queue) {
1785 		hif_err("unable to allocate memory for delayed reg write");
1786 		QDF_BUG(0);
1787 		return QDF_STATUS_E_NOMEM;
1788 	}
1789 
1790 	/* Initial value of indices */
1791 	scn->read_idx = 0;
1792 	qdf_atomic_set(&scn->write_idx, -1);
1793 
1794 	return QDF_STATUS_SUCCESS;
1795 }
1796 
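/**
 * hif_reg_write_enqueue(): queue a CE write index update for the worker
 * @scn: hif context
 * @ce_state: copy engine whose write index needs to be synced to HW
 * @value: value to write (the CE write index at enqueue time)
 *
 * Producer side of the delayed register write queue: coalesces updates
 * for a CE that already has a write in progress, otherwise claims a
 * slot in the queue, fills it, publishes it with memory barriers and
 * schedules hif_reg_write_work() to apply it.
 *
 * Return: none
 */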
1797 static void hif_reg_write_enqueue(struct hif_softc *scn,
1798 				  struct CE_state *ce_state,
1799 				  uint32_t value)
1800 {
1801 	struct hif_reg_write_q_elem *q_elem;
1802 	uint32_t write_idx;
1803 
1804 	if (ce_state->reg_write_in_progress) {
1805 		hif_debug("Already in progress ce_id %d offset 0x%x value %u",
1806 			  ce_state->id, ce_state->ce_wrt_idx_offset, value);
1807 		qdf_atomic_inc(&scn->wstats.coalesces);
1808 		ce_state->wstats.coalesces++;
1809 		return;
1810 	}
1811 
1812 	write_idx = qdf_atomic_inc_return(&scn->write_idx);
1813 	write_idx = write_idx & (HIF_REG_WRITE_QUEUE_LEN - 1);
1814 
1815 	q_elem = &scn->reg_write_queue[write_idx];
1816 	if (q_elem->valid) {
1817 		hif_err("queue full");
1818 		QDF_BUG(0);
1819 		return;
1820 	}
1821 
1822 	qdf_atomic_inc(&scn->wstats.enqueues);
1823 	ce_state->wstats.enqueues++;
1824 
1825 	qdf_atomic_inc(&scn->wstats.q_depth);
1826 
1827 	q_elem->ce_state = ce_state;
1828 	q_elem->offset = ce_state->ce_wrt_idx_offset;
1829 	q_elem->enqueue_val = value;
1830 	q_elem->enqueue_time = qdf_get_log_timestamp();
1831 
1832 	/*
1833 	 * Before the valid flag is set to true, all the other
1834 	 * fields in the q_elem need to be updated in memory.
1835 	 * Otherwise there is a chance that the dequeuing worker
1836 	 * thread reads stale entries and processes an incorrect srng.
1837 	 */
1838 	qdf_wmb();
1839 	q_elem->valid = true;
1840 
1841 	/*
1842 	 * After all other fields in the q_elem have been updated
1843 	 * in memory successfully, the valid flag needs to reach
1844 	 * memory in time as well.
1845 	 * Otherwise there is a chance that the dequeuing worker
1846 	 * thread reads a stale valid flag and the work is bypassed
1847 	 * for this round; if no other work is scheduled later, this
1848 	 * hal register write would never be performed.
1849 	 */
1850 	qdf_wmb();
1851 
1852 	ce_state->reg_write_in_progress  = true;
1853 	qdf_atomic_inc(&scn->active_work_cnt);
1854 
1855 	hif_debug("write_idx %u ce_id %d offset 0x%x value %u",
1856 		  write_idx, ce_state->id, ce_state->ce_wrt_idx_offset, value);
1857 
1858 	qdf_queue_work(scn->qdf_dev, scn->reg_write_wq,
1859 		       &scn->reg_write_work);
1860 }
1861 
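/**
 * hif_delayed_reg_write(): issue a CE register write, directly or delayed
 * @scn: hif context
 * @ctrl_addr: CE control address used to derive the copy engine id
 * @val: value (write index) to be written
 *
 * HTT data copy engines take the direct write path when throughput is
 * high or the MHI link is already in L0; all other cases go through
 * hif_reg_write_enqueue() so the write is applied by the delayed worker.
 *
 * Return: none
 */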
1862 void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
1863 			   uint32_t val)
1864 {
1865 	struct CE_state *ce_state;
1866 	int ce_id = COPY_ENGINE_ID(ctrl_addr);
1867 
1868 	ce_state = scn->ce_id_to_state[ce_id];
1869 
1870 	if (!ce_state->htt_tx_data && !ce_state->htt_rx_data) {
1871 		hif_reg_write_enqueue(scn, ce_state, val);
1872 		return;
1873 	}
1874 
1875 	if (hif_is_reg_write_tput_level_high(scn) ||
1876 	    (PLD_MHI_STATE_L0 == pld_get_mhi_state(scn->qdf_dev->dev))) {
1877 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset, val);
1878 		qdf_atomic_inc(&scn->wstats.direct);
1879 		ce_state->wstats.direct++;
1880 	} else {
1881 		hif_reg_write_enqueue(scn, ce_state, val);
1882 	}
1883 }
1884 #else
1885 static inline QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
1886 {
1887 	return QDF_STATUS_SUCCESS;
1888 }
1889 
1890 static inline void  hif_delayed_reg_write_deinit(struct hif_softc *scn)
1891 {
1892 }
1893 #endif
1894 
1895 #if defined(QCA_WIFI_WCN6450)
1896 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1897 {
1898 	scn->hal_soc = hal_attach(hif_softc_to_hif_opaque_softc(scn),
1899 				  scn->qdf_dev);
1900 	if (!scn->hal_soc)
1901 		return QDF_STATUS_E_FAILURE;
1902 
1903 	return QDF_STATUS_SUCCESS;
1904 }
1905 
1906 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1907 {
1908 	hal_detach(scn->hal_soc);
1909 	scn->hal_soc = NULL;
1910 
1911 	return QDF_STATUS_SUCCESS;
1912 }
1913 #elif (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1914 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1915 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1916 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1917 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1918 	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
1919 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1920 {
1921 	if (ce_srng_based(scn)) {
1922 		scn->hal_soc = hal_attach(
1923 					hif_softc_to_hif_opaque_softc(scn),
1924 					scn->qdf_dev);
1925 		if (!scn->hal_soc)
1926 			return QDF_STATUS_E_FAILURE;
1927 	}
1928 
1929 	return QDF_STATUS_SUCCESS;
1930 }
1931 
1932 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1933 {
1934 	if (ce_srng_based(scn)) {
1935 		hal_detach(scn->hal_soc);
1936 		scn->hal_soc = NULL;
1937 	}
1938 
1939 	return QDF_STATUS_SUCCESS;
1940 }
1941 #else
1942 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1943 {
1944 	return QDF_STATUS_SUCCESS;
1945 }
1946 
1947 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1948 {
1949 	return QDF_STATUS_SUCCESS;
1950 }
1951 #endif
1952 
1953 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1954 {
1955 	int ret;
1956 
1957 	switch (bus_type) {
1958 	case QDF_BUS_TYPE_IPCI:
1959 		ret = qdf_set_dma_coherent_mask(dev,
1960 						DMA_COHERENT_MASK_DEFAULT);
1961 		if (ret) {
1962 			hif_err("Failed to set dma mask error = %d", ret);
1963 			return ret;
1964 		}
1965 
1966 		break;
1967 	default:
1968 		/* Follow the existing sequence for other targets */
1969 		break;
1970 	}
1971 
1972 	return 0;
1973 }
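/*
 * Illustrative usage (hypothetical caller, not part of this file): a bus
 * probe path would typically set the DMA mask before making any coherent
 * DMA allocations, e.g.:
 *
 *	if (hif_init_dma_mask(dev, QDF_BUS_TYPE_IPCI))
 *		return QDF_STATUS_E_FAILURE;
 */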
1974 
1975 /**
1976  * hif_enable(): enable the HIF layer (bus, HAL and target configuration)
1977  * @hif_ctx: HIF context
1978  * @dev: device structure
1979  * @bdev: bus-specific device structure
1980  * @bid: bus ID
1981  * @bus_type: bus type
1982  * @type: enable type
1983  *
1984  * Return: QDF_STATUS
1985  */
1986 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1987 					  void *bdev,
1988 					  const struct hif_bus_id *bid,
1989 					  enum qdf_bus_type bus_type,
1990 					  enum hif_enable_type type)
1991 {
1992 	QDF_STATUS status;
1993 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1994 
1995 	if (!scn) {
1996 		hif_err("hif_ctx = NULL");
1997 		return QDF_STATUS_E_NULL_VALUE;
1998 	}
1999 
2000 	status = hif_enable_bus(scn, dev, bdev, bid, type);
2001 	if (status != QDF_STATUS_SUCCESS) {
2002 		hif_err("hif_enable_bus error = %d", status);
2003 		return status;
2004 	}
2005 
2006 	status = hif_hal_attach(scn);
2007 	if (status != QDF_STATUS_SUCCESS) {
2008 		hif_err("hal attach failed");
2009 		goto disable_bus;
2010 	}
2011 
2012 	if (hif_delayed_reg_write_init(scn) != QDF_STATUS_SUCCESS) {
2013 		hif_err("unable to initialize delayed reg write");
2014 		goto hal_detach;
2015 	}
2016 
2017 	if (hif_bus_configure(scn)) {
2018 		hif_err("Target probe failed");
2019 		status = QDF_STATUS_E_FAILURE;
2020 		goto hal_detach;
2021 	}
2022 
2023 	hif_ut_suspend_init(scn);
2024 	hif_register_recovery_notifier(scn);
2025 	hif_latency_detect_timer_start(hif_ctx);
2026 
2027 	/*
2028 	 * Flag to avoid a potential access to unallocated memory from the
2029 	 * MSI interrupt handler, which could get scheduled as soon as MSI
2030 	 * is enabled, i.e. to take care of the race caused by MSI being
2031 	 * enabled before the memory used by the interrupt handlers is
2032 	 * allocated.
2033 	 */
2034 
2035 	scn->hif_init_done = true;
2036 
2037 	hif_debug("OK");
2038 
2039 	return QDF_STATUS_SUCCESS;
2040 
2041 hal_detach:
2042 	hif_hal_detach(scn);
2043 disable_bus:
2044 	hif_disable_bus(scn);
2045 	return status;
2046 }
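/*
 * Illustrative usage (hypothetical caller): hif_enable() and hif_disable()
 * are expected to be used as a pair by the attach/detach path. Assuming the
 * hif_enable_type/hif_disable_type enumerators declared in hif.h (e.g.
 * HIF_ENABLE_TYPE_PROBE, HIF_DISABLE_TYPE_REMOVE):
 *
 *	status = hif_enable(hif_ctx, dev, bdev, bid, bus_type,
 *			    HIF_ENABLE_TYPE_PROBE);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *	...
 *	hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
 */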
2047 
2048 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
2049 {
2050 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2051 
2052 	if (!scn)
2053 		return;
2054 
2055 	hif_delayed_reg_write_deinit(scn);
2056 	hif_set_enable_detection(hif_ctx, false);
2057 	hif_latency_detect_timer_stop(hif_ctx);
2058 
2059 	hif_unregister_recovery_notifier(scn);
2060 
2061 	hif_nointrs(scn);
2062 	if (scn->hif_init_done == false)
2063 		hif_shutdown_device(hif_ctx);
2064 	else
2065 		hif_stop(hif_ctx);
2066 
2067 	hif_hal_detach(scn);
2068 
2069 	hif_disable_bus(scn);
2070 
2071 	hif_wlan_disable(scn);
2072 
2073 	scn->notice_send = false;
2074 
2075 	hif_debug("X");
2076 }
2077 
2078 #ifdef CE_TASKLET_DEBUG_ENABLE
2079 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
2080 {
2081 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2082 
2083 	if (!scn)
2084 		return;
2085 
2086 	scn->ce_latency_stats = val;
2087 }
2088 #endif
2089 
2090 void hif_display_stats(struct hif_opaque_softc *hif_ctx)
2091 {
2092 	hif_display_bus_stats(hif_ctx);
2093 }
2094 
2095 qdf_export_symbol(hif_display_stats);
2096 
2097 void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
2098 {
2099 	hif_clear_bus_stats(hif_ctx);
2100 }
2101 
2102 /**
2103  * hif_crash_shutdown_dump_bus_register() - dump bus registers
2104  * @hif_ctx: hif_ctx
2105  *
2106  * Return: n/a
2107  */
2108 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
2109 
2110 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
2111 {
2112 	struct hif_opaque_softc *scn = hif_ctx;
2113 
2114 	if (hif_check_soc_status(scn))
2115 		return;
2116 
2117 	if (hif_dump_registers(scn))
2118 		hif_err("Failed to dump bus registers!");
2119 }
2120 
2121 /**
2122  * hif_crash_shutdown(): dump bus registers and collect the target RAM dump
2123  * @hif_ctx: HIF context
2124  *
2125  * This function is called by the platform driver to dump CE registers
2126  * and collect a target RAM dump after a crash.
2127  *
2128  * Return: n/a
2129  */
2130 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
2131 {
2132 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2133 
2134 	if (!hif_ctx)
2135 		return;
2136 
2137 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
2138 		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
2139 		return;
2140 	}
2141 
2142 	if (TARGET_STATUS_RESET == scn->target_status) {
2143 		hif_warn("Target is already asserted, ignore!");
2144 		return;
2145 	}
2146 
2147 	if (hif_is_load_or_unload_in_progress(scn)) {
2148 		hif_err("Load/unload is in progress, ignore!");
2149 		return;
2150 	}
2151 
2152 	hif_crash_shutdown_dump_bus_register(hif_ctx);
2153 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
2154 
2155 	if (ol_copy_ramdump(hif_ctx))
2156 		goto out;
2157 
2158 	hif_info("RAM dump collection completed!");
2159 
2160 out:
2161 	return;
2162 }
2163 #else
2164 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
2165 {
2166 	hif_debug("Collecting target RAM dump disabled");
2167 }
2168 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
2169 
2170 #ifdef QCA_WIFI_3_0
2171 /**
2172  * hif_check_fw_reg(): hif_check_fw_reg
2173  * @scn: scn
2174  *
2175  * Return: int
2176  */
2177 int hif_check_fw_reg(struct hif_opaque_softc *scn)
2178 {
2179 	return 0;
2180 }
2181 #endif
2182 
2183 /**
2184  * hif_read_phy_mem_base(): hif_read_phy_mem_base
2185  * @scn: scn
2186  * @phy_mem_base: physical mem base
2187  *
2188  * Return: n/a
2189  */
2190 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
2191 {
2192 	*phy_mem_base = scn->mem_pa;
2193 }
2194 qdf_export_symbol(hif_read_phy_mem_base);
2195 
2196 /**
2197  * hif_get_device_type(): hif_get_device_type
2198  * @device_id: device_id
2199  * @revision_id: revision_id
2200  * @hif_type: returned hif_type
2201  * @target_type: returned target_type
2202  *
2203  * Return: int
2204  */
2205 int hif_get_device_type(uint32_t device_id,
2206 			uint32_t revision_id,
2207 			uint32_t *hif_type, uint32_t *target_type)
2208 {
2209 	int ret = 0;
2210 
2211 	switch (device_id) {
2212 	case ADRASTEA_DEVICE_ID_P2_E12:
2213 
2214 		*hif_type = HIF_TYPE_ADRASTEA;
2215 		*target_type = TARGET_TYPE_ADRASTEA;
2216 		break;
2217 
2218 	case AR9888_DEVICE_ID:
2219 		*hif_type = HIF_TYPE_AR9888;
2220 		*target_type = TARGET_TYPE_AR9888;
2221 		break;
2222 
2223 	case AR6320_DEVICE_ID:
2224 		switch (revision_id) {
2225 		case AR6320_FW_1_1:
2226 		case AR6320_FW_1_3:
2227 			*hif_type = HIF_TYPE_AR6320;
2228 			*target_type = TARGET_TYPE_AR6320;
2229 			break;
2230 
2231 		case AR6320_FW_2_0:
2232 		case AR6320_FW_3_0:
2233 		case AR6320_FW_3_2:
2234 			*hif_type = HIF_TYPE_AR6320V2;
2235 			*target_type = TARGET_TYPE_AR6320V2;
2236 			break;
2237 
2238 		default:
2239 			hif_err("dev_id = 0x%x, rev_id = 0x%x",
2240 				device_id, revision_id);
2241 			ret = -ENODEV;
2242 			goto end;
2243 		}
2244 		break;
2245 
2246 	case AR9887_DEVICE_ID:
2247 		*hif_type = HIF_TYPE_AR9888;
2248 		*target_type = TARGET_TYPE_AR9888;
2249 		hif_info(" *********** AR9887 **************");
2250 		break;
2251 
2252 	case QCA9984_DEVICE_ID:
2253 		*hif_type = HIF_TYPE_QCA9984;
2254 		*target_type = TARGET_TYPE_QCA9984;
2255 		hif_info(" *********** QCA9984 *************");
2256 		break;
2257 
2258 	case QCA9888_DEVICE_ID:
2259 		*hif_type = HIF_TYPE_QCA9888;
2260 		*target_type = TARGET_TYPE_QCA9888;
2261 		hif_info(" *********** QCA9888 *************");
2262 		break;
2263 
2264 	case AR900B_DEVICE_ID:
2265 		*hif_type = HIF_TYPE_AR900B;
2266 		*target_type = TARGET_TYPE_AR900B;
2267 		hif_info(" *********** AR900B *************");
2268 		break;
2269 
2270 	case QCA8074_DEVICE_ID:
2271 		*hif_type = HIF_TYPE_QCA8074;
2272 		*target_type = TARGET_TYPE_QCA8074;
2273 		hif_info(" *********** QCA8074  *************");
2274 		break;
2275 
2276 	case QCA6290_EMULATION_DEVICE_ID:
2277 	case QCA6290_DEVICE_ID:
2278 		*hif_type = HIF_TYPE_QCA6290;
2279 		*target_type = TARGET_TYPE_QCA6290;
2280 		hif_info(" *********** QCA6290EMU *************");
2281 		break;
2282 
2283 	case QCN9000_DEVICE_ID:
2284 		*hif_type = HIF_TYPE_QCN9000;
2285 		*target_type = TARGET_TYPE_QCN9000;
2286 		hif_info(" *********** QCN9000 *************");
2287 		break;
2288 
2289 	case QCN9224_DEVICE_ID:
2290 		*hif_type = HIF_TYPE_QCN9224;
2291 		*target_type = TARGET_TYPE_QCN9224;
2292 		hif_info(" *********** QCN9224 *************");
2293 		break;
2294 
2295 	case QCN6122_DEVICE_ID:
2296 		*hif_type = HIF_TYPE_QCN6122;
2297 		*target_type = TARGET_TYPE_QCN6122;
2298 		hif_info(" *********** QCN6122 *************");
2299 		break;
2300 
2301 	case QCN9160_DEVICE_ID:
2302 		*hif_type = HIF_TYPE_QCN9160;
2303 		*target_type = TARGET_TYPE_QCN9160;
2304 		hif_info(" *********** QCN9160 *************");
2305 		break;
2306 
2307 	case QCN6432_DEVICE_ID:
2308 		*hif_type = HIF_TYPE_QCN6432;
2309 		*target_type = TARGET_TYPE_QCN6432;
2310 		hif_info(" *********** QCN6432 *************");
2311 		break;
2312 
2313 	case QCN7605_DEVICE_ID:
2314 	case QCN7605_COMPOSITE:
2315 	case QCN7605_STANDALONE:
2316 	case QCN7605_STANDALONE_V2:
2317 	case QCN7605_COMPOSITE_V2:
2318 		*hif_type = HIF_TYPE_QCN7605;
2319 		*target_type = TARGET_TYPE_QCN7605;
2320 		hif_info(" *********** QCN7605 *************");
2321 		break;
2322 
2323 	case QCA6390_DEVICE_ID:
2324 	case QCA6390_EMULATION_DEVICE_ID:
2325 		*hif_type = HIF_TYPE_QCA6390;
2326 		*target_type = TARGET_TYPE_QCA6390;
2327 		hif_info(" *********** QCA6390 *************");
2328 		break;
2329 
2330 	case QCA6490_DEVICE_ID:
2331 	case QCA6490_EMULATION_DEVICE_ID:
2332 		*hif_type = HIF_TYPE_QCA6490;
2333 		*target_type = TARGET_TYPE_QCA6490;
2334 		hif_info(" *********** QCA6490 *************");
2335 		break;
2336 
2337 	case QCA6750_DEVICE_ID:
2338 	case QCA6750_EMULATION_DEVICE_ID:
2339 		*hif_type = HIF_TYPE_QCA6750;
2340 		*target_type = TARGET_TYPE_QCA6750;
2341 		hif_info(" *********** QCA6750 *************");
2342 		break;
2343 
2344 	case KIWI_DEVICE_ID:
2345 		*hif_type = HIF_TYPE_KIWI;
2346 		*target_type = TARGET_TYPE_KIWI;
2347 		hif_info(" *********** KIWI *************");
2348 		break;
2349 
2350 	case MANGO_DEVICE_ID:
2351 		*hif_type = HIF_TYPE_MANGO;
2352 		*target_type = TARGET_TYPE_MANGO;
2353 		hif_info(" *********** MANGO *************");
2354 		break;
2355 
2356 	case PEACH_DEVICE_ID:
2357 		*hif_type = HIF_TYPE_PEACH;
2358 		*target_type = TARGET_TYPE_PEACH;
2359 		hif_info(" *********** PEACH *************");
2360 		break;
2361 
2362 	case QCA8074V2_DEVICE_ID:
2363 		*hif_type = HIF_TYPE_QCA8074V2;
2364 		*target_type = TARGET_TYPE_QCA8074V2;
2365 		hif_info(" *********** QCA8074V2 *************");
2366 		break;
2367 
2368 	case QCA6018_DEVICE_ID:
2369 	case RUMIM2M_DEVICE_ID_NODE0:
2370 	case RUMIM2M_DEVICE_ID_NODE1:
2371 	case RUMIM2M_DEVICE_ID_NODE2:
2372 	case RUMIM2M_DEVICE_ID_NODE3:
2373 	case RUMIM2M_DEVICE_ID_NODE4:
2374 	case RUMIM2M_DEVICE_ID_NODE5:
2375 		*hif_type = HIF_TYPE_QCA6018;
2376 		*target_type = TARGET_TYPE_QCA6018;
2377 		hif_info(" *********** QCA6018 *************");
2378 		break;
2379 
2380 	case QCA5018_DEVICE_ID:
2381 		*hif_type = HIF_TYPE_QCA5018;
2382 		*target_type = TARGET_TYPE_QCA5018;
2383 		hif_info(" *********** QCA5018 *************");
2384 		break;
2385 
2386 	case QCA5332_DEVICE_ID:
2387 		*hif_type = HIF_TYPE_QCA5332;
2388 		*target_type = TARGET_TYPE_QCA5332;
2389 		hif_info(" *********** QCA5332 *************");
2390 		break;
2391 
2392 	case QCA9574_DEVICE_ID:
2393 		*hif_type = HIF_TYPE_QCA9574;
2394 		*target_type = TARGET_TYPE_QCA9574;
2395 		hif_info(" *********** QCA9574 *************");
2396 		break;
2397 
2398 	case WCN6450_DEVICE_ID:
2399 		*hif_type = HIF_TYPE_WCN6450;
2400 		*target_type = TARGET_TYPE_WCN6450;
2401 		hif_info(" *********** WCN6450 *************");
2402 		break;
2403 
2404 	default:
2405 		hif_err("Unsupported device ID = 0x%x!", device_id);
2406 		ret = -ENODEV;
2407 		break;
2408 	}
2409 
2410 	if (*target_type == TARGET_TYPE_UNKNOWN) {
2411 		hif_err("Unsupported target_type!");
2412 		ret = -ENODEV;
2413 	}
2414 end:
2415 	return ret;
2416 }
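/*
 * Illustrative usage (hypothetical caller): a bus probe path resolves the
 * device and revision IDs into HIF and target types before attaching, e.g.:
 *
 *	uint32_t hif_type = 0, target_type = TARGET_TYPE_UNKNOWN;
 *
 *	if (hif_get_device_type(device_id, revision_id,
 *				&hif_type, &target_type))
 *		return -ENODEV;
 */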
2417 
2418 /**
2419  * hif_get_bus_type() - return the bus type
2420  * @hif_hdl: HIF Context
2421  *
2422  * Return: enum qdf_bus_type
2423  */
2424 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
2425 {
2426 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2427 
2428 	return scn->bus_type;
2429 }
2430 
2431 /*
2432  * Target info and ini parameters are global to the driver.
2433  * Hence these structures are exposed to all the modules in
2434  * the driver; they do not need to maintain multiple copies
2435  * of the same info. Instead, they get the handle from hif
2436  * and modify it in hif.
2437  */
2438 
2439 /**
2440  * hif_get_ini_handle() - API to get hif_config_param handle
2441  * @hif_ctx: HIF Context
2442  *
2443  * Return: pointer to hif_config_info
2444  */
2445 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
2446 {
2447 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
2448 
2449 	return &sc->hif_config;
2450 }
2451 
2452 /**
2453  * hif_get_target_info_handle() - API to get hif_target_info handle
2454  * @hif_ctx: HIF context
2455  *
2456  * Return: Pointer to hif_target_info
2457  */
2458 struct hif_target_info *hif_get_target_info_handle(
2459 					struct hif_opaque_softc *hif_ctx)
2460 {
2461 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
2462 
2463 	return &sc->target_info;
2464 
2465 }
2466 qdf_export_symbol(hif_get_target_info_handle);
2467 
2468 #ifdef RECEIVE_OFFLOAD
2469 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
2470 				 void (offld_flush_handler)(void *))
2471 {
2472 	if (hif_napi_enabled(scn, -1))
2473 		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
2474 	else
2475 		hif_err("NAPI not enabled");
2476 }
2477 qdf_export_symbol(hif_offld_flush_cb_register);
2478 
2479 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
2480 {
2481 	if (hif_napi_enabled(scn, -1))
2482 		hif_napi_rx_offld_flush_cb_deregister(scn);
2483 	else
2484 		hif_err("NAPI not enabled");
2485 }
2486 qdf_export_symbol(hif_offld_flush_cb_deregister);
2487 
2488 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
2489 {
2490 	if (hif_napi_enabled(hif_hdl, -1))
2491 		return NAPI_PIPE2ID(ctx_id);
2492 	else
2493 		return ctx_id;
2494 }
2495 #else /* RECEIVE_OFFLOAD */
2496 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
2497 {
2498 	return 0;
2499 }
2500 qdf_export_symbol(hif_get_rx_ctx_id);
2501 #endif /* RECEIVE_OFFLOAD */
2502 
2503 #if defined(FEATURE_LRO)
2504 
2505 /**
2506  * hif_get_lro_info - Returns LRO instance for instance ID
2507  * @ctx_id: LRO instance ID
2508  * @hif_hdl: HIF Context
2509  *
2510  * Return: Pointer to LRO instance.
2511  */
2512 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
2513 {
2514 	void *data;
2515 
2516 	if (hif_napi_enabled(hif_hdl, -1))
2517 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
2518 	else
2519 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
2520 
2521 	return data;
2522 }
2523 #endif
2524 
2525 /**
2526  * hif_get_target_status - API to get target status
2527  * @hif_ctx: HIF Context
2528  *
2529  * Return: enum hif_target_status
2530  */
2531 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
2532 {
2533 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2534 
2535 	return scn->target_status;
2536 }
2537 qdf_export_symbol(hif_get_target_status);
2538 
2539 /**
2540  * hif_set_target_status() - API to set target status
2541  * @hif_ctx: HIF Context
2542  * @status: Target Status
2543  *
2544  * Return: void
2545  */
2546 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
2547 			   hif_target_status status)
2548 {
2549 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2550 
2551 	scn->target_status = status;
2552 }
2553 
2554 /**
2555  * hif_init_ini_config() - API to initialize HIF configuration parameters
2556  * @hif_ctx: HIF Context
2557  * @cfg: HIF Configuration
2558  *
2559  * Return: void
2560  */
2561 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
2562 			 struct hif_config_info *cfg)
2563 {
2564 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2565 
2566 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
2567 }
2568 
2569 /**
2570  * hif_get_conparam() - API to get driver mode in HIF
2571  * @scn: HIF Context
2572  *
2573  * Return: driver mode of operation
2574  */
2575 uint32_t hif_get_conparam(struct hif_softc *scn)
2576 {
2577 	if (!scn)
2578 		return 0;
2579 
2580 	return scn->hif_con_param;
2581 }
2582 
2583 /**
2584  * hif_get_callbacks_handle() - API to get callbacks Handle
2585  * @scn: HIF Context
2586  *
2587  * Return: pointer to HIF Callbacks
2588  */
2589 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
2590 							struct hif_softc *scn)
2591 {
2592 	return &scn->callbacks;
2593 }
2594 
2595 /**
2596  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
2597  * @scn: HIF Context
2598  *
2599  * Return: True/False
2600  */
2601 bool hif_is_driver_unloading(struct hif_softc *scn)
2602 {
2603 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2604 
2605 	if (cbk && cbk->is_driver_unloading)
2606 		return cbk->is_driver_unloading(cbk->context);
2607 
2608 	return false;
2609 }
2610 
2611 /**
2612  * hif_is_load_or_unload_in_progress() - API to query upper layers if
2613  * load/unload in progress
2614  * @scn: HIF Context
2615  *
2616  * Return: True/False
2617  */
2618 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
2619 {
2620 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2621 
2622 	if (cbk && cbk->is_load_unload_in_progress)
2623 		return cbk->is_load_unload_in_progress(cbk->context);
2624 
2625 	return false;
2626 }
2627 
2628 /**
2629  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
2630  * progress
2631  * @scn: HIF Context
2632  *
2633  * Return: True/False
2634  */
2635 bool hif_is_recovery_in_progress(struct hif_softc *scn)
2636 {
2637 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2638 
2639 	if (cbk && cbk->is_recovery_in_progress)
2640 		return cbk->is_recovery_in_progress(cbk->context);
2641 
2642 	return false;
2643 }
2644 
2645 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
2646     defined(HIF_IPCI)
2647 
2648 /**
2649  * hif_update_pipe_callback() - API to register pipe specific callbacks
2650  * @osc: Opaque softc
2651  * @pipeid: pipe id
2652  * @callbacks: callbacks to register
2653  *
2654  * Return: void
2655  */
2657 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
2658 					u_int8_t pipeid,
2659 					struct hif_msg_callbacks *callbacks)
2660 {
2661 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
2662 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2663 	struct HIF_CE_pipe_info *pipe_info;
2664 
2665 	QDF_BUG(pipeid < CE_COUNT_MAX);
2666 
2667 	hif_debug("pipeid: %d", pipeid);
2668 
2669 	pipe_info = &hif_state->pipe_info[pipeid];
2670 
2671 	qdf_mem_copy(&pipe_info->pipe_callbacks,
2672 			callbacks, sizeof(pipe_info->pipe_callbacks));
2673 }
2674 qdf_export_symbol(hif_update_pipe_callback);
2675 
2676 /**
2677  * hif_is_target_ready() - API to query if the target is in the ready state
2679  * @scn: HIF Context
2680  *
2681  * Return: True/False
2682  */
2683 bool hif_is_target_ready(struct hif_softc *scn)
2684 {
2685 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2686 
2687 	if (cbk && cbk->is_target_ready)
2688 		return cbk->is_target_ready(cbk->context);
2689 	/*
2690 	 * If the callback is not registered, there is no way to determine
2691 	 * whether the target is ready. In such a case return true to
2692 	 * indicate that the target is ready.
2693 	 */
2694 	return true;
2695 }
2696 qdf_export_symbol(hif_is_target_ready);
2697 
2698 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
2699 {
2700 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
2701 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2702 
2703 	if (cbk && cbk->get_bandwidth_level)
2704 		return cbk->get_bandwidth_level(cbk->context);
2705 
2706 	return 0;
2707 }
2708 
2709 qdf_export_symbol(hif_get_bandwidth_level);
2710 
2711 #ifdef DP_MEM_PRE_ALLOC
2712 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
2713 					 qdf_size_t size,
2714 					 qdf_dma_addr_t *paddr,
2715 					 uint32_t ring_type,
2716 					 uint8_t *is_mem_prealloc)
2717 {
2718 	void *vaddr = NULL;
2719 	struct hif_driver_state_callbacks *cbk =
2720 				hif_get_callbacks_handle(scn);
2721 
2722 	*is_mem_prealloc = false;
2723 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
2724 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
2725 								   paddr,
2726 								   ring_type);
2727 		if (vaddr) {
2728 			*is_mem_prealloc = true;
2729 			goto end;
2730 		}
2731 	}
2732 
2733 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
2734 					 scn->qdf_dev->dev,
2735 					 size,
2736 					 paddr);
2737 end:
2738 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
2739 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
2740 		(void *)*paddr, (int)size, ring_type);
2741 
2742 	return vaddr;
2743 }
2744 
2745 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
2746 				       qdf_size_t size,
2747 				       void *vaddr,
2748 				       qdf_dma_addr_t paddr,
2749 				       qdf_dma_context_t memctx,
2750 				       uint8_t is_mem_prealloc)
2751 {
2752 	struct hif_driver_state_callbacks *cbk =
2753 				hif_get_callbacks_handle(scn);
2754 
2755 	if (is_mem_prealloc) {
2756 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
2757 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
2758 		} else {
2759 			dp_warn("prealloc_put_consistent_mem_unaligned callback is NULL");
2760 			QDF_BUG(0);
2761 		}
2762 	} else {
2763 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2764 					size, vaddr, paddr, memctx);
2765 	}
2766 }
2767 
2768 void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
2769 				  qdf_size_t elem_size, uint16_t elem_num,
2770 				  struct qdf_mem_multi_page_t *pages,
2771 				  bool cacheable)
2772 {
2773 	struct hif_driver_state_callbacks *cbk =
2774 			hif_get_callbacks_handle(scn);
2775 
2776 	if (cbk && cbk->prealloc_get_multi_pages)
2777 		cbk->prealloc_get_multi_pages(desc_type, elem_size, elem_num,
2778 					      pages, cacheable);
2779 
2780 	if (!pages->num_pages)
2781 		qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
2782 					  elem_size, elem_num, 0, cacheable);
2783 }
2784 
2785 void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
2786 				  struct qdf_mem_multi_page_t *pages,
2787 				  bool cacheable)
2788 {
2789 	struct hif_driver_state_callbacks *cbk =
2790 			hif_get_callbacks_handle(scn);
2791 
2792 	if (cbk && cbk->prealloc_put_multi_pages &&
2793 	    pages->is_mem_prealloc)
2794 		cbk->prealloc_put_multi_pages(desc_type, pages);
2795 
2796 	if (!pages->is_mem_prealloc)
2797 		qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
2798 					 cacheable);
2799 }
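/*
 * Illustrative usage (hypothetical caller): a descriptor pool obtained via
 * hif_prealloc_get_multi_pages() should be released with the matching put
 * call so that pre-allocated pages return to the pool instead of being
 * freed, e.g.:
 *
 *	struct qdf_mem_multi_page_t pages = {0};
 *
 *	hif_prealloc_get_multi_pages(scn, desc_type, elem_size, elem_num,
 *				     &pages, true);
 *	...
 *	hif_prealloc_put_multi_pages(scn, desc_type, &pages, true);
 */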
2800 #endif
2801 
2802 /**
2803  * hif_batch_send() - API to access hif specific function
2804  * ce_batch_send.
2805  * @osc: HIF Context
2806  * @msdu: list of msdus to be sent
2807  * @transfer_id: transfer id
2808  * @len: downloaded length
2809  * @sendhead:
2810  *
2811  * Return: list of msdus not sent
2812  */
2813 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2814 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
2815 {
2816 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2817 
2818 	if (!ce_tx_hdl)
2819 		return NULL;
2820 
2821 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2822 			len, sendhead);
2823 }
2824 qdf_export_symbol(hif_batch_send);
2825 
2826 /**
2827  * hif_update_tx_ring() - API to access hif specific function
2828  * ce_update_tx_ring.
2829  * @osc: HIF Context
2830  * @num_htt_cmpls: number of htt compl received.
2831  *
2832  * Return: void
2833  */
2834 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2835 {
2836 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2837 
2838 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2839 }
2840 qdf_export_symbol(hif_update_tx_ring);
2841 
2842 
2843 /**
2844  * hif_send_single() - API to access hif specific function
2845  * ce_send_single.
2846  * @osc: HIF Context
2847  * @msdu: msdu to be sent
2848  * @transfer_id: transfer id
2849  * @len: downloaded length
2850  *
2851  * Return: msdu sent status
2852  */
2853 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2854 			   uint32_t transfer_id, u_int32_t len)
2855 {
2856 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2857 
2858 	if (!ce_tx_hdl)
2859 		return QDF_STATUS_E_NULL_VALUE;
2860 
2861 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2862 			len);
2863 }
2864 qdf_export_symbol(hif_send_single);
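/*
 * Illustrative usage (hypothetical caller): sending a single message through
 * the HTT TX CE and dropping it if the send fails, e.g.:
 *
 *	status = hif_send_single(osc, msdu, transfer_id, qdf_nbuf_len(msdu));
 *	if (QDF_IS_STATUS_ERROR(status))
 *		qdf_nbuf_free(msdu);
 */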
2865 #endif
2866 
2867 /**
2868  * hif_reg_write() - API to access hif specific function
2869  * hif_write32_mb.
2870  * @hif_ctx: HIF Context
2871  * @offset: offset to which the value is written
2872  * @value: value to be written
2873  *
2874  * Return: None
2875  */
2876 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2877 		uint32_t value)
2878 {
2879 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2880 
2881 	hif_write32_mb(scn, scn->mem + offset, value);
2883 }
2884 qdf_export_symbol(hif_reg_write);
2885 
2886 /**
2887  * hif_reg_read() - API to access hif specific function
2888  * hif_read32_mb.
2889  * @hif_ctx: HIF Context
2890  * @offset: offset from which the value is read
2891  *
2892  * Return: Read value
2893  */
2894 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
2895 {
2897 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2898 
2899 	return hif_read32_mb(scn, scn->mem + offset);
2900 }
2901 qdf_export_symbol(hif_reg_read);
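/*
 * Illustrative usage (hypothetical caller): the two accessors above can be
 * combined for a read-modify-write of a target register, e.g.:
 *
 *	uint32_t val = hif_reg_read(hif_ctx, offset);
 *
 *	hif_reg_write(hif_ctx, offset, val | enable_bit);
 */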
2902 
2903 /**
2904  * hif_ramdump_handler(): generic ramdump handler
2905  * @scn: struct hif_opaque_softc
2906  *
2907  * Return: None
2908  */
2909 void hif_ramdump_handler(struct hif_opaque_softc *scn)
2910 {
2911 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2912 		hif_usb_ramdump_handler(scn);
2913 }
2914 
2915 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
2916 {
2917 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2918 
2919 	return scn->wake_irq_type;
2920 }
2921 
2922 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
2923 {
2924 	struct hif_softc *scn = context;
2925 
2926 	hif_info("wake interrupt received on irq %d", irq);
2927 
2928 	hif_rtpm_set_monitor_wake_intr(0);
2929 	hif_rtpm_request_resume();
2930 
2931 	if (scn->initial_wakeup_cb)
2932 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
2933 
2934 	if (hif_is_ut_suspended(scn))
2935 		hif_ut_fw_resume(scn);
2936 
2937 	qdf_pm_system_wakeup();
2938 
2939 	return IRQ_HANDLED;
2940 }
2941 
2942 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2943 			       void (*callback)(void *),
2944 			       void *priv)
2945 {
2946 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2947 
2948 	scn->initial_wakeup_cb = callback;
2949 	scn->initial_wakeup_priv = priv;
2950 }
2951 
2952 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2953 				       uint32_t ce_service_max_yield_time)
2954 {
2955 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2956 
2957 	hif_ctx->ce_service_max_yield_time =
2958 		ce_service_max_yield_time * 1000;
2959 }
2960 
2961 unsigned long long
2962 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
2963 {
2964 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2965 
2966 	return hif_ctx->ce_service_max_yield_time;
2967 }
2968 
2969 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2970 				       uint8_t ce_service_max_rx_ind_flush)
2971 {
2972 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2973 
2974 	if (ce_service_max_rx_ind_flush == 0 ||
2975 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
2976 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2977 	else
2978 		hif_ctx->ce_service_max_rx_ind_flush =
2979 						ce_service_max_rx_ind_flush;
2980 }
2981 
2982 #ifdef SYSTEM_PM_CHECK
2983 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2984 			       enum hif_system_pm_state state)
2985 {
2986 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2987 
2988 	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
2989 }
2990 
2991 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
2992 {
2993 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2994 
2995 	return qdf_atomic_read(&hif_ctx->sys_pm_state);
2996 }
2997 
2998 int hif_system_pm_state_check(struct hif_opaque_softc *hif)
2999 {
3000 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3001 	int32_t sys_pm_state;
3002 
3003 	if (!hif_ctx) {
3004 		hif_err("hif context is null");
3005 		return -EFAULT;
3006 	}
3007 
3008 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
3009 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
3010 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
3011 		hif_info("Triggering system wakeup");
3012 		qdf_pm_system_wakeup();
3013 		return -EAGAIN;
3014 	}
3015 
3016 	return 0;
3017 }
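/*
 * Illustrative usage (hypothetical caller): bus access paths can consult
 * hif_system_pm_state_check() and defer the access while the system is
 * suspending, e.g.:
 *
 *	if (hif_system_pm_state_check(hif_ctx) == -EAGAIN)
 *		return QDF_STATUS_E_AGAIN;
 */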
3018 #endif
3019 #ifdef WLAN_FEATURE_AFFINITY_MGR
3020 /*
3021  * hif_audio_cpu_affinity_allowed() - Check if audio CPU affinity is allowed
3022  *
3023  * @scn: hif handle
3024  * @cfg: hif affinity manager configuration for IRQ
3025  * @audio_taken_cpu: Current CPUs which are taken by audio.
3026  * @current_time: Current system time.
3027  *
3028  * This API checks two conditions:
3029  *  1) The last audio taken mask and the current taken mask are different.
3030  *  2) The time since the IRQ was last affined away due to audio taken CPUs
3031  *     exceeds the time threshold (currently 5 seconds).
3032  * Return true only if both conditions are satisfied.
3033  *
3034  * Return: bool: true if it is allowed to affine away audio taken CPUs.
3035  */
3036 static inline bool
3037 hif_audio_cpu_affinity_allowed(struct hif_softc *scn,
3038 			       struct hif_cpu_affinity *cfg,
3039 			       qdf_cpu_mask audio_taken_cpu,
3040 			       uint64_t current_time)
3041 {
3042 	if (!qdf_cpumask_equal(&audio_taken_cpu, &cfg->walt_taken_mask) &&
3043 	    (qdf_log_timestamp_to_usecs(current_time -
3044 			 cfg->last_affined_away)
3045 		< scn->time_threshold))
3046 		return false;
3047 	return true;
3048 }
3049 
3050 /*
3051  * hif_affinity_mgr_check_update_mask() - Check if cpu mask need to be updated
3052  *
3053  * @scn: hif handle
3054  * @cfg: hif affinity manager configuration for IRQ
3055  * @audio_taken_cpu: Current CPUs which are taken by audio.
3056  * @cpu_mask: CPU mask which need to be updated.
3057  * @current_time: Current system time.
3058  *
3059  * This API checks if the Pro audio use case is running and if cpu_mask
3060  * needs to be updated.
3061  *
3062  * Return: QDF_STATUS
3063  */
3064 static inline QDF_STATUS
3065 hif_affinity_mgr_check_update_mask(struct hif_softc *scn,
3066 				   struct hif_cpu_affinity *cfg,
3067 				   qdf_cpu_mask audio_taken_cpu,
3068 				   qdf_cpu_mask *cpu_mask,
3069 				   uint64_t current_time)
3070 {
3071 	qdf_cpu_mask allowed_mask;
3072 
3073 	/*
3074 	 * Case 1: audio_taken_mask is empty
3075 	 *   Check if passed cpu_mask and wlan_requested_mask is same or not.
3076 	 *      If both mask are different copy wlan_requested_mask(IRQ affinity
3077 	 *      mask requested by WLAN) to cpu_mask.
3078 	 *
3079 	 * Case 2: audio_taken_mask is not empty
3080 	 *   1. Only allow update if last time when IRQ was affined away due to
3081 	 *      audio taken CPUs is more than 5 seconds or update is requested
3082 	 *      by WLAN
3083 	 *   2. Only allow silver cores to be affined away.
3084 	 *   3. Check if any allowed CPUs for audio use case is set in cpu_mask.
3085 	 *       i. If any CPU mask is set, mask out that CPU from the cpu_mask
3086 	 *       ii. If after masking out audio taken cpu(Silver cores) cpu_mask
3087 	 *           is empty, set mask to all cpu except cpus taken by audio.
3088 	 * Example:
3089 	 *| Audio mask | mask allowed | cpu_mask | WLAN req mask | new cpu_mask|
3090 	 *|  0x00      |       0x00   |   0x0C   |       0x0C    |      0x0C   |
3091 	 *|  0x00      |       0x00   |   0x03   |       0x03    |      0x03   |
3092 	 *|  0x00      |       0x00   |   0xFC   |       0x03    |      0x03   |
3093 	 *|  0x00      |       0x00   |   0x03   |       0x0C    |      0x0C   |
3094 	 *|  0x0F      |       0x03   |   0x0C   |       0x0C    |      0x0C   |
3095 	 *|  0x0F      |       0x03   |   0x03   |       0x03    |      0xFC   |
3096 	 *|  0x03      |       0x03   |   0x0C   |       0x0C    |      0x0C   |
3097 	 *|  0x03      |       0x03   |   0x03   |       0x03    |      0xFC   |
3098 	 *|  0x03      |       0x03   |   0xFC   |       0x03    |      0xFC   |
3099 	 *|  0xF0      |       0x00   |   0x0C   |       0x0C    |      0x0C   |
3100 	 *|  0xF0      |       0x00   |   0x03   |       0x03    |      0x03   |
3101 	 */
3102 
3103 	/* Check if audio taken mask is empty*/
3104 	if (qdf_likely(qdf_cpumask_empty(&audio_taken_cpu))) {
3105 		/* If CPU mask requested by WLAN for the IRQ and
3106 		 * cpu_mask passed CPU mask set for IRQ is different
3107 		 * Copy requested mask into cpu_mask and return
3108 		 */
3109 		if (qdf_unlikely(!qdf_cpumask_equal(cpu_mask,
3110 						    &cfg->wlan_requested_mask))) {
3111 			qdf_cpumask_copy(cpu_mask, &cfg->wlan_requested_mask);
3112 			return QDF_STATUS_SUCCESS;
3113 		}
3114 		return QDF_STATUS_E_ALREADY;
3115 	}
3116 
3117 	if (!(hif_audio_cpu_affinity_allowed(scn, cfg, audio_taken_cpu,
3118 					     current_time) ||
3119 	      cfg->update_requested))
3120 		return QDF_STATUS_E_AGAIN;
3121 
3122 	/* Only allow Silver cores to be affined away */
3123 	qdf_cpumask_and(&allowed_mask, &scn->allowed_mask, &audio_taken_cpu);
3124 	if (qdf_cpumask_intersects(cpu_mask, &allowed_mask)) {
3125 		/* If any of taken CPU(Silver cores) mask is set in cpu_mask,
3126 		 *  mask out the audio taken CPUs from the cpu_mask.
3127 		 */
3128 		qdf_cpumask_andnot(cpu_mask, &cfg->wlan_requested_mask,
3129 				   &allowed_mask);
3130 		/* If cpu_mask is empty set it to all CPUs
3131 		 * except taken by audio(Silver cores)
3132 		 */
3133 		if (qdf_unlikely(qdf_cpumask_empty(cpu_mask)))
3134 			qdf_cpumask_complement(cpu_mask, &allowed_mask);
3135 		return QDF_STATUS_SUCCESS;
3136 	}
3137 
3138 	return QDF_STATUS_E_ALREADY;
3139 }
3140 
3141 static inline QDF_STATUS
3142 hif_check_and_affine_irq(struct hif_softc *scn, struct hif_cpu_affinity *cfg,
3143 			 qdf_cpu_mask audio_taken_cpu, qdf_cpu_mask cpu_mask,
3144 			 uint64_t current_time)
3145 {
3146 	QDF_STATUS status;
3147 
3148 	status = hif_affinity_mgr_check_update_mask(scn, cfg,
3149 						    audio_taken_cpu,
3150 						    &cpu_mask,
3151 						    current_time);
3152 	/* Set IRQ affinity if CPU mask was updated */
3153 	if (QDF_IS_STATUS_SUCCESS(status)) {
3154 		status = hif_irq_set_affinity_hint(cfg->irq,
3155 						   &cpu_mask);
3156 		if (QDF_IS_STATUS_SUCCESS(status)) {
3157 			/* Store audio taken CPU mask */
3158 			qdf_cpumask_copy(&cfg->walt_taken_mask,
3159 					 &audio_taken_cpu);
3160 			/* Store CPU mask which was set for IRQ*/
3161 			qdf_cpumask_copy(&cfg->current_irq_mask,
3162 					 &cpu_mask);
3163 			/* Set time when IRQ affinity was updated */
3164 			cfg->last_updated = current_time;
3165 			if (hif_audio_cpu_affinity_allowed(scn, cfg,
3166 							   audio_taken_cpu,
3167 							   current_time))
3168 				/* If CPU mask was updated due to CPU
3169 				 * taken by audio, update
3170 				 * last_affined_away time
3171 				 */
3172 				cfg->last_affined_away = current_time;
3173 		}
3174 	}
3175 
3176 	return status;
3177 }
3178 
3179 void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
3180 {
3181 	bool audio_affinity_allowed = false;
3182 	int i, j, ce_id;
3183 	uint64_t current_time;
3184 	char cpu_str[10];
3185 	QDF_STATUS status;
3186 	qdf_cpu_mask cpu_mask, audio_taken_cpu;
3187 	struct HIF_CE_state *hif_state;
3188 	struct hif_exec_context *hif_ext_group;
3189 	struct CE_attr *host_ce_conf;
3190 	struct HIF_CE_state *ce_sc;
3191 	struct hif_cpu_affinity *cfg;
3192 
3193 	if (!scn->affinity_mgr_supported)
3194 		return;
3195 
3196 	current_time = hif_get_log_timestamp();
3197 	/* Get CPU mask for audio taken CPUs */
3198 	audio_taken_cpu = qdf_walt_get_cpus_taken();
3199 
3200 	ce_sc = HIF_GET_CE_STATE(scn);
3201 	host_ce_conf = ce_sc->host_ce_config;
3202 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3203 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3204 			continue;
3205 		cfg = &scn->ce_irq_cpu_mask[ce_id];
3206 		qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
3207 		status =
3208 			hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
3209 						 cpu_mask, current_time);
3210 		if (QDF_IS_STATUS_SUCCESS(status))
3211 			audio_affinity_allowed = true;
3212 	}
3213 
3214 	hif_state = HIF_GET_CE_STATE(scn);
3215 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3216 		hif_ext_group = hif_state->hif_ext_group[i];
3217 		for (j = 0; j < hif_ext_group->numirq; j++) {
3218 			cfg = &scn->irq_cpu_mask[hif_ext_group->grp_id][j];
3219 			qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
3220 			status =
3221 				hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
3222 							 cpu_mask, current_time);
3223 			if (QDF_IS_STATUS_SUCCESS(status)) {
3224 				qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
3225 				audio_affinity_allowed = true;
3226 			}
3227 		}
3228 	}
3229 	if (audio_affinity_allowed) {
3230 		qdf_thread_cpumap_print_to_pagebuf(false, cpu_str,
3231 						   &audio_taken_cpu);
3232 		hif_info("Audio taken CPU mask: %s", cpu_str);
3233 	}
3234 }
3235 
3236 static inline QDF_STATUS
3237 hif_affinity_mgr_set_irq_affinity(struct hif_softc *scn, uint32_t irq,
3238 				  struct hif_cpu_affinity *cfg,
3239 				  qdf_cpu_mask *cpu_mask)
3240 {
3241 	uint64_t current_time;
3242 	char cpu_str[10];
3243 	QDF_STATUS status, mask_updated;
3244 	qdf_cpu_mask audio_taken_cpu = qdf_walt_get_cpus_taken();
3245 
3246 	current_time = hif_get_log_timestamp();
3247 	qdf_cpumask_copy(&cfg->wlan_requested_mask, cpu_mask);
3248 	cfg->update_requested = true;
3249 	mask_updated = hif_affinity_mgr_check_update_mask(scn, cfg,
3250 							  audio_taken_cpu,
3251 							  cpu_mask,
3252 							  current_time);
3253 	status = hif_irq_set_affinity_hint(irq, cpu_mask);
3254 	if (QDF_IS_STATUS_SUCCESS(status)) {
3255 		qdf_cpumask_copy(&cfg->walt_taken_mask, &audio_taken_cpu);
3256 		qdf_cpumask_copy(&cfg->current_irq_mask, cpu_mask);
3257 		if (QDF_IS_STATUS_SUCCESS(mask_updated)) {
3258 			cfg->last_updated = current_time;
3259 			if (hif_audio_cpu_affinity_allowed(scn, cfg,
3260 							   audio_taken_cpu,
3261 							   current_time)) {
3262 				cfg->last_affined_away = current_time;
3263 				qdf_thread_cpumap_print_to_pagebuf(false,
3264 								   cpu_str,
3265 								   &audio_taken_cpu);
3266 				hif_info_rl("Audio taken CPU mask: %s",
3267 					    cpu_str);
3268 			}
3269 		}
3270 	}
3271 	cfg->update_requested = false;
3272 	return status;
3273 }
3274 
3275 QDF_STATUS
3276 hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
3277 				      uint32_t grp_id, uint32_t irq_index,
3278 				      qdf_cpu_mask *cpu_mask)
3279 {
3280 	struct hif_cpu_affinity *cfg;
3281 
3282 	if (!scn->affinity_mgr_supported)
3283 		return hif_irq_set_affinity_hint(irq, cpu_mask);
3284 
3285 	cfg = &scn->irq_cpu_mask[grp_id][irq_index];
3286 	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
3287 }
3288 
3289 QDF_STATUS
3290 hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
3291 				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
3292 {
3293 	struct hif_cpu_affinity *cfg;
3294 
3295 	if (!scn->affinity_mgr_supported)
3296 		return hif_irq_set_affinity_hint(irq, cpu_mask);
3297 
3298 	cfg = &scn->ce_irq_cpu_mask[ce_id];
3299 	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
3300 }
3301 
3302 void
3303 hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
3304 {
3305 	unsigned int cpus;
3306 	qdf_cpu_mask cpu_mask = {0};
3307 	struct hif_cpu_affinity *cfg = NULL;
3308 
3309 	if (!scn->affinity_mgr_supported)
3310 		return;
3311 
3312 	/* Set CPU Mask to Silver core */
3313 	qdf_for_each_possible_cpu(cpus)
3314 		if (qdf_topology_physical_package_id(cpus) ==
3315 		    CPU_CLUSTER_TYPE_LITTLE)
3316 			qdf_cpumask_set_cpu(cpus, &cpu_mask);
3317 
3318 	cfg = &scn->ce_irq_cpu_mask[id];
3319 	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
3320 	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
3321 	cfg->irq = irq;
3322 	cfg->last_updated = 0;
3323 	cfg->last_affined_away = 0;
3324 	cfg->update_requested = false;
3325 }
3326 
3327 void
3328 hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
3329 			      int irq_num, int irq)
3330 {
3331 	unsigned int cpus;
3332 	qdf_cpu_mask cpu_mask = {0};
3333 	struct hif_cpu_affinity *cfg = NULL;
3334 
3335 	if (!scn->affinity_mgr_supported)
3336 		return;
3337 
3338 	/* Set CPU Mask to Silver core */
3339 	qdf_for_each_possible_cpu(cpus)
3340 		if (qdf_topology_physical_package_id(cpus) ==
3341 		    CPU_CLUSTER_TYPE_LITTLE)
3342 			qdf_cpumask_set_cpu(cpus, &cpu_mask);
3343 
3344 	cfg = &scn->irq_cpu_mask[grp_id][irq_num];
3345 	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
3346 	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
3347 	cfg->irq = irq;
3348 	cfg->last_updated = 0;
3349 	cfg->last_affined_away = 0;
3350 	cfg->update_requested = false;
3351 }
3352 #endif
3353