xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision 449758b4de7a219dad7b7a0e20ce2ea1c8388e34)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "targcfg.h"
21 #include "qdf_lock.h"
22 #include "qdf_status.h"
24 #include <qdf_atomic.h>         /* qdf_atomic_read */
25 #include <targaddrs.h>
26 #include "hif_io32.h"
27 #include <hif.h>
28 #include <target_type.h>
29 #include "regtable.h"
30 #define ATH_MODULE_NAME hif
31 #include <a_debug.h>
32 #include "hif_main.h"
33 #include "hif_hw_version.h"
34 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
35      defined(HIF_IPCI))
36 #include "ce_tasklet.h"
37 #include "ce_api.h"
38 #endif
39 #include "qdf_trace.h"
40 #include "qdf_status.h"
41 #include "hif_debug.h"
42 #include "mp_dev.h"
43 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
44 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
45 	defined(QCA_WIFI_QCA5332)
46 #include "hal_api.h"
47 #endif
48 #include "hif_napi.h"
49 #include "hif_unit_test_suspend_i.h"
50 #include "qdf_module.h"
51 #ifdef HIF_CE_LOG_INFO
52 #include <qdf_notifier.h>
53 #include <qdf_hang_event_notifier.h>
54 #endif
55 #include <linux/cpumask.h>
56 
57 #include <pld_common.h>
58 #include "ce_internal.h"
59 #include <qdf_tracepoint.h>
60 
61 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
62 {
63 	hif_trigger_dump(hif_ctx, cmd_id, start);
64 }
65 
66 /**
67  * hif_get_target_id(): hif_get_target_id
68  * @scn: hif_softc context
69  *
70  * Return the virtual memory base address to the caller
71  *
72  * Return: A_target_id_t
73  */
76 A_target_id_t hif_get_target_id(struct hif_softc *scn)
77 {
78 	return scn->mem;
79 }
80 
81 /**
82  * hif_get_targetdef(): hif_get_targetdef
83  * @hif_ctx: hif context
84  *
85  * Return: void *
86  */
87 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
88 {
89 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
90 
91 	return scn->targetdef;
92 }
93 
94 #ifdef FORCE_WAKE
95 #ifndef QCA_WIFI_WCN6450
96 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
97 			 bool init_phase)
98 {
99 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
100 
101 	if (ce_srng_based(scn))
102 		hal_set_init_phase(scn->hal_soc, init_phase);
103 }
104 #else
105 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
106 			 bool init_phase)
107 {
108 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
109 
110 	hal_set_init_phase(scn->hal_soc, init_phase);
111 }
112 #endif
113 #endif /* FORCE_WAKE */
114 
115 #ifdef HIF_IPCI
116 void hif_shutdown_notifier_cb(void *hif_ctx)
117 {
118 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
119 
120 	scn->recovery = true;
121 }
122 #endif
123 
124 /**
125  * hif_vote_link_down(): unvote for link up
126  * @hif_ctx: hif context
127  *
128  * Call hif_vote_link_down to release a previous request made using
129  * hif_vote_link_up. A hif_vote_link_down call should only be made
130  * after a corresponding hif_vote_link_up, otherwise you could be
131  * negating a vote from another source. When no votes are present
132  * hif will not guarantee the linkstate after hif_bus_suspend.
133  *
134  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
135  * and initialization deinitialization sequencences.
136  *
137  * Return: n/a
138  */
139 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
140 {
141 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
142 
143 	QDF_BUG(scn);
144 	if (scn->linkstate_vote == 0)
145 		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
146 				scn->linkstate_vote);
147 
148 	scn->linkstate_vote--;
149 	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
150 	if (scn->linkstate_vote == 0)
151 		hif_bus_prevent_linkdown(scn, false);
152 }
153 
154 /**
155  * hif_vote_link_up(): vote to prevent bus from suspending
156  * @hif_ctx: hif context
157  *
158  * Makes hif guarantee that fw can message the host normally
159  * during suspend.
160  *
161  * SYNCHRONIZE WITH hif_vote_link_down by only calling in the MC thread
162  * and initialization/deinitialization sequences.
163  *
164  * Return: n/a
165  */
166 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
167 {
168 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
169 
170 	QDF_BUG(scn);
171 	scn->linkstate_vote++;
172 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
173 	if (scn->linkstate_vote == 1)
174 		hif_bus_prevent_linkdown(scn, true);
175 }
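
/*
 * Illustrative pairing of the two vote calls above (caller-side sketch,
 * not part of this file). A caller that needs the link to stay up while
 * exchanging messages with firmware would do, from the MC thread:
 *
 *	hif_vote_link_up(hif_ctx);
 *	... message exchange that must survive bus suspend ...
 *	hif_vote_link_down(hif_ctx);
 *
 * Votes must be balanced; an unmatched hif_vote_link_down() negates a
 * vote held by another caller.
 */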
176 
177 /**
178  * hif_can_suspend_link(): query if hif is permitted to suspend the link
179  * @hif_ctx: hif context
180  *
181  * Hif will ensure that the link won't be suspended if the upper layers
182  * don't want it to.
183  *
184  * SYNCHRONIZATION: the MC thread is stopped before bus suspend, thus
185  * we don't need extra locking to ensure votes don't change while
186  * we are in the process of suspending or resuming.
187  *
188  * Return: false if hif will guarantee link up during suspend.
189  */
190 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
191 {
192 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
193 
194 	QDF_BUG(scn);
195 	return scn->linkstate_vote == 0;
196 }
197 
198 /**
199  * hif_hia_item_address(): hif_hia_item_address
200  * @target_type: target_type
201  * @item_offset: item_offset
202  *
203  * Return: host interest item address for the given target type
204  */
205 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
206 {
207 	switch (target_type) {
208 	case TARGET_TYPE_AR6002:
209 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
210 	case TARGET_TYPE_AR6003:
211 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
212 	case TARGET_TYPE_AR6004:
213 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
214 	case TARGET_TYPE_AR6006:
215 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
216 	case TARGET_TYPE_AR9888:
217 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
218 	case TARGET_TYPE_AR6320:
219 	case TARGET_TYPE_AR6320V2:
220 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
221 	case TARGET_TYPE_ADRASTEA:
222 		/* ADRASTEA doesn't have a host interest address */
223 		ASSERT(0);
224 		return 0;
225 	case TARGET_TYPE_AR900B:
226 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
227 	case TARGET_TYPE_QCA9984:
228 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
229 	case TARGET_TYPE_QCA9888:
230 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
231 
232 	default:
233 		ASSERT(0);
234 		return 0;
235 	}
236 }
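
/*
 * Worked example of the lookup above: for TARGET_TYPE_AR9888 with an
 * item_offset of 0x10 the function returns
 * AR9888_HOST_INTEREST_ADDRESS + 0x10, while targets without a host
 * interest area (e.g. ADRASTEA) assert and return 0.
 */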
237 
238 /**
239  * hif_max_num_receives_reached() - check max receive is reached
240  * @scn: HIF Context
241  * @count: number of packets received so far
242  *
243  * Check whether the receive count has crossed the configured maximum.
244  *
245  * Return: bool
246  */
247 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
248 {
249 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
250 		return count > 120;
251 	else
252 		return count > MAX_NUM_OF_RECEIVES;
253 }
254 
255 /**
256  * init_buffer_count() - initial buffer count
257  * @maxSize: maximum number of buffers
258  *
259  * Routine to modify the initial buffer count to be allocated on an OS
260  * platform basis. Platform owners will need to modify this as needed.
261  *
262  * Return: qdf_size_t
263  */
264 qdf_size_t init_buffer_count(qdf_size_t maxSize)
265 {
266 	return maxSize;
267 }
268 
269 /**
270  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
271  * @hif_ctx: hif context
272  * @htc_htt_tx_endpoint: htt_tx_endpoint
273  *
274  * Return: void
275  */
276 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
277 							int htc_htt_tx_endpoint)
278 {
279 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
280 
281 	if (!scn) {
282 		hif_err("scn is NULL!");
283 		return;
284 	}
285 
286 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
287 }
288 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
289 
290 static const struct qwlan_hw qwlan_hw_list[] = {
291 	{
292 		.id = AR6320_REV1_VERSION,
293 		.subid = 0,
294 		.name = "QCA6174_REV1",
295 	},
296 	{
297 		.id = AR6320_REV1_1_VERSION,
298 		.subid = 0x1,
299 		.name = "QCA6174_REV1_1",
300 	},
301 	{
302 		.id = AR6320_REV1_3_VERSION,
303 		.subid = 0x2,
304 		.name = "QCA6174_REV1_3",
305 	},
306 	{
307 		.id = AR6320_REV2_1_VERSION,
308 		.subid = 0x4,
309 		.name = "QCA6174_REV2_1",
310 	},
311 	{
312 		.id = AR6320_REV2_1_VERSION,
313 		.subid = 0x5,
314 		.name = "QCA6174_REV2_2",
315 	},
316 	{
317 		.id = AR6320_REV3_VERSION,
318 		.subid = 0x6,
319 		.name = "QCA6174_REV2.3",
320 	},
321 	{
322 		.id = AR6320_REV3_VERSION,
323 		.subid = 0x8,
324 		.name = "QCA6174_REV3",
325 	},
326 	{
327 		.id = AR6320_REV3_VERSION,
328 		.subid = 0x9,
329 		.name = "QCA6174_REV3_1",
330 	},
331 	{
332 		.id = AR6320_REV3_2_VERSION,
333 		.subid = 0xA,
334 		.name = "AR6320_REV3_2_VERSION",
335 	},
336 	{
337 		.id = QCA6390_V1,
338 		.subid = 0x0,
339 		.name = "QCA6390_V1",
340 	},
341 	{
342 		.id = QCA6490_V1,
343 		.subid = 0x0,
344 		.name = "QCA6490_V1",
345 	},
346 	{
347 		.id = WCN3990_v1,
348 		.subid = 0x0,
349 		.name = "WCN3990_V1",
350 	},
351 	{
352 		.id = WCN3990_v2,
353 		.subid = 0x0,
354 		.name = "WCN3990_V2",
355 	},
356 	{
357 		.id = WCN3990_v2_1,
358 		.subid = 0x0,
359 		.name = "WCN3990_V2.1",
360 	},
361 	{
362 		.id = WCN3998,
363 		.subid = 0x0,
364 		.name = "WCN3998",
365 	},
366 	{
367 		.id = QCA9379_REV1_VERSION,
368 		.subid = 0xC,
369 		.name = "QCA9379_REV1",
370 	},
371 	{
372 		.id = QCA9379_REV1_VERSION,
373 		.subid = 0xD,
374 		.name = "QCA9379_REV1_1",
375 	},
376 	{
377 		.id = MANGO_V1,
378 		.subid = 0xF,
379 		.name = "MANGO_V1",
380 	},
381 	{
382 		.id = PEACH_V1,
383 		.subid = 0,
384 		.name = "PEACH_V1",
385 	},
386 
387 	{
388 		.id = KIWI_V1,
389 		.subid = 0,
390 		.name = "KIWI_V1",
391 	},
392 	{
393 		.id = KIWI_V2,
394 		.subid = 0,
395 		.name = "KIWI_V2",
396 	},
397 	{
398 		.id = WCN6750_V1,
399 		.subid = 0,
400 		.name = "WCN6750_V1",
401 	},
402 	{
403 		.id = WCN6750_V2,
404 		.subid = 0,
405 		.name = "WCN6750_V2",
406 	},
407 	{
408 		.id = WCN6450_V1,
409 		.subid = 0,
410 		.name = "WCN6450_V1",
411 	},
412 	{
413 		.id = QCA6490_v2_1,
414 		.subid = 0,
415 		.name = "QCA6490",
416 	},
417 	{
418 		.id = QCA6490_v2,
419 		.subid = 0,
420 		.name = "QCA6490",
421 	},
422 	{
423 		.id = WCN3990_TALOS,
424 		.subid = 0,
425 		.name = "WCN3990",
426 	},
427 	{
428 		.id = WCN3990_MOOREA,
429 		.subid = 0,
430 		.name = "WCN3990",
431 	},
432 	{
433 		.id = WCN3990_SAIPAN,
434 		.subid = 0,
435 		.name = "WCN3990",
436 	},
437 	{
438 		.id = WCN3990_RENNELL,
439 		.subid = 0,
440 		.name = "WCN3990",
441 	},
442 	{
443 		.id = WCN3990_BITRA,
444 		.subid = 0,
445 		.name = "WCN3990",
446 	},
447 	{
448 		.id = WCN3990_DIVAR,
449 		.subid = 0,
450 		.name = "WCN3990",
451 	},
452 	{
453 		.id = WCN3990_ATHERTON,
454 		.subid = 0,
455 		.name = "WCN3990",
456 	},
457 	{
458 		.id = WCN3990_STRAIT,
459 		.subid = 0,
460 		.name = "WCN3990",
461 	},
462 	{
463 		.id = WCN3990_NETRANI,
464 		.subid = 0,
465 		.name = "WCN3990",
466 	},
467 	{
468 		.id = WCN3990_CLARENCE,
469 		.subid = 0,
470 		.name = "WCN3990",
471 	}
472 };
473 
474 /**
475  * hif_get_hw_name(): get a human readable name for the hardware
476  * @info: Target Info
477  *
478  * Return: human readable name for the underlying wifi hardware.
479  */
480 static const char *hif_get_hw_name(struct hif_target_info *info)
481 {
482 	int i;
483 
484 	hif_debug("target version = %d, target revision = %d",
485 		  info->target_version,
486 		  info->target_revision);
487 
488 	if (info->hw_name)
489 		return info->hw_name;
490 
491 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
492 		if (info->target_version == qwlan_hw_list[i].id &&
493 		    info->target_revision == qwlan_hw_list[i].subid) {
494 			return qwlan_hw_list[i].name;
495 		}
496 	}
497 
498 	info->hw_name = qdf_mem_malloc(64);
499 	if (!info->hw_name)
500 		return "Unknown Device (nomem)";
501 
502 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
503 			info->target_version);
504 	if (i < 0)
505 		return "Unknown Device (snprintf failure)";
506 	else
507 		return info->hw_name;
508 }
509 
510 /**
511  * hif_get_hw_info(): query hw version, revision and name
512  * @scn: hif context
513  * @version: buffer to fill with the target version
514  * @revision: buffer to fill with the target revision
515  * @target_name: buffer to fill with a human readable target name
516  *
517  * Return: n/a
518  */
519 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
520 			const char **target_name)
521 {
522 	struct hif_target_info *info = hif_get_target_info_handle(scn);
523 	struct hif_softc *sc = HIF_GET_SOFTC(scn);
524 
525 	if (sc->bus_type == QDF_BUS_TYPE_USB)
526 		hif_usb_get_hw_info(sc);
527 
528 	*version = info->target_version;
529 	*revision = info->target_revision;
530 	*target_name = hif_get_hw_name(info);
531 }
532 
533 /**
534  * hif_get_dev_ba(): API to get device base address.
535  * @hif_handle: hif handle
536  *
537  * Return: device base address
538  */
539 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
540 {
541 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
542 
543 	return scn->mem;
544 }
545 qdf_export_symbol(hif_get_dev_ba);
546 
547 /**
548  * hif_get_dev_ba_ce(): API to get device ce base address.
549  * @hif_handle: hif handle
550  *
551  * Return: dev mem base address for CE
552  */
553 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
554 {
555 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
556 
557 	return scn->mem_ce;
558 }
559 
560 qdf_export_symbol(hif_get_dev_ba_ce);
561 
562 /**
563  * hif_get_dev_ba_pmm(): API to get device pmm base address.
564  * @hif_handle: scn
565  *
566  * Return: dev mem base address for PMM
567  */
568 
569 void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle)
570 {
571 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
572 
573 	return scn->mem_pmm_base;
574 }
575 
576 qdf_export_symbol(hif_get_dev_ba_pmm);
577 
578 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
579 {
580 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
581 
582 	return scn->target_info.soc_version;
583 }
584 
585 qdf_export_symbol(hif_get_soc_version);
586 
587 /**
588  * hif_get_dev_ba_cmem(): API to get device ce base address.
589  * @hif_handle: hif handle
590  *
591  * Return: dev mem base address for CMEM
592  */
593 void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
594 {
595 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
596 
597 	return scn->mem_cmem;
598 }
599 
600 qdf_export_symbol(hif_get_dev_ba_cmem);
601 
602 #ifdef FEATURE_RUNTIME_PM
603 void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
604 {
605 	if (is_get)
606 		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
607 	else
608 		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
609 }
610 
611 static inline
612 void hif_rtpm_lock_init(struct hif_softc *scn)
613 {
614 	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
615 }
616 
617 static inline
618 void hif_rtpm_lock_deinit(struct hif_softc *scn)
619 {
620 	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
621 }
622 #else
623 static inline
624 void hif_rtpm_lock_init(struct hif_softc *scn)
625 {
626 }
627 
628 static inline
629 void hif_rtpm_lock_deinit(struct hif_softc *scn)
630 {
631 }
632 #endif
633 
634 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
635 /**
636  * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
637  * @scn: hif context
638  * @psoc: psoc objmgr handle
639  *
640  * Return: None
641  */
642 static inline
643 void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
644 					       struct wlan_objmgr_psoc *psoc)
645 {
646 	if (psoc) {
647 		scn->ini_cfg.ce_status_ring_timer_threshold =
648 			cfg_get(psoc,
649 				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
650 		scn->ini_cfg.ce_status_ring_batch_count_threshold =
651 			cfg_get(psoc,
652 				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
653 	}
654 }
655 #else
656 static inline
657 void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
658 					       struct wlan_objmgr_psoc *psoc)
659 {
660 }
661 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
662 
663 /**
664  * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
665  * @scn: hif context
666  * @psoc: psoc objmgr handle
667  *
668  * Return: None
669  */
670 static inline
671 void hif_get_cfg_from_psoc(struct hif_softc *scn,
672 			   struct wlan_objmgr_psoc *psoc)
673 {
674 	if (psoc) {
675 		scn->ini_cfg.disable_wake_irq =
676 			cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
677 		/*
678 		 * The wake IRQ can't share the same IRQ with the copy engines.
679 		 * In one-MSI mode, we don't know whether the wake IRQ was
680 		 * triggered or not in the wake IRQ handler (known issue,
681 		 * CR 2055359). To support the wake IRQ, allocate at least
682 		 * 2 MSI vectors: the first is for the wake IRQ while the
683 		 * others share the second vector.
684 		 */
685 		if (pld_is_one_msi(scn->qdf_dev->dev)) {
686 			hif_debug("Disable wake IRQ in one MSI mode");
687 			scn->ini_cfg.disable_wake_irq = true;
688 		}
689 		hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
690 	}
691 }
692 
693 #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
694 /**
695  * hif_recovery_notifier_cb - Recovery notifier callback to log
696  *  hang event data
697  * @block: notifier block
698  * @state: state
699  * @data: notifier data
700  *
701  * Return: status
702  */
703 static
704 int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
705 			     void *data)
706 {
707 	struct qdf_notifer_data *notif_data = data;
708 	qdf_notif_block *notif_block;
709 	struct hif_softc *hif_handle;
710 	bool bus_id_invalid;
711 
712 	if (!data || !block)
713 		return -EINVAL;
714 
715 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
716 
717 	hif_handle = notif_block->priv_data;
718 	if (!hif_handle)
719 		return -EINVAL;
720 
721 	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
722 					  &notif_data->offset);
723 	if (bus_id_invalid)
724 		return NOTIFY_STOP_MASK;
725 
726 	hif_log_ce_info(hif_handle, notif_data->hang_data,
727 			&notif_data->offset);
728 
729 	return 0;
730 }
731 
732 /**
733  * hif_register_recovery_notifier - Register hif recovery notifier
734  * @hif_handle: hif handle
735  *
736  * Return: status
737  */
738 static
739 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
740 {
741 	qdf_notif_block *hif_notifier;
742 
743 	if (!hif_handle)
744 		return QDF_STATUS_E_FAILURE;
745 
746 	hif_notifier = &hif_handle->hif_recovery_notifier;
747 
748 	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
749 	hif_notifier->priv_data = hif_handle;
750 	return qdf_hang_event_register_notifier(hif_notifier);
751 }
752 
753 /**
754  * hif_unregister_recovery_notifier - Un-register hif recovery notifier
755  * @hif_handle: hif handle
756  *
757  * Return: status
758  */
759 static
760 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
761 {
762 	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
763 
764 	return qdf_hang_event_unregister_notifier(hif_notifier);
765 }
766 #else
767 static inline
768 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
769 {
770 	return QDF_STATUS_SUCCESS;
771 }
772 
773 static inline
774 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
775 {
776 	return QDF_STATUS_SUCCESS;
777 }
778 #endif
779 
780 #if defined(HIF_CPU_PERF_AFFINE_MASK) || \
781 	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
782 /**
783  * __hif_cpu_hotplug_notify() - CPU hotplug event handler
784  * @context: HIF context
785  * @cpu: CPU Id of the CPU generating the event
786  * @cpu_up: true if the CPU is online
787  *
788  * Return: None
789  */
790 static void __hif_cpu_hotplug_notify(void *context,
791 				     uint32_t cpu, bool cpu_up)
792 {
793 	struct hif_softc *scn = context;
794 
795 	if (!scn)
796 		return;
797 	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
798 		return;
799 
800 	if (cpu_up) {
801 		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
802 		hif_debug("Setting affinity for online CPU: %d", cpu);
803 	} else {
804 		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
805 	}
806 }
807 
808 /**
809  * hif_cpu_hotplug_notify - cpu core up/down notification
810  * handler
811  * @context: HIF context
812  * @cpu: CPU generating the event
813  * @cpu_up: true if the CPU is online
814  *
815  * Return: None
816  */
817 static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
818 {
819 	struct qdf_op_sync *op_sync;
820 
821 	if (qdf_op_protect(&op_sync))
822 		return;
823 
824 	__hif_cpu_hotplug_notify(context, cpu, cpu_up);
825 
826 	qdf_op_unprotect(op_sync);
827 }
828 
829 static void hif_cpu_online_cb(void *context, uint32_t cpu)
830 {
831 	hif_cpu_hotplug_notify(context, cpu, true);
832 }
833 
834 static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
835 {
836 	hif_cpu_hotplug_notify(context, cpu, false);
837 }
838 
839 static void hif_cpuhp_register(struct hif_softc *scn)
840 {
841 	if (!scn) {
842 		hif_info_high("cannot register hotplug notifiers");
843 		return;
844 	}
845 	qdf_cpuhp_register(&scn->cpuhp_event_handle,
846 			   scn,
847 			   hif_cpu_online_cb,
848 			   hif_cpu_before_offline_cb);
849 }
850 
851 static void hif_cpuhp_unregister(struct hif_softc *scn)
852 {
853 	if (!scn) {
854 		hif_info_high("cannot unregister hotplug notifiers");
855 		return;
856 	}
857 	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
858 }
859 
860 #else
861 static void hif_cpuhp_register(struct hif_softc *scn)
862 {
863 }
864 
865 static void hif_cpuhp_unregister(struct hif_softc *scn)
866 {
867 }
868 #endif /* HIF_CPU_PERF_AFFINE_MASK || FEATURE_ENABLE_CE_DP_IRQ_AFFINE */
869 
870 #ifdef HIF_DETECTION_LATENCY_ENABLE
871 /*
872  * Bitmask to control enablement of latency detection for the tasklets;
873  * bit X represents the tasklet of WLAN_CE_X.
874  */
875 #ifndef DETECTION_LATENCY_TASKLET_MASK
876 #define DETECTION_LATENCY_TASKLET_MASK (BIT(2) | BIT(7))
877 #endif
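
/*
 * Example: with the default mask (BIT(2) | BIT(7)) only the WLAN_CE_2
 * and WLAN_CE_7 tasklets are monitored; all other CE tasklets are
 * skipped by the latency checks below.
 */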
878 
879 static inline int
880 __hif_tasklet_latency(struct hif_softc *scn, bool from_timer, int idx)
881 {
882 	qdf_time_t sched_time =
883 		scn->latency_detect.tasklet_info[idx].sched_time;
884 	qdf_time_t exec_time =
885 		scn->latency_detect.tasklet_info[idx].exec_time;
886 	qdf_time_t curr_time = qdf_system_ticks();
887 	uint32_t threshold = scn->latency_detect.threshold;
888 	qdf_time_t expect_exec_time =
889 		sched_time + qdf_system_msecs_to_ticks(threshold);
890 
891 	/* Two kinds of check here.
892 	 * from_timer==true:  check whether the tasklet has stalled
893 	 * from_timer==false: check whether the tasklet executed late
894 	 */
895 	if (from_timer ?
896 	    (qdf_system_time_after(sched_time, exec_time) &&
897 	     qdf_system_time_after(curr_time, expect_exec_time)) :
898 	    qdf_system_time_after(exec_time, expect_exec_time)) {
899 		hif_err("tasklet[%d] latency detected: from_timer %d, curr_time %lu, sched_time %lu, exec_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
900 			idx, from_timer, curr_time, sched_time,
901 			exec_time, threshold,
902 			scn->latency_detect.timeout,
903 			qdf_get_cpu(), (void *)_RET_IP_);
904 		qdf_trigger_self_recovery(NULL,
905 					  QDF_TASKLET_CREDIT_LATENCY_DETECT);
906 		return -ETIMEDOUT;
907 	}
908 
909 	return 0;
910 }
911 
912 /**
913  * hif_tasklet_latency_detect_enabled() - check whether latency detect
914  * is enabled for the tasklet which is specified by idx
915  * @scn: HIF opaque context
916  * @idx: CE id
917  *
918  * Return: true if latency detect is enabled for the specified tasklet,
919  * false otherwise.
920  */
921 static inline bool
922 hif_tasklet_latency_detect_enabled(struct hif_softc *scn, int idx)
923 {
924 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
925 		return false;
926 
927 	if (!scn->latency_detect.enable_detection)
928 		return false;
929 
930 	if (idx < 0 || idx >= HIF_TASKLET_IN_MONITOR ||
931 	    !qdf_test_bit(idx, scn->latency_detect.tasklet_bmap))
932 		return false;
933 
934 	return true;
935 }
936 
937 void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
938 {
939 	if (!hif_tasklet_latency_detect_enabled(scn, idx))
940 		return;
941 
942 	/*
943 	 * hif_set_enable_detection(true) might come between
944 	 * hif_tasklet_latency_record_sched() and
945 	 * hif_tasklet_latency_record_exec() during wlan startup; sched_time
946 	 * is then 0 while exec_time is not, which hits the timeout case in
947 	 * __hif_tasklet_latency().
948 	 * To avoid this, skip exec_time recording if sched_time has not
949 	 * been recorded.
950 	 */
951 	if (!scn->latency_detect.tasklet_info[idx].sched_time)
952 		return;
953 
954 	scn->latency_detect.tasklet_info[idx].exec_time = qdf_system_ticks();
955 	__hif_tasklet_latency(scn, false, idx);
956 }
957 
958 void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
959 {
960 	if (!hif_tasklet_latency_detect_enabled(scn, idx))
961 		return;
962 
963 	scn->latency_detect.tasklet_info[idx].sched_cpuid = qdf_get_cpu();
964 	scn->latency_detect.tasklet_info[idx].sched_time = qdf_system_ticks();
965 }
966 
967 static inline void hif_credit_latency(struct hif_softc *scn, bool from_timer)
968 {
969 	qdf_time_t credit_request_time =
970 		scn->latency_detect.credit_request_time;
971 	qdf_time_t credit_report_time = scn->latency_detect.credit_report_time;
972 	qdf_time_t curr_jiffies = qdf_system_ticks();
973 	uint32_t threshold = scn->latency_detect.threshold;
974 	int cpu_id = qdf_get_cpu();
975 
976 	/* Two kinds of check here.
977 	 * from_timer==true:  check whether the credit report has stalled
978 	 * from_timer==false: check whether the credit report came late
979 	 */
980 
981 	if ((from_timer ?
982 	     qdf_system_time_after(credit_request_time, credit_report_time) :
983 	     qdf_system_time_after(credit_report_time, credit_request_time)) &&
984 	    qdf_system_time_after(curr_jiffies,
985 				  credit_request_time +
986 				  qdf_system_msecs_to_ticks(threshold))) {
987 		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
988 			from_timer, curr_jiffies, credit_request_time,
989 			credit_report_time, threshold,
990 			scn->latency_detect.timeout,
991 			cpu_id, (void *)_RET_IP_);
992 		goto latency;
993 	}
994 	return;
995 
996 latency:
997 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
998 }
999 
1000 static inline void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
1001 {
1002 	int i, ret;
1003 
1004 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1005 		if (!qdf_test_bit(i, scn->latency_detect.tasklet_bmap))
1006 			continue;
1007 
1008 		ret = __hif_tasklet_latency(scn, from_timer, i);
1009 		if (ret)
1010 			return;
1011 	}
1012 }
1013 
1014 /**
1015  * hif_check_detection_latency(): check for tasklet/credit latency
1016  * @scn: hif context
1017  * @from_timer: true if called from the timer handler
1018  * @bitmap_type: bitmap indicating whether to check tasklet latency,
1019  *  credit latency, or both
1020  *
1021  * Return: none
1022  */
1023 void hif_check_detection_latency(struct hif_softc *scn,
1024 				 bool from_timer,
1025 				 uint32_t bitmap_type)
1026 {
1027 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1028 		return;
1029 
1030 	if (!scn->latency_detect.enable_detection)
1031 		return;
1032 
1033 	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
1034 		hif_tasklet_latency(scn, from_timer);
1035 
1036 	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
1037 		hif_credit_latency(scn, from_timer);
1038 }
1039 
1040 static void hif_latency_detect_timeout_handler(void *arg)
1041 {
1042 	struct hif_softc *scn = (struct hif_softc *)arg;
1043 	int next_cpu, i;
1044 	qdf_cpu_mask cpu_mask = {0};
1045 	struct hif_latency_detect *detect = &scn->latency_detect;
1046 
1047 	hif_check_detection_latency(scn, true,
1048 				    BIT(HIF_DETECT_TASKLET) |
1049 				    BIT(HIF_DETECT_CREDIT));
1050 
1051 	/* The timer needs to start on a different cpu so that it can
1052 	 * detect a tasklet schedule stall. There is still a chance that,
1053 	 * after the timer has been started, the irq/tasklet happens on
1054 	 * the same cpu; the tasklet will then execute before the softirq
1055 	 * timer and, if that tasklet stalls, this timer can't detect it.
1056 	 * We can accept this as a limitation: if the tasklet stalls,
1057 	 * some other place will detect it anyway, just a little
1058 	 * later.
1059 	 */
1060 	qdf_cpumask_copy(&cpu_mask, (const qdf_cpu_mask *)cpu_active_mask);
1061 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1062 		if (!qdf_test_bit(i, detect->tasklet_bmap))
1063 			continue;
1064 
1065 		qdf_cpumask_clear_cpu(detect->tasklet_info[i].sched_cpuid,
1066 				      &cpu_mask);
1067 	}
1068 
1069 	next_cpu = cpumask_first(&cpu_mask);
1070 	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
1071 		hif_debug("start timer on local cpu");
1072 		/* no available cpu was found, start on the local cpu */
1073 		qdf_timer_mod(&detect->timer, detect->timeout);
1074 	} else {
1075 		qdf_timer_start_on(&detect->timer, detect->timeout, next_cpu);
1076 	}
1077 }
1078 
1079 static void hif_latency_detect_timer_init(struct hif_softc *scn)
1080 {
1081 	scn->latency_detect.timeout =
1082 		DETECTION_TIMER_TIMEOUT;
1083 	scn->latency_detect.threshold =
1084 		DETECTION_LATENCY_THRESHOLD;
1085 
1086 	hif_info("timer timeout %u, latency threshold %u",
1087 		 scn->latency_detect.timeout,
1088 		 scn->latency_detect.threshold);
1089 
1090 	scn->latency_detect.is_timer_started = false;
1091 
1092 	qdf_timer_init(NULL,
1093 		       &scn->latency_detect.timer,
1094 		       &hif_latency_detect_timeout_handler,
1095 		       scn,
1096 		       QDF_TIMER_TYPE_SW_SPIN);
1097 }
1098 
1099 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
1100 {
1101 	hif_info("deinit timer");
1102 	qdf_timer_free(&scn->latency_detect.timer);
1103 }
1104 
1105 static void hif_latency_detect_init(struct hif_softc *scn)
1106 {
1107 	uint32_t tasklet_mask;
1108 	int i;
1109 
1110 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1111 		return;
1112 
1113 	tasklet_mask = DETECTION_LATENCY_TASKLET_MASK;
1114 	hif_info("tasklet mask is 0x%x", tasklet_mask);
1115 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1116 		if (BIT(i) & tasklet_mask)
1117 			qdf_set_bit(i, scn->latency_detect.tasklet_bmap);
1118 	}
1119 
1120 	hif_latency_detect_timer_init(scn);
1121 }
1122 
1123 static void hif_latency_detect_deinit(struct hif_softc *scn)
1124 {
1125 	int i;
1126 
1127 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1128 		return;
1129 
1130 	hif_latency_detect_timer_deinit(scn);
1131 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++)
1132 		qdf_clear_bit(i, scn->latency_detect.tasklet_bmap);
1133 }
1134 
1135 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
1136 {
1137 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1138 
1139 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1140 		return;
1141 
1142 	hif_debug_rl("start timer");
1143 	if (scn->latency_detect.is_timer_started) {
1144 		hif_info("timer has been started");
1145 		return;
1146 	}
1147 
1148 	qdf_timer_start(&scn->latency_detect.timer,
1149 			scn->latency_detect.timeout);
1150 	scn->latency_detect.is_timer_started = true;
1151 }
1152 
1153 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
1154 {
1155 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1156 
1157 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1158 		return;
1159 
1160 	hif_debug_rl("stop timer");
1161 
1162 	qdf_timer_sync_cancel(&scn->latency_detect.timer);
1163 	scn->latency_detect.is_timer_started = false;
1164 }
1165 
1166 void hif_latency_detect_credit_record_time(
1167 	enum hif_credit_exchange_type type,
1168 	struct hif_opaque_softc *hif_ctx)
1169 {
1170 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1171 
1172 	if (!scn) {
1173 		hif_err("scn is null");
1174 		return;
1175 	}
1176 
1177 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1178 		return;
1179 
1180 	if (HIF_REQUEST_CREDIT == type)
1181 		scn->latency_detect.credit_request_time = qdf_system_ticks();
1182 	else if (HIF_PROCESS_CREDIT_REPORT == type)
1183 		scn->latency_detect.credit_report_time = qdf_system_ticks();
1184 
1185 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
1186 }
1187 
1188 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
1189 {
1190 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1191 
1192 	if (!scn) {
1193 		hif_err("scn is null");
1194 		return;
1195 	}
1196 
1197 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1198 		return;
1199 
1200 	scn->latency_detect.enable_detection = value;
1201 }
1202 #else
1203 static inline void hif_latency_detect_init(struct hif_softc *scn)
1204 {}
1205 
1206 static inline void hif_latency_detect_deinit(struct hif_softc *scn)
1207 {}
1208 #endif
1209 
1210 #ifdef WLAN_FEATURE_AFFINITY_MGR
1211 #define AFFINITY_THRESHOLD 5000000
1212 static inline void
1213 hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
1214 {
1215 	unsigned int cpus;
1216 	qdf_cpu_mask allowed_mask = {0};
1217 
1218 	scn->affinity_mgr_supported =
1219 		(cfg_get(psoc, CFG_IRQ_AFFINE_AUDIO_USE_CASE) &&
1220 		qdf_walt_get_cpus_taken_supported());
1221 
1222 	hif_info("Affinity Manager supported: %d", scn->affinity_mgr_supported);
1223 
1224 	if (!scn->affinity_mgr_supported)
1225 		return;
1226 
1227 	scn->time_threshold = AFFINITY_THRESHOLD;
1228 	qdf_for_each_possible_cpu(cpus)
1229 		if (qdf_topology_physical_package_id(cpus) ==
1230 			CPU_CLUSTER_TYPE_LITTLE)
1231 			qdf_cpumask_set_cpu(cpus, &allowed_mask);
1232 	qdf_cpumask_copy(&scn->allowed_mask, &allowed_mask);
1233 }
1234 #else
1235 static inline void
1236 hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
1237 {
1238 }
1239 #endif
1240 
1241 #ifdef FEATURE_DIRECT_LINK
1242 /**
1243  * hif_init_direct_link_rcv_pipe_num(): Initialize the direct link receive
1244  *  pipe number
1245  * @scn: hif context
1246  *
1247  * Return: None
1248  */
1249 static inline
1250 void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
1251 {
1252 	scn->dl_recv_pipe_num = INVALID_PIPE_NO;
1253 }
1254 #else
1255 static inline
1256 void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
1257 {
1258 }
1259 #endif
1260 
1261 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1262 				  uint32_t mode,
1263 				  enum qdf_bus_type bus_type,
1264 				  struct hif_driver_state_callbacks *cbk,
1265 				  struct wlan_objmgr_psoc *psoc)
1266 {
1267 	struct hif_softc *scn;
1268 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1269 	int bus_context_size = hif_bus_get_context_size(bus_type);
1270 
1271 	if (bus_context_size == 0) {
1272 		hif_err("context size 0 not allowed");
1273 		return NULL;
1274 	}
1275 
1276 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
1277 	if (!scn)
1278 		return GET_HIF_OPAQUE_HDL(scn);
1279 
1280 	scn->qdf_dev = qdf_ctx;
1281 	scn->hif_con_param = mode;
1282 	qdf_atomic_init(&scn->active_tasklet_cnt);
1283 
1284 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
1285 	qdf_atomic_init(&scn->link_suspended);
1286 	qdf_atomic_init(&scn->tasklet_from_intr);
1287 	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
1288 	qdf_mem_copy(&scn->callbacks, cbk,
1289 		     sizeof(struct hif_driver_state_callbacks));
1290 	scn->bus_type  = bus_type;
1291 
1292 	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
1293 	hif_get_cfg_from_psoc(scn, psoc);
1294 
1295 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
1296 	status = hif_bus_open(scn, bus_type);
1297 	if (status != QDF_STATUS_SUCCESS) {
1298 		hif_err("hif_bus_open error = %d, bus_type = %d",
1299 			status, bus_type);
1300 		qdf_mem_free(scn);
1301 		scn = NULL;
1302 		goto out;
1303 	}
1304 
1305 	hif_rtpm_lock_init(scn);
1306 
1307 	hif_cpuhp_register(scn);
1308 	hif_latency_detect_init(scn);
1309 	hif_affinity_mgr_init(scn, psoc);
1310 	hif_init_direct_link_rcv_pipe_num(scn);
1311 	hif_ce_desc_history_log_register(scn);
1312 	hif_desc_history_log_register();
1313 
1314 out:
1315 	return GET_HIF_OPAQUE_HDL(scn);
1316 }
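
/*
 * Illustrative bring-up/teardown sequence for the HIF layer (caller-side
 * sketch only; the enable/disable type values shown are assumptions and
 * error handling is elided):
 *
 *	hif_hdl = hif_open(qdf_dev, mode, QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (!hif_hdl)
 *		return QDF_STATUS_E_NOMEM;
 *	status = hif_enable(hif_hdl, dev, bdev, bid, QDF_BUS_TYPE_PCI,
 *			    HIF_ENABLE_TYPE_PROBE);
 *	...
 *	hif_disable(hif_hdl, HIF_DISABLE_TYPE_REMOVE);
 *	hif_close(hif_hdl);
 */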
1317 
1318 #ifdef ADRASTEA_RRI_ON_DDR
1319 /**
1320  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
1321  * @scn: hif context
1322  *
1323  * Return: none
1324  */
1325 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
1326 {
1327 	if (scn->vaddr_rri_on_ddr)
1328 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1329 					RRI_ON_DDR_MEM_SIZE,
1330 					scn->vaddr_rri_on_ddr,
1331 					scn->paddr_rri_on_ddr, 0);
1332 	scn->vaddr_rri_on_ddr = NULL;
1333 }
1334 #endif
1335 
1336 /**
1337  * hif_close(): hif_close
1338  * @hif_ctx: hif_ctx
1339  *
1340  * Return: n/a
1341  */
1342 void hif_close(struct hif_opaque_softc *hif_ctx)
1343 {
1344 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1345 
1346 	if (!scn) {
1347 		hif_err("hif_opaque_softc is NULL");
1348 		return;
1349 	}
1350 
1351 	hif_desc_history_log_unregister();
1352 	hif_ce_desc_history_log_unregister();
1353 	hif_latency_detect_deinit(scn);
1354 
1355 	if (scn->athdiag_procfs_inited) {
1356 		athdiag_procfs_remove();
1357 		scn->athdiag_procfs_inited = false;
1358 	}
1359 
1360 	if (scn->target_info.hw_name) {
1361 		char *hw_name = scn->target_info.hw_name;
1362 
1363 		scn->target_info.hw_name = "ErrUnloading";
1364 		qdf_mem_free(hw_name);
1365 	}
1366 
1367 	hif_uninit_rri_on_ddr(scn);
1368 	hif_cleanup_static_buf_to_target(scn);
1369 	hif_cpuhp_unregister(scn);
1370 	hif_rtpm_lock_deinit(scn);
1371 
1372 	hif_bus_close(scn);
1373 
1374 	qdf_mem_free(scn);
1375 }
1376 
1377 /**
1378  * hif_get_num_active_grp_tasklets() - get the number of active
1379  *		datapath group tasklets pending to be completed.
1380  * @scn: HIF context
1381  *
1382  * Returns: the number of datapath group tasklets which are active
1383  */
1384 static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
1385 {
1386 	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
1387 }
1388 
1389 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1390 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1391 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1392 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1393 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1394 	defined(QCA_WIFI_QCN6432) || \
1395 	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
1396 /**
1397  * hif_get_num_pending_work() - get the number of entries in
1398  *		the workqueue pending to be completed.
1399  * @scn: HIF context
1400  *
1401  * Returns: the number of tasklets which are active
1402  */
1403 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1404 {
1405 	return hal_get_reg_write_pending_work(scn->hal_soc);
1406 }
1407 #elif defined(FEATURE_HIF_DELAYED_REG_WRITE)
1408 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1409 {
1410 	return qdf_atomic_read(&scn->active_work_cnt);
1411 }
1412 #else
1413 
1414 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1415 {
1416 	return 0;
1417 }
1418 #endif
1419 
1420 QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
1421 {
1422 	uint32_t task_drain_wait_cnt = 0;
1423 	int tasklet = 0, grp_tasklet = 0, work = 0;
1424 
1425 	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
1426 	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1427 	       (work = hif_get_num_pending_work(scn))) {
1428 		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1429 			hif_err("pending tasklets %d grp tasklets %d work %d",
1430 				tasklet, grp_tasklet, work);
1431 			QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: tasklets %d grp tasklets %d work %d",
1432 					HIF_TASK_DRAIN_WAIT_CNT * 10,
1433 					tasklet, grp_tasklet, work);
1434 			return QDF_STATUS_E_FAULT;
1435 		}
1436 		hif_info("waiting for tasklets %d grp tasklets %d work %d",
1437 			 tasklet, grp_tasklet, work);
1438 		msleep(10);
1439 	}
1440 
1441 	return QDF_STATUS_SUCCESS;
1442 }
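
/*
 * The drain loop above sleeps 10 ms per iteration, so the total wait
 * budget before the panic is HIF_TASK_DRAIN_WAIT_CNT * 10 ms, as
 * reported in the panic message.
 */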
1443 
1444 #ifdef HIF_HAL_REG_ACCESS_SUPPORT
1445 void hif_reg_window_write(struct hif_softc *scn, uint32_t offset,
1446 			  uint32_t value)
1447 {
1448 	hal_write32_mb(scn->hal_soc, offset, value);
1449 }
1450 
1451 uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset)
1452 {
1453 	return hal_read32_mb(scn->hal_soc, offset);
1454 }
1455 #endif
1456 
1457 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1458 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1459 {
1460 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1461 	uint32_t work_drain_wait_cnt = 0;
1462 	uint32_t wait_cnt = 0;
1463 	int work = 0;
1464 
1465 	qdf_atomic_set(&scn->dp_ep_vote_access,
1466 		       HIF_EP_VOTE_ACCESS_DISABLE);
1467 	qdf_atomic_set(&scn->ep_vote_access,
1468 		       HIF_EP_VOTE_ACCESS_DISABLE);
1469 
1470 	while ((work = hif_get_num_pending_work(scn))) {
1471 		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1472 			qdf_atomic_set(&scn->dp_ep_vote_access,
1473 				       HIF_EP_VOTE_ACCESS_ENABLE);
1474 			qdf_atomic_set(&scn->ep_vote_access,
1475 				       HIF_EP_VOTE_ACCESS_ENABLE);
1476 			hif_err("timeout waiting for pending work %d", work);
1477 			return QDF_STATUS_E_FAULT;
1478 		}
1479 		qdf_sleep(10);
1480 	}
1481 
1482 	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
1483 		return QDF_STATUS_SUCCESS;
1484 
1485 	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1486 		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
1487 			hif_err("Release EP vote was not processed by FW");
1488 			return QDF_STATUS_E_FAULT;
1489 		}
1490 		qdf_sleep(5);
1491 	}
1492 
1493 	return QDF_STATUS_SUCCESS;
1494 }
1495 
1496 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1497 {
1498 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1499 	uint8_t vote_access;
1500 
1501 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1502 
1503 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1504 		hif_info("EP vote changed from:%u to intermediate state",
1505 			 vote_access);
1506 
1507 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1508 		QDF_BUG(0);
1509 
1510 	qdf_atomic_set(&scn->ep_vote_access,
1511 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1512 }
1513 
1514 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1515 {
1516 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1517 
1518 	qdf_atomic_set(&scn->dp_ep_vote_access,
1519 		       HIF_EP_VOTE_ACCESS_ENABLE);
1520 	qdf_atomic_set(&scn->ep_vote_access,
1521 		       HIF_EP_VOTE_ACCESS_ENABLE);
1522 }
1523 
1524 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1525 			    uint8_t type, uint8_t access)
1526 {
1527 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1528 
1529 	if (type == HIF_EP_VOTE_DP_ACCESS)
1530 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1531 	else
1532 		qdf_atomic_set(&scn->ep_vote_access, access);
1533 }
1534 
1535 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1536 			       uint8_t type)
1537 {
1538 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1539 
1540 	if (type == HIF_EP_VOTE_DP_ACCESS)
1541 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1542 	else
1543 		return qdf_atomic_read(&scn->ep_vote_access);
1544 }
1545 #endif
1546 
1547 #ifdef FEATURE_HIF_DELAYED_REG_WRITE
1548 #ifdef MEMORY_DEBUG
1549 #define HIF_REG_WRITE_QUEUE_LEN 128
1550 #else
1551 #define HIF_REG_WRITE_QUEUE_LEN 32
1552 #endif
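
/*
 * Note: HIF_REG_WRITE_QUEUE_LEN must remain a power of two; both
 * read_idx and write_idx are wrapped with
 * "& (HIF_REG_WRITE_QUEUE_LEN - 1)" below, e.g. with a length of 32 an
 * index of 33 maps back to slot 1.
 */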
1553 
1554 /**
1555  * hif_print_reg_write_stats() - Print hif delayed reg write stats
1556  * @hif_ctx: hif opaque handle
1557  *
1558  * Return: None
1559  */
1560 void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
1561 {
1562 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1563 	struct CE_state *ce_state;
1564 	uint32_t *hist;
1565 	int i;
1566 
1567 	hist = scn->wstats.sched_delay;
1568 	hif_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
1569 		  qdf_atomic_read(&scn->wstats.enqueues),
1570 		  scn->wstats.dequeues,
1571 		  qdf_atomic_read(&scn->wstats.coalesces),
1572 		  qdf_atomic_read(&scn->wstats.direct),
1573 		  qdf_atomic_read(&scn->wstats.q_depth),
1574 		  scn->wstats.max_q_depth,
1575 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us],
1576 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us],
1577 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us],
1578 		  hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]);
1579 
1580 	for (i = 0; i < scn->ce_count; i++) {
1581 		ce_state = scn->ce_id_to_state[i];
1582 		if (!ce_state)
1583 			continue;
1584 
1585 		hif_debug("ce%d: enq %u deq %u coal %u direct %u",
1586 			  i, ce_state->wstats.enqueues,
1587 			  ce_state->wstats.dequeues,
1588 			  ce_state->wstats.coalesces,
1589 			  ce_state->wstats.direct);
1590 	}
1591 }
1592 
1593 /**
1594  * hif_is_reg_write_tput_level_high() - throughput level for delayed reg writes
1595  * @scn: hif_softc pointer
1596  *
1597  * Return: true if throughput is high, else false.
1598  */
1599 static inline bool hif_is_reg_write_tput_level_high(struct hif_softc *scn)
1600 {
1601 	int bw_level = hif_get_bandwidth_level(GET_HIF_OPAQUE_HDL(scn));
1602 
1603 	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
1604 }
1605 
1606 /**
1607  * hif_reg_write_fill_sched_delay_hist() - fill reg write delay histogram
1608  * @scn: hif_softc pointer
1609  * @delay_us: delay in us
1610  *
1611  * Return: None
1612  */
1613 static inline void hif_reg_write_fill_sched_delay_hist(struct hif_softc *scn,
1614 						       uint64_t delay_us)
1615 {
1616 	uint32_t *hist;
1617 
1618 	hist = scn->wstats.sched_delay;
1619 
1620 	if (delay_us < 100)
1621 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us]++;
1622 	else if (delay_us < 1000)
1623 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us]++;
1624 	else if (delay_us < 5000)
1625 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us]++;
1626 	else
1627 		hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]++;
1628 }
1629 
1630 /**
1631  * hif_process_reg_write_q_elem() - process a register write queue element
1632  * @scn: hif_softc pointer
1633  * @q_elem: pointer to hal register write queue element
1634  *
1635  * Return: The value which was written to the address
1636  */
1637 static int32_t
1638 hif_process_reg_write_q_elem(struct hif_softc *scn,
1639 			     struct hif_reg_write_q_elem *q_elem)
1640 {
1641 	struct CE_state *ce_state = q_elem->ce_state;
1642 	uint32_t write_val = -1;
1643 
1644 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
1645 
1646 	ce_state->reg_write_in_progress = false;
1647 	ce_state->wstats.dequeues++;
1648 
1649 	if (ce_state->src_ring) {
1650 		q_elem->dequeue_val = ce_state->src_ring->write_index;
1651 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
1652 			       ce_state->src_ring->write_index);
1653 		write_val = ce_state->src_ring->write_index;
1654 	} else if (ce_state->dest_ring) {
1655 		q_elem->dequeue_val = ce_state->dest_ring->write_index;
1656 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
1657 			       ce_state->dest_ring->write_index);
1658 		write_val = ce_state->dest_ring->write_index;
1659 	} else {
1660 		hif_debug("invalid reg write received");
1661 		qdf_assert(0);
1662 	}
1663 
1664 	q_elem->valid = 0;
1665 	ce_state->last_dequeue_time = q_elem->dequeue_time;
1666 
1667 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1668 
1669 	return write_val;
1670 }
1671 
1672 /**
1673  * hif_reg_write_work() - Worker to process delayed writes
1674  * @arg: hif_softc pointer
1675  *
1676  * Return: None
1677  */
1678 static void hif_reg_write_work(void *arg)
1679 {
1680 	struct hif_softc *scn = arg;
1681 	struct hif_reg_write_q_elem *q_elem;
1682 	uint32_t offset;
1683 	uint64_t delta_us;
1684 	int32_t q_depth, write_val;
1685 	uint32_t num_processed = 0;
1686 	int32_t ring_id;
1687 
1688 	q_elem = &scn->reg_write_queue[scn->read_idx];
1689 	q_elem->work_scheduled_time = qdf_get_log_timestamp();
1690 	q_elem->cpu_id = qdf_get_cpu();
1691 
1692 	/* Make sure q_elem is consistent in memory across cores */
1693 	qdf_rmb();
1694 	if (!q_elem->valid)
1695 		return;
1696 
1697 	q_depth = qdf_atomic_read(&scn->wstats.q_depth);
1698 	if (q_depth > scn->wstats.max_q_depth)
1699 		scn->wstats.max_q_depth =  q_depth;
1700 
1701 	if (hif_prevent_link_low_power_states(GET_HIF_OPAQUE_HDL(scn))) {
1702 		scn->wstats.prevent_l1_fails++;
1703 		return;
1704 	}
1705 
1706 	while (true) {
1707 		qdf_rmb();
1708 		if (!q_elem->valid)
1709 			break;
1710 
1711 		qdf_rmb();
1712 		q_elem->dequeue_time = qdf_get_log_timestamp();
1713 		ring_id = q_elem->ce_state->id;
1714 		offset = q_elem->offset;
1715 		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
1716 						      q_elem->enqueue_time);
1717 		hif_reg_write_fill_sched_delay_hist(scn, delta_us);
1718 
1719 		scn->wstats.dequeues++;
1720 		qdf_atomic_dec(&scn->wstats.q_depth);
1721 
1722 		write_val = hif_process_reg_write_q_elem(scn, q_elem);
1723 		hif_debug("read_idx %u ce_id %d offset 0x%x dequeue_val %d",
1724 			  scn->read_idx, ring_id, offset, write_val);
1725 
1726 		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
1727 					   q_elem->dequeue_val,
1728 					   q_elem->enqueue_time,
1729 					   q_elem->dequeue_time);
1730 		num_processed++;
1731 		scn->read_idx = (scn->read_idx + 1) &
1732 					(HIF_REG_WRITE_QUEUE_LEN - 1);
1733 		q_elem = &scn->reg_write_queue[scn->read_idx];
1734 	}
1735 
1736 	hif_allow_link_low_power_states(GET_HIF_OPAQUE_HDL(scn));
1737 
1738 	/*
1739 	 * Decrement active_work_cnt by the number of elements dequeued after
1740 	 * hif_allow_link_low_power_states.
1741 	 * This makes sure that hif_try_complete_tasks will wait till we make
1742 	 * the bus access in hif_allow_link_low_power_states. This will avoid
1743 	 * race condition between delayed register worker and bus suspend
1744 	 * (system suspend or runtime suspend).
1745 	 *
1746 	 * The following decrement should be done at the end!
1747 	 */
1748 	qdf_atomic_sub(num_processed, &scn->active_work_cnt);
1749 }
1750 
1751 /**
1752  * hif_delayed_reg_write_deinit() - De-Initialize delayed reg write processing
1753  * @scn: hif_softc pointer
1754  *
1755  * De-initialize main data structures to process register writes in a delayed
1756  * workqueue.
1757  *
1758  * Return: None
1759  */
1760 static void hif_delayed_reg_write_deinit(struct hif_softc *scn)
1761 {
1762 	qdf_flush_work(&scn->reg_write_work);
1763 	qdf_disable_work(&scn->reg_write_work);
1764 	qdf_flush_workqueue(0, scn->reg_write_wq);
1765 	qdf_destroy_workqueue(0, scn->reg_write_wq);
1766 	qdf_mem_free(scn->reg_write_queue);
1767 }
1768 
1769 /**
1770  * hif_delayed_reg_write_init() - Initialization function for delayed reg writes
1771  * @scn: hif_softc pointer
1772  *
1773  * Initialize main data structures to process register writes in a delayed
1774  * workqueue.
1775  *
1776  * Return: QDF_STATUS
 */

1777 static QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
1778 {
1779 	qdf_atomic_init(&scn->active_work_cnt);
1780 	scn->reg_write_wq =
1781 		qdf_alloc_high_prior_ordered_workqueue("hif_register_write_wq");
1782 	qdf_create_work(0, &scn->reg_write_work, hif_reg_write_work, scn);
1783 	scn->reg_write_queue = qdf_mem_malloc(HIF_REG_WRITE_QUEUE_LEN *
1784 					      sizeof(*scn->reg_write_queue));
1785 	if (!scn->reg_write_queue) {
1786 		hif_err("unable to allocate memory for delayed reg write");
1787 		QDF_BUG(0);
1788 		return QDF_STATUS_E_NOMEM;
1789 	}
1790 
1791 	/* Initial value of indices */
1792 	/* Initial indices; write_idx of -1 makes the first enqueue use slot 0 */
1793 	qdf_atomic_set(&scn->write_idx, -1);
1794 
1795 	return QDF_STATUS_SUCCESS;
1796 }
1797 
1798 static void hif_reg_write_enqueue(struct hif_softc *scn,
1799 				  struct CE_state *ce_state,
1800 				  uint32_t value)
1801 {
1802 	struct hif_reg_write_q_elem *q_elem;
1803 	uint32_t write_idx;
1804 
1805 	if (ce_state->reg_write_in_progress) {
1806 		hif_debug("Already in progress ce_id %d offset 0x%x value %u",
1807 			  ce_state->id, ce_state->ce_wrt_idx_offset, value);
1808 		qdf_atomic_inc(&scn->wstats.coalesces);
1809 		ce_state->wstats.coalesces++;
1810 		return;
1811 	}
1812 
1813 	write_idx = qdf_atomic_inc_return(&scn->write_idx);
1814 	write_idx = write_idx & (HIF_REG_WRITE_QUEUE_LEN - 1);
1815 
1816 	q_elem = &scn->reg_write_queue[write_idx];
1817 	if (q_elem->valid) {
1818 		hif_err("queue full");
1819 		QDF_BUG(0);
1820 		return;
1821 	}
1822 
1823 	qdf_atomic_inc(&scn->wstats.enqueues);
1824 	ce_state->wstats.enqueues++;
1825 
1826 	qdf_atomic_inc(&scn->wstats.q_depth);
1827 
1828 	q_elem->ce_state = ce_state;
1829 	q_elem->offset = ce_state->ce_wrt_idx_offset;
1830 	q_elem->enqueue_val = value;
1831 	q_elem->enqueue_time = qdf_get_log_timestamp();
1832 
1833 	/*
1834 	 * Before the valid flag is set to true, all the other
1835 	 * fields in the q_elem need to be updated in memory.
1836 	 * Else there is a chance that the dequeuing worker thread
1837 	 * might read stale entries and process an incorrect srng.
1838 	 */
1839 	qdf_wmb();
1840 	q_elem->valid = true;
1841 
1842 	/*
1843 	 * After all other fields in the q_elem have been updated
1844 	 * in memory successfully, the valid flag needs to be made
1845 	 * visible in memory in time as well.
1846 	 * Else there is a chance that the dequeuing worker thread
1847 	 * might read a stale valid flag and the work will be bypassed
1848 	 * for this round. If no other work is scheduled later, this
1849 	 * hal register write would never be issued.
1850 	 */
1851 	qdf_wmb();
1852 
1853 	ce_state->reg_write_in_progress  = true;
1854 	qdf_atomic_inc(&scn->active_work_cnt);
1855 
1856 	hif_debug("write_idx %u ce_id %d offset 0x%x value %u",
1857 		  write_idx, ce_state->id, ce_state->ce_wrt_idx_offset, value);
1858 
1859 	qdf_queue_work(scn->qdf_dev, scn->reg_write_wq,
1860 		       &scn->reg_write_work);
1861 }
1862 
1863 void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
1864 			   uint32_t val)
1865 {
1866 	struct CE_state *ce_state;
1867 	int ce_id = COPY_ENGINE_ID(ctrl_addr);
1868 
1869 	ce_state = scn->ce_id_to_state[ce_id];
1870 
1871 	if (!ce_state->htt_tx_data && !ce_state->htt_rx_data) {
1872 		hif_reg_write_enqueue(scn, ce_state, val);
1873 		return;
1874 	}
1875 
1876 	if (hif_is_reg_write_tput_level_high(scn) ||
1877 	    (PLD_MHI_STATE_L0 == pld_get_mhi_state(scn->qdf_dev->dev))) {
1878 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset, val);
1879 		qdf_atomic_inc(&scn->wstats.direct);
1880 		ce_state->wstats.direct++;
1881 	} else {
1882 		hif_reg_write_enqueue(scn, ce_state, val);
1883 	}
1884 }
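
/*
 * Summary of the routing above: writes for the HTT data-path copy
 * engines are issued directly when throughput is high or the MHI link
 * is already in L0; all other writes (and data-path writes under low
 * load) are enqueued to the delayed register write worker.
 */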
1885 #else
1886 static inline QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
1887 {
1888 	return QDF_STATUS_SUCCESS;
1889 }
1890 
1891 static inline void  hif_delayed_reg_write_deinit(struct hif_softc *scn)
1892 {
1893 }
1894 #endif
1895 
1896 #if defined(QCA_WIFI_WCN6450)
1897 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1898 {
1899 	scn->hal_soc = hal_attach(hif_softc_to_hif_opaque_softc(scn),
1900 				  scn->qdf_dev);
1901 	if (!scn->hal_soc)
1902 		return QDF_STATUS_E_FAILURE;
1903 
1904 	return QDF_STATUS_SUCCESS;
1905 }
1906 
1907 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1908 {
1909 	hal_detach(scn->hal_soc);
1910 	scn->hal_soc = NULL;
1911 
1912 	return QDF_STATUS_SUCCESS;
1913 }
1914 #elif (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1915 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1916 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1917 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1918 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1919 	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
1920 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1921 {
1922 	if (ce_srng_based(scn)) {
1923 		scn->hal_soc = hal_attach(
1924 					hif_softc_to_hif_opaque_softc(scn),
1925 					scn->qdf_dev);
1926 		if (!scn->hal_soc)
1927 			return QDF_STATUS_E_FAILURE;
1928 	}
1929 
1930 	return QDF_STATUS_SUCCESS;
1931 }
1932 
1933 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1934 {
1935 	if (ce_srng_based(scn)) {
1936 		hal_detach(scn->hal_soc);
1937 		scn->hal_soc = NULL;
1938 	}
1939 
1940 	return QDF_STATUS_SUCCESS;
1941 }
1942 #else
1943 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1944 {
1945 	return QDF_STATUS_SUCCESS;
1946 }
1947 
1948 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1949 {
1950 	return QDF_STATUS_SUCCESS;
1951 }
1952 #endif
1953 
1954 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1955 {
1956 	int ret;
1957 
1958 	switch (bus_type) {
1959 	case QDF_BUS_TYPE_IPCI:
1960 		ret = qdf_set_dma_coherent_mask(dev,
1961 						DMA_COHERENT_MASK_DEFAULT);
1962 		if (ret) {
1963 			hif_err("Failed to set dma mask error = %d", ret);
1964 			return ret;
1965 		}
1966 
1967 		break;
1968 	default:
1969 		/* Follow the existing sequence for other targets */
1970 		break;
1971 	}
1972 
1973 	return 0;
1974 }
1975 
1976 /**
1977  * hif_enable(): hif_enable
1978  * @hif_ctx: hif_ctx
1979  * @dev: dev
1980  * @bdev: bus dev
1981  * @bid: bus ID
1982  * @bus_type: bus type
1983  * @type: enable type
1984  *
1985  * Return: QDF_STATUS
1986  */
1987 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1988 					  void *bdev,
1989 					  const struct hif_bus_id *bid,
1990 					  enum qdf_bus_type bus_type,
1991 					  enum hif_enable_type type)
1992 {
1993 	QDF_STATUS status;
1994 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1995 
1996 	if (!scn) {
1997 		hif_err("hif_ctx = NULL");
1998 		return QDF_STATUS_E_NULL_VALUE;
1999 	}
2000 
2001 	status = hif_enable_bus(scn, dev, bdev, bid, type);
2002 	if (status != QDF_STATUS_SUCCESS) {
2003 		hif_err("hif_enable_bus error = %d", status);
2004 		return status;
2005 	}
2006 
2007 	status = hif_hal_attach(scn);
2008 	if (status != QDF_STATUS_SUCCESS) {
2009 		hif_err("hal attach failed");
2010 		goto disable_bus;
2011 	}
2012 
2013 	status = hif_delayed_reg_write_init(scn);
2014 	if (status != QDF_STATUS_SUCCESS) {
2015 		hif_err("unable to initialize delayed reg write");
2016 		goto hal_detach;
2017 	}
2017 
2018 	if (hif_bus_configure(scn)) {
2019 		hif_err("Target probe failed");
2020 		status = QDF_STATUS_E_FAILURE;
2021 		goto hal_detach;
2022 	}
2023 
2024 	hif_ut_suspend_init(scn);
2025 	hif_register_recovery_notifier(scn);
2026 	hif_latency_detect_timer_start(hif_ctx);
2027 
2028 	/*
2029 	 * Flag to avoid potential unallocated memory access from the MSI
2030 	 * interrupt handler, which could get scheduled as soon as MSI is
2031 	 * enabled. It guards against the race caused by MSI being enabled
2032 	 * before the memory used by the interrupt handlers has been
2033 	 * allocated.
2034 	 */
2035 
2036 	scn->hif_init_done = true;
2037 
2038 	hif_debug("OK");
2039 
2040 	return QDF_STATUS_SUCCESS;
2041 
2042 hal_detach:
2043 	hif_hal_detach(scn);
2044 disable_bus:
2045 	hif_disable_bus(scn);
2046 	return status;
2047 }
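
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a bus
 * probe path would typically propagate the status returned by hif_enable()
 * and invoke hif_disable() on its teardown path. The bus_type and enable
 * type are assumed to be supplied by the platform glue code.
 */
static QDF_STATUS example_hif_bring_up(struct hif_opaque_softc *hif_ctx,
				       struct device *dev, void *bdev,
				       const struct hif_bus_id *bid,
				       enum qdf_bus_type bus_type,
				       enum hif_enable_type type)
{
	QDF_STATUS status = hif_enable(hif_ctx, dev, bdev, bid,
				       bus_type, type);

	if (!QDF_IS_STATUS_SUCCESS(status))
		hif_err("hif_enable failed: %d", status);

	return status;
}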
2048 
2049 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
2050 {
2051 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2052 
2053 	if (!scn)
2054 		return;
2055 
2056 	hif_delayed_reg_write_deinit(scn);
2057 	hif_set_enable_detection(hif_ctx, false);
2058 	hif_latency_detect_timer_stop(hif_ctx);
2059 
2060 	hif_unregister_recovery_notifier(scn);
2061 
2062 	hif_nointrs(scn);
2063 	if (scn->hif_init_done == false)
2064 		hif_shutdown_device(hif_ctx);
2065 	else
2066 		hif_stop(hif_ctx);
2067 
2068 	hif_hal_detach(scn);
2069 
2070 	hif_disable_bus(scn);
2071 
2072 	hif_wlan_disable(scn);
2073 
2074 	scn->notice_send = false;
2075 
2076 	hif_debug("X");
2077 }
2078 
2079 #ifdef CE_TASKLET_DEBUG_ENABLE
2080 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
2081 {
2082 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2083 
2084 	if (!scn)
2085 		return;
2086 
2087 	scn->ce_latency_stats = val;
2088 }
2089 #endif
2090 
2091 void hif_display_stats(struct hif_opaque_softc *hif_ctx)
2092 {
2093 	hif_display_bus_stats(hif_ctx);
2094 }
2095 
2096 qdf_export_symbol(hif_display_stats);
2097 
2098 void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
2099 {
2100 	hif_clear_bus_stats(hif_ctx);
2101 }
2102 
2103 /**
2104  * hif_crash_shutdown_dump_bus_register() - dump bus registers
2105  * @hif_ctx: hif_ctx
2106  *
2107  * Return: n/a
2108  */
2109 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
2110 
2111 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
2112 {
2113 	struct hif_opaque_softc *scn = hif_ctx;
2114 
2115 	if (hif_check_soc_status(scn))
2116 		return;
2117 
2118 	if (hif_dump_registers(scn))
2119 		hif_err("Failed to dump bus registers!");
2120 }
2121 
2122 /**
2123  * hif_crash_shutdown(): hif_crash_shutdown
2124  *
2125  * This function is called by the platform driver to dump CE registers
2126  *
2127  * @hif_ctx: hif_ctx
2128  *
2129  * Return: n/a
2130  */
2131 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
2132 {
2133 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2134 
2135 	if (!hif_ctx)
2136 		return;
2137 
2138 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
2139 		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
2140 		return;
2141 	}
2142 
2143 	if (TARGET_STATUS_RESET == scn->target_status) {
2144 		hif_warn("Target is already asserted, ignore!");
2145 		return;
2146 	}
2147 
2148 	if (hif_is_load_or_unload_in_progress(scn)) {
2149 		hif_err("Load/unload is in progress, ignore!");
2150 		return;
2151 	}
2152 
2153 	hif_crash_shutdown_dump_bus_register(hif_ctx);
2154 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
2155 
2156 	if (ol_copy_ramdump(hif_ctx))
2157 		goto out;
2158 
2159 	hif_info("RAM dump collection completed!");
2160 
2161 out:
2162 	return;
2163 }
2164 #else
2165 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
2166 {
2167 	hif_debug("Collecting target RAM dump disabled");
2168 }
2169 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
2170 
2171 #ifdef QCA_WIFI_3_0
2172 /**
2173  * hif_check_fw_reg(): hif_check_fw_reg
2174  * @scn: scn
2175  *
2176  * Return: int
2177  */
2178 int hif_check_fw_reg(struct hif_opaque_softc *scn)
2179 {
2180 	return 0;
2181 }
2182 #endif
2183 
2184 /**
2185  * hif_read_phy_mem_base(): hif_read_phy_mem_base
2186  * @scn: scn
2187  * @phy_mem_base: physical mem base
2188  *
2189  * Return: n/a
2190  */
2191 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
2192 {
2193 	*phy_mem_base = scn->mem_pa;
2194 }
2195 qdf_export_symbol(hif_read_phy_mem_base);
2196 
2197 /**
2198  * hif_get_device_type(): hif_get_device_type
2199  * @device_id: device_id
2200  * @revision_id: revision_id
2201  * @hif_type: returned hif_type
2202  * @target_type: returned target_type
2203  *
2204  * Return: int
2205  */
2206 int hif_get_device_type(uint32_t device_id,
2207 			uint32_t revision_id,
2208 			uint32_t *hif_type, uint32_t *target_type)
2209 {
2210 	int ret = 0;
2211 
2212 	switch (device_id) {
2213 	case ADRASTEA_DEVICE_ID_P2_E12:
2214 
2215 		*hif_type = HIF_TYPE_ADRASTEA;
2216 		*target_type = TARGET_TYPE_ADRASTEA;
2217 		break;
2218 
2219 	case AR9888_DEVICE_ID:
2220 		*hif_type = HIF_TYPE_AR9888;
2221 		*target_type = TARGET_TYPE_AR9888;
2222 		break;
2223 
2224 	case AR6320_DEVICE_ID:
2225 		switch (revision_id) {
2226 		case AR6320_FW_1_1:
2227 		case AR6320_FW_1_3:
2228 			*hif_type = HIF_TYPE_AR6320;
2229 			*target_type = TARGET_TYPE_AR6320;
2230 			break;
2231 
2232 		case AR6320_FW_2_0:
2233 		case AR6320_FW_3_0:
2234 		case AR6320_FW_3_2:
2235 			*hif_type = HIF_TYPE_AR6320V2;
2236 			*target_type = TARGET_TYPE_AR6320V2;
2237 			break;
2238 
2239 		default:
2240 			hif_err("dev_id = 0x%x, rev_id = 0x%x",
2241 				device_id, revision_id);
2242 			ret = -ENODEV;
2243 			goto end;
2244 		}
2245 		break;
2246 
2247 	case AR9887_DEVICE_ID:
2248 		*hif_type = HIF_TYPE_AR9888;
2249 		*target_type = TARGET_TYPE_AR9888;
2250 		hif_info(" *********** AR9887 **************");
2251 		break;
2252 
2253 	case QCA9984_DEVICE_ID:
2254 		*hif_type = HIF_TYPE_QCA9984;
2255 		*target_type = TARGET_TYPE_QCA9984;
2256 		hif_info(" *********** QCA9984 *************");
2257 		break;
2258 
2259 	case QCA9888_DEVICE_ID:
2260 		*hif_type = HIF_TYPE_QCA9888;
2261 		*target_type = TARGET_TYPE_QCA9888;
2262 		hif_info(" *********** QCA9888 *************");
2263 		break;
2264 
2265 	case AR900B_DEVICE_ID:
2266 		*hif_type = HIF_TYPE_AR900B;
2267 		*target_type = TARGET_TYPE_AR900B;
2268 		hif_info(" *********** AR900B *************");
2269 		break;
2270 
2271 	case QCA8074_DEVICE_ID:
2272 		*hif_type = HIF_TYPE_QCA8074;
2273 		*target_type = TARGET_TYPE_QCA8074;
2274 		hif_info(" *********** QCA8074  *************");
2275 		break;
2276 
2277 	case QCA6290_EMULATION_DEVICE_ID:
2278 	case QCA6290_DEVICE_ID:
2279 		*hif_type = HIF_TYPE_QCA6290;
2280 		*target_type = TARGET_TYPE_QCA6290;
2281 		hif_info(" *********** QCA6290EMU *************");
2282 		break;
2283 
2284 	case QCN9000_DEVICE_ID:
2285 		*hif_type = HIF_TYPE_QCN9000;
2286 		*target_type = TARGET_TYPE_QCN9000;
2287 		hif_info(" *********** QCN9000 *************");
2288 		break;
2289 
2290 	case QCN9224_DEVICE_ID:
2291 		*hif_type = HIF_TYPE_QCN9224;
2292 		*target_type = TARGET_TYPE_QCN9224;
2293 		hif_info(" *********** QCN9224 *************");
2294 		break;
2295 
2296 	case QCN6122_DEVICE_ID:
2297 		*hif_type = HIF_TYPE_QCN6122;
2298 		*target_type = TARGET_TYPE_QCN6122;
2299 		hif_info(" *********** QCN6122 *************");
2300 		break;
2301 
2302 	case QCN9160_DEVICE_ID:
2303 		*hif_type = HIF_TYPE_QCN9160;
2304 		*target_type = TARGET_TYPE_QCN9160;
2305 		hif_info(" *********** QCN9160 *************");
2306 		break;
2307 
2308 	case QCN6432_DEVICE_ID:
2309 		*hif_type = HIF_TYPE_QCN6432;
2310 		*target_type = TARGET_TYPE_QCN6432;
2311 		hif_info(" *********** QCN6432 *************");
2312 		break;
2313 
2314 	case QCN7605_DEVICE_ID:
2315 	case QCN7605_COMPOSITE:
2316 	case QCN7605_STANDALONE:
2317 	case QCN7605_STANDALONE_V2:
2318 	case QCN7605_COMPOSITE_V2:
2319 		*hif_type = HIF_TYPE_QCN7605;
2320 		*target_type = TARGET_TYPE_QCN7605;
2321 		hif_info(" *********** QCN7605 *************");
2322 		break;
2323 
2324 	case QCA6390_DEVICE_ID:
2325 	case QCA6390_EMULATION_DEVICE_ID:
2326 		*hif_type = HIF_TYPE_QCA6390;
2327 		*target_type = TARGET_TYPE_QCA6390;
2328 		hif_info(" *********** QCA6390 *************");
2329 		break;
2330 
2331 	case QCA6490_DEVICE_ID:
2332 	case QCA6490_EMULATION_DEVICE_ID:
2333 		*hif_type = HIF_TYPE_QCA6490;
2334 		*target_type = TARGET_TYPE_QCA6490;
2335 		hif_info(" *********** QCA6490 *************");
2336 		break;
2337 
2338 	case QCA6750_DEVICE_ID:
2339 	case QCA6750_EMULATION_DEVICE_ID:
2340 		*hif_type = HIF_TYPE_QCA6750;
2341 		*target_type = TARGET_TYPE_QCA6750;
2342 		hif_info(" *********** QCA6750 *************");
2343 		break;
2344 
2345 	case KIWI_DEVICE_ID:
2346 		*hif_type = HIF_TYPE_KIWI;
2347 		*target_type = TARGET_TYPE_KIWI;
2348 		hif_info(" *********** KIWI *************");
2349 		break;
2350 
2351 	case MANGO_DEVICE_ID:
2352 		*hif_type = HIF_TYPE_MANGO;
2353 		*target_type = TARGET_TYPE_MANGO;
2354 		hif_info(" *********** MANGO *************");
2355 		break;
2356 
2357 	case PEACH_DEVICE_ID:
2358 		*hif_type = HIF_TYPE_PEACH;
2359 		*target_type = TARGET_TYPE_PEACH;
2360 		hif_info(" *********** PEACH *************");
2361 		break;
2362 
2363 	case QCA8074V2_DEVICE_ID:
2364 		*hif_type = HIF_TYPE_QCA8074V2;
2365 		*target_type = TARGET_TYPE_QCA8074V2;
2366 		hif_info(" *********** QCA8074V2 *************");
2367 		break;
2368 
2369 	case QCA6018_DEVICE_ID:
2370 	case RUMIM2M_DEVICE_ID_NODE0:
2371 	case RUMIM2M_DEVICE_ID_NODE1:
2372 	case RUMIM2M_DEVICE_ID_NODE2:
2373 	case RUMIM2M_DEVICE_ID_NODE3:
2374 	case RUMIM2M_DEVICE_ID_NODE4:
2375 	case RUMIM2M_DEVICE_ID_NODE5:
2376 		*hif_type = HIF_TYPE_QCA6018;
2377 		*target_type = TARGET_TYPE_QCA6018;
2378 		hif_info(" *********** QCA6018 *************");
2379 		break;
2380 
2381 	case QCA5018_DEVICE_ID:
2382 		*hif_type = HIF_TYPE_QCA5018;
2383 		*target_type = TARGET_TYPE_QCA5018;
2384 		hif_info(" *********** QCA5018 *************");
2385 		break;
2386 
2387 	case QCA5332_DEVICE_ID:
2388 		*hif_type = HIF_TYPE_QCA5332;
2389 		*target_type = TARGET_TYPE_QCA5332;
2390 		hif_info(" *********** QCA5332 *************");
2391 		break;
2392 
2393 	case QCA9574_DEVICE_ID:
2394 		*hif_type = HIF_TYPE_QCA9574;
2395 		*target_type = TARGET_TYPE_QCA9574;
2396 		hif_info(" *********** QCA9574 *************");
2397 		break;
2398 
2399 	case WCN6450_DEVICE_ID:
2400 		*hif_type = HIF_TYPE_WCN6450;
2401 		*target_type = TARGET_TYPE_WCN6450;
2402 		hif_info(" *********** WCN6450 *************");
2403 		break;
2404 
2405 	default:
2406 		hif_err("Unsupported device ID = 0x%x!", device_id);
2407 		ret = -ENODEV;
2408 		break;
2409 	}
2410 
2411 	if (*target_type == TARGET_TYPE_UNKNOWN) {
2412 		hif_err("Unsupported target_type!");
2413 		ret = -ENODEV;
2414 	}
2415 end:
2416 	return ret;
2417 }
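
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a bus
 * probe path could resolve the HIF and target types from the device and
 * revision IDs before continuing with target configuration.
 */
static int example_resolve_device_types(uint32_t device_id,
					uint32_t revision_id)
{
	uint32_t hif_type = 0;
	uint32_t target_type = TARGET_TYPE_UNKNOWN;
	int ret;

	ret = hif_get_device_type(device_id, revision_id,
				  &hif_type, &target_type);
	if (ret) {
		hif_err("unsupported device 0x%x rev 0x%x",
			device_id, revision_id);
		return ret;
	}

	/* hif_type/target_type now select the bus and HAL configuration */
	return 0;
}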
2418 
2419 /**
2420  * hif_get_bus_type() - return the bus type
2421  * @hif_hdl: HIF Context
2422  *
2423  * Return: enum qdf_bus_type
2424  */
2425 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
2426 {
2427 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2428 
2429 	return scn->bus_type;
2430 }
2431 
2432 /*
2433  * Target info and ini parameters are global to the driver.
2434  * These structures are therefore exposed to all the modules in
2435  * the driver, so they don't need to maintain multiple copies of
2436  * the same info; instead they get the handle from hif and
2437  * modify the structures through hif.
2438  */
2439 
2440 /**
2441  * hif_get_ini_handle() - API to get hif_config_param handle
2442  * @hif_ctx: HIF Context
2443  *
2444  * Return: pointer to hif_config_info
2445  */
2446 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
2447 {
2448 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
2449 
2450 	return &sc->hif_config;
2451 }
2452 
2453 /**
2454  * hif_get_target_info_handle() - API to get hif_target_info handle
2455  * @hif_ctx: HIF context
2456  *
2457  * Return: Pointer to hif_target_info
2458  */
2459 struct hif_target_info *hif_get_target_info_handle(
2460 					struct hif_opaque_softc *hif_ctx)
2461 {
2462 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
2463 
2464 	return &sc->target_info;
2465 }
2467 qdf_export_symbol(hif_get_target_info_handle);
2468 
2469 #ifdef RECEIVE_OFFLOAD
2470 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
2471 				 void (offld_flush_handler)(void *))
2472 {
2473 	if (hif_napi_enabled(scn, -1))
2474 		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
2475 	else
2476 		hif_err("NAPI not enabled");
2477 }
2478 qdf_export_symbol(hif_offld_flush_cb_register);
2479 
2480 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
2481 {
2482 	if (hif_napi_enabled(scn, -1))
2483 		hif_napi_rx_offld_flush_cb_deregister(scn);
2484 	else
2485 		hif_err("NAPI not enabled");
2486 }
2487 qdf_export_symbol(hif_offld_flush_cb_deregister);
2488 
2489 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
2490 {
2491 	if (hif_napi_enabled(hif_hdl, -1))
2492 		return NAPI_PIPE2ID(ctx_id);
2493 	else
2494 		return ctx_id;
2495 }
2496 #else /* RECEIVE_OFFLOAD */
2497 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
2498 {
2499 	return 0;
2500 }
2501 qdf_export_symbol(hif_get_rx_ctx_id);
2502 #endif /* RECEIVE_OFFLOAD */
2503 
2504 #if defined(FEATURE_LRO)
2505 
2506 /**
2507  * hif_get_lro_info() - Returns LRO instance for instance ID
2508  * @ctx_id: LRO instance ID
2509  * @hif_hdl: HIF Context
2510  *
2511  * Return: Pointer to LRO instance.
2512  */
2513 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
2514 {
2515 	void *data;
2516 
2517 	if (hif_napi_enabled(hif_hdl, -1))
2518 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
2519 	else
2520 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
2521 
2522 	return data;
2523 }
2524 #endif
2525 
2526 /**
2527  * hif_get_target_status() - API to get target status
2528  * @hif_ctx: HIF Context
2529  *
2530  * Return: enum hif_target_status
2531  */
2532 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
2533 {
2534 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2535 
2536 	return scn->target_status;
2537 }
2538 qdf_export_symbol(hif_get_target_status);
2539 
2540 /**
2541  * hif_set_target_status() - API to set target status
2542  * @hif_ctx: HIF Context
2543  * @status: Target Status
2544  *
2545  * Return: void
2546  */
2547 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
2548 			   hif_target_status status)
2549 {
2550 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2551 
2552 	scn->target_status = status;
2553 }
2554 
2555 /**
2556  * hif_init_ini_config() - API to initialize HIF configuration parameters
2557  * @hif_ctx: HIF Context
2558  * @cfg: HIF Configuration
2559  *
2560  * Return: void
2561  */
2562 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
2563 			 struct hif_config_info *cfg)
2564 {
2565 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2566 
2567 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
2568 }
2569 
2570 /**
2571  * hif_get_conparam() - API to get driver mode in HIF
2572  * @scn: HIF Context
2573  *
2574  * Return: driver mode of operation
2575  */
2576 uint32_t hif_get_conparam(struct hif_softc *scn)
2577 {
2578 	if (!scn)
2579 		return 0;
2580 
2581 	return scn->hif_con_param;
2582 }
2583 
2584 /**
2585  * hif_get_callbacks_handle() - API to get callbacks Handle
2586  * @scn: HIF Context
2587  *
2588  * Return: pointer to HIF Callbacks
2589  */
2590 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
2591 							struct hif_softc *scn)
2592 {
2593 	return &scn->callbacks;
2594 }
2595 
2596 /**
2597  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
2598  * @scn: HIF Context
2599  *
2600  * Return: True/False
2601  */
2602 bool hif_is_driver_unloading(struct hif_softc *scn)
2603 {
2604 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2605 
2606 	if (cbk && cbk->is_driver_unloading)
2607 		return cbk->is_driver_unloading(cbk->context);
2608 
2609 	return false;
2610 }
2611 
2612 /**
2613  * hif_is_load_or_unload_in_progress() - API to query upper layers if
2614  * load/unload is in progress
2615  * @scn: HIF Context
2616  *
2617  * Return: True/False
2618  */
2619 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
2620 {
2621 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2622 
2623 	if (cbk && cbk->is_load_unload_in_progress)
2624 		return cbk->is_load_unload_in_progress(cbk->context);
2625 
2626 	return false;
2627 }
2628 
2629 /**
2630  * hif_is_recovery_in_progress() - API to query upper layers if recovery is
2631  * in progress
2632  * @scn: HIF Context
2633  *
2634  * Return: True/False
2635  */
2636 bool hif_is_recovery_in_progress(struct hif_softc *scn)
2637 {
2638 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2639 
2640 	if (cbk && cbk->is_recovery_in_progress)
2641 		return cbk->is_recovery_in_progress(cbk->context);
2642 
2643 	return false;
2644 }
2645 
2646 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
2647     defined(HIF_IPCI)
2648 
2649 /**
2650  * hif_update_pipe_callback() - API to register pipe specific callbacks
2651  * @osc: Opaque softc
2652  * @pipeid: pipe id
2653  * @callbacks: callbacks to register
2654  *
2655  * Return: void
2656  */
2657 
2658 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
2659 					u_int8_t pipeid,
2660 					struct hif_msg_callbacks *callbacks)
2661 {
2662 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
2663 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2664 	struct HIF_CE_pipe_info *pipe_info;
2665 
2666 	QDF_BUG(pipeid < CE_COUNT_MAX);
2667 
2668 	hif_debug("pipeid: %d", pipeid);
2669 
2670 	pipe_info = &hif_state->pipe_info[pipeid];
2671 
2672 	qdf_mem_copy(&pipe_info->pipe_callbacks,
2673 			callbacks, sizeof(pipe_info->pipe_callbacks));
2674 }
2675 qdf_export_symbol(hif_update_pipe_callback);
2676 
2677 /**
2678  * hif_is_target_ready() - API to query if target is in ready state
2680  * @scn: HIF Context
2681  *
2682  * Return: True/False
2683  */
2684 bool hif_is_target_ready(struct hif_softc *scn)
2685 {
2686 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2687 
2688 	if (cbk && cbk->is_target_ready)
2689 		return cbk->is_target_ready(cbk->context);
2690 	/*
2691 	 * If the callback is not registered then there is no way to
2692 	 * determine whether the target is ready. In such a case return
2693 	 * true to indicate that the target is ready.
2694 	 */
2695 	return true;
2696 }
2697 qdf_export_symbol(hif_is_target_ready);
2698 
2699 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
2700 {
2701 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
2702 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2703 
2704 	if (cbk && cbk->get_bandwidth_level)
2705 		return cbk->get_bandwidth_level(cbk->context);
2706 
2707 	return 0;
2708 }
2709 
2710 qdf_export_symbol(hif_get_bandwidth_level);
2711 
2712 #ifdef DP_MEM_PRE_ALLOC
2713 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
2714 					 qdf_size_t size,
2715 					 qdf_dma_addr_t *paddr,
2716 					 uint32_t ring_type,
2717 					 uint8_t *is_mem_prealloc)
2718 {
2719 	void *vaddr = NULL;
2720 	struct hif_driver_state_callbacks *cbk =
2721 				hif_get_callbacks_handle(scn);
2722 
2723 	*is_mem_prealloc = false;
2724 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
2725 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
2726 								   paddr,
2727 								   ring_type);
2728 		if (vaddr) {
2729 			*is_mem_prealloc = true;
2730 			goto end;
2731 		}
2732 	}
2733 
2734 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
2735 					 scn->qdf_dev->dev,
2736 					 size,
2737 					 paddr);
2738 end:
2739 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
2740 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
2741 		(void *)*paddr, (int)size, ring_type);
2742 
2743 	return vaddr;
2744 }
2745 
2746 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
2747 				       qdf_size_t size,
2748 				       void *vaddr,
2749 				       qdf_dma_addr_t paddr,
2750 				       qdf_dma_context_t memctx,
2751 				       uint8_t is_mem_prealloc)
2752 {
2753 	struct hif_driver_state_callbacks *cbk =
2754 				hif_get_callbacks_handle(scn);
2755 
2756 	if (is_mem_prealloc) {
2757 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
2758 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
2759 		} else {
2760 			dp_warn("prealloc_put_consistent_mem_unaligned callback is NULL");
2761 			QDF_BUG(0);
2762 		}
2763 	} else {
2764 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2765 					size, vaddr, paddr, memctx);
2766 	}
2767 }
2768 
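/*
 * Descriptive note: the two helpers below first try the pre-allocation
 * callbacks registered by the upper layer; if no pre-allocated pages are
 * available (or the memory was not pre-allocated), they fall back to
 * regular multi-page allocation/free through qdf.
 */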
2769 void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
2770 				  qdf_size_t elem_size, uint16_t elem_num,
2771 				  struct qdf_mem_multi_page_t *pages,
2772 				  bool cacheable)
2773 {
2774 	struct hif_driver_state_callbacks *cbk =
2775 			hif_get_callbacks_handle(scn);
2776 
2777 	if (cbk && cbk->prealloc_get_multi_pages)
2778 		cbk->prealloc_get_multi_pages(desc_type, elem_size, elem_num,
2779 					      pages, cacheable);
2780 
2781 	if (!pages->num_pages)
2782 		qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
2783 					  elem_size, elem_num, 0, cacheable);
2784 }
2785 
2786 void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
2787 				  struct qdf_mem_multi_page_t *pages,
2788 				  bool cacheable)
2789 {
2790 	struct hif_driver_state_callbacks *cbk =
2791 			hif_get_callbacks_handle(scn);
2792 
2793 	if (cbk && cbk->prealloc_put_multi_pages &&
2794 	    pages->is_mem_prealloc)
2795 		cbk->prealloc_put_multi_pages(desc_type, pages);
2796 
2797 	if (!pages->is_mem_prealloc)
2798 		qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
2799 					 cacheable);
2800 }
2801 #endif
2802 
2803 /**
2804  * hif_batch_send() - API to access hif specific function
2805  * ce_batch_send.
2806  * @osc: HIF Context
2807  * @msdu: list of msdus to be sent
2808  * @transfer_id: transfer id
2809  * @len: downloaded length
2810  * @sendhead:
2811  *
2812  * Return: list of msdus not sent
2813  */
2814 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2815 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
2816 {
2817 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2818 
2819 	if (!ce_tx_hdl)
2820 		return NULL;
2821 
2822 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2823 			len, sendhead);
2824 }
2825 qdf_export_symbol(hif_batch_send);
2826 
2827 /**
2828  * hif_update_tx_ring() - API to access hif specific function
2829  * ce_update_tx_ring.
2830  * @osc: HIF Context
2831  * @num_htt_cmpls: number of htt compl received.
2832  *
2833  * Return: void
2834  */
2835 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2836 {
2837 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2838 
2839 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2840 }
2841 qdf_export_symbol(hif_update_tx_ring);
2842 
2844 /**
2845  * hif_send_single() - API to access hif specific function
2846  * ce_send_single.
2847  * @osc: HIF Context
2848  * @msdu : msdu to be sent
2849  * @transfer_id: transfer id
2850  * @len : downloaded length
2851  *
2852  * Return: msdu sent status
2853  */
2854 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2855 			   uint32_t transfer_id, u_int32_t len)
2856 {
2857 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2858 
2859 	if (!ce_tx_hdl)
2860 		return QDF_STATUS_E_NULL_VALUE;
2861 
2862 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2863 			len);
2864 }
2865 qdf_export_symbol(hif_send_single);
2866 #endif
2867 
2868 /**
2869  * hif_reg_write() - API to access hif specific function
2870  * hif_write32_mb.
2871  * @hif_ctx : HIF Context
2872  * @offset : offset on which value has to be written
2873  * @value : value to be written
2874  *
2875  * Return: None
2876  */
2877 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2878 		uint32_t value)
2879 {
2880 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2881 
2882 	hif_write32_mb(scn, scn->mem + offset, value);
2883 }
2885 qdf_export_symbol(hif_reg_write);
2886 
2887 /**
2888  * hif_reg_read() - API to access hif specific function
2889  * hif_read32_mb.
2890  * @hif_ctx : HIF Context
2891  * @offset : offset from which value has to be read
2892  *
2893  * Return: Read value
2894  */
2895 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
2896 {
2898 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2899 
2900 	return hif_read32_mb(scn, scn->mem + offset);
2901 }
2902 qdf_export_symbol(hif_reg_read);
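
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the two
 * exported accessors above can be combined into a read-modify-write of a
 * register field. The offset and mask here are placeholders, not real
 * register definitions.
 */
static inline void example_reg_update_bits(struct hif_opaque_softc *hif_ctx,
					   uint32_t offset, uint32_t mask,
					   uint32_t value)
{
	uint32_t reg = hif_reg_read(hif_ctx, offset);

	reg = (reg & ~mask) | (value & mask);
	hif_reg_write(hif_ctx, offset, reg);
}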
2903 
2904 /**
2905  * hif_ramdump_handler(): generic ramdump handler
2906  * @scn: struct hif_opaque_softc
2907  *
2908  * Return: None
2909  */
2910 void hif_ramdump_handler(struct hif_opaque_softc *scn)
2911 {
2912 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2913 		hif_usb_ramdump_handler(scn);
2914 }
2915 
2916 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
2917 {
2918 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2919 
2920 	return scn->wake_irq_type;
2921 }
2922 
2923 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
2924 {
2925 	struct hif_softc *scn = context;
2926 
2927 	hif_info("wake interrupt received on irq %d", irq);
2928 
2929 	hif_rtpm_set_monitor_wake_intr(0);
2930 	hif_rtpm_request_resume();
2931 
2932 	if (scn->initial_wakeup_cb)
2933 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
2934 
2935 	if (hif_is_ut_suspended(scn))
2936 		hif_ut_fw_resume(scn);
2937 
2938 	qdf_pm_system_wakeup();
2939 
2940 	return IRQ_HANDLED;
2941 }
2942 
2943 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2944 			       void (*callback)(void *),
2945 			       void *priv)
2946 {
2947 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2948 
2949 	scn->initial_wakeup_cb = callback;
2950 	scn->initial_wakeup_priv = priv;
2951 }
2952 
2953 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2954 				       uint32_t ce_service_max_yield_time)
2955 {
2956 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2957 
2958 	hif_ctx->ce_service_max_yield_time =
2959 		ce_service_max_yield_time * 1000;
2960 }
2961 
2962 unsigned long long
2963 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
2964 {
2965 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2966 
2967 	return hif_ctx->ce_service_max_yield_time;
2968 }
2969 
2970 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2971 				       uint8_t ce_service_max_rx_ind_flush)
2972 {
2973 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2974 
2975 	if (ce_service_max_rx_ind_flush == 0 ||
2976 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
2977 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2978 	else
2979 		hif_ctx->ce_service_max_rx_ind_flush =
2980 						ce_service_max_rx_ind_flush;
2981 }
2982 
2983 #ifdef SYSTEM_PM_CHECK
2984 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2985 			       enum hif_system_pm_state state)
2986 {
2987 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2988 
2989 	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
2990 }
2991 
2992 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
2993 {
2994 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2995 
2996 	return qdf_atomic_read(&hif_ctx->sys_pm_state);
2997 }
2998 
2999 int hif_system_pm_state_check(struct hif_opaque_softc *hif)
3000 {
3001 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3002 	int32_t sys_pm_state;
3003 
3004 	if (!hif_ctx) {
3005 		hif_err("hif context is null");
3006 		return -EFAULT;
3007 	}
3008 
3009 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
3010 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
3011 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
3012 		hif_info("Triggering system wakeup");
3013 		qdf_pm_system_wakeup();
3014 		return -EAGAIN;
3015 	}
3016 
3017 	return 0;
3018 }
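
/*
 * Illustrative sketch (hypothetical bus suspend path, not part of this
 * file): the suspend handler marks the PM state so that data-path callers
 * of hif_system_pm_state_check() trigger a system wakeup while the bus is
 * suspending or suspended.
 */
static int example_bus_suspend_mark_state(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
	/* ... bus-specific suspend steps would run here ... */
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);

	return 0;
}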
3019 #endif
3020 #ifdef WLAN_FEATURE_AFFINITY_MGR
3021 /*
3022  * hif_audio_cpu_affinity_allowed() - Check if audio CPU affinity is allowed
3023  *
3024  * @scn: hif handle
3025  * @cfg: hif affinity manager configuration for IRQ
3026  * @audio_taken_cpu: Current CPUs which are taken by audio.
3027  * @current_time: Current system time.
3028  *
3029  * This API checks for 2 conditions:
3030  *  1) The last audio taken mask and the current taken mask are different
3031  *  2) The time since the IRQ was last affined away due to audio-taken
3032  *     CPUs exceeds the time threshold (5 seconds in the current case).
3033  * Return true only if both conditions are satisfied.
3034  *
3035  * Return: bool: true if it is allowed to affine away audio taken cpus.
3036  */
3037 static inline bool
3038 hif_audio_cpu_affinity_allowed(struct hif_softc *scn,
3039 			       struct hif_cpu_affinity *cfg,
3040 			       qdf_cpu_mask audio_taken_cpu,
3041 			       uint64_t current_time)
3042 {
3043 	if (!qdf_cpumask_equal(&audio_taken_cpu, &cfg->walt_taken_mask) &&
3044 	    (qdf_log_timestamp_to_usecs(current_time -
3045 			 cfg->last_affined_away)
3046 		< scn->time_threshold))
3047 		return false;
3048 	return true;
3049 }
3050 
3051 /*
3052  * hif_affinity_mgr_check_update_mask() - Check if the CPU mask needs updating
3053  *
3054  * @scn: hif handle
3055  * @cfg: hif affinity manager configuration for IRQ
3056  * @audio_taken_cpu: Current CPUs which are taken by audio.
3057  * @cpu_mask: CPU mask which needs to be updated.
3058  * @current_time: Current system time.
3059  *
3060  * This API checks if the Pro audio use case is running and if cpu_mask
3061  * needs to be updated.
3062  *
3063  * Return: QDF_STATUS
3064  */
3065 static inline QDF_STATUS
3066 hif_affinity_mgr_check_update_mask(struct hif_softc *scn,
3067 				   struct hif_cpu_affinity *cfg,
3068 				   qdf_cpu_mask audio_taken_cpu,
3069 				   qdf_cpu_mask *cpu_mask,
3070 				   uint64_t current_time)
3071 {
3072 	qdf_cpu_mask allowed_mask;
3073 
3074 	/*
3075 	 * Case 1: audio_taken_mask is empty
3076 	 *   Check if passed cpu_mask and wlan_requested_mask is same or not.
3077 	 *      If both mask are different copy wlan_requested_mask(IRQ affinity
3078 	 *      mask requested by WLAN) to cpu_mask.
3079 	 *
3080 	 * Case 2: audio_taken_mask is not empty
3081 	 *   1. Only allow update if last time when IRQ was affined away due to
3082 	 *      audio taken CPUs is more than 5 seconds or update is requested
3083 	 *      by WLAN
3084 	 *   2. Only allow silver cores to be affined away.
3085 	 *   3. Check if any allowed CPUs for the audio use case are set in cpu_mask.
3086 	 *       i. If any CPU mask is set, mask out that CPU from the cpu_mask
3087 	 *       ii. If after masking out audio taken cpu(Silver cores) cpu_mask
3088 	 *           is empty, set mask to all cpu except cpus taken by audio.
3089 	 * Example:
3090 	 *| Audio mask | mask allowed | cpu_mask | WLAN req mask | new cpu_mask|
3091 	 *|  0x00      |       0x00   |   0x0C   |       0x0C    |      0x0C   |
3092 	 *|  0x00      |       0x00   |   0x03   |       0x03    |      0x03   |
3093 	 *|  0x00      |       0x00   |   0xFC   |       0x03    |      0x03   |
3094 	 *|  0x00      |       0x00   |   0x03   |       0x0C    |      0x0C   |
3095 	 *|  0x0F      |       0x03   |   0x0C   |       0x0C    |      0x0C   |
3096 	 *|  0x0F      |       0x03   |   0x03   |       0x03    |      0xFC   |
3097 	 *|  0x03      |       0x03   |   0x0C   |       0x0C    |      0x0C   |
3098 	 *|  0x03      |       0x03   |   0x03   |       0x03    |      0xFC   |
3099 	 *|  0x03      |       0x03   |   0xFC   |       0x03    |      0xFC   |
3100 	 *|  0xF0      |       0x00   |   0x0C   |       0x0C    |      0x0C   |
3101 	 *|  0xF0      |       0x00   |   0x03   |       0x03    |      0x03   |
3102 	 */
3103 
3104 	/* Check if the audio taken mask is empty */
3105 	if (qdf_likely(qdf_cpumask_empty(&audio_taken_cpu))) {
3106 		/* If CPU mask requested by WLAN for the IRQ and
3107 		 * cpu_mask passed CPU mask set for IRQ is different
3108 		 * Copy requested mask into cpu_mask and return
3109 		 */
3110 		if (qdf_unlikely(!qdf_cpumask_equal(cpu_mask,
3111 						    &cfg->wlan_requested_mask))) {
3112 			qdf_cpumask_copy(cpu_mask, &cfg->wlan_requested_mask);
3113 			return QDF_STATUS_SUCCESS;
3114 		}
3115 		return QDF_STATUS_E_ALREADY;
3116 	}
3117 
3118 	if (!(hif_audio_cpu_affinity_allowed(scn, cfg, audio_taken_cpu,
3119 					     current_time) ||
3120 	      cfg->update_requested))
3121 		return QDF_STATUS_E_AGAIN;
3122 
3123 	/* Only allow Silver cores to be affined away */
3124 	qdf_cpumask_and(&allowed_mask, &scn->allowed_mask, &audio_taken_cpu);
3125 	if (qdf_cpumask_intersects(cpu_mask, &allowed_mask)) {
3126 		/* If any of taken CPU(Silver cores) mask is set in cpu_mask,
3127 		 *  mask out the audio taken CPUs from the cpu_mask.
3128 		 */
3129 		qdf_cpumask_andnot(cpu_mask, &cfg->wlan_requested_mask,
3130 				   &allowed_mask);
3131 		/* If cpu_mask is empty set it to all CPUs
3132 		 * except taken by audio(Silver cores)
3133 		 */
3134 		if (qdf_unlikely(qdf_cpumask_empty(cpu_mask)))
3135 			qdf_cpumask_complement(cpu_mask, &allowed_mask);
3136 		return QDF_STATUS_SUCCESS;
3137 	}
3138 
3139 	return QDF_STATUS_E_ALREADY;
3140 }
3141 
3142 static inline QDF_STATUS
3143 hif_check_and_affine_irq(struct hif_softc *scn, struct hif_cpu_affinity *cfg,
3144 			 qdf_cpu_mask audio_taken_cpu, qdf_cpu_mask cpu_mask,
3145 			 uint64_t current_time)
3146 {
3147 	QDF_STATUS status;
3148 
3149 	status = hif_affinity_mgr_check_update_mask(scn, cfg,
3150 						    audio_taken_cpu,
3151 						    &cpu_mask,
3152 						    current_time);
3153 	/* Set IRQ affinity if CPU mask was updated */
3154 	if (QDF_IS_STATUS_SUCCESS(status)) {
3155 		status = hif_irq_set_affinity_hint(cfg->irq,
3156 						   &cpu_mask);
3157 		if (QDF_IS_STATUS_SUCCESS(status)) {
3158 			/* Store audio taken CPU mask */
3159 			qdf_cpumask_copy(&cfg->walt_taken_mask,
3160 					 &audio_taken_cpu);
3161 			/* Store CPU mask which was set for IRQ*/
3162 			qdf_cpumask_copy(&cfg->current_irq_mask,
3163 					 &cpu_mask);
3164 			/* Set time when IRQ affinity was updated */
3165 			cfg->last_updated = current_time;
3166 			if (hif_audio_cpu_affinity_allowed(scn, cfg,
3167 							   audio_taken_cpu,
3168 							   current_time))
3169 				/* If CPU mask was updated due to CPU
3170 				 * taken by audio, update
3171 				 * last_affined_away time
3172 				 */
3173 				cfg->last_affined_away = current_time;
3174 		}
3175 	}
3176 
3177 	return status;
3178 }
3179 
3180 void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
3181 {
3182 	bool audio_affinity_allowed = false;
3183 	int i, j, ce_id;
3184 	uint64_t current_time;
3185 	char cpu_str[10];
3186 	QDF_STATUS status;
3187 	qdf_cpu_mask cpu_mask, audio_taken_cpu;
3188 	struct HIF_CE_state *hif_state;
3189 	struct hif_exec_context *hif_ext_group;
3190 	struct CE_attr *host_ce_conf;
3191 	struct HIF_CE_state *ce_sc;
3192 	struct hif_cpu_affinity *cfg;
3193 
3194 	if (!scn->affinity_mgr_supported)
3195 		return;
3196 
3197 	current_time = hif_get_log_timestamp();
3198 	/* Get CPU mask for audio taken CPUs */
3199 	audio_taken_cpu = qdf_walt_get_cpus_taken();
3200 
3201 	ce_sc = HIF_GET_CE_STATE(scn);
3202 	host_ce_conf = ce_sc->host_ce_config;
3203 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3204 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3205 			continue;
3206 		cfg = &scn->ce_irq_cpu_mask[ce_id];
3207 		qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
3208 		status =
3209 			hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
3210 						 cpu_mask, current_time);
3211 		if (QDF_IS_STATUS_SUCCESS(status))
3212 			audio_affinity_allowed = true;
3213 	}
3214 
3215 	hif_state = HIF_GET_CE_STATE(scn);
3216 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3217 		hif_ext_group = hif_state->hif_ext_group[i];
3218 		for (j = 0; j < hif_ext_group->numirq; j++) {
3219 			cfg = &scn->irq_cpu_mask[hif_ext_group->grp_id][j];
3220 			qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
3221 			status =
3222 				hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
3223 							 cpu_mask, current_time);
3224 			if (QDF_IS_STATUS_SUCCESS(status)) {
3225 				qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
3226 				audio_affinity_allowed = true;
3227 			}
3228 		}
3229 	}
3230 	if (audio_affinity_allowed) {
3231 		qdf_thread_cpumap_print_to_pagebuf(false, cpu_str,
3232 						   &audio_taken_cpu);
3233 		hif_info("Audio taken CPU mask: %s", cpu_str);
3234 	}
3235 }
3236 
3237 static inline QDF_STATUS
3238 hif_affinity_mgr_set_irq_affinity(struct hif_softc *scn, uint32_t irq,
3239 				  struct hif_cpu_affinity *cfg,
3240 				  qdf_cpu_mask *cpu_mask)
3241 {
3242 	uint64_t current_time;
3243 	char cpu_str[10];
3244 	QDF_STATUS status, mask_updated;
3245 	qdf_cpu_mask audio_taken_cpu = qdf_walt_get_cpus_taken();
3246 
3247 	current_time = hif_get_log_timestamp();
3248 	qdf_cpumask_copy(&cfg->wlan_requested_mask, cpu_mask);
3249 	cfg->update_requested = true;
3250 	mask_updated = hif_affinity_mgr_check_update_mask(scn, cfg,
3251 							  audio_taken_cpu,
3252 							  cpu_mask,
3253 							  current_time);
3254 	status = hif_irq_set_affinity_hint(irq, cpu_mask);
3255 	if (QDF_IS_STATUS_SUCCESS(status)) {
3256 		qdf_cpumask_copy(&cfg->walt_taken_mask, &audio_taken_cpu);
3257 		qdf_cpumask_copy(&cfg->current_irq_mask, cpu_mask);
3258 		if (QDF_IS_STATUS_SUCCESS(mask_updated)) {
3259 			cfg->last_updated = current_time;
3260 			if (hif_audio_cpu_affinity_allowed(scn, cfg,
3261 							   audio_taken_cpu,
3262 							   current_time)) {
3263 				cfg->last_affined_away = current_time;
3264 				qdf_thread_cpumap_print_to_pagebuf(false,
3265 								   cpu_str,
3266 								   &audio_taken_cpu);
3267 				hif_info_rl("Audio taken CPU mask: %s",
3268 					    cpu_str);
3269 			}
3270 		}
3271 	}
3272 	cfg->update_requested = false;
3273 	return status;
3274 }
3275 
3276 QDF_STATUS
3277 hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
3278 				      uint32_t grp_id, uint32_t irq_index,
3279 				      qdf_cpu_mask *cpu_mask)
3280 {
3281 	struct hif_cpu_affinity *cfg;
3282 
3283 	if (!scn->affinity_mgr_supported)
3284 		return hif_irq_set_affinity_hint(irq, cpu_mask);
3285 
3286 	cfg = &scn->irq_cpu_mask[grp_id][irq_index];
3287 	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
3288 }
3289 
3290 QDF_STATUS
3291 hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
3292 				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
3293 {
3294 	struct hif_cpu_affinity *cfg;
3295 
3296 	if (!scn->affinity_mgr_supported)
3297 		return hif_irq_set_affinity_hint(irq, cpu_mask);
3298 
3299 	cfg = &scn->ce_irq_cpu_mask[ce_id];
3300 	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
3301 }
3302 
3303 void
3304 hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
3305 {
3306 	unsigned int cpus;
3307 	qdf_cpu_mask cpu_mask = {0};
3308 	struct hif_cpu_affinity *cfg = NULL;
3309 
3310 	if (!scn->affinity_mgr_supported)
3311 		return;
3312 
3313 	/* Set CPU mask to the Silver cores */
3314 	qdf_for_each_possible_cpu(cpus)
3315 		if (qdf_topology_physical_package_id(cpus) ==
3316 		    CPU_CLUSTER_TYPE_LITTLE)
3317 			qdf_cpumask_set_cpu(cpus, &cpu_mask);
3318 
3319 	cfg = &scn->ce_irq_cpu_mask[id];
3320 	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
3321 	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
3322 	cfg->irq = irq;
3323 	cfg->last_updated = 0;
3324 	cfg->last_affined_away = 0;
3325 	cfg->update_requested = false;
3326 }
3327 
3328 void
3329 hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
3330 			      int irq_num, int irq)
3331 {
3332 	unsigned int cpus;
3333 	qdf_cpu_mask cpu_mask = {0};
3334 	struct hif_cpu_affinity *cfg = NULL;
3335 
3336 	if (!scn->affinity_mgr_supported)
3337 		return;
3338 
3339 	/* Set CPU mask to the Silver cores */
3340 	qdf_for_each_possible_cpu(cpus)
3341 		if (qdf_topology_physical_package_id(cpus) ==
3342 		    CPU_CLUSTER_TYPE_LITTLE)
3343 			qdf_cpumask_set_cpu(cpus, &cpu_mask);
3344 
3345 	cfg = &scn->irq_cpu_mask[grp_id][irq_num];
3346 	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
3347 	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
3348 	cfg->irq = irq;
3349 	cfg->last_updated = 0;
3350 	cfg->last_affined_away = 0;
3351 	cfg->update_requested = false;
3352 }
3353 #endif
3354 
3355 #if defined(HIF_CPU_PERF_AFFINE_MASK) || \
3356 	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
3357 void hif_config_irq_set_perf_affinity_hint(
3358 	struct hif_opaque_softc *hif_ctx)
3359 {
3360 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3361 
3362 	hif_config_irq_affinity(scn);
3363 }
3364 
3365 qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
3366 #endif
3367