xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision 953b07cf94f030549a95ff8c5c5be0546111224c)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "targcfg.h"
21 #include "qdf_lock.h"
22 #include "qdf_status.h"
24 #include <qdf_atomic.h>         /* qdf_atomic_read */
25 #include <targaddrs.h>
26 #include "hif_io32.h"
27 #include <hif.h>
28 #include <target_type.h>
29 #include "regtable.h"
30 #define ATH_MODULE_NAME hif
31 #include <a_debug.h>
32 #include "hif_main.h"
33 #include "hif_hw_version.h"
34 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
35      defined(HIF_IPCI))
36 #include "ce_tasklet.h"
37 #include "ce_api.h"
38 #endif
39 #include "qdf_trace.h"
41 #include "hif_debug.h"
42 #include "mp_dev.h"
43 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
44 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
45 	defined(QCA_WIFI_QCA5332)
46 #include "hal_api.h"
47 #endif
48 #include "hif_napi.h"
49 #include "hif_unit_test_suspend_i.h"
50 #include "qdf_module.h"
51 #ifdef HIF_CE_LOG_INFO
52 #include <qdf_notifier.h>
53 #include <qdf_hang_event_notifier.h>
54 #endif
55 #include <linux/cpumask.h>
56 
57 #include <pld_common.h>
58 #include "ce_internal.h"
59 #include <qdf_tracepoint.h>
60 #include "qdf_ssr_driver_dump.h"
61 
62 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
63 {
64 	hif_trigger_dump(hif_ctx, cmd_id, start);
65 }
66 
67 /**
68  * hif_get_target_id(): get the target id
69  * @scn: hif context
70  *
71  * Return the virtual memory base address to the caller.
72  *
75  * Return: A_target_id_t
76  */
77 A_target_id_t hif_get_target_id(struct hif_softc *scn)
78 {
79 	return scn->mem;
80 }
81 
82 /**
83  * hif_get_targetdef(): hif_get_targetdef
84  * @hif_ctx: hif context
85  *
86  * Return: void *
87  */
88 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
89 {
90 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
91 
92 	return scn->targetdef;
93 }
94 
95 #ifdef FORCE_WAKE
96 #ifndef QCA_WIFI_WCN6450
97 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
98 			 bool init_phase)
99 {
100 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
101 
102 	if (ce_srng_based(scn))
103 		hal_set_init_phase(scn->hal_soc, init_phase);
104 }
105 #else
106 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
107 			 bool init_phase)
108 {
109 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
110 
111 	hal_set_init_phase(scn->hal_soc, init_phase);
112 }
113 #endif
114 #endif /* FORCE_WAKE */
115 
116 #ifdef HIF_IPCI
117 void hif_shutdown_notifier_cb(void *hif_ctx)
118 {
119 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
120 
121 	scn->recovery = true;
122 }
123 #endif
124 
125 /**
126  * hif_vote_link_down(): unvote for link up
127  * @hif_ctx: hif context
128  *
129  * Call hif_vote_link_down to release a previous request made using
130  * hif_vote_link_up. A hif_vote_link_down call should only be made
131  * after a corresponding hif_vote_link_up, otherwise you could be
132  * negating a vote from another source. When no votes are present
133  * hif will not guarantee the linkstate after hif_bus_suspend.
134  *
135  * SYNCHRONIZE WITH hif_vote_link_up by only calling in the MC thread
136  * and in initialization/deinitialization sequences.
137  *
138  * Return: n/a
139  */
140 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
141 {
142 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
143 
144 	QDF_BUG(scn);
145 	if (scn->linkstate_vote == 0)
146 		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
147 				scn->linkstate_vote);
148 
149 	scn->linkstate_vote--;
150 	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
151 	if (scn->linkstate_vote == 0)
152 		hif_bus_prevent_linkdown(scn, false);
153 }
154 
155 /**
156  * hif_vote_link_up(): vote to prevent bus from suspending
157  * @hif_ctx: hif context
158  *
159  * Makes hif guarantee that fw can message the host normally
160  * during suspend.
161  *
162  * SYNCHRONIZE WITH hif_vote_link_down by only calling in the MC thread
163  * and in initialization/deinitialization sequences.
164  *
165  * Return: n/a
166  */
167 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
168 {
169 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
170 
171 	QDF_BUG(scn);
172 	scn->linkstate_vote++;
173 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
174 	if (scn->linkstate_vote == 1)
175 		hif_bus_prevent_linkdown(scn, true);
176 }
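
/*
 * Illustrative pairing of the link vote APIs (not part of this file's call
 * flow; the caller and handle names below are hypothetical). A component
 * that needs the bus link kept up across a suspend window brackets the
 * critical section:
 *
 *	hif_vote_link_up(hif_hdl);
 *	... issue messages that must reach the target even if a bus
 *	    suspend is requested ...
 *	hif_vote_link_down(hif_hdl);
 *
 * Votes are reference counted in scn->linkstate_vote, so every
 * hif_vote_link_up() must be balanced by exactly one hif_vote_link_down().
 */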
177 
178 /**
179  * hif_can_suspend_link(): query if hif is permitted to suspend the link
180  * @hif_ctx: hif context
181  *
182  * Hif will ensure that the link won't be suspended if the upperlayers
183  * don't want it to.
184  *
185  * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
186  * we don't need extra locking to ensure votes don't change while
187  * we are in the process of suspending or resuming.
188  *
189  * Return: false if hif will guarantee link up during suspend.
190  */
191 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
192 {
193 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
194 
195 	QDF_BUG(scn);
196 	return scn->linkstate_vote == 0;
197 }
198 
199 /**
200  * hif_hia_item_address(): get the host interest item address
201  * @target_type: target type
202  * @item_offset: offset of the item within the host interest area
203  *
204  * Return: host interest item address, or 0 if the target has none
205  */
206 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
207 {
208 	switch (target_type) {
209 	case TARGET_TYPE_AR6002:
210 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
211 	case TARGET_TYPE_AR6003:
212 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
213 	case TARGET_TYPE_AR6004:
214 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
215 	case TARGET_TYPE_AR6006:
216 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
217 	case TARGET_TYPE_AR9888:
218 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
219 	case TARGET_TYPE_AR6320:
220 	case TARGET_TYPE_AR6320V2:
221 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
222 	case TARGET_TYPE_ADRASTEA:
223 		/* ADRASTEA doesn't have a host interest address */
224 		ASSERT(0);
225 		return 0;
226 	case TARGET_TYPE_AR900B:
227 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
228 	case TARGET_TYPE_QCA9984:
229 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
230 	case TARGET_TYPE_QCA9888:
231 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
232 
233 	default:
234 		ASSERT(0);
235 		return 0;
236 	}
237 }
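
/*
 * Illustrative use of hif_hia_item_address(); the offset value below is a
 * placeholder, not a real host interest field offset:
 *
 *	uint32_t item_offset = 0x10;
 *	uint32_t addr = hif_hia_item_address(TARGET_TYPE_AR9888, item_offset);
 *
 * For AR9888 this resolves to AR9888_HOST_INTEREST_ADDRESS + 0x10; targets
 * without a host interest area (e.g. ADRASTEA) assert and return 0.
 */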
238 
239 /**
240  * hif_max_num_receives_reached() - check whether the max receive count is reached
241  * @scn: HIF Context
242  * @count: number of messages processed so far in this cycle
243  *
244  * Return: true once @count exceeds the allowed receive limit
247  */
248 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
249 {
250 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
251 		return count > 120;
252 	else
253 		return count > MAX_NUM_OF_RECEIVES;
254 }
255 
256 /**
257  * init_buffer_count() - initial buffer count
258  * @maxSize: maximum number of buffers supported
259  *
260  * Routine to modify the initial buffer count to be allocated on a per
261  * OS platform basis. Platform owners will need to modify this as needed.
262  *
263  * Return: the initial buffer count to allocate
264  */
265 qdf_size_t init_buffer_count(qdf_size_t maxSize)
266 {
267 	return maxSize;
268 }
269 
270 /**
271  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
272  * @hif_ctx: hif context
273  * @htc_htt_tx_endpoint: htt_tx_endpoint
274  *
275  * Return: void
276  */
277 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
278 							int htc_htt_tx_endpoint)
279 {
280 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
281 
282 	if (!scn) {
283 		hif_err("scn is NULL!");
284 		return;
285 	}
286 
287 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
288 }
289 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
290 
291 static const struct qwlan_hw qwlan_hw_list[] = {
292 	{
293 		.id = AR6320_REV1_VERSION,
294 		.subid = 0,
295 		.name = "QCA6174_REV1",
296 	},
297 	{
298 		.id = AR6320_REV1_1_VERSION,
299 		.subid = 0x1,
300 		.name = "QCA6174_REV1_1",
301 	},
302 	{
303 		.id = AR6320_REV1_3_VERSION,
304 		.subid = 0x2,
305 		.name = "QCA6174_REV1_3",
306 	},
307 	{
308 		.id = AR6320_REV2_1_VERSION,
309 		.subid = 0x4,
310 		.name = "QCA6174_REV2_1",
311 	},
312 	{
313 		.id = AR6320_REV2_1_VERSION,
314 		.subid = 0x5,
315 		.name = "QCA6174_REV2_2",
316 	},
317 	{
318 		.id = AR6320_REV3_VERSION,
319 		.subid = 0x6,
320 		.name = "QCA6174_REV2.3",
321 	},
322 	{
323 		.id = AR6320_REV3_VERSION,
324 		.subid = 0x8,
325 		.name = "QCA6174_REV3",
326 	},
327 	{
328 		.id = AR6320_REV3_VERSION,
329 		.subid = 0x9,
330 		.name = "QCA6174_REV3_1",
331 	},
332 	{
333 		.id = AR6320_REV3_2_VERSION,
334 		.subid = 0xA,
335 		.name = "AR6320_REV3_2_VERSION",
336 	},
337 	{
338 		.id = QCA6390_V1,
339 		.subid = 0x0,
340 		.name = "QCA6390_V1",
341 	},
342 	{
343 		.id = QCA6490_V1,
344 		.subid = 0x0,
345 		.name = "QCA6490_V1",
346 	},
347 	{
348 		.id = WCN3990_v1,
349 		.subid = 0x0,
350 		.name = "WCN3990_V1",
351 	},
352 	{
353 		.id = WCN3990_v2,
354 		.subid = 0x0,
355 		.name = "WCN3990_V2",
356 	},
357 	{
358 		.id = WCN3990_v2_1,
359 		.subid = 0x0,
360 		.name = "WCN3990_V2.1",
361 	},
362 	{
363 		.id = WCN3998,
364 		.subid = 0x0,
365 		.name = "WCN3998",
366 	},
367 	{
368 		.id = QCA9379_REV1_VERSION,
369 		.subid = 0xC,
370 		.name = "QCA9379_REV1",
371 	},
372 	{
373 		.id = QCA9379_REV1_VERSION,
374 		.subid = 0xD,
375 		.name = "QCA9379_REV1_1",
376 	},
377 	{
378 		.id = MANGO_V1,
379 		.subid = 0xF,
380 		.name = "MANGO_V1",
381 	},
382 	{
383 		.id = PEACH_V1,
384 		.subid = 0,
385 		.name = "PEACH_V1",
386 	},
387 
388 	{
389 		.id = KIWI_V1,
390 		.subid = 0,
391 		.name = "KIWI_V1",
392 	},
393 	{
394 		.id = KIWI_V2,
395 		.subid = 0,
396 		.name = "KIWI_V2",
397 	},
398 	{
399 		.id = WCN6750_V1,
400 		.subid = 0,
401 		.name = "WCN6750_V1",
402 	},
403 	{
404 		.id = WCN6750_V2,
405 		.subid = 0,
406 		.name = "WCN6750_V2",
407 	},
408 	{
409 		.id = WCN6450_V1,
410 		.subid = 0,
411 		.name = "WCN6450_V1",
412 	},
413 	{
414 		.id = QCA6490_v2_1,
415 		.subid = 0,
416 		.name = "QCA6490",
417 	},
418 	{
419 		.id = QCA6490_v2,
420 		.subid = 0,
421 		.name = "QCA6490",
422 	},
423 	{
424 		.id = WCN3990_TALOS,
425 		.subid = 0,
426 		.name = "WCN3990",
427 	},
428 	{
429 		.id = WCN3990_MOOREA,
430 		.subid = 0,
431 		.name = "WCN3990",
432 	},
433 	{
434 		.id = WCN3990_SAIPAN,
435 		.subid = 0,
436 		.name = "WCN3990",
437 	},
438 	{
439 		.id = WCN3990_RENNELL,
440 		.subid = 0,
441 		.name = "WCN3990",
442 	},
443 	{
444 		.id = WCN3990_BITRA,
445 		.subid = 0,
446 		.name = "WCN3990",
447 	},
448 	{
449 		.id = WCN3990_DIVAR,
450 		.subid = 0,
451 		.name = "WCN3990",
452 	},
453 	{
454 		.id = WCN3990_ATHERTON,
455 		.subid = 0,
456 		.name = "WCN3990",
457 	},
458 	{
459 		.id = WCN3990_STRAIT,
460 		.subid = 0,
461 		.name = "WCN3990",
462 	},
463 	{
464 		.id = WCN3990_NETRANI,
465 		.subid = 0,
466 		.name = "WCN3990",
467 	},
468 	{
469 		.id = WCN3990_CLARENCE,
470 		.subid = 0,
471 		.name = "WCN3990",
472 	}
473 };
474 
475 /**
476  * hif_get_hw_name(): get a human readable name for the hardware
477  * @info: Target Info
478  *
479  * Return: human readable name for the underlying wifi hardware.
480  */
481 static const char *hif_get_hw_name(struct hif_target_info *info)
482 {
483 	int i;
484 
485 	hif_debug("target version = %d, target revision = %d",
486 		  info->target_version,
487 		  info->target_revision);
488 
489 	if (info->hw_name)
490 		return info->hw_name;
491 
492 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
493 		if (info->target_version == qwlan_hw_list[i].id &&
494 		    info->target_revision == qwlan_hw_list[i].subid) {
495 			return qwlan_hw_list[i].name;
496 		}
497 	}
498 
499 	info->hw_name = qdf_mem_malloc(64);
500 	if (!info->hw_name)
501 		return "Unknown Device (nomem)";
502 
503 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
504 			info->target_version);
505 	if (i < 0)
506 		return "Unknown Device (snprintf failure)";
507 	else
508 		return info->hw_name;
509 }
510 
511 /**
512  * hif_get_hw_info(): get target hardware version, revision and name
513  * @scn: hif context
514  * @version: version
515  * @revision: revision
516  * @target_name: target name
517  *
518  * Return: n/a
519  */
520 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
521 			const char **target_name)
522 {
523 	struct hif_target_info *info = hif_get_target_info_handle(scn);
524 	struct hif_softc *sc = HIF_GET_SOFTC(scn);
525 
526 	if (sc->bus_type == QDF_BUS_TYPE_USB)
527 		hif_usb_get_hw_info(sc);
528 
529 	*version = info->target_version;
530 	*revision = info->target_revision;
531 	*target_name = hif_get_hw_name(info);
532 }
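
/*
 * Typical call sequence (illustrative only; the handle and variable names
 * are examples, not callers in this file):
 *
 *	uint32_t version, revision;
 *	const char *name;
 *
 *	hif_get_hw_info(hif_hdl, &version, &revision, &name);
 *	hif_info("target %s version 0x%x revision 0x%x",
 *		 name, version, revision);
 */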
533 
534 /**
535  * hif_get_dev_ba(): API to get device base address.
536  * @hif_handle: hif handle
537  *
538  * Return: device base address
539  */
540 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
541 {
542 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
543 
544 	return scn->mem;
545 }
546 qdf_export_symbol(hif_get_dev_ba);
547 
548 /**
549  * hif_get_dev_ba_ce(): API to get device ce base address.
550  * @hif_handle: hif handle
551  *
552  * Return: dev mem base address for CE
553  */
554 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
555 {
556 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
557 
558 	return scn->mem_ce;
559 }
560 
561 qdf_export_symbol(hif_get_dev_ba_ce);
562 
563 /**
564  * hif_get_dev_ba_pmm(): API to get device pmm base address.
565  * @hif_handle: hif handle
566  *
567  * Return: dev mem base address for PMM
568  */
569 
570 void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle)
571 {
572 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
573 
574 	return scn->mem_pmm_base;
575 }
576 
577 qdf_export_symbol(hif_get_dev_ba_pmm);
578 
579 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
580 {
581 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
582 
583 	return scn->target_info.soc_version;
584 }
585 
586 qdf_export_symbol(hif_get_soc_version);
587 
588 /**
589  * hif_get_dev_ba_cmem(): API to get device CMEM base address.
590  * @hif_handle: hif handle
591  *
592  * Return: dev mem base address for CMEM
593  */
594 void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
595 {
596 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
597 
598 	return scn->mem_cmem;
599 }
600 
601 qdf_export_symbol(hif_get_dev_ba_cmem);
602 
603 #ifdef FEATURE_RUNTIME_PM
604 void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
605 {
606 	if (is_get)
607 		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
608 	else
609 		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
610 }
611 
612 static inline
613 void hif_rtpm_lock_init(struct hif_softc *scn)
614 {
615 	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
616 }
617 
618 static inline
619 void hif_rtpm_lock_deinit(struct hif_softc *scn)
620 {
621 	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
622 }
623 #else
624 static inline
625 void hif_rtpm_lock_init(struct hif_softc *scn)
626 {
627 }
628 
629 static inline
630 void hif_rtpm_lock_deinit(struct hif_softc *scn)
631 {
632 }
633 #endif
634 
635 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
636 /**
637  * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
638  * @scn: hif context
639  * @psoc: psoc objmgr handle
640  *
641  * Return: None
642  */
643 static inline
644 void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
645 					       struct wlan_objmgr_psoc *psoc)
646 {
647 	if (psoc) {
648 		scn->ini_cfg.ce_status_ring_timer_threshold =
649 			cfg_get(psoc,
650 				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
651 		scn->ini_cfg.ce_status_ring_batch_count_threshold =
652 			cfg_get(psoc,
653 				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
654 	}
655 }
656 #else
657 static inline
658 void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
659 					       struct wlan_objmgr_psoc *psoc)
660 {
661 }
662 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
663 
664 /**
665  * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
666  * @scn: hif context
667  * @psoc: psoc objmgr handle
668  *
669  * Return: None
670  */
671 static inline
672 void hif_get_cfg_from_psoc(struct hif_softc *scn,
673 			   struct wlan_objmgr_psoc *psoc)
674 {
675 	if (psoc) {
676 		scn->ini_cfg.disable_wake_irq =
677 			cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
678 		/*
679 		 * The wake IRQ can't share an IRQ line with the copy engines.
680 		 * In one-MSI mode we can't tell inside the wake IRQ handler
681 		 * whether the wake IRQ actually fired (known issue CR 2055359).
682 		 * To support the wake IRQ, allocate at least 2 MSI vectors:
683 		 * the first is dedicated to the wake IRQ while the copy
684 		 * engines share the second vector.
685 		 */
686 		if (pld_is_one_msi(scn->qdf_dev->dev)) {
687 			hif_debug("Disable wake IRQ in one MSI mode");
688 			scn->ini_cfg.disable_wake_irq = true;
689 		}
690 		hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
691 	}
692 }
693 
694 #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
695 /**
696  * hif_recovery_notifier_cb - Recovery notifier callback to log
697  *  hang event data
698  * @block: notifier block
699  * @state: state
700  * @data: notifier data
701  *
702  * Return: status
703  */
704 static
705 int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
706 			     void *data)
707 {
708 	struct qdf_notifer_data *notif_data = data;
709 	qdf_notif_block *notif_block;
710 	struct hif_softc *hif_handle;
711 	bool bus_id_invalid;
712 
713 	if (!data || !block)
714 		return -EINVAL;
715 
716 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
717 
718 	hif_handle = notif_block->priv_data;
719 	if (!hif_handle)
720 		return -EINVAL;
721 
722 	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
723 					  &notif_data->offset);
724 	if (bus_id_invalid)
725 		return NOTIFY_STOP_MASK;
726 
727 	hif_log_ce_info(hif_handle, notif_data->hang_data,
728 			&notif_data->offset);
729 
730 	return 0;
731 }
732 
733 /**
734  * hif_register_recovery_notifier - Register hif recovery notifier
735  * @hif_handle: hif handle
736  *
737  * Return: status
738  */
739 static
740 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
741 {
742 	qdf_notif_block *hif_notifier;
743 
744 	if (!hif_handle)
745 		return QDF_STATUS_E_FAILURE;
746 
747 	hif_notifier = &hif_handle->hif_recovery_notifier;
748 
749 	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
750 	hif_notifier->priv_data = hif_handle;
751 	return qdf_hang_event_register_notifier(hif_notifier);
752 }
753 
754 /**
755  * hif_unregister_recovery_notifier - Un-register hif recovery notifier
756  * @hif_handle: hif handle
757  *
758  * Return: status
759  */
760 static
761 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
762 {
763 	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
764 
765 	return qdf_hang_event_unregister_notifier(hif_notifier);
766 }
767 #else
768 static inline
769 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
770 {
771 	return QDF_STATUS_SUCCESS;
772 }
773 
774 static inline
775 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
776 {
777 	return QDF_STATUS_SUCCESS;
778 }
779 #endif
780 
781 #if defined(HIF_CPU_PERF_AFFINE_MASK) || \
782 	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
783 /**
784  * __hif_cpu_hotplug_notify() - CPU hotplug event handler
785  * @context: HIF context
786  * @cpu: CPU Id of the CPU generating the event
787  * @cpu_up: true if the CPU is online
788  *
789  * Return: None
790  */
791 static void __hif_cpu_hotplug_notify(void *context,
792 				     uint32_t cpu, bool cpu_up)
793 {
794 	struct hif_softc *scn = context;
795 
796 	if (!scn)
797 		return;
798 	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
799 		return;
800 
801 	if (cpu_up) {
802 		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
803 		hif_debug("Setting affinity for online CPU: %d", cpu);
804 	} else {
805 		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
806 	}
807 }
808 
809 /**
810  * hif_cpu_hotplug_notify - cpu core up/down notification
811  * handler
812  * @context: HIF context
813  * @cpu: CPU generating the event
814  * @cpu_up: true if the CPU is online
815  *
816  * Return: None
817  */
818 static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
819 {
820 	struct qdf_op_sync *op_sync;
821 
822 	if (qdf_op_protect(&op_sync))
823 		return;
824 
825 	__hif_cpu_hotplug_notify(context, cpu, cpu_up);
826 
827 	qdf_op_unprotect(op_sync);
828 }
829 
830 static void hif_cpu_online_cb(void *context, uint32_t cpu)
831 {
832 	hif_cpu_hotplug_notify(context, cpu, true);
833 }
834 
835 static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
836 {
837 	hif_cpu_hotplug_notify(context, cpu, false);
838 }
839 
840 static void hif_cpuhp_register(struct hif_softc *scn)
841 {
842 	if (!scn) {
843 		hif_info_high("cannot register hotplug notifiers");
844 		return;
845 	}
846 	qdf_cpuhp_register(&scn->cpuhp_event_handle,
847 			   scn,
848 			   hif_cpu_online_cb,
849 			   hif_cpu_before_offline_cb);
850 }
851 
852 static void hif_cpuhp_unregister(struct hif_softc *scn)
853 {
854 	if (!scn) {
855 		hif_info_high("cannot unregister hotplug notifiers");
856 		return;
857 	}
858 	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
859 }
860 
861 #else
862 static void hif_cpuhp_register(struct hif_softc *scn)
863 {
864 }
865 
866 static void hif_cpuhp_unregister(struct hif_softc *scn)
867 {
868 }
869 #endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
870 
871 #ifdef HIF_DETECTION_LATENCY_ENABLE
872 /*
873  * Bitmask to control enablement of latency detection for the tasklets;
874  * bit X represents the tasklet of WLAN_CE_X.
875  */
876 #ifndef DETECTION_LATENCY_TASKLET_MASK
877 #define DETECTION_LATENCY_TASKLET_MASK (BIT(2) | BIT(7))
878 #endif
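
/*
 * With the default mask above, BIT(2) | BIT(7), only the tasklets of
 * WLAN_CE_2 and WLAN_CE_7 are monitored for latency unless the build
 * overrides DETECTION_LATENCY_TASKLET_MASK.
 */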
879 
880 static inline int
881 __hif_tasklet_latency(struct hif_softc *scn, bool from_timer, int idx)
882 {
883 	qdf_time_t sched_time =
884 		scn->latency_detect.tasklet_info[idx].sched_time;
885 	qdf_time_t exec_time =
886 		scn->latency_detect.tasklet_info[idx].exec_time;
887 	qdf_time_t curr_time = qdf_system_ticks();
888 	uint32_t threshold = scn->latency_detect.threshold;
889 	qdf_time_t expect_exec_time =
890 		sched_time + qdf_system_msecs_to_ticks(threshold);
891 
892 	/* Two kinds of check here.
893 	 * from_timer==true:  check if the tasklet has stalled
894 	 * from_timer==false: check if the tasklet executed late
895 	 */
896 	if (from_timer ?
897 	    (qdf_system_time_after(sched_time, exec_time) &&
898 	     qdf_system_time_after(curr_time, expect_exec_time)) :
899 	    qdf_system_time_after(exec_time, expect_exec_time)) {
900 		hif_err("tasklet[%d] latency detected: from_timer %d, curr_time %lu, sched_time %lu, exec_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
901 			idx, from_timer, curr_time, sched_time,
902 			exec_time, threshold,
903 			scn->latency_detect.timeout,
904 			qdf_get_cpu(), (void *)_RET_IP_);
905 		qdf_trigger_self_recovery(NULL,
906 					  QDF_TASKLET_CREDIT_LATENCY_DETECT);
907 		return -ETIMEDOUT;
908 	}
909 
910 	return 0;
911 }
912 
913 /**
914  * hif_tasklet_latency_detect_enabled() - check whether latency detect
915  * is enabled for the tasklet which is specified by idx
916  * @scn: HIF opaque context
917  * @idx: CE id
918  *
919  * Return: true if latency detect is enabled for the specified tasklet,
920  * false otherwise.
921  */
922 static inline bool
923 hif_tasklet_latency_detect_enabled(struct hif_softc *scn, int idx)
924 {
925 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
926 		return false;
927 
928 	if (!scn->latency_detect.enable_detection)
929 		return false;
930 
931 	if (idx < 0 || idx >= HIF_TASKLET_IN_MONITOR ||
932 	    !qdf_test_bit(idx, scn->latency_detect.tasklet_bmap))
933 		return false;
934 
935 	return true;
936 }
937 
938 void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
939 {
940 	if (!hif_tasklet_latency_detect_enabled(scn, idx))
941 		return;
942 
943 	/*
944 	 * hif_set_enable_detection(true) might come between
945 	 * hif_tasklet_latency_record_sched() and
946 	 * hif_tasklet_latency_record_exec() during wlan startup, then the
947 	 * sched_time is 0 but exec_time is not, and hit the timeout case in
948 	 * __hif_tasklet_latency().
949 	 * To avoid such issue, skip exec_time recording if sched_time has not
950 	 * been recorded.
951 	 */
952 	if (!scn->latency_detect.tasklet_info[idx].sched_time)
953 		return;
954 
955 	scn->latency_detect.tasklet_info[idx].exec_time = qdf_system_ticks();
956 	__hif_tasklet_latency(scn, false, idx);
957 }
958 
959 void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
960 {
961 	if (!hif_tasklet_latency_detect_enabled(scn, idx))
962 		return;
963 
964 	scn->latency_detect.tasklet_info[idx].sched_cpuid = qdf_get_cpu();
965 	scn->latency_detect.tasklet_info[idx].sched_time = qdf_system_ticks();
966 }
967 
968 static inline void hif_credit_latency(struct hif_softc *scn, bool from_timer)
969 {
970 	qdf_time_t credit_request_time =
971 		scn->latency_detect.credit_request_time;
972 	qdf_time_t credit_report_time = scn->latency_detect.credit_report_time;
973 	qdf_time_t curr_jiffies = qdf_system_ticks();
974 	uint32_t threshold = scn->latency_detect.threshold;
975 	int cpu_id = qdf_get_cpu();
976 
977 	/* Two kinds of check here.
978 	 * from_timer==true:  check if the credit report has stalled
979 	 * from_timer==false: check if the credit report came late
980 	 */
981 
982 	if ((from_timer ?
983 	     qdf_system_time_after(credit_request_time, credit_report_time) :
984 	     qdf_system_time_after(credit_report_time, credit_request_time)) &&
985 	    qdf_system_time_after(curr_jiffies,
986 				  credit_request_time +
987 				  qdf_system_msecs_to_ticks(threshold))) {
988 		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
989 			from_timer, curr_jiffies, credit_request_time,
990 			credit_report_time, threshold,
991 			scn->latency_detect.timeout,
992 			cpu_id, (void *)_RET_IP_);
993 		goto latency;
994 	}
995 	return;
996 
997 latency:
998 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
999 }
1000 
1001 static inline void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
1002 {
1003 	int i, ret;
1004 
1005 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1006 		if (!qdf_test_bit(i, scn->latency_detect.tasklet_bmap))
1007 			continue;
1008 
1009 		ret = __hif_tasklet_latency(scn, from_timer, i);
1010 		if (ret)
1011 			return;
1012 	}
1013 }
1014 
1015 /**
1016  * hif_check_detection_latency(): check for tasklet/credit latency
1017  * @scn: hif context
1018  * @from_timer: true if called from the timer handler
1019  * @bitmap_type: bitmap indicating whether to check tasklet, credit or both
1021  *
1022  * Return: none
1023  */
1024 void hif_check_detection_latency(struct hif_softc *scn,
1025 				 bool from_timer,
1026 				 uint32_t bitmap_type)
1027 {
1028 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1029 		return;
1030 
1031 	if (!scn->latency_detect.enable_detection)
1032 		return;
1033 
1034 	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
1035 		hif_tasklet_latency(scn, from_timer);
1036 
1037 	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
1038 		hif_credit_latency(scn, from_timer);
1039 }
1040 
1041 static void hif_latency_detect_timeout_handler(void *arg)
1042 {
1043 	struct hif_softc *scn = (struct hif_softc *)arg;
1044 	int next_cpu, i;
1045 	qdf_cpu_mask cpu_mask = {0};
1046 	struct hif_latency_detect *detect = &scn->latency_detect;
1047 
1048 	hif_check_detection_latency(scn, true,
1049 				    BIT(HIF_DETECT_TASKLET) |
1050 				    BIT(HIF_DETECT_CREDIT));
1051 
1052 	/* The timer needs to be started on a different cpu so that it
1053 	 * can detect a stalled tasklet schedule. There is still a chance
1054 	 * that, after the timer has been started, an irq/tasklet lands on
1055 	 * the same cpu and the tasklet executes before the softirq timer;
1056 	 * if that tasklet stalls, this timer can't detect it. We accept
1057 	 * this as a limitation: a stalled tasklet will still be detected
1058 	 * elsewhere, just a little later.
1059 	 */
1061 	qdf_cpumask_copy(&cpu_mask, (const qdf_cpu_mask *)cpu_active_mask);
1062 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1063 		if (!qdf_test_bit(i, detect->tasklet_bmap))
1064 			continue;
1065 
1066 		qdf_cpumask_clear_cpu(detect->tasklet_info[i].sched_cpuid,
1067 				      &cpu_mask);
1068 	}
1069 
1070 	next_cpu = cpumask_first(&cpu_mask);
1071 	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
1072 		hif_debug("start timer on local");
1073 		/* no available cpu found, start the timer on the local cpu */
1074 		qdf_timer_mod(&detect->timer, detect->timeout);
1075 	} else {
1076 		qdf_timer_start_on(&detect->timer, detect->timeout, next_cpu);
1077 	}
1078 }
1079 
1080 static void hif_latency_detect_timer_init(struct hif_softc *scn)
1081 {
1082 	scn->latency_detect.timeout =
1083 		DETECTION_TIMER_TIMEOUT;
1084 	scn->latency_detect.threshold =
1085 		DETECTION_LATENCY_THRESHOLD;
1086 
1087 	hif_info("timer timeout %u, latency threshold %u",
1088 		 scn->latency_detect.timeout,
1089 		 scn->latency_detect.threshold);
1090 
1091 	scn->latency_detect.is_timer_started = false;
1092 
1093 	qdf_timer_init(NULL,
1094 		       &scn->latency_detect.timer,
1095 		       &hif_latency_detect_timeout_handler,
1096 		       scn,
1097 		       QDF_TIMER_TYPE_SW_SPIN);
1098 }
1099 
1100 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
1101 {
1102 	hif_info("deinit timer");
1103 	qdf_timer_free(&scn->latency_detect.timer);
1104 }
1105 
1106 static void hif_latency_detect_init(struct hif_softc *scn)
1107 {
1108 	uint32_t tasklet_mask;
1109 	int i;
1110 
1111 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1112 		return;
1113 
1114 	tasklet_mask = DETECTION_LATENCY_TASKLET_MASK;
1115 	hif_info("tasklet mask is 0x%x", tasklet_mask);
1116 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1117 		if (BIT(i) & tasklet_mask)
1118 			qdf_set_bit(i, scn->latency_detect.tasklet_bmap);
1119 	}
1120 
1121 	hif_latency_detect_timer_init(scn);
1122 }
1123 
1124 static void hif_latency_detect_deinit(struct hif_softc *scn)
1125 {
1126 	int i;
1127 
1128 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1129 		return;
1130 
1131 	hif_latency_detect_timer_deinit(scn);
1132 	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++)
1133 		qdf_clear_bit(i, scn->latency_detect.tasklet_bmap);
1134 }
1135 
1136 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
1137 {
1138 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1139 
1140 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1141 		return;
1142 
1143 	hif_debug_rl("start timer");
1144 	if (scn->latency_detect.is_timer_started) {
1145 		hif_info("timer has been started");
1146 		return;
1147 	}
1148 
1149 	qdf_timer_start(&scn->latency_detect.timer,
1150 			scn->latency_detect.timeout);
1151 	scn->latency_detect.is_timer_started = true;
1152 }
1153 
1154 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
1155 {
1156 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1157 
1158 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1159 		return;
1160 
1161 	hif_debug_rl("stop timer");
1162 
1163 	qdf_timer_sync_cancel(&scn->latency_detect.timer);
1164 	scn->latency_detect.is_timer_started = false;
1165 }
1166 
1167 void hif_latency_detect_credit_record_time(
1168 	enum hif_credit_exchange_type type,
1169 	struct hif_opaque_softc *hif_ctx)
1170 {
1171 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1172 
1173 	if (!scn) {
1174 		hif_err("scn is null");
1175 		return;
1176 	}
1177 
1178 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1179 		return;
1180 
1181 	if (HIF_REQUEST_CREDIT == type)
1182 		scn->latency_detect.credit_request_time = qdf_system_ticks();
1183 	else if (HIF_PROCESS_CREDIT_REPORT == type)
1184 		scn->latency_detect.credit_report_time = qdf_system_ticks();
1185 
1186 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
1187 }
1188 
1189 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
1190 {
1191 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1192 
1193 	if (!scn) {
1194 		hif_err("scn is null");
1195 		return;
1196 	}
1197 
1198 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1199 		return;
1200 
1201 	scn->latency_detect.enable_detection = value;
1202 }
1203 #else
1204 static inline void hif_latency_detect_init(struct hif_softc *scn)
1205 {}
1206 
1207 static inline void hif_latency_detect_deinit(struct hif_softc *scn)
1208 {}
1209 #endif
1210 
1211 #ifdef WLAN_FEATURE_AFFINITY_MGR
1212 #define AFFINITY_THRESHOLD 5000000
1213 static inline void
1214 hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
1215 {
1216 	unsigned int cpus;
1217 	qdf_cpu_mask allowed_mask = {0};
1218 
1219 	scn->affinity_mgr_supported =
1220 		(cfg_get(psoc, CFG_IRQ_AFFINE_AUDIO_USE_CASE) &&
1221 		qdf_walt_get_cpus_taken_supported());
1222 
1223 	hif_info("Affinity Manager supported: %d", scn->affinity_mgr_supported);
1224 
1225 	if (!scn->affinity_mgr_supported)
1226 		return;
1227 
1228 	scn->time_threshold = AFFINITY_THRESHOLD;
1229 	qdf_for_each_possible_cpu(cpus)
1230 		if (qdf_topology_physical_package_id(cpus) ==
1231 			CPU_CLUSTER_TYPE_LITTLE)
1232 			qdf_cpumask_set_cpu(cpus, &allowed_mask);
1233 	qdf_cpumask_copy(&scn->allowed_mask, &allowed_mask);
1234 }
1235 #else
1236 static inline void
1237 hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
1238 {
1239 }
1240 #endif
1241 
1242 #ifdef FEATURE_DIRECT_LINK
1243 /**
1244  * hif_init_direct_link_rcv_pipe_num(): Initialize the direct link receive
1245  *  pipe number
1246  * @scn: hif context
1247  *
1248  * Return: None
1249  */
1250 static inline
1251 void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
1252 {
1253 	scn->dl_recv_pipe_num = INVALID_PIPE_NO;
1254 }
1255 #else
1256 static inline
1257 void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
1258 {
1259 }
1260 #endif
1261 
1262 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1263 				  uint32_t mode,
1264 				  enum qdf_bus_type bus_type,
1265 				  struct hif_driver_state_callbacks *cbk,
1266 				  struct wlan_objmgr_psoc *psoc)
1267 {
1268 	struct hif_softc *scn;
1269 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1270 	int bus_context_size = hif_bus_get_context_size(bus_type);
1271 
1272 	if (bus_context_size == 0) {
1273 		hif_err("context size 0 not allowed");
1274 		return NULL;
1275 	}
1276 
1277 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
1278 	if (!scn)
1279 		return GET_HIF_OPAQUE_HDL(scn);
1280 
1281 	scn->qdf_dev = qdf_ctx;
1282 	scn->hif_con_param = mode;
1283 	qdf_atomic_init(&scn->active_tasklet_cnt);
1284 
1285 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
1286 	qdf_atomic_init(&scn->link_suspended);
1287 	qdf_atomic_init(&scn->tasklet_from_intr);
1288 	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
1289 	qdf_mem_copy(&scn->callbacks, cbk,
1290 		     sizeof(struct hif_driver_state_callbacks));
1291 	scn->bus_type  = bus_type;
1292 
1293 	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
1294 	hif_get_cfg_from_psoc(scn, psoc);
1295 
1296 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
1297 	status = hif_bus_open(scn, bus_type);
1298 	if (status != QDF_STATUS_SUCCESS) {
1299 		hif_err("hif_bus_open error = %d, bus_type = %d",
1300 			status, bus_type);
1301 		qdf_mem_free(scn);
1302 		scn = NULL;
1303 		goto out;
1304 	}
1305 
1306 	hif_rtpm_lock_init(scn);
1307 
1308 	hif_cpuhp_register(scn);
1309 	hif_latency_detect_init(scn);
1310 	hif_affinity_mgr_init(scn, psoc);
1311 	hif_init_direct_link_rcv_pipe_num(scn);
1312 	hif_ce_desc_history_log_register(scn);
1313 	hif_desc_history_log_register();
1314 	qdf_ssr_driver_dump_register_region("hif", scn, sizeof(*scn));
1315 
1316 out:
1317 	return GET_HIF_OPAQUE_HDL(scn);
1318 }
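
/*
 * Illustrative open/close pairing for a PCI attach path (the surrounding
 * variables are hypothetical; the real callers live in the upper layers
 * of the driver):
 *
 *	struct hif_opaque_softc *hif_hdl;
 *
 *	hif_hdl = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (!hif_hdl)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	hif_close(hif_hdl);
 *
 * hif_open() returns NULL if the bus context allocation or hif_bus_open()
 * fails.
 */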
1319 
1320 #ifdef ADRASTEA_RRI_ON_DDR
1321 /**
1322  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
1323  * @scn: hif context
1324  *
1325  * Return: none
1326  */
1327 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
1328 {
1329 	if (scn->vaddr_rri_on_ddr)
1330 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1331 					RRI_ON_DDR_MEM_SIZE,
1332 					scn->vaddr_rri_on_ddr,
1333 					scn->paddr_rri_on_ddr, 0);
1334 	scn->vaddr_rri_on_ddr = NULL;
1335 }
1336 #endif
1337 
1338 /**
1339  * hif_close(): hif_close
1340  * @hif_ctx: hif_ctx
1341  *
1342  * Return: n/a
1343  */
1344 void hif_close(struct hif_opaque_softc *hif_ctx)
1345 {
1346 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1347 
1348 	if (!scn) {
1349 		hif_err("hif_opaque_softc is NULL");
1350 		return;
1351 	}
1352 
1353 	qdf_ssr_driver_dump_unregister_region("hif");
1354 	hif_desc_history_log_unregister();
1355 	hif_ce_desc_history_log_unregister();
1356 	hif_latency_detect_deinit(scn);
1357 
1358 	if (scn->athdiag_procfs_inited) {
1359 		athdiag_procfs_remove();
1360 		scn->athdiag_procfs_inited = false;
1361 	}
1362 
1363 	if (scn->target_info.hw_name) {
1364 		char *hw_name = scn->target_info.hw_name;
1365 
1366 		scn->target_info.hw_name = "ErrUnloading";
1367 		qdf_mem_free(hw_name);
1368 	}
1369 
1370 	hif_uninit_rri_on_ddr(scn);
1371 	hif_cleanup_static_buf_to_target(scn);
1372 	hif_cpuhp_unregister(scn);
1373 	hif_rtpm_lock_deinit(scn);
1374 
1375 	hif_bus_close(scn);
1376 
1377 	qdf_mem_free(scn);
1378 }
1379 
1380 /**
1381  * hif_get_num_active_grp_tasklets() - get the number of active
1382  *		datapath group tasklets pending to be completed.
1383  * @scn: HIF context
1384  *
1385  * Return: the number of datapath group tasklets which are active
1386  */
1387 static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
1388 {
1389 	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
1390 }
1391 
1392 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1393 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1394 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1395 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1396 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1397 	defined(QCA_WIFI_QCN6432) || \
1398 	defined(QCA_WIFI_QCA9574)) || defined(QCA_WIFI_QCA5332)
1399 /**
1400  * hif_get_num_pending_work() - get the number of entries in
1401  *		the workqueue pending to be completed.
1402  * @scn: HIF context
1403  *
1404  * Return: the number of pending register write work entries
1405  */
1406 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1407 {
1408 	return hal_get_reg_write_pending_work(scn->hal_soc);
1409 }
1410 #elif defined(FEATURE_HIF_DELAYED_REG_WRITE)
1411 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1412 {
1413 	return qdf_atomic_read(&scn->active_work_cnt);
1414 }
1415 #else
1416 
1417 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1418 {
1419 	return 0;
1420 }
1421 #endif
1422 
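/*
 * hif_try_complete_tasks() - wait for HIF tasklets, group tasklets and
 * pending register write work to drain. Polls every 10 ms and panics via
 * QDF_DEBUG_PANIC() if anything is still pending after
 * HIF_TASK_DRAIN_WAIT_CNT iterations.
 */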
1423 QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
1424 {
1425 	uint32_t task_drain_wait_cnt = 0;
1426 	int tasklet = 0, grp_tasklet = 0, work = 0;
1427 
1428 	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
1429 	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1430 	       (work = hif_get_num_pending_work(scn))) {
1431 		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1432 			hif_err("pending tasklets %d grp tasklets %d work %d",
1433 				tasklet, grp_tasklet, work);
1434 			QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: tasklets %d grp tasklets %d work %d",
1435 					HIF_TASK_DRAIN_WAIT_CNT * 10,
1436 					tasklet, grp_tasklet, work);
1437 			return QDF_STATUS_E_FAULT;
1438 		}
1439 		hif_info("waiting for tasklets %d grp tasklets %d work %d",
1440 			 tasklet, grp_tasklet, work);
1441 		msleep(10);
1442 	}
1443 
1444 	return QDF_STATUS_SUCCESS;
1445 }
1446 
1447 #ifdef HIF_HAL_REG_ACCESS_SUPPORT
1448 void hif_reg_window_write(struct hif_softc *scn, uint32_t offset,
1449 			  uint32_t value)
1450 {
1451 	hal_write32_mb(scn->hal_soc, offset, value);
1452 }
1453 
1454 uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset)
1455 {
1456 	return hal_read32_mb(scn->hal_soc, offset);
1457 }
1458 #endif
1459 
1460 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1461 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1462 {
1463 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1464 	uint32_t work_drain_wait_cnt = 0;
1465 	uint32_t wait_cnt = 0;
1466 	int work = 0;
1467 
1468 	qdf_atomic_set(&scn->dp_ep_vote_access,
1469 		       HIF_EP_VOTE_ACCESS_DISABLE);
1470 	qdf_atomic_set(&scn->ep_vote_access,
1471 		       HIF_EP_VOTE_ACCESS_DISABLE);
1472 
1473 	while ((work = hif_get_num_pending_work(scn))) {
1474 		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1475 			qdf_atomic_set(&scn->dp_ep_vote_access,
1476 				       HIF_EP_VOTE_ACCESS_ENABLE);
1477 			qdf_atomic_set(&scn->ep_vote_access,
1478 				       HIF_EP_VOTE_ACCESS_ENABLE);
1479 			hif_err("timeout waiting for pending work %d", work);
1480 			return QDF_STATUS_E_FAULT;
1481 		}
1482 		qdf_sleep(10);
1483 	}
1484 
1485 	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
1486 		return QDF_STATUS_SUCCESS;
1487 
1488 	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1489 		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
1490 			hif_err("Release EP vote was not processed by FW");
1491 			return QDF_STATUS_E_FAULT;
1492 		}
1493 		qdf_sleep(5);
1494 	}
1495 
1496 	return QDF_STATUS_SUCCESS;
1497 }
1498 
1499 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1500 {
1501 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1502 	uint8_t vote_access;
1503 
1504 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1505 
1506 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1507 		hif_info("EP vote changed from:%u to intermediate state",
1508 			 vote_access);
1509 
1510 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1511 		QDF_BUG(0);
1512 
1513 	qdf_atomic_set(&scn->ep_vote_access,
1514 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1515 }
1516 
1517 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1518 {
1519 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1520 
1521 	qdf_atomic_set(&scn->dp_ep_vote_access,
1522 		       HIF_EP_VOTE_ACCESS_ENABLE);
1523 	qdf_atomic_set(&scn->ep_vote_access,
1524 		       HIF_EP_VOTE_ACCESS_ENABLE);
1525 }
1526 
1527 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1528 			    uint8_t type, uint8_t access)
1529 {
1530 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1531 
1532 	if (type == HIF_EP_VOTE_DP_ACCESS)
1533 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1534 	else
1535 		qdf_atomic_set(&scn->ep_vote_access, access);
1536 }
1537 
1538 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1539 			       uint8_t type)
1540 {
1541 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1542 
1543 	if (type == HIF_EP_VOTE_DP_ACCESS)
1544 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1545 	else
1546 		return qdf_atomic_read(&scn->ep_vote_access);
1547 }
1548 #endif
1549 
1550 #ifdef FEATURE_HIF_DELAYED_REG_WRITE
1551 #ifdef MEMORY_DEBUG
1552 #define HIF_REG_WRITE_QUEUE_LEN 128
1553 #else
1554 #define HIF_REG_WRITE_QUEUE_LEN 32
1555 #endif
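
/*
 * HIF_REG_WRITE_QUEUE_LEN must stay a power of two: both read_idx and
 * write_idx are wrapped with "& (HIF_REG_WRITE_QUEUE_LEN - 1)" rather than
 * a modulo, so a non-power-of-two length would corrupt the queue.
 */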
1556 
1557 /**
1558  * hif_print_reg_write_stats() - Print hif delayed reg write stats
1559  * @hif_ctx: hif opaque handle
1560  *
1561  * Return: None
1562  */
1563 void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
1564 {
1565 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1566 	struct CE_state *ce_state;
1567 	uint32_t *hist;
1568 	int i;
1569 
1570 	hist = scn->wstats.sched_delay;
1571 	hif_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
1572 		  qdf_atomic_read(&scn->wstats.enqueues),
1573 		  scn->wstats.dequeues,
1574 		  qdf_atomic_read(&scn->wstats.coalesces),
1575 		  qdf_atomic_read(&scn->wstats.direct),
1576 		  qdf_atomic_read(&scn->wstats.q_depth),
1577 		  scn->wstats.max_q_depth,
1578 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us],
1579 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us],
1580 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us],
1581 		  hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]);
1582 
1583 	for (i = 0; i < scn->ce_count; i++) {
1584 		ce_state = scn->ce_id_to_state[i];
1585 		if (!ce_state)
1586 			continue;
1587 
1588 		hif_debug("ce%d: enq %u deq %u coal %u direct %u",
1589 			  i, ce_state->wstats.enqueues,
1590 			  ce_state->wstats.dequeues,
1591 			  ce_state->wstats.coalesces,
1592 			  ce_state->wstats.direct);
1593 	}
1594 }
1595 
1596 /**
1597  * hif_is_reg_write_tput_level_high() - throughput level for delayed reg writes
1598  * @scn: hif_softc pointer
1599  *
1600  * Return: true if throughput is high, else false.
1601  */
1602 static inline bool hif_is_reg_write_tput_level_high(struct hif_softc *scn)
1603 {
1604 	int bw_level = hif_get_bandwidth_level(GET_HIF_OPAQUE_HDL(scn));
1605 
1606 	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
1607 }
1608 
1609 /**
1610  * hif_reg_write_fill_sched_delay_hist() - fill reg write delay histogram
1611  * @scn: hif_softc pointer
1612  * @delay_us: delay in us
1613  *
1614  * Return: None
1615  */
1616 static inline void hif_reg_write_fill_sched_delay_hist(struct hif_softc *scn,
1617 						       uint64_t delay_us)
1618 {
1619 	uint32_t *hist;
1620 
1621 	hist = scn->wstats.sched_delay;
1622 
1623 	if (delay_us < 100)
1624 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us]++;
1625 	else if (delay_us < 1000)
1626 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us]++;
1627 	else if (delay_us < 5000)
1628 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us]++;
1629 	else
1630 		hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]++;
1631 }
1632 
1633 /**
1634  * hif_process_reg_write_q_elem() - process a register write queue element
1635  * @scn: hif_softc pointer
1636  * @q_elem: pointer to hal register write queue element
1637  *
1638  * Return: The value which was written to the address
1639  */
1640 static int32_t
1641 hif_process_reg_write_q_elem(struct hif_softc *scn,
1642 			     struct hif_reg_write_q_elem *q_elem)
1643 {
1644 	struct CE_state *ce_state = q_elem->ce_state;
1645 	uint32_t write_val = -1;
1646 
1647 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
1648 
1649 	ce_state->reg_write_in_progress = false;
1650 	ce_state->wstats.dequeues++;
1651 
1652 	if (ce_state->src_ring) {
1653 		q_elem->dequeue_val = ce_state->src_ring->write_index;
1654 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
1655 			       ce_state->src_ring->write_index);
1656 		write_val = ce_state->src_ring->write_index;
1657 	} else if (ce_state->dest_ring) {
1658 		q_elem->dequeue_val = ce_state->dest_ring->write_index;
1659 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
1660 			       ce_state->dest_ring->write_index);
1661 		write_val = ce_state->dest_ring->write_index;
1662 	} else {
1663 		hif_debug("invalid reg write received");
1664 		qdf_assert(0);
1665 	}
1666 
1667 	q_elem->valid = 0;
1668 	ce_state->last_dequeue_time = q_elem->dequeue_time;
1669 
1670 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1671 
1672 	return write_val;
1673 }
1674 
1675 /**
1676  * hif_reg_write_work() - Worker to process delayed writes
1677  * @arg: hif_softc pointer
1678  *
1679  * Return: None
1680  */
1681 static void hif_reg_write_work(void *arg)
1682 {
1683 	struct hif_softc *scn = arg;
1684 	struct hif_reg_write_q_elem *q_elem;
1685 	uint32_t offset;
1686 	uint64_t delta_us;
1687 	int32_t q_depth, write_val;
1688 	uint32_t num_processed = 0;
1689 	int32_t ring_id;
1690 
1691 	q_elem = &scn->reg_write_queue[scn->read_idx];
1692 	q_elem->work_scheduled_time = qdf_get_log_timestamp();
1693 	q_elem->cpu_id = qdf_get_cpu();
1694 
1695 	/* Make sure q_elem is read consistently from memory across cores */
1696 	qdf_rmb();
1697 	if (!q_elem->valid)
1698 		return;
1699 
1700 	q_depth = qdf_atomic_read(&scn->wstats.q_depth);
1701 	if (q_depth > scn->wstats.max_q_depth)
1702 		scn->wstats.max_q_depth =  q_depth;
1703 
1704 	if (hif_prevent_link_low_power_states(GET_HIF_OPAQUE_HDL(scn))) {
1705 		scn->wstats.prevent_l1_fails++;
1706 		return;
1707 	}
1708 
1709 	while (true) {
1710 		qdf_rmb();
1711 		if (!q_elem->valid)
1712 			break;
1713 
1714 		qdf_rmb();
1715 		q_elem->dequeue_time = qdf_get_log_timestamp();
1716 		ring_id = q_elem->ce_state->id;
1717 		offset = q_elem->offset;
1718 		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
1719 						      q_elem->enqueue_time);
1720 		hif_reg_write_fill_sched_delay_hist(scn, delta_us);
1721 
1722 		scn->wstats.dequeues++;
1723 		qdf_atomic_dec(&scn->wstats.q_depth);
1724 
1725 		write_val = hif_process_reg_write_q_elem(scn, q_elem);
1726 		hif_debug("read_idx %u ce_id %d offset 0x%x dequeue_val %d",
1727 			  scn->read_idx, ring_id, offset, write_val);
1728 
1729 		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
1730 					   q_elem->dequeue_val,
1731 					   q_elem->enqueue_time,
1732 					   q_elem->dequeue_time);
1733 		num_processed++;
1734 		scn->read_idx = (scn->read_idx + 1) &
1735 					(HIF_REG_WRITE_QUEUE_LEN - 1);
1736 		q_elem = &scn->reg_write_queue[scn->read_idx];
1737 	}
1738 
1739 	hif_allow_link_low_power_states(GET_HIF_OPAQUE_HDL(scn));
1740 
1741 	/*
1742 	 * Decrement active_work_cnt by the number of elements dequeued after
1743 	 * hif_allow_link_low_power_states.
1744 	 * This makes sure that hif_try_complete_tasks will wait till we make
1745 	 * the bus access in hif_allow_link_low_power_states. This will avoid
1746 	 * race condition between delayed register worker and bus suspend
1747 	 * (system suspend or runtime suspend).
1748 	 *
1749 	 * The following decrement should be done at the end!
1750 	 */
1751 	qdf_atomic_sub(num_processed, &scn->active_work_cnt);
1752 }
1753 
1754 /**
1755  * hif_delayed_reg_write_deinit() - De-Initialize delayed reg write processing
1756  * @scn: hif_softc pointer
1757  *
1758  * De-initialize main data structures to process register writes in a delayed
1759  * workqueue.
1760  *
1761  * Return: None
1762  */
1763 static void hif_delayed_reg_write_deinit(struct hif_softc *scn)
1764 {
1765 	qdf_flush_work(&scn->reg_write_work);
1766 	qdf_disable_work(&scn->reg_write_work);
1767 	qdf_flush_workqueue(0, scn->reg_write_wq);
1768 	qdf_destroy_workqueue(0, scn->reg_write_wq);
1769 	qdf_mem_free(scn->reg_write_queue);
1770 }
1771 
1772 /**
1773  * hif_delayed_reg_write_init() - Initialization function for delayed reg writes
1774  * @scn: hif_softc pointer
1775  *
1776  * Initialize main data structures to process register writes in a delayed
1777  * workqueue.
1778  */
1779 
1780 static QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
1781 {
1782 	qdf_atomic_init(&scn->active_work_cnt);
1783 	scn->reg_write_wq =
1784 		qdf_alloc_high_prior_ordered_workqueue("hif_register_write_wq");
1785 	qdf_create_work(0, &scn->reg_write_work, hif_reg_write_work, scn);
1786 	scn->reg_write_queue = qdf_mem_malloc(HIF_REG_WRITE_QUEUE_LEN *
1787 					      sizeof(*scn->reg_write_queue));
1788 	if (!scn->reg_write_queue) {
1789 		hif_err("unable to allocate memory for delayed reg write");
1790 		QDF_BUG(0);
1791 		return QDF_STATUS_E_NOMEM;
1792 	}
1793 
1794 	/* Initial value of indices */
1795 	/* Initial indices: write_idx is -1 so the first enqueue lands on slot 0 */
1796 	qdf_atomic_set(&scn->write_idx, -1);
1797 
1798 	return QDF_STATUS_SUCCESS;
1799 }
1800 
1801 static void hif_reg_write_enqueue(struct hif_softc *scn,
1802 				  struct CE_state *ce_state,
1803 				  uint32_t value)
1804 {
1805 	struct hif_reg_write_q_elem *q_elem;
1806 	uint32_t write_idx;
1807 
1808 	if (ce_state->reg_write_in_progress) {
1809 		hif_debug("Already in progress ce_id %d offset 0x%x value %u",
1810 			  ce_state->id, ce_state->ce_wrt_idx_offset, value);
1811 		qdf_atomic_inc(&scn->wstats.coalesces);
1812 		ce_state->wstats.coalesces++;
1813 		return;
1814 	}
1815 
1816 	write_idx = qdf_atomic_inc_return(&scn->write_idx);
1817 	write_idx = write_idx & (HIF_REG_WRITE_QUEUE_LEN - 1);
1818 
1819 	q_elem = &scn->reg_write_queue[write_idx];
1820 	if (q_elem->valid) {
1821 		hif_err("queue full");
1822 		QDF_BUG(0);
1823 		return;
1824 	}
1825 
1826 	qdf_atomic_inc(&scn->wstats.enqueues);
1827 	ce_state->wstats.enqueues++;
1828 
1829 	qdf_atomic_inc(&scn->wstats.q_depth);
1830 
1831 	q_elem->ce_state = ce_state;
1832 	q_elem->offset = ce_state->ce_wrt_idx_offset;
1833 	q_elem->enqueue_val = value;
1834 	q_elem->enqueue_time = qdf_get_log_timestamp();
1835 
1836 	/*
1837 	 * Before the valid flag is set to true, all the other
1838 	 * fields in the q_elem need to be updated in memory.
1839 	 * Otherwise the dequeuing worker thread might read stale
1840 	 * entries and process an incorrect srng.
1841 	 */
1842 	qdf_wmb();
1843 	q_elem->valid = true;
1844 
1845 	/*
1846 	 * After all other fields in the q_elem have been updated
1847 	 * in memory, the valid flag also needs to reach memory in
1848 	 * time. Otherwise the dequeuing worker thread might read a
1849 	 * stale valid flag and skip the work for this round; if no
1850 	 * other work is scheduled later, this hal register write
1851 	 * would never be issued.
1853 	 */
1854 	qdf_wmb();
1855 
1856 	ce_state->reg_write_in_progress  = true;
1857 	qdf_atomic_inc(&scn->active_work_cnt);
1858 
1859 	hif_debug("write_idx %u ce_id %d offset 0x%x value %u",
1860 		  write_idx, ce_state->id, ce_state->ce_wrt_idx_offset, value);
1861 
1862 	qdf_queue_work(scn->qdf_dev, scn->reg_write_wq,
1863 		       &scn->reg_write_work);
1864 }
1865 
1866 void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
1867 			   uint32_t val)
1868 {
1869 	struct CE_state *ce_state;
1870 	int ce_id = COPY_ENGINE_ID(ctrl_addr);
1871 
1872 	ce_state = scn->ce_id_to_state[ce_id];
1873 
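	/*
	 * Dispatch policy: CEs that do not carry HTT data always go through
	 * the deferred write queue; HTT tx/rx data CEs write the index
	 * register directly when bus throughput is already high or the MHI
	 * link is already in L0, and are queued otherwise.
	 */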
1874 	if (!ce_state->htt_tx_data && !ce_state->htt_rx_data) {
1875 		hif_reg_write_enqueue(scn, ce_state, val);
1876 		return;
1877 	}
1878 
1879 	if (hif_is_reg_write_tput_level_high(scn) ||
1880 	    (PLD_MHI_STATE_L0 == pld_get_mhi_state(scn->qdf_dev->dev))) {
1881 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset, val);
1882 		qdf_atomic_inc(&scn->wstats.direct);
1883 		ce_state->wstats.direct++;
1884 	} else {
1885 		hif_reg_write_enqueue(scn, ce_state, val);
1886 	}
1887 }
1888 #else
1889 static inline QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
1890 {
1891 	return QDF_STATUS_SUCCESS;
1892 }
1893 
1894 static inline void  hif_delayed_reg_write_deinit(struct hif_softc *scn)
1895 {
1896 }
1897 #endif
1898 
1899 #if defined(QCA_WIFI_WCN6450)
1900 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1901 {
1902 	scn->hal_soc = hal_attach(hif_softc_to_hif_opaque_softc(scn),
1903 				  scn->qdf_dev);
1904 	if (!scn->hal_soc)
1905 		return QDF_STATUS_E_FAILURE;
1906 
1907 	return QDF_STATUS_SUCCESS;
1908 }
1909 
1910 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1911 {
1912 	hal_detach(scn->hal_soc);
1913 	scn->hal_soc = NULL;
1914 
1915 	return QDF_STATUS_SUCCESS;
1916 }
1917 #elif (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1918 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1919 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1920 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1921 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1922 	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
1923 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1924 {
1925 	if (ce_srng_based(scn)) {
1926 		scn->hal_soc = hal_attach(
1927 					hif_softc_to_hif_opaque_softc(scn),
1928 					scn->qdf_dev);
1929 		if (!scn->hal_soc)
1930 			return QDF_STATUS_E_FAILURE;
1931 	}
1932 
1933 	return QDF_STATUS_SUCCESS;
1934 }
1935 
1936 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1937 {
1938 	if (ce_srng_based(scn)) {
1939 		hal_detach(scn->hal_soc);
1940 		scn->hal_soc = NULL;
1941 	}
1942 
1943 	return QDF_STATUS_SUCCESS;
1944 }
1945 #else
1946 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1947 {
1948 	return QDF_STATUS_SUCCESS;
1949 }
1950 
1951 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1952 {
1953 	return QDF_STATUS_SUCCESS;
1954 }
1955 #endif
1956 
1957 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1958 {
1959 	int ret;
1960 
1961 	switch (bus_type) {
1962 	case QDF_BUS_TYPE_IPCI:
1963 		ret = qdf_set_dma_coherent_mask(dev,
1964 						DMA_COHERENT_MASK_DEFAULT);
1965 		if (ret) {
1966 			hif_err("Failed to set dma mask error = %d", ret);
1967 			return ret;
1968 		}
1969 
1970 		break;
1971 	default:
1972 		/* Follow the existing sequence for other targets */
1973 		break;
1974 	}
1975 
1976 	return 0;
1977 }
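/*
 * Example (illustrative only): a bus probe path would typically set the
 * DMA mask before any descriptor memory is allocated.
 *
 *	// hypothetical error handling in a probe routine
 *	if (hif_init_dma_mask(dev, QDF_BUS_TYPE_IPCI))
 *		return QDF_STATUS_E_FAILURE;
 */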
1978 
1979 /**
1980  * hif_enable(): enable HIF on the given bus
1981  * @hif_ctx: HIF context
1982  * @dev: device structure
1983  * @bdev: bus device structure
1984  * @bid: bus ID
1985  * @bus_type: bus type
1986  * @type: enable type
1987  *
1988  * Return: QDF_STATUS
1989  */
1990 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1991 					  void *bdev,
1992 					  const struct hif_bus_id *bid,
1993 					  enum qdf_bus_type bus_type,
1994 					  enum hif_enable_type type)
1995 {
1996 	QDF_STATUS status;
1997 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1998 
1999 	if (!scn) {
2000 		hif_err("hif_ctx = NULL");
2001 		return QDF_STATUS_E_NULL_VALUE;
2002 	}
2003 
2004 	status = hif_enable_bus(scn, dev, bdev, bid, type);
2005 	if (status != QDF_STATUS_SUCCESS) {
2006 		hif_err("hif_enable_bus error = %d", status);
2007 		return status;
2008 	}
2009 
2010 	status = hif_hal_attach(scn);
2011 	if (status != QDF_STATUS_SUCCESS) {
2012 		hif_err("hal attach failed");
2013 		goto disable_bus;
2014 	}
2015 
2016 	if (hif_delayed_reg_write_init(scn) != QDF_STATUS_SUCCESS) {
2017 		hif_err("unable to initialize delayed reg write");
2018 		goto hal_detach;
2019 	}
2020 
2021 	if (hif_bus_configure(scn)) {
2022 		hif_err("Target probe failed");
2023 		status = QDF_STATUS_E_FAILURE;
2024 		goto hal_detach;
2025 	}
2026 
2027 	hif_ut_suspend_init(scn);
2028 	hif_register_recovery_notifier(scn);
2029 	hif_latency_detect_timer_start(hif_ctx);
2030 
2031 	/*
2032 	 * Set this flag to avoid a potential access of unallocated
2033 	 * memory from the MSI interrupt handler, which can get
2034 	 * scheduled as soon as MSI is enabled, i.e. to handle the
2035 	 * race caused by MSI being enabled before the memory used
2036 	 * by the interrupt handlers is allocated.
2037 	 */
2038 
2039 	scn->hif_init_done = true;
2040 
2041 	hif_debug("OK");
2042 
2043 	return QDF_STATUS_SUCCESS;
2044 
2045 hal_detach:
2046 	hif_hal_detach(scn);
2047 disable_bus:
2048 	hif_disable_bus(scn);
2049 	return status;
2050 }
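/*
 * Usage sketch: hif_enable() is normally paired with hif_disable() by the
 * bus bring-up/teardown code. The enable/disable type values below are
 * assumptions for illustration only.
 *
 *	status = hif_enable(hif_ctx, dev, bdev, bid, bus_type,
 *			    HIF_ENABLE_TYPE_PROBE);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		goto fail;	// hypothetical caller label
 *	...
 *	hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
 */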
2051 
2052 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
2053 {
2054 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2055 
2056 	if (!scn)
2057 		return;
2058 
2059 	hif_delayed_reg_write_deinit(scn);
2060 	hif_set_enable_detection(hif_ctx, false);
2061 	hif_latency_detect_timer_stop(hif_ctx);
2062 
2063 	hif_unregister_recovery_notifier(scn);
2064 
2065 	hif_nointrs(scn);
2066 	if (scn->hif_init_done == false)
2067 		hif_shutdown_device(hif_ctx);
2068 	else
2069 		hif_stop(hif_ctx);
2070 
2071 	hif_hal_detach(scn);
2072 
2073 	hif_disable_bus(scn);
2074 
2075 	hif_wlan_disable(scn);
2076 
2077 	scn->notice_send = false;
2078 
2079 	hif_debug("X");
2080 }
2081 
2082 #ifdef CE_TASKLET_DEBUG_ENABLE
2083 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
2084 {
2085 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2086 
2087 	if (!scn)
2088 		return;
2089 
2090 	scn->ce_latency_stats = val;
2091 }
2092 #endif
2093 
2094 void hif_display_stats(struct hif_opaque_softc *hif_ctx)
2095 {
2096 	hif_display_bus_stats(hif_ctx);
2097 }
2098 
2099 qdf_export_symbol(hif_display_stats);
2100 
2101 void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
2102 {
2103 	hif_clear_bus_stats(hif_ctx);
2104 }
2105 
2106 /**
2107  * hif_crash_shutdown_dump_bus_register() - dump bus registers
2108  * @hif_ctx: hif_ctx
2109  *
2110  * Return: n/a
2111  */
2112 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
2113 
2114 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
2115 {
2116 	struct hif_opaque_softc *scn = hif_ctx;
2117 
2118 	if (hif_check_soc_status(scn))
2119 		return;
2120 
2121 	if (hif_dump_registers(scn))
2122 		hif_err("Failed to dump bus registers!");
2123 }
2124 
2125 /**
2126  * hif_crash_shutdown(): dump CE registers on a host crash
2127  * @hif_ctx: HIF context
2128  *
2129  * This function is called by the platform driver to dump CE registers
2130  * and collect a target RAM dump when the kernel panics.
2131  *
2132  * Return: n/a
2133  */
2134 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
2135 {
2136 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2137 
2138 	if (!hif_ctx)
2139 		return;
2140 
2141 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
2142 		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
2143 		return;
2144 	}
2145 
2146 	if (TARGET_STATUS_RESET == scn->target_status) {
2147 		hif_warn("Target is already asserted, ignore!");
2148 		return;
2149 	}
2150 
2151 	if (hif_is_load_or_unload_in_progress(scn)) {
2152 		hif_err("Load/unload is in progress, ignore!");
2153 		return;
2154 	}
2155 
2156 	hif_crash_shutdown_dump_bus_register(hif_ctx);
2157 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
2158 
2159 	if (ol_copy_ramdump(hif_ctx))
2160 		goto out;
2161 
2162 	hif_info("RAM dump collection completed!");
2163 
2164 out:
2165 	return;
2166 }
2167 #else
2168 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
2169 {
2170 	hif_debug("Collecting target RAM dump disabled");
2171 }
2172 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
2173 
2174 #ifdef QCA_WIFI_3_0
2175 /**
2176  * hif_check_fw_reg(): hif_check_fw_reg
2177  * @scn: scn
2178  *
2179  * Return: int
2180  */
2181 int hif_check_fw_reg(struct hif_opaque_softc *scn)
2182 {
2183 	return 0;
2184 }
2185 #endif
2186 
2187 /**
2188  * hif_read_phy_mem_base(): hif_read_phy_mem_base
2189  * @scn: scn
2190  * @phy_mem_base: physical mem base
2191  *
2192  * Return: n/a
2193  */
2194 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
2195 {
2196 	*phy_mem_base = scn->mem_pa;
2197 }
2198 qdf_export_symbol(hif_read_phy_mem_base);
2199 
2200 /**
2201  * hif_get_device_type(): hif_get_device_type
2202  * @device_id: device_id
2203  * @revision_id: revision_id
2204  * @hif_type: returned hif_type
2205  * @target_type: returned target_type
2206  *
2207  * Return: int
2208  */
2209 int hif_get_device_type(uint32_t device_id,
2210 			uint32_t revision_id,
2211 			uint32_t *hif_type, uint32_t *target_type)
2212 {
2213 	int ret = 0;
2214 
2215 	switch (device_id) {
2216 	case ADRASTEA_DEVICE_ID_P2_E12:
2217 
2218 		*hif_type = HIF_TYPE_ADRASTEA;
2219 		*target_type = TARGET_TYPE_ADRASTEA;
2220 		break;
2221 
2222 	case AR9888_DEVICE_ID:
2223 		*hif_type = HIF_TYPE_AR9888;
2224 		*target_type = TARGET_TYPE_AR9888;
2225 		break;
2226 
2227 	case AR6320_DEVICE_ID:
2228 		switch (revision_id) {
2229 		case AR6320_FW_1_1:
2230 		case AR6320_FW_1_3:
2231 			*hif_type = HIF_TYPE_AR6320;
2232 			*target_type = TARGET_TYPE_AR6320;
2233 			break;
2234 
2235 		case AR6320_FW_2_0:
2236 		case AR6320_FW_3_0:
2237 		case AR6320_FW_3_2:
2238 			*hif_type = HIF_TYPE_AR6320V2;
2239 			*target_type = TARGET_TYPE_AR6320V2;
2240 			break;
2241 
2242 		default:
2243 			hif_err("dev_id = 0x%x, rev_id = 0x%x",
2244 				device_id, revision_id);
2245 			ret = -ENODEV;
2246 			goto end;
2247 		}
2248 		break;
2249 
2250 	case AR9887_DEVICE_ID:
2251 		*hif_type = HIF_TYPE_AR9888;
2252 		*target_type = TARGET_TYPE_AR9888;
2253 		hif_info(" *********** AR9887 **************");
2254 		break;
2255 
2256 	case QCA9984_DEVICE_ID:
2257 		*hif_type = HIF_TYPE_QCA9984;
2258 		*target_type = TARGET_TYPE_QCA9984;
2259 		hif_info(" *********** QCA9984 *************");
2260 		break;
2261 
2262 	case QCA9888_DEVICE_ID:
2263 		*hif_type = HIF_TYPE_QCA9888;
2264 		*target_type = TARGET_TYPE_QCA9888;
2265 		hif_info(" *********** QCA9888 *************");
2266 		break;
2267 
2268 	case AR900B_DEVICE_ID:
2269 		*hif_type = HIF_TYPE_AR900B;
2270 		*target_type = TARGET_TYPE_AR900B;
2271 		hif_info(" *********** AR900B *************");
2272 		break;
2273 
2274 	case QCA8074_DEVICE_ID:
2275 		*hif_type = HIF_TYPE_QCA8074;
2276 		*target_type = TARGET_TYPE_QCA8074;
2277 		hif_info(" *********** QCA8074  *************");
2278 		break;
2279 
2280 	case QCA6290_EMULATION_DEVICE_ID:
2281 	case QCA6290_DEVICE_ID:
2282 		*hif_type = HIF_TYPE_QCA6290;
2283 		*target_type = TARGET_TYPE_QCA6290;
2284 		hif_info(" *********** QCA6290 *************");
2285 		break;
2286 
2287 	case QCN9000_DEVICE_ID:
2288 		*hif_type = HIF_TYPE_QCN9000;
2289 		*target_type = TARGET_TYPE_QCN9000;
2290 		hif_info(" *********** QCN9000 *************");
2291 		break;
2292 
2293 	case QCN9224_DEVICE_ID:
2294 		*hif_type = HIF_TYPE_QCN9224;
2295 		*target_type = TARGET_TYPE_QCN9224;
2296 		hif_info(" *********** QCN9224 *************");
2297 		break;
2298 
2299 	case QCN6122_DEVICE_ID:
2300 		*hif_type = HIF_TYPE_QCN6122;
2301 		*target_type = TARGET_TYPE_QCN6122;
2302 		hif_info(" *********** QCN6122 *************");
2303 		break;
2304 
2305 	case QCN9160_DEVICE_ID:
2306 		*hif_type = HIF_TYPE_QCN9160;
2307 		*target_type = TARGET_TYPE_QCN9160;
2308 		hif_info(" *********** QCN9160 *************");
2309 		break;
2310 
2311 	case QCN6432_DEVICE_ID:
2312 		*hif_type = HIF_TYPE_QCN6432;
2313 		*target_type = TARGET_TYPE_QCN6432;
2314 		hif_info(" *********** QCN6432 *************");
2315 		break;
2316 
2317 	case QCN7605_DEVICE_ID:
2318 	case QCN7605_COMPOSITE:
2319 	case QCN7605_STANDALONE:
2320 	case QCN7605_STANDALONE_V2:
2321 	case QCN7605_COMPOSITE_V2:
2322 		*hif_type = HIF_TYPE_QCN7605;
2323 		*target_type = TARGET_TYPE_QCN7605;
2324 		hif_info(" *********** QCN7605 *************");
2325 		break;
2326 
2327 	case QCA6390_DEVICE_ID:
2328 	case QCA6390_EMULATION_DEVICE_ID:
2329 		*hif_type = HIF_TYPE_QCA6390;
2330 		*target_type = TARGET_TYPE_QCA6390;
2331 		hif_info(" *********** QCA6390 *************");
2332 		break;
2333 
2334 	case QCA6490_DEVICE_ID:
2335 	case QCA6490_EMULATION_DEVICE_ID:
2336 		*hif_type = HIF_TYPE_QCA6490;
2337 		*target_type = TARGET_TYPE_QCA6490;
2338 		hif_info(" *********** QCA6490 *************");
2339 		break;
2340 
2341 	case QCA6750_DEVICE_ID:
2342 	case QCA6750_EMULATION_DEVICE_ID:
2343 		*hif_type = HIF_TYPE_QCA6750;
2344 		*target_type = TARGET_TYPE_QCA6750;
2345 		hif_info(" *********** QCA6750 *************");
2346 		break;
2347 
2348 	case KIWI_DEVICE_ID:
2349 		*hif_type = HIF_TYPE_KIWI;
2350 		*target_type = TARGET_TYPE_KIWI;
2351 		hif_info(" *********** KIWI *************");
2352 		break;
2353 
2354 	case MANGO_DEVICE_ID:
2355 		*hif_type = HIF_TYPE_MANGO;
2356 		*target_type = TARGET_TYPE_MANGO;
2357 		hif_info(" *********** MANGO *************");
2358 		break;
2359 
2360 	case PEACH_DEVICE_ID:
2361 		*hif_type = HIF_TYPE_PEACH;
2362 		*target_type = TARGET_TYPE_PEACH;
2363 		hif_info(" *********** PEACH *************");
2364 		break;
2365 
2366 	case QCA8074V2_DEVICE_ID:
2367 		*hif_type = HIF_TYPE_QCA8074V2;
2368 		*target_type = TARGET_TYPE_QCA8074V2;
2369 		hif_info(" *********** QCA8074V2 *************");
2370 		break;
2371 
2372 	case QCA6018_DEVICE_ID:
2373 	case RUMIM2M_DEVICE_ID_NODE0:
2374 	case RUMIM2M_DEVICE_ID_NODE1:
2375 	case RUMIM2M_DEVICE_ID_NODE2:
2376 	case RUMIM2M_DEVICE_ID_NODE3:
2377 	case RUMIM2M_DEVICE_ID_NODE4:
2378 	case RUMIM2M_DEVICE_ID_NODE5:
2379 		*hif_type = HIF_TYPE_QCA6018;
2380 		*target_type = TARGET_TYPE_QCA6018;
2381 		hif_info(" *********** QCA6018 *************");
2382 		break;
2383 
2384 	case QCA5018_DEVICE_ID:
2385 		*hif_type = HIF_TYPE_QCA5018;
2386 		*target_type = TARGET_TYPE_QCA5018;
2387 		hif_info(" *********** qca5018 *************");
2388 		break;
2389 
2390 	case QCA5332_DEVICE_ID:
2391 		*hif_type = HIF_TYPE_QCA5332;
2392 		*target_type = TARGET_TYPE_QCA5332;
2393 		hif_info(" *********** QCA5332 *************");
2394 		break;
2395 
2396 	case QCA9574_DEVICE_ID:
2397 		*hif_type = HIF_TYPE_QCA9574;
2398 		*target_type = TARGET_TYPE_QCA9574;
2399 		hif_info(" *********** QCA9574 *************");
2400 		break;
2401 
2402 	case WCN6450_DEVICE_ID:
2403 		*hif_type = HIF_TYPE_WCN6450;
2404 		*target_type = TARGET_TYPE_WCN6450;
2405 		hif_info(" *********** WCN6450 *************");
2406 		break;
2407 
2408 	default:
2409 		hif_err("Unsupported device ID = 0x%x!", device_id);
2410 		ret = -ENODEV;
2411 		break;
2412 	}
2413 
2414 	if (*target_type == TARGET_TYPE_UNKNOWN) {
2415 		hif_err("Unsupported target_type!");
2416 		ret = -ENODEV;
2417 	}
2418 end:
2419 	return ret;
2420 }
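/*
 * Example (illustrative): mapping a device/revision ID pair to HIF and
 * target types before the bus is configured.
 *
 *	uint32_t hif_type, target_type;
 *
 *	if (hif_get_device_type(device_id, revision_id,
 *				&hif_type, &target_type))
 *		return -ENODEV;	// unsupported device, as reported above
 */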
2421 
2422 /**
2423  * hif_get_bus_type() - return the bus type
2424  * @hif_hdl: HIF Context
2425  *
2426  * Return: enum qdf_bus_type
2427  */
2428 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
2429 {
2430 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2431 
2432 	return scn->bus_type;
2433 }
2434 
2435 /*
2436  * Target info and ini parameters are global to the driver.
2437  * Hence these structures are exposed to all the modules in
2438  * the driver and they don't need to maintain multiple copies
2439  * of the same info; instead, get the handle from hif and
2440  * modify them in hif.
2441  */
2442 
2443 /**
2444  * hif_get_ini_handle() - API to get hif_config_info handle
2445  * @hif_ctx: HIF Context
2446  *
2447  * Return: pointer to hif_config_info
2448  */
2449 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
2450 {
2451 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
2452 
2453 	return &sc->hif_config;
2454 }
2455 
2456 /**
2457  * hif_get_target_info_handle() - API to get hif_target_info handle
2458  * @hif_ctx: HIF context
2459  *
2460  * Return: Pointer to hif_target_info
2461  */
2462 struct hif_target_info *hif_get_target_info_handle(
2463 					struct hif_opaque_softc *hif_ctx)
2464 {
2465 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
2466 
2467 	return &sc->target_info;
2469 }
2470 qdf_export_symbol(hif_get_target_info_handle);
2471 
2472 #ifdef RECEIVE_OFFLOAD
2473 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
2474 				 void (offld_flush_handler)(void *))
2475 {
2476 	if (hif_napi_enabled(scn, -1))
2477 		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
2478 	else
2479 		hif_err("NAPI not enabled");
2480 }
2481 qdf_export_symbol(hif_offld_flush_cb_register);
2482 
2483 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
2484 {
2485 	if (hif_napi_enabled(scn, -1))
2486 		hif_napi_rx_offld_flush_cb_deregister(scn);
2487 	else
2488 		hif_err("NAPI not enabled");
2489 }
2490 qdf_export_symbol(hif_offld_flush_cb_deregister);
2491 
2492 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
2493 {
2494 	if (hif_napi_enabled(hif_hdl, -1))
2495 		return NAPI_PIPE2ID(ctx_id);
2496 	else
2497 		return ctx_id;
2498 }
2499 #else /* RECEIVE_OFFLOAD */
2500 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
2501 {
2502 	return 0;
2503 }
2504 qdf_export_symbol(hif_get_rx_ctx_id);
2505 #endif /* RECEIVE_OFFLOAD */
2506 
2507 #if defined(FEATURE_LRO)
2508 
2509 /**
2510  * hif_get_lro_info - Returns LRO instance for instance ID
2511  * @ctx_id: LRO instance ID
2512  * @hif_hdl: HIF Context
2513  *
2514  * Return: Pointer to LRO instance.
2515  */
2516 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
2517 {
2518 	void *data;
2519 
2520 	if (hif_napi_enabled(hif_hdl, -1))
2521 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
2522 	else
2523 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
2524 
2525 	return data;
2526 }
2527 #endif
2528 
2529 /**
2530  * hif_get_target_status - API to get target status
2531  * @hif_ctx: HIF Context
2532  *
2533  * Return: enum hif_target_status
2534  */
2535 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
2536 {
2537 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2538 
2539 	return scn->target_status;
2540 }
2541 qdf_export_symbol(hif_get_target_status);
2542 
2543 /**
2544  * hif_set_target_status() - API to set target status
2545  * @hif_ctx: HIF Context
2546  * @status: Target Status
2547  *
2548  * Return: void
2549  */
2550 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
2551 			   hif_target_status status)
2552 {
2553 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2554 
2555 	scn->target_status = status;
2556 }
2557 
2558 /**
2559  * hif_init_ini_config() - API to initialize HIF configuration parameters
2560  * @hif_ctx: HIF Context
2561  * @cfg: HIF Configuration
2562  *
2563  * Return: void
2564  */
2565 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
2566 			 struct hif_config_info *cfg)
2567 {
2568 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2569 
2570 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
2571 }
2572 
2573 /**
2574  * hif_get_conparam() - API to get driver mode in HIF
2575  * @scn: HIF Context
2576  *
2577  * Return: driver mode of operation
2578  */
2579 uint32_t hif_get_conparam(struct hif_softc *scn)
2580 {
2581 	if (!scn)
2582 		return 0;
2583 
2584 	return scn->hif_con_param;
2585 }
2586 
2587 /**
2588  * hif_get_callbacks_handle() - API to get callbacks Handle
2589  * @scn: HIF Context
2590  *
2591  * Return: pointer to HIF Callbacks
2592  */
2593 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
2594 							struct hif_softc *scn)
2595 {
2596 	return &scn->callbacks;
2597 }
2598 
2599 /**
2600  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
2601  * @scn: HIF Context
2602  *
2603  * Return: True/False
2604  */
2605 bool hif_is_driver_unloading(struct hif_softc *scn)
2606 {
2607 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2608 
2609 	if (cbk && cbk->is_driver_unloading)
2610 		return cbk->is_driver_unloading(cbk->context);
2611 
2612 	return false;
2613 }
2614 
2615 /**
2616  * hif_is_load_or_unload_in_progress() - API to query upper layers if
2617  * load/unload in progress
2618  * @scn: HIF Context
2619  *
2620  * Return: True/False
2621  */
2622 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
2623 {
2624 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2625 
2626 	if (cbk && cbk->is_load_unload_in_progress)
2627 		return cbk->is_load_unload_in_progress(cbk->context);
2628 
2629 	return false;
2630 }
2631 
2632 /**
2633  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
2634  * progress
2635  * @scn: HIF Context
2636  *
2637  * Return: True/False
2638  */
2639 bool hif_is_recovery_in_progress(struct hif_softc *scn)
2640 {
2641 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2642 
2643 	if (cbk && cbk->is_recovery_in_progress)
2644 		return cbk->is_recovery_in_progress(cbk->context);
2645 
2646 	return false;
2647 }
2648 
2649 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
2650     defined(HIF_IPCI)
2651 
2652 /**
2653  * hif_update_pipe_callback() - API to register pipe specific callbacks
2654  * @osc: Opaque softc
2655  * @pipeid: pipe id
2656  * @callbacks: callbacks to register
2657  *
2658  * Return: void
2659  */
2660 
2661 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
2662 					u_int8_t pipeid,
2663 					struct hif_msg_callbacks *callbacks)
2664 {
2665 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
2666 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2667 	struct HIF_CE_pipe_info *pipe_info;
2668 
2669 	QDF_BUG(pipeid < CE_COUNT_MAX);
2670 
2671 	hif_debug("pipeid: %d", pipeid);
2672 
2673 	pipe_info = &hif_state->pipe_info[pipeid];
2674 
2675 	qdf_mem_copy(&pipe_info->pipe_callbacks,
2676 			callbacks, sizeof(pipe_info->pipe_callbacks));
2677 }
2678 qdf_export_symbol(hif_update_pipe_callback);
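/*
 * Usage sketch (field names intentionally omitted): the caller fills in a
 * hif_msg_callbacks structure with its send/receive completion handlers
 * and context, then registers it for a specific copy engine pipe.
 *
 *	struct hif_msg_callbacks cbs = { 0 };
 *
 *	// ... populate completion handlers and context ...
 *	hif_update_pipe_callback(osc, pipe_id, &cbs);
 */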
2679 
2680 /**
2681  * hif_is_target_ready() - API to query if target is in ready state
2683  * @scn: HIF Context
2684  *
2685  * Return: True/False
2686  */
2687 bool hif_is_target_ready(struct hif_softc *scn)
2688 {
2689 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2690 
2691 	if (cbk && cbk->is_target_ready)
2692 		return cbk->is_target_ready(cbk->context);
2693 	/*
2694 	 * If the callback is not registered then there is no way to
2695 	 * determine if the target is ready. In such a case return true
2696 	 * to indicate that the target is ready.
2697 	 */
2698 	return true;
2699 }
2700 qdf_export_symbol(hif_is_target_ready);
2701 
2702 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
2703 {
2704 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
2705 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2706 
2707 	if (cbk && cbk->get_bandwidth_level)
2708 		return cbk->get_bandwidth_level(cbk->context);
2709 
2710 	return 0;
2711 }
2712 
2713 qdf_export_symbol(hif_get_bandwidth_level);
2714 
2715 #ifdef DP_MEM_PRE_ALLOC
2716 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
2717 					 qdf_size_t size,
2718 					 qdf_dma_addr_t *paddr,
2719 					 uint32_t ring_type,
2720 					 uint8_t *is_mem_prealloc)
2721 {
2722 	void *vaddr = NULL;
2723 	struct hif_driver_state_callbacks *cbk =
2724 				hif_get_callbacks_handle(scn);
2725 
2726 	*is_mem_prealloc = false;
2727 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
2728 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
2729 								   paddr,
2730 								   ring_type);
2731 		if (vaddr) {
2732 			*is_mem_prealloc = true;
2733 			goto end;
2734 		}
2735 	}
2736 
2737 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
2738 					 scn->qdf_dev->dev,
2739 					 size,
2740 					 paddr);
2741 end:
2742 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
2743 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
2744 		(void *)*paddr, (int)size, ring_type);
2745 
2746 	return vaddr;
2747 }
2748 
2749 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
2750 				       qdf_size_t size,
2751 				       void *vaddr,
2752 				       qdf_dma_addr_t paddr,
2753 				       qdf_dma_context_t memctx,
2754 				       uint8_t is_mem_prealloc)
2755 {
2756 	struct hif_driver_state_callbacks *cbk =
2757 				hif_get_callbacks_handle(scn);
2758 
2759 	if (is_mem_prealloc) {
2760 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
2761 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
2762 		} else {
2763 			dp_warn("dp_prealloc_put_consistent_unaligned NULL");
2764 			QDF_BUG(0);
2765 		}
2766 	} else {
2767 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2768 					size, vaddr, paddr, memctx);
2769 	}
2770 }
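/*
 * Usage sketch (hypothetical ring setup): the is_mem_prealloc flag returned
 * by the allocator must be passed back on free, so the buffer is either
 * returned to the pre-allocated pool or released to the DMA allocator.
 * The 0 passed for memctx below is only a placeholder.
 *
 *	uint8_t is_prealloc;
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = hif_mem_alloc_consistent_unaligned(scn, size, &paddr,
 *						    ring_type, &is_prealloc);
 *	...
 *	hif_mem_free_consistent_unaligned(scn, size, vaddr, paddr, 0,
 *					  is_prealloc);
 */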
2771 
2772 void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
2773 				  qdf_size_t elem_size, uint16_t elem_num,
2774 				  struct qdf_mem_multi_page_t *pages,
2775 				  bool cacheable)
2776 {
2777 	struct hif_driver_state_callbacks *cbk =
2778 			hif_get_callbacks_handle(scn);
2779 
2780 	if (cbk && cbk->prealloc_get_multi_pages)
2781 		cbk->prealloc_get_multi_pages(desc_type, elem_size, elem_num,
2782 					      pages, cacheable);
2783 
2784 	if (!pages->num_pages)
2785 		qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
2786 					  elem_size, elem_num, 0, cacheable);
2787 }
2788 
2789 void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
2790 				  struct qdf_mem_multi_page_t *pages,
2791 				  bool cacheable)
2792 {
2793 	struct hif_driver_state_callbacks *cbk =
2794 			hif_get_callbacks_handle(scn);
2795 
2796 	if (cbk && cbk->prealloc_put_multi_pages &&
2797 	    pages->is_mem_prealloc)
2798 		cbk->prealloc_put_multi_pages(desc_type, pages);
2799 
2800 	if (!pages->is_mem_prealloc)
2801 		qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
2802 					 cacheable);
2803 }
2804 #endif
2805 
2806 /**
2807  * hif_batch_send() - API to access hif specific function
2808  * ce_batch_send.
2809  * @osc: HIF Context
2810  * @msdu: list of msdus to be sent
2811  * @transfer_id: transfer id
2812  * @len: downloaded length
2813  * @sendhead: sendhead flag passed through to ce_batch_send
2814  *
2815  * Return: list of msdus not sent
2816  */
2817 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2818 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
2819 {
2820 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2821 
2822 	if (!ce_tx_hdl)
2823 		return NULL;
2824 
2825 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2826 			len, sendhead);
2827 }
2828 qdf_export_symbol(hif_batch_send);
2829 
2830 /**
2831  * hif_update_tx_ring() - API to access hif specific function
2832  * ce_update_tx_ring.
2833  * @osc: HIF Context
2834  * @num_htt_cmpls: number of htt compl received.
2835  *
2836  * Return: void
2837  */
2838 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2839 {
2840 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2841 
2842 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2843 }
2844 qdf_export_symbol(hif_update_tx_ring);
2845 
2846 
2847 /**
2848  * hif_send_single() - API to access hif specific function
2849  * ce_send_single.
2850  * @osc: HIF Context
2851  * @msdu: msdu to be sent
2852  * @transfer_id: transfer id
2853  * @len: downloaded length
2854  *
2855  * Return: msdu sent status
2856  */
2857 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2858 			   uint32_t transfer_id, u_int32_t len)
2859 {
2860 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2861 
2862 	if (!ce_tx_hdl)
2863 		return QDF_STATUS_E_NULL_VALUE;
2864 
2865 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2866 			len);
2867 }
2868 qdf_export_symbol(hif_send_single);
2869 #endif
2870 
2871 /**
2872  * hif_reg_write() - API to access hif specific function
2873  * hif_write32_mb.
2874  * @hif_ctx: HIF Context
2875  * @offset: offset at which the value has to be written
2876  * @value: value to be written
2877  *
2878  * Return: None
2879  */
2880 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2881 		uint32_t value)
2882 {
2883 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2884 
2885 	hif_write32_mb(scn, scn->mem + offset, value);
2887 }
2888 qdf_export_symbol(hif_reg_write);
2889 
2890 /**
2891  * hif_reg_read() - API to access hif specific function
2892  * hif_read32_mb.
2893  * @hif_ctx: HIF Context
2894  * @offset: offset from which the value has to be read
2895  *
2896  * Return: Read value
2897  */
2898 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
2899 {
2901 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2902 
2903 	return hif_read32_mb(scn, scn->mem + offset);
2904 }
2905 qdf_export_symbol(hif_reg_read);
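/*
 * Illustrative read-modify-write using the two accessors above; the offset
 * and bit mask are hypothetical.
 *
 *	uint32_t val = hif_reg_read(hif_ctx, REG_OFFSET);
 *
 *	hif_reg_write(hif_ctx, REG_OFFSET, val | SOME_ENABLE_BIT);
 */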
2906 
2907 /**
2908  * hif_ramdump_handler(): generic ramdump handler
2909  * @scn: struct hif_opaque_softc
2910  *
2911  * Return: None
2912  */
2913 void hif_ramdump_handler(struct hif_opaque_softc *scn)
2914 {
2915 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2916 		hif_usb_ramdump_handler(scn);
2917 }
2918 
2919 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
2920 {
2921 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2922 
2923 	return scn->wake_irq_type;
2924 }
2925 
2926 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
2927 {
2928 	struct hif_softc *scn = context;
2929 
2930 	hif_info("wake interrupt received on irq %d", irq);
2931 
2932 	hif_rtpm_set_monitor_wake_intr(0);
2933 	hif_rtpm_request_resume();
2934 
2935 	if (scn->initial_wakeup_cb)
2936 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
2937 
2938 	if (hif_is_ut_suspended(scn))
2939 		hif_ut_fw_resume(scn);
2940 
2941 	qdf_pm_system_wakeup();
2942 
2943 	return IRQ_HANDLED;
2944 }
2945 
2946 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2947 			       void (*callback)(void *),
2948 			       void *priv)
2949 {
2950 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2951 
2952 	scn->initial_wakeup_cb = callback;
2953 	scn->initial_wakeup_priv = priv;
2954 }
2955 
2956 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2957 				       uint32_t ce_service_max_yield_time)
2958 {
2959 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2960 
2961 	hif_ctx->ce_service_max_yield_time =
2962 		ce_service_max_yield_time * 1000;
2963 }
2964 
2965 unsigned long long
2966 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
2967 {
2968 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2969 
2970 	return hif_ctx->ce_service_max_yield_time;
2971 }
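/*
 * A minimal sketch: the value passed to the setter is multiplied by 1000
 * before being stored, and the getter returns the stored (scaled) value.
 * The variable names below are illustrative only.
 *
 *	hif_set_ce_service_max_yield_time(hif, ini_yield_time);
 *	scaled_yield_time = hif_get_ce_service_max_yield_time(hif);
 */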
2972 
2973 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2974 				       uint8_t ce_service_max_rx_ind_flush)
2975 {
2976 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2977 
2978 	if (ce_service_max_rx_ind_flush == 0 ||
2979 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
2980 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2981 	else
2982 		hif_ctx->ce_service_max_rx_ind_flush =
2983 						ce_service_max_rx_ind_flush;
2984 }
2985 
2986 #ifdef SYSTEM_PM_CHECK
2987 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2988 			       enum hif_system_pm_state state)
2989 {
2990 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2991 
2992 	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
2993 }
2994 
2995 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
2996 {
2997 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2998 
2999 	return qdf_atomic_read(&hif_ctx->sys_pm_state);
3000 }
3001 
3002 int hif_system_pm_state_check(struct hif_opaque_softc *hif)
3003 {
3004 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3005 	int32_t sys_pm_state;
3006 
3007 	if (!hif_ctx) {
3008 		hif_err("hif context is null");
3009 		return -EFAULT;
3010 	}
3011 
3012 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
3013 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
3014 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
3015 		hif_info("Triggering system wakeup");
3016 		qdf_pm_system_wakeup();
3017 		return -EAGAIN;
3018 	}
3019 
3020 	return 0;
3021 }
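/*
 * Illustrative caller (a sketch): a register access path can bail out when
 * the bus is suspending or suspended; hif_system_pm_state_check() has
 * already triggered a system wakeup in that case.
 *
 *	if (hif_system_pm_state_check(hif) == -EAGAIN)
 *		return;	// hypothetical: defer the access until resume
 */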
3022 #endif
3023 #ifdef WLAN_FEATURE_AFFINITY_MGR
3024 /*
3025  * hif_audio_cpu_affinity_allowed() - Check if audio cpu affinity allowed
3026  *
3027  * @scn: hif handle
3028  * @cfg: hif affinity manager configuration for IRQ
3029  * @audio_taken_cpu: Current CPUs which are taken by audio.
3030  * @current_time: Current system time.
3031  *
3032  * This API checks for 2 conditions:
3033  *  1) The last audio taken mask and the current taken mask are different
3034  *  2) The time since the IRQ was last affined away due to audio taken CPUs
3035  *     exceeds the time threshold (5 seconds in the current case).
3036  * Return true only if both conditions are satisfied.
3037  *
3038  * Return: bool: true if it is allowed to affine away audio taken cpus.
3039  */
3040 static inline bool
3041 hif_audio_cpu_affinity_allowed(struct hif_softc *scn,
3042 			       struct hif_cpu_affinity *cfg,
3043 			       qdf_cpu_mask audio_taken_cpu,
3044 			       uint64_t current_time)
3045 {
3046 	if (!qdf_cpumask_equal(&audio_taken_cpu, &cfg->walt_taken_mask) &&
3047 	    (qdf_log_timestamp_to_usecs(current_time -
3048 			 cfg->last_affined_away)
3049 		< scn->time_threshold))
3050 		return false;
3051 	return true;
3052 }
3053 
3054 /*
3055  * hif_affinity_mgr_check_update_mask() - Check if cpu mask needs to be updated
3056  *
3057  * @scn: hif handle
3058  * @cfg: hif affinity manager configuration for IRQ
3059  * @audio_taken_cpu: Current CPUs which are taken by audio.
3060  * @cpu_mask: CPU mask which need to be updated.
3061  * @current_time: Current system time.
3062  *
3063  * This API checks if the Pro audio use case is running and if cpu_mask needs
3064  * to be updated.
3065  *
3066  * Return: QDF_STATUS
3067  */
3068 static inline QDF_STATUS
3069 hif_affinity_mgr_check_update_mask(struct hif_softc *scn,
3070 				   struct hif_cpu_affinity *cfg,
3071 				   qdf_cpu_mask audio_taken_cpu,
3072 				   qdf_cpu_mask *cpu_mask,
3073 				   uint64_t current_time)
3074 {
3075 	qdf_cpu_mask allowed_mask;
3076 
3077 	/*
3078 	 * Case 1: audio_taken_mask is empty
3079 	 *   Check if passed cpu_mask and wlan_requested_mask is same or not.
3080 	 *      If both mask are different copy wlan_requested_mask(IRQ affinity
3081 	 *      mask requested by WLAN) to cpu_mask.
3082 	 *
3083 	 * Case 2: audio_taken_mask is not empty
3084 	 *   1. Only allow update if last time when IRQ was affined away due to
3085 	 *      audio taken CPUs is more than 5 seconds or update is requested
3086 	 *      by WLAN
3087 	 *   2. Only allow silver cores to be affined away.
3088 	 *   3. Check if any allowed CPUs for audio use case is set in cpu_mask.
3089 	 *       i. If any CPU mask is set, mask out that CPU from the cpu_mask
3090 	 *       ii. If after masking out audio taken cpu(Silver cores) cpu_mask
3091 	 *           is empty, set mask to all cpu except cpus taken by audio.
3092 	 * Example:
3093 	 *| Audio mask | mask allowed | cpu_mask | WLAN req mask | new cpu_mask|
3094 	 *|  0x00      |       0x00   |   0x0C   |       0x0C    |      0x0C   |
3095 	 *|  0x00      |       0x00   |   0x03   |       0x03    |      0x03   |
3096 	 *|  0x00      |       0x00   |   0xFC   |       0x03    |      0x03   |
3097 	 *|  0x00      |       0x00   |   0x03   |       0x0C    |      0x0C   |
3098 	 *|  0x0F      |       0x03   |   0x0C   |       0x0C    |      0x0C   |
3099 	 *|  0x0F      |       0x03   |   0x03   |       0x03    |      0xFC   |
3100 	 *|  0x03      |       0x03   |   0x0C   |       0x0C    |      0x0C   |
3101 	 *|  0x03      |       0x03   |   0x03   |       0x03    |      0xFC   |
3102 	 *|  0x03      |       0x03   |   0xFC   |       0x03    |      0xFC   |
3103 	 *|  0xF0      |       0x00   |   0x0C   |       0x0C    |      0x0C   |
3104 	 *|  0xF0      |       0x00   |   0x03   |       0x03    |      0x03   |
3105 	 */
3106 
3107 	/* Check if audio taken mask is empty*/
3108 	if (qdf_likely(qdf_cpumask_empty(&audio_taken_cpu))) {
3109 		/* If CPU mask requested by WLAN for the IRQ and
3110 		 * cpu_mask passed CPU mask set for IRQ is different
3111 		 * Copy requested mask into cpu_mask and return
3112 		 */
3113 		if (qdf_unlikely(!qdf_cpumask_equal(cpu_mask,
3114 						    &cfg->wlan_requested_mask))) {
3115 			qdf_cpumask_copy(cpu_mask, &cfg->wlan_requested_mask);
3116 			return QDF_STATUS_SUCCESS;
3117 		}
3118 		return QDF_STATUS_E_ALREADY;
3119 	}
3120 
3121 	if (!(hif_audio_cpu_affinity_allowed(scn, cfg, audio_taken_cpu,
3122 					     current_time) ||
3123 	      cfg->update_requested))
3124 		return QDF_STATUS_E_AGAIN;
3125 
3126 	/* Only allow Silver cores to be affined away */
3127 	qdf_cpumask_and(&allowed_mask, &scn->allowed_mask, &audio_taken_cpu);
3128 	if (qdf_cpumask_intersects(cpu_mask, &allowed_mask)) {
3129 		/* If any of taken CPU(Silver cores) mask is set in cpu_mask,
3130 		 *  mask out the audio taken CPUs from the cpu_mask.
3131 		 */
3132 		qdf_cpumask_andnot(cpu_mask, &cfg->wlan_requested_mask,
3133 				   &allowed_mask);
3134 		/* If cpu_mask is empty set it to all CPUs
3135 		 * except taken by audio(Silver cores)
3136 		 */
3137 		if (qdf_unlikely(qdf_cpumask_empty(cpu_mask)))
3138 			qdf_cpumask_complement(cpu_mask, &allowed_mask);
3139 		return QDF_STATUS_SUCCESS;
3140 	}
3141 
3142 	return QDF_STATUS_E_ALREADY;
3143 }
3144 
3145 static inline QDF_STATUS
3146 hif_check_and_affine_irq(struct hif_softc *scn, struct hif_cpu_affinity *cfg,
3147 			 qdf_cpu_mask audio_taken_cpu, qdf_cpu_mask cpu_mask,
3148 			 uint64_t current_time)
3149 {
3150 	QDF_STATUS status;
3151 
3152 	status = hif_affinity_mgr_check_update_mask(scn, cfg,
3153 						    audio_taken_cpu,
3154 						    &cpu_mask,
3155 						    current_time);
3156 	/* Set IRQ affinity if CPU mask was updated */
3157 	if (QDF_IS_STATUS_SUCCESS(status)) {
3158 		status = hif_irq_set_affinity_hint(cfg->irq,
3159 						   &cpu_mask);
3160 		if (QDF_IS_STATUS_SUCCESS(status)) {
3161 			/* Store audio taken CPU mask */
3162 			qdf_cpumask_copy(&cfg->walt_taken_mask,
3163 					 &audio_taken_cpu);
3164 			/* Store CPU mask which was set for IRQ*/
3165 			qdf_cpumask_copy(&cfg->current_irq_mask,
3166 					 &cpu_mask);
3167 			/* Set time when IRQ affinity was updated */
3168 			cfg->last_updated = current_time;
3169 			if (hif_audio_cpu_affinity_allowed(scn, cfg,
3170 							   audio_taken_cpu,
3171 							   current_time))
3172 				/* If CPU mask was updated due to CPU
3173 				 * taken by audio, update
3174 				 * last_affined_away time
3175 				 */
3176 				cfg->last_affined_away = current_time;
3177 		}
3178 	}
3179 
3180 	return status;
3181 }
3182 
3183 void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
3184 {
3185 	bool audio_affinity_allowed = false;
3186 	int i, j, ce_id;
3187 	uint64_t current_time;
3188 	char cpu_str[10];
3189 	QDF_STATUS status;
3190 	qdf_cpu_mask cpu_mask, audio_taken_cpu;
3191 	struct HIF_CE_state *hif_state;
3192 	struct hif_exec_context *hif_ext_group;
3193 	struct CE_attr *host_ce_conf;
3194 	struct HIF_CE_state *ce_sc;
3195 	struct hif_cpu_affinity *cfg;
3196 
3197 	if (!scn->affinity_mgr_supported)
3198 		return;
3199 
3200 	current_time = hif_get_log_timestamp();
3201 	/* Get CPU mask for audio taken CPUs */
3202 	audio_taken_cpu = qdf_walt_get_cpus_taken();
3203 
3204 	ce_sc = HIF_GET_CE_STATE(scn);
3205 	host_ce_conf = ce_sc->host_ce_config;
3206 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3207 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3208 			continue;
3209 		cfg = &scn->ce_irq_cpu_mask[ce_id];
3210 		qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
3211 		status =
3212 			hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
3213 						 cpu_mask, current_time);
3214 		if (QDF_IS_STATUS_SUCCESS(status))
3215 			audio_affinity_allowed = true;
3216 	}
3217 
3218 	hif_state = HIF_GET_CE_STATE(scn);
3219 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3220 		hif_ext_group = hif_state->hif_ext_group[i];
3221 		for (j = 0; j < hif_ext_group->numirq; j++) {
3222 			cfg = &scn->irq_cpu_mask[hif_ext_group->grp_id][j];
3223 			qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
3224 			status =
3225 				hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
3226 							 cpu_mask, current_time);
3227 			if (QDF_IS_STATUS_SUCCESS(status)) {
3228 				qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
3229 				audio_affinity_allowed = true;
3230 			}
3231 		}
3232 	}
3233 	if (audio_affinity_allowed) {
3234 		qdf_thread_cpumap_print_to_pagebuf(false, cpu_str,
3235 						   &audio_taken_cpu);
3236 		hif_info("Audio taken CPU mask: %s", cpu_str);
3237 	}
3238 }
3239 
3240 static inline QDF_STATUS
3241 hif_affinity_mgr_set_irq_affinity(struct hif_softc *scn, uint32_t irq,
3242 				  struct hif_cpu_affinity *cfg,
3243 				  qdf_cpu_mask *cpu_mask)
3244 {
3245 	uint64_t current_time;
3246 	char cpu_str[10];
3247 	QDF_STATUS status, mask_updated;
3248 	qdf_cpu_mask audio_taken_cpu = qdf_walt_get_cpus_taken();
3249 
3250 	current_time = hif_get_log_timestamp();
3251 	qdf_cpumask_copy(&cfg->wlan_requested_mask, cpu_mask);
3252 	cfg->update_requested = true;
3253 	mask_updated = hif_affinity_mgr_check_update_mask(scn, cfg,
3254 							  audio_taken_cpu,
3255 							  cpu_mask,
3256 							  current_time);
3257 	status = hif_irq_set_affinity_hint(irq, cpu_mask);
3258 	if (QDF_IS_STATUS_SUCCESS(status)) {
3259 		qdf_cpumask_copy(&cfg->walt_taken_mask, &audio_taken_cpu);
3260 		qdf_cpumask_copy(&cfg->current_irq_mask, cpu_mask);
3261 		if (QDF_IS_STATUS_SUCCESS(mask_updated)) {
3262 			cfg->last_updated = current_time;
3263 			if (hif_audio_cpu_affinity_allowed(scn, cfg,
3264 							   audio_taken_cpu,
3265 							   current_time)) {
3266 				cfg->last_affined_away = current_time;
3267 				qdf_thread_cpumap_print_to_pagebuf(false,
3268 								   cpu_str,
3269 								   &audio_taken_cpu);
3270 				hif_info_rl("Audio taken CPU mask: %s",
3271 					    cpu_str);
3272 			}
3273 		}
3274 	}
3275 	cfg->update_requested = false;
3276 	return status;
3277 }
3278 
3279 QDF_STATUS
3280 hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
3281 				      uint32_t grp_id, uint32_t irq_index,
3282 				      qdf_cpu_mask *cpu_mask)
3283 {
3284 	struct hif_cpu_affinity *cfg;
3285 
3286 	if (!scn->affinity_mgr_supported)
3287 		return hif_irq_set_affinity_hint(irq, cpu_mask);
3288 
3289 	cfg = &scn->irq_cpu_mask[grp_id][irq_index];
3290 	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
3291 }
3292 
3293 QDF_STATUS
3294 hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
3295 				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
3296 {
3297 	struct hif_cpu_affinity *cfg;
3298 
3299 	if (!scn->affinity_mgr_supported)
3300 		return hif_irq_set_affinity_hint(irq, cpu_mask);
3301 
3302 	cfg = &scn->ce_irq_cpu_mask[ce_id];
3303 	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
3304 }
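/*
 * Usage sketch (hypothetical CPU choice): request affinity for a CE
 * interrupt; when the affinity manager is enabled, the request is filtered
 * against the CPUs currently taken by audio.
 *
 *	qdf_cpu_mask mask = {0};
 *
 *	qdf_cpumask_set_cpu(4, &mask);	// hypothetical gold core
 *	hif_affinity_mgr_set_ce_irq_affinity(scn, irq, ce_id, &mask);
 */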
3305 
3306 void
3307 hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
3308 {
3309 	unsigned int cpus;
3310 	qdf_cpu_mask cpu_mask = {0};
3311 	struct hif_cpu_affinity *cfg = NULL;
3312 
3313 	if (!scn->affinity_mgr_supported)
3314 		return;
3315 
3316 	/* Set CPU Mask to Silver core */
3317 	qdf_for_each_possible_cpu(cpus)
3318 		if (qdf_topology_physical_package_id(cpus) ==
3319 		    CPU_CLUSTER_TYPE_LITTLE)
3320 			qdf_cpumask_set_cpu(cpus, &cpu_mask);
3321 
3322 	cfg = &scn->ce_irq_cpu_mask[id];
3323 	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
3324 	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
3325 	cfg->irq = irq;
3326 	cfg->last_updated = 0;
3327 	cfg->last_affined_away = 0;
3328 	cfg->update_requested = false;
3329 }
3330 
3331 void
3332 hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
3333 			      int irq_num, int irq)
3334 {
3335 	unsigned int cpus;
3336 	qdf_cpu_mask cpu_mask = {0};
3337 	struct hif_cpu_affinity *cfg = NULL;
3338 
3339 	if (!scn->affinity_mgr_supported)
3340 		return;
3341 
3342 	/* Set CPU Mask to Silver core */
3343 	qdf_for_each_possible_cpu(cpus)
3344 		if (qdf_topology_physical_package_id(cpus) ==
3345 		    CPU_CLUSTER_TYPE_LITTLE)
3346 			qdf_cpumask_set_cpu(cpus, &cpu_mask);
3347 
3348 	cfg = &scn->irq_cpu_mask[grp_id][irq_num];
3349 	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
3350 	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
3351 	cfg->irq = irq;
3352 	cfg->last_updated = 0;
3353 	cfg->last_affined_away = 0;
3354 	cfg->update_requested = false;
3355 }
3356 #endif
3357 
3358 #if defined(HIF_CPU_PERF_AFFINE_MASK) || \
3359 	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
3360 void hif_config_irq_set_perf_affinity_hint(
3361 	struct hif_opaque_softc *hif_ctx)
3362 {
3363 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3364 
3365 	hif_config_irq_affinity(scn);
3366 }
3367 
3368 qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
3369 #endif
3370