1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "targcfg.h"
21 #include "qdf_lock.h"
22 #include "qdf_status.h"
24 #include <qdf_atomic.h>         /* qdf_atomic_read */
25 #include <targaddrs.h>
26 #include "hif_io32.h"
27 #include <hif.h>
28 #include <target_type.h>
29 #include "regtable.h"
30 #define ATH_MODULE_NAME hif
31 #include <a_debug.h>
32 #include "hif_main.h"
33 #include "hif_hw_version.h"
34 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
35      defined(HIF_IPCI))
36 #include "ce_tasklet.h"
37 #include "ce_api.h"
38 #endif
39 #include "qdf_trace.h"
41 #include "hif_debug.h"
42 #include "mp_dev.h"
43 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
44 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
45 	defined(QCA_WIFI_QCA5332)
46 #include "hal_api.h"
47 #endif
48 #include "hif_napi.h"
49 #include "hif_unit_test_suspend_i.h"
50 #include "qdf_module.h"
51 #ifdef HIF_CE_LOG_INFO
52 #include <qdf_notifier.h>
53 #include <qdf_hang_event_notifier.h>
54 #endif
55 #include <linux/cpumask.h>
56 
57 #include <pld_common.h>
58 
59 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
60 {
61 	hif_trigger_dump(hif_ctx, cmd_id, start);
62 }
63 
64 /**
65  * hif_get_target_id(): get the target id
66  * @scn: hif context
67  *
68  * Return the virtual memory base address to the caller.
69  *
70  * Return: A_target_id_t
71  */
74 A_target_id_t hif_get_target_id(struct hif_softc *scn)
75 {
76 	return scn->mem;
77 }
78 
79 /**
80  * hif_get_targetdef(): hif_get_targetdef
81  * @hif_ctx: hif context
82  *
83  * Return: void *
84  */
85 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
86 {
87 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
88 
89 	return scn->targetdef;
90 }
91 
92 #ifdef FORCE_WAKE
93 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
94 			 bool init_phase)
95 {
96 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
97 
98 	if (ce_srng_based(scn))
99 		hal_set_init_phase(scn->hal_soc, init_phase);
100 }
101 #endif /* FORCE_WAKE */
102 
103 #ifdef HIF_IPCI
104 void hif_shutdown_notifier_cb(void *hif_ctx)
105 {
106 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
107 
108 	scn->recovery = true;
109 }
110 #endif
111 
112 /**
113  * hif_vote_link_down(): unvote for link up
114  * @hif_ctx: hif context
115  *
116  * Call hif_vote_link_down to release a previous request made using
117  * hif_vote_link_up. A hif_vote_link_down call should only be made
118  * after a corresponding hif_vote_link_up, otherwise you could be
119  * negating a vote from another source. When no votes are present
120  * hif will not guarantee the linkstate after hif_bus_suspend.
121  *
122  * SYNCHRONIZE WITH hif_vote_link_up by only calling in the MC thread
123  * and in initialization/deinitialization sequences.
124  *
125  * Return: n/a
126  */
127 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
128 {
129 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
130 
131 	QDF_BUG(scn);
132 	if (scn->linkstate_vote == 0)
133 		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
134 				scn->linkstate_vote);
135 
136 	scn->linkstate_vote--;
137 	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
138 	if (scn->linkstate_vote == 0)
139 		hif_bus_prevent_linkdown(scn, false);
140 }
141 
142 /**
143  * hif_vote_link_up(): vote to prevent bus from suspending
144  * @hif_ctx: hif context
145  *
146  * Makes hif guarantee that fw can message the host normally
147  * during suspend.
148  *
149  * SYNCHRONIZE WITH hif_vote_link_down by only calling in the MC thread
150  * and in initialization/deinitialization sequences.
151  *
152  * Return: n/a
153  */
154 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
155 {
156 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
157 
158 	QDF_BUG(scn);
159 	scn->linkstate_vote++;
160 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
161 	if (scn->linkstate_vote == 1)
162 		hif_bus_prevent_linkdown(scn, true);
163 }
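
/*
 * Usage sketch (illustrative, not part of this file): callers pair the
 * two votes so the link may only go down once every requester has
 * released its vote, e.g.:
 *
 *	hif_vote_link_up(hif_ctx);
 *	// ... exchange messages that must survive a bus suspend ...
 *	hif_vote_link_down(hif_ctx);
 *
 * Both calls must be made from the MC thread or the init/deinit paths,
 * since linkstate_vote is not protected by a lock.
 */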
164 
165 /**
166  * hif_can_suspend_link(): query if hif is permitted to suspend the link
167  * @hif_ctx: hif context
168  *
169  * Hif will ensure that the link won't be suspended if the upper
170  * layers don't want it to.
171  *
172  * SYNCHRONIZATION: MC thread is stopped before bus suspend, thus
173  * we don't need extra locking to ensure votes don't change while
174  * we are in the process of suspending or resuming.
175  *
176  * Return: false if hif will guarantee link up during suspend.
177  */
178 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
179 {
180 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
181 
182 	QDF_BUG(scn);
183 	return scn->linkstate_vote == 0;
184 }
185 
186 /**
187  * hif_hia_item_address(): hif_hia_item_address
188  * @target_type: TARGET_TYPE_* identifier of the target
189  * @item_offset: offset of the item within the host interest area
190  *
191  * Return: address of the host interest item, or 0 for unsupported targets
192  */
193 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
194 {
195 	switch (target_type) {
196 	case TARGET_TYPE_AR6002:
197 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
198 	case TARGET_TYPE_AR6003:
199 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
200 	case TARGET_TYPE_AR6004:
201 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
202 	case TARGET_TYPE_AR6006:
203 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
204 	case TARGET_TYPE_AR9888:
205 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
206 	case TARGET_TYPE_AR6320:
207 	case TARGET_TYPE_AR6320V2:
208 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
209 	case TARGET_TYPE_ADRASTEA:
210 		/* ADRASTEA doesn't have a host interest address */
211 		ASSERT(0);
212 		return 0;
213 	case TARGET_TYPE_AR900B:
214 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
215 	case TARGET_TYPE_QCA9984:
216 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
217 	case TARGET_TYPE_QCA9888:
218 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
219 
220 	default:
221 		ASSERT(0);
222 		return 0;
223 	}
224 }
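
/*
 * Usage sketch (illustrative): callers normally pass the offset of a
 * field within the target's host interest structure, e.g. via
 * offsetof(). struct host_interest_s and hi_board_data are assumed
 * here; the actual layout comes from targaddrs.h:
 *
 *	uint32_t addr;
 *
 *	addr = hif_hia_item_address(TARGET_TYPE_AR9888,
 *				    offsetof(struct host_interest_s,
 *					     hi_board_data));
 */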
225 
226 /**
227  * hif_max_num_receives_reached() - check if the max receive count is reached
228  * @scn: HIF Context
229  * @count: number of messages processed so far
230  *
231  * Return: true if the receive limit for this dispatch pass is reached
234  */
235 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
236 {
237 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
238 		return count > 120;
239 	else
240 		return count > MAX_NUM_OF_RECEIVES;
241 }
242 
243 /**
244  * init_buffer_count() - initial buffer count
245  * @maxSize: maximum buffer count requested
246  *
247  * Routine to modify the initial buffer count to be allocated on an OS
248  * platform basis. Platform owners will need to modify this as needed.
249  *
250  * Return: qdf_size_t
251  */
252 qdf_size_t init_buffer_count(qdf_size_t maxSize)
253 {
254 	return maxSize;
255 }
256 
257 /**
258  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
259  * @hif_ctx: hif context
260  * @htc_htt_tx_endpoint: htt_tx_endpoint
261  *
262  * Return: void
263  */
264 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
265 							int htc_htt_tx_endpoint)
266 {
267 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
268 
269 	if (!scn) {
270 		hif_err("scn is NULL!");
271 		return;
272 	}
273 
274 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
275 }
276 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
277 
278 static const struct qwlan_hw qwlan_hw_list[] = {
279 	{
280 		.id = AR6320_REV1_VERSION,
281 		.subid = 0,
282 		.name = "QCA6174_REV1",
283 	},
284 	{
285 		.id = AR6320_REV1_1_VERSION,
286 		.subid = 0x1,
287 		.name = "QCA6174_REV1_1",
288 	},
289 	{
290 		.id = AR6320_REV1_3_VERSION,
291 		.subid = 0x2,
292 		.name = "QCA6174_REV1_3",
293 	},
294 	{
295 		.id = AR6320_REV2_1_VERSION,
296 		.subid = 0x4,
297 		.name = "QCA6174_REV2_1",
298 	},
299 	{
300 		.id = AR6320_REV2_1_VERSION,
301 		.subid = 0x5,
302 		.name = "QCA6174_REV2_2",
303 	},
304 	{
305 		.id = AR6320_REV3_VERSION,
306 		.subid = 0x6,
307 		.name = "QCA6174_REV2.3",
308 	},
309 	{
310 		.id = AR6320_REV3_VERSION,
311 		.subid = 0x8,
312 		.name = "QCA6174_REV3",
313 	},
314 	{
315 		.id = AR6320_REV3_VERSION,
316 		.subid = 0x9,
317 		.name = "QCA6174_REV3_1",
318 	},
319 	{
320 		.id = AR6320_REV3_2_VERSION,
321 		.subid = 0xA,
322 		.name = "AR6320_REV3_2_VERSION",
323 	},
324 	{
325 		.id = QCA6390_V1,
326 		.subid = 0x0,
327 		.name = "QCA6390_V1",
328 	},
329 	{
330 		.id = QCA6490_V1,
331 		.subid = 0x0,
332 		.name = "QCA6490_V1",
333 	},
334 	{
335 		.id = WCN3990_v1,
336 		.subid = 0x0,
337 		.name = "WCN3990_V1",
338 	},
339 	{
340 		.id = WCN3990_v2,
341 		.subid = 0x0,
342 		.name = "WCN3990_V2",
343 	},
344 	{
345 		.id = WCN3990_v2_1,
346 		.subid = 0x0,
347 		.name = "WCN3990_V2.1",
348 	},
349 	{
350 		.id = WCN3998,
351 		.subid = 0x0,
352 		.name = "WCN3998",
353 	},
354 	{
355 		.id = QCA9379_REV1_VERSION,
356 		.subid = 0xC,
357 		.name = "QCA9379_REV1",
358 	},
359 	{
360 		.id = QCA9379_REV1_VERSION,
361 		.subid = 0xD,
362 		.name = "QCA9379_REV1_1",
363 	},
364 	{
365 		.id = MANGO_V1,
366 		.subid = 0xF,
367 		.name = "MANGO_V1",
368 	},
369 	{
370 		.id = KIWI_V1,
371 		.subid = 0,
372 		.name = "KIWI_V1",
373 	},
374 	{
375 		.id = KIWI_V2,
376 		.subid = 0,
377 		.name = "KIWI_V2",
378 	},
379 	{
380 		.id = WCN6750_V1,
381 		.subid = 0,
382 		.name = "WCN6750_V1",
383 	},
384 	{
385 		.id = QCA6490_v2_1,
386 		.subid = 0,
387 		.name = "QCA6490",
388 	},
389 	{
390 		.id = QCA6490_v2,
391 		.subid = 0,
392 		.name = "QCA6490",
393 	},
394 	{
395 		.id = WCN3990_v2_2,
396 		.subid = 0,
397 		.name = "WCN3990_v2_2",
398 	},
399 	{
400 		.id = WCN3990_TALOS,
401 		.subid = 0,
402 		.name = "WCN3990",
403 	},
404 	{
405 		.id = WCN3990_MOOREA,
406 		.subid = 0,
407 		.name = "WCN3990",
408 	},
409 	{
410 		.id = WCN3990_SAIPAN,
411 		.subid = 0,
412 		.name = "WCN3990",
413 	},
414 	{
415 		.id = WCN3990_RENNELL,
416 		.subid = 0,
417 		.name = "WCN3990",
418 	},
419 	{
420 		.id = WCN3990_BITRA,
421 		.subid = 0,
422 		.name = "WCN3990",
423 	},
424 	{
425 		.id = WCN3990_DIVAR,
426 		.subid = 0,
427 		.name = "WCN3990",
428 	},
429 	{
430 		.id = WCN3990_ATHERTON,
431 		.subid = 0,
432 		.name = "WCN3990",
433 	},
434 	{
435 		.id = WCN3990_STRAIT,
436 		.subid = 0,
437 		.name = "WCN3990",
438 	},
439 	{
440 		.id = WCN3990_NETRANI,
441 		.subid = 0,
442 		.name = "WCN3990",
443 	},
444 	{
445 		.id = WCN3990_CLARENCE,
446 		.subid = 0,
447 		.name = "WCN3990",
448 	}
449 };
450 
451 /**
452  * hif_get_hw_name(): get a human readable name for the hardware
453  * @info: Target Info
454  *
455  * Return: human readable name for the underlying wifi hardware.
456  */
457 static const char *hif_get_hw_name(struct hif_target_info *info)
458 {
459 	int i;
460 
461 	hif_debug("target version = %d, target revision = %d",
462 		  info->target_version,
463 		  info->target_revision);
464 
465 	if (info->hw_name)
466 		return info->hw_name;
467 
468 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
469 		if (info->target_version == qwlan_hw_list[i].id &&
470 		    info->target_revision == qwlan_hw_list[i].subid) {
471 			return qwlan_hw_list[i].name;
472 		}
473 	}
474 
475 	info->hw_name = qdf_mem_malloc(64);
476 	if (!info->hw_name)
477 		return "Unknown Device (nomem)";
478 
479 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
480 			info->target_version);
481 	if (i < 0)
482 		return "Unknown Device (snprintf failure)";
483 	else
484 		return info->hw_name;
485 }
486 
487 /**
488  * hif_get_hw_info(): hif_get_hw_info
489  * @scn: scn
490  * @version: version
491  * @revision: revision
492  * @target_name: target name
493  *
494  * Return: n/a
495  */
496 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
497 			const char **target_name)
498 {
499 	struct hif_target_info *info = hif_get_target_info_handle(scn);
500 	struct hif_softc *sc = HIF_GET_SOFTC(scn);
501 
502 	if (sc->bus_type == QDF_BUS_TYPE_USB)
503 		hif_usb_get_hw_info(sc);
504 
505 	*version = info->target_version;
506 	*revision = info->target_revision;
507 	*target_name = hif_get_hw_name(info);
508 }
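
/*
 * Usage sketch (illustrative): callers typically log the returned
 * triple during probe, e.g.:
 *
 *	uint32_t version, revision;
 *	const char *name;
 *
 *	hif_get_hw_info(hif_ctx, &version, &revision, &name);
 *	hif_info("hw: %s (version 0x%x, revision 0x%x)",
 *		 name, version, revision);
 */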
509 
510 /**
511  * hif_get_dev_ba(): API to get device base address.
512  * @hif_handle: hif handle
513  *
514  * Return: device base address
515  */
516 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
517 {
518 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
519 
520 	return scn->mem;
521 }
522 qdf_export_symbol(hif_get_dev_ba);
523 
524 /**
525  * hif_get_dev_ba_ce(): API to get device ce base address.
526  * @hif_handle: hif handle
527  *
528  * Return: dev mem base address for CE
529  */
530 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
531 {
532 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
533 
534 	return scn->mem_ce;
535 }
536 
537 qdf_export_symbol(hif_get_dev_ba_ce);
538 
539 /**
540  * hif_get_dev_ba_pmm(): API to get device pmm base address.
541  * @hif_handle: hif handle
542  *
543  * Return: dev mem base address for PMM
544  */
546 void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle)
547 {
548 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
549 
550 	return scn->mem_pmm_base;
551 }
552 
553 qdf_export_symbol(hif_get_dev_ba_pmm);
554 
555 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
556 {
557 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
558 
559 	return scn->target_info.soc_version;
560 }
561 
562 qdf_export_symbol(hif_get_soc_version);
563 
564 /**
565  * hif_get_dev_ba_cmem(): API to get device CMEM base address.
566  * @hif_handle: hif handle
567  *
568  * Return: dev mem base address for CMEM
569  */
570 void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
571 {
572 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
573 
574 	return scn->mem_cmem;
575 }
576 
577 qdf_export_symbol(hif_get_dev_ba_cmem);
578 
579 #ifdef FEATURE_RUNTIME_PM
580 void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
581 {
582 	if (is_get)
583 		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
584 	else
585 		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
586 }
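
/*
 * Usage sketch (illustrative): the calls pair like a get/put, so a
 * vote taken before a bus transaction is always released afterwards:
 *
 *	hif_runtime_prevent_linkdown(scn, true);   // prevent suspend
 *	// ... access the bus ...
 *	hif_runtime_prevent_linkdown(scn, false);  // allow suspend again
 */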
587 
588 static inline
589 void hif_rtpm_lock_init(struct hif_softc *scn)
590 {
591 	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
592 }
593 
594 static inline
595 void hif_rtpm_lock_deinit(struct hif_softc *scn)
596 {
597 	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
598 }
599 #else
600 static inline
601 void hif_rtpm_lock_init(struct hif_softc *scn)
602 {
603 }
604 
605 static inline
606 void hif_rtpm_lock_deinit(struct hif_softc *scn)
607 {
608 }
609 #endif
610 
611 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
612 /**
613  * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
614  * @scn: hif context
615  * @psoc: psoc objmgr handle
616  *
617  * Return: None
618  */
619 static inline
620 void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
621 					       struct wlan_objmgr_psoc *psoc)
622 {
623 	if (psoc) {
624 		scn->ini_cfg.ce_status_ring_timer_threshold =
625 			cfg_get(psoc,
626 				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
627 		scn->ini_cfg.ce_status_ring_batch_count_threshold =
628 			cfg_get(psoc,
629 				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
630 	}
631 }
632 #else
633 static inline
634 void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
635 					       struct wlan_objmgr_psoc *psoc)
636 {
637 }
638 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
639 
640 /**
641  * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
642  * @scn: hif context
643  * @psoc: psoc objmgr handle
644  *
645  * Return: None
646  */
647 static inline
648 void hif_get_cfg_from_psoc(struct hif_softc *scn,
649 			   struct wlan_objmgr_psoc *psoc)
650 {
651 	if (psoc) {
652 		scn->ini_cfg.disable_wake_irq =
653 			cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
654 		/*
655 		 * The wake IRQ can't share the same IRQ with the copy
656 		 * engines. In one MSI mode, we don't know whether the wake
657 		 * IRQ was triggered or not in the wake IRQ handler (known
658 		 * issue CR 2055359). To support the wake IRQ, allocate at
659 		 * least 2 MSI vectors: the first is for the wake IRQ, while
660 		 * the others share the second vector.
661 		 */
662 		if (pld_is_one_msi(scn->qdf_dev->dev)) {
663 			hif_debug("Disabling wake IRQ since one MSI mode is in use");
664 			scn->ini_cfg.disable_wake_irq = true;
665 		}
666 		hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
667 	}
668 }
669 
670 #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
671 /**
672  * hif_recovery_notifier_cb - Recovery notifier callback to log
673  *  hang event data
674  * @block: notifier block
675  * @state: state
676  * @data: notifier data
677  *
678  * Return: status
679  */
680 static
681 int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
682 			     void *data)
683 {
684 	struct qdf_notifer_data *notif_data = data;
685 	qdf_notif_block *notif_block;
686 	struct hif_softc *hif_handle;
687 	bool bus_id_invalid;
688 
689 	if (!data || !block)
690 		return -EINVAL;
691 
692 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
693 
694 	hif_handle = notif_block->priv_data;
695 	if (!hif_handle)
696 		return -EINVAL;
697 
698 	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
699 					  &notif_data->offset);
700 	if (bus_id_invalid)
701 		return NOTIFY_STOP_MASK;
702 
703 	hif_log_ce_info(hif_handle, notif_data->hang_data,
704 			&notif_data->offset);
705 
706 	return 0;
707 }
708 
709 /**
710  * hif_register_recovery_notifier - Register hif recovery notifier
711  * @hif_handle: hif handle
712  *
713  * Return: status
714  */
715 static
716 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
717 {
718 	qdf_notif_block *hif_notifier;
719 
720 	if (!hif_handle)
721 		return QDF_STATUS_E_FAILURE;
722 
723 	hif_notifier = &hif_handle->hif_recovery_notifier;
724 
725 	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
726 	hif_notifier->priv_data = hif_handle;
727 	return qdf_hang_event_register_notifier(hif_notifier);
728 }
729 
730 /**
731  * hif_unregister_recovery_notifier - Un-register hif recovery notifier
732  * @hif_handle: hif handle
733  *
734  * Return: status
735  */
736 static
737 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
738 {
739 	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
740 
741 	return qdf_hang_event_unregister_notifier(hif_notifier);
742 }
743 #else
744 static inline
745 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
746 {
747 	return QDF_STATUS_SUCCESS;
748 }
749 
750 static inline
751 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
752 {
753 	return QDF_STATUS_SUCCESS;
754 }
755 #endif
756 
757 #ifdef HIF_CPU_PERF_AFFINE_MASK
758 /**
759  * __hif_cpu_hotplug_notify() - CPU hotplug event handler
760  * @context: HIF context
761  * @cpu: CPU Id of the CPU generating the event
762  * @cpu_up: true if the CPU is online
763  *
764  * Return: None
765  */
766 static void __hif_cpu_hotplug_notify(void *context,
767 				     uint32_t cpu, bool cpu_up)
768 {
769 	struct hif_softc *scn = context;
770 
771 	if (!scn)
772 		return;
773 	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
774 		return;
775 
776 	if (cpu_up) {
777 		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
778 		hif_debug("Setting affinity for online CPU: %d", cpu);
779 	} else {
780 		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
781 	}
782 }
783 
784 /**
785  * hif_cpu_hotplug_notify - cpu core up/down notification
786  * handler
787  * @context: HIF context
788  * @cpu: CPU generating the event
789  * @cpu_up: true if the CPU is online
790  *
791  * Return: None
792  */
793 static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
794 {
795 	struct qdf_op_sync *op_sync;
796 
797 	if (qdf_op_protect(&op_sync))
798 		return;
799 
800 	__hif_cpu_hotplug_notify(context, cpu, cpu_up);
801 
802 	qdf_op_unprotect(op_sync);
803 }
804 
805 static void hif_cpu_online_cb(void *context, uint32_t cpu)
806 {
807 	hif_cpu_hotplug_notify(context, cpu, true);
808 }
809 
810 static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
811 {
812 	hif_cpu_hotplug_notify(context, cpu, false);
813 }
814 
815 static void hif_cpuhp_register(struct hif_softc *scn)
816 {
817 	if (!scn) {
818 		hif_info_high("cannot register hotplug notifiers");
819 		return;
820 	}
821 	qdf_cpuhp_register(&scn->cpuhp_event_handle,
822 			   scn,
823 			   hif_cpu_online_cb,
824 			   hif_cpu_before_offline_cb);
825 }
826 
827 static void hif_cpuhp_unregister(struct hif_softc *scn)
828 {
829 	if (!scn) {
830 		hif_info_high("cannot unregister hotplug notifiers");
831 		return;
832 	}
833 	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
834 }
835 
836 #else
837 static void hif_cpuhp_register(struct hif_softc *scn)
838 {
839 }
840 
841 static void hif_cpuhp_unregister(struct hif_softc *scn)
842 {
843 }
844 #endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
845 
846 #ifdef HIF_DETECTION_LATENCY_ENABLE
847 
848 void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
849 {
850 	qdf_time_t ce2_tasklet_sched_time =
851 		scn->latency_detect.ce2_tasklet_sched_time;
852 	qdf_time_t ce2_tasklet_exec_time =
853 		scn->latency_detect.ce2_tasklet_exec_time;
854 	qdf_time_t curr_jiffies = qdf_system_ticks();
855 	uint32_t detect_latency_threshold =
856 		scn->latency_detect.detect_latency_threshold;
857 	int cpu_id = qdf_get_cpu();
858 
859 	/* 2 kinds of check here.
860 	 * from_timer==true:  check if tasklet stall
861 	 * from_timer==false: check tasklet execute comes late
862 	 */
863 
864 	if ((from_timer ?
865 	    qdf_system_time_after(ce2_tasklet_sched_time,
866 				  ce2_tasklet_exec_time) :
867 	    qdf_system_time_after(ce2_tasklet_exec_time,
868 				  ce2_tasklet_sched_time)) &&
869 	    qdf_system_time_after(
870 		curr_jiffies,
871 		ce2_tasklet_sched_time +
872 		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
873 		hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu, ce2_tasklet_exec_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
874 			from_timer, curr_jiffies, ce2_tasklet_sched_time,
875 			ce2_tasklet_exec_time, detect_latency_threshold,
876 			scn->latency_detect.detect_latency_timer_timeout,
877 			cpu_id, (void *)_RET_IP_);
878 		goto latency;
879 	}
880 	return;
881 
882 latency:
883 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
884 }
885 
886 void hif_credit_latency(struct hif_softc *scn, bool from_timer)
887 {
888 	qdf_time_t credit_request_time =
889 		scn->latency_detect.credit_request_time;
890 	qdf_time_t credit_report_time =
891 		scn->latency_detect.credit_report_time;
892 	qdf_time_t curr_jiffies = qdf_system_ticks();
893 	uint32_t detect_latency_threshold =
894 		scn->latency_detect.detect_latency_threshold;
895 	int cpu_id = qdf_get_cpu();
896 
897 	/* 2 kinds of check here.
898 	 * from_timer==true:  check if credit report stall
899 	 * from_timer==false: check credit report comes late
900 	 */
901 
902 	if ((from_timer ?
903 	    qdf_system_time_after(credit_request_time,
904 				  credit_report_time) :
905 	    qdf_system_time_after(credit_report_time,
906 				  credit_request_time)) &&
907 	    qdf_system_time_after(
908 		curr_jiffies,
909 		credit_request_time +
910 		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
911 		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
912 			from_timer, curr_jiffies, credit_request_time,
913 			credit_report_time, detect_latency_threshold,
914 			scn->latency_detect.detect_latency_timer_timeout,
915 			cpu_id, (void *)_RET_IP_);
916 		goto latency;
917 	}
918 	return;
919 
920 latency:
921 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
922 }
923 
924 /**
925  * hif_check_detection_latency(): check for tasklet/credit latency
926  * @scn: hif context
927  * @from_timer: true if called from the timer handler
928  * @bitmap_type: bitmap selecting tasklet and/or credit checks
930  *
931  * Return: none
932  */
933 void hif_check_detection_latency(struct hif_softc *scn,
934 				 bool from_timer,
935 				 uint32_t bitmap_type)
936 {
937 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
938 		return;
939 
940 	if (!scn->latency_detect.enable_detection)
941 		return;
942 
943 	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
944 		hif_tasklet_latency(scn, from_timer);
945 
946 	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
947 		hif_credit_latency(scn, from_timer);
948 }
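
/*
 * Usage sketch (illustrative, matching the call sites in this file):
 * execution paths call this with from_timer=false right after
 * recording a timestamp, while the detection timer passes
 * from_timer=true to catch stalls:
 *
 *	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
 *	hif_check_detection_latency(scn, true,
 *				    BIT(HIF_DETECT_TASKLET) |
 *				    BIT(HIF_DETECT_CREDIT));
 */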
949 
950 static void hif_latency_detect_timeout_handler(void *arg)
951 {
952 	struct hif_softc *scn = (struct hif_softc *)arg;
953 	int next_cpu;
954 
955 	hif_check_detection_latency(scn, true,
956 				    BIT(HIF_DETECT_TASKLET) |
957 				    BIT(HIF_DETECT_CREDIT));
958 
959 	/* Make sure the timer starts on a different CPU so it can
960 	 * detect a tasklet schedule stall. There is still a chance
961 	 * that, after the timer has been started, an irq/tasklet
962 	 * happens on the same CPU; the tasklet would then execute
963 	 * before the softirq timer, and if that tasklet stalls, this
964 	 * timer can't detect it. We accept this as a limitation:
965 	 * if the tasklet stalls, some other place will detect it,
966 	 * just a little later.
967 	 */
968 	next_cpu = cpumask_any_but(
969 			cpu_active_mask,
970 			scn->latency_detect.ce2_tasklet_sched_cpuid);
971 
972 	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
973 		hif_debug("start timer on local");
974 		/* no other available CPU found, start on the local CPU */
975 		qdf_timer_mod(
976 			&scn->latency_detect.detect_latency_timer,
977 			scn->latency_detect.detect_latency_timer_timeout);
978 	} else {
979 		qdf_timer_start_on(
980 			&scn->latency_detect.detect_latency_timer,
981 			scn->latency_detect.detect_latency_timer_timeout,
982 			next_cpu);
983 	}
984 }
985 
986 static void hif_latency_detect_timer_init(struct hif_softc *scn)
987 {
988 	if (!scn) {
989 		hif_info_high("scn is null");
990 		return;
991 	}
992 
993 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
994 		return;
995 
996 	scn->latency_detect.detect_latency_timer_timeout =
997 		DETECTION_TIMER_TIMEOUT;
998 	scn->latency_detect.detect_latency_threshold =
999 		DETECTION_LATENCY_THRESHOLD;
1000 
1001 	hif_info("timer timeout %u, latency threshold %u",
1002 		 scn->latency_detect.detect_latency_timer_timeout,
1003 		 scn->latency_detect.detect_latency_threshold);
1004 
1005 	scn->latency_detect.is_timer_started = false;
1006 
1007 	qdf_timer_init(NULL,
1008 		       &scn->latency_detect.detect_latency_timer,
1009 		       &hif_latency_detect_timeout_handler,
1010 		       scn,
1011 		       QDF_TIMER_TYPE_SW_SPIN);
1012 }
1013 
1014 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
1015 {
1016 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1017 		return;
1018 
1019 	hif_info("deinit timer");
1020 	qdf_timer_free(&scn->latency_detect.detect_latency_timer);
1021 }
1022 
1023 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
1024 {
1025 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1026 
1027 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1028 		return;
1029 
1030 	hif_debug_rl("start timer");
1031 	if (scn->latency_detect.is_timer_started) {
1032 		hif_info("timer has been started");
1033 		return;
1034 	}
1035 
1036 	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
1037 			scn->latency_detect.detect_latency_timer_timeout);
1038 	scn->latency_detect.is_timer_started = true;
1039 }
1040 
1041 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
1042 {
1043 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1044 
1045 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1046 		return;
1047 
1048 	hif_debug_rl("stop timer");
1049 
1050 	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
1051 	scn->latency_detect.is_timer_started = false;
1052 }
1053 
1054 void hif_latency_detect_credit_record_time(
1055 	enum hif_credit_exchange_type type,
1056 	struct hif_opaque_softc *hif_ctx)
1057 {
1058 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1059 
1060 	if (!scn) {
1061 		hif_err("scn is null");
1062 		return;
1063 	}
1064 
1065 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1066 		return;
1067 
1068 	if (HIF_REQUEST_CREDIT == type)
1069 		scn->latency_detect.credit_request_time = qdf_system_ticks();
1070 	else if (HIF_PROCESS_CREDIT_REPORT == type)
1071 		scn->latency_detect.credit_report_time = qdf_system_ticks();
1072 
1073 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
1074 }
1075 
1076 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
1077 {
1078 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1079 
1080 	if (!scn) {
1081 		hif_err("scn is null");
1082 		return;
1083 	}
1084 
1085 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1086 		return;
1087 
1088 	scn->latency_detect.enable_detection = value;
1089 }
1090 #else
1091 static void hif_latency_detect_timer_init(struct hif_softc *scn)
1092 {}
1093 
1094 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
1095 {}
1096 #endif
1097 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1098 				  uint32_t mode,
1099 				  enum qdf_bus_type bus_type,
1100 				  struct hif_driver_state_callbacks *cbk,
1101 				  struct wlan_objmgr_psoc *psoc)
1102 {
1103 	struct hif_softc *scn;
1104 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1105 	int bus_context_size = hif_bus_get_context_size(bus_type);
1106 
1107 	if (bus_context_size == 0) {
1108 		hif_err("context size 0 not allowed");
1109 		return NULL;
1110 	}
1111 
1112 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
1113 	if (!scn)
1114 		return GET_HIF_OPAQUE_HDL(scn);
1115 
1116 	scn->qdf_dev = qdf_ctx;
1117 	scn->hif_con_param = mode;
1118 	qdf_atomic_init(&scn->active_tasklet_cnt);
1119 
1120 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
1121 	qdf_atomic_init(&scn->link_suspended);
1122 	qdf_atomic_init(&scn->tasklet_from_intr);
1123 	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
1124 	qdf_mem_copy(&scn->callbacks, cbk,
1125 		     sizeof(struct hif_driver_state_callbacks));
1126 	scn->bus_type  = bus_type;
1127 
1128 	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
1129 	hif_get_cfg_from_psoc(scn, psoc);
1130 
1131 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
1132 	status = hif_bus_open(scn, bus_type);
1133 	if (status != QDF_STATUS_SUCCESS) {
1134 		hif_err("hif_bus_open error = %d, bus_type = %d",
1135 			status, bus_type);
1136 		qdf_mem_free(scn);
1137 		scn = NULL;
1138 		goto out;
1139 	}
1140 
1141 	hif_rtpm_lock_init(scn);
1142 
1143 	hif_cpuhp_register(scn);
1144 	hif_latency_detect_timer_init(scn);
1145 
1146 out:
1147 	return GET_HIF_OPAQUE_HDL(scn);
1148 }
1149 
1150 #ifdef ADRASTEA_RRI_ON_DDR
1151 /**
1152  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
1153  * @scn: hif context
1154  *
1155  * Return: none
1156  */
1157 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
1158 {
1159 	if (scn->vaddr_rri_on_ddr)
1160 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1161 					(CE_COUNT * sizeof(uint32_t)),
1162 					scn->vaddr_rri_on_ddr,
1163 					scn->paddr_rri_on_ddr, 0);
1164 	scn->vaddr_rri_on_ddr = NULL;
1165 }
1166 #endif
1167 
1168 /**
1169  * hif_close(): hif_close
1170  * @hif_ctx: hif_ctx
1171  *
1172  * Return: n/a
1173  */
1174 void hif_close(struct hif_opaque_softc *hif_ctx)
1175 {
1176 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1177 
1178 	if (!scn) {
1179 		hif_err("hif_opaque_softc is NULL");
1180 		return;
1181 	}
1182 
1183 	hif_latency_detect_timer_deinit(scn);
1184 
1185 	if (scn->athdiag_procfs_inited) {
1186 		athdiag_procfs_remove();
1187 		scn->athdiag_procfs_inited = false;
1188 	}
1189 
1190 	if (scn->target_info.hw_name) {
1191 		char *hw_name = scn->target_info.hw_name;
1192 
1193 		scn->target_info.hw_name = "ErrUnloading";
1194 		qdf_mem_free(hw_name);
1195 	}
1196 
1197 	hif_uninit_rri_on_ddr(scn);
1198 	hif_cleanup_static_buf_to_target(scn);
1199 	hif_cpuhp_unregister(scn);
1200 	hif_rtpm_lock_deinit(scn);
1201 
1202 	hif_bus_close(scn);
1203 
1204 	qdf_mem_free(scn);
1205 }
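
/*
 * Lifecycle sketch (illustrative): hif_open()/hif_close() bracket the
 * software context, while hif_enable()/hif_disable() bracket bus and
 * target bring-up:
 *
 *	hif_ctx = hif_open(qdf_ctx, mode, bus_type, &cbk, psoc);
 *	status = hif_enable(hif_ctx, dev, bdev, bid, bus_type, type);
 *	// ... driver operation ...
 *	hif_disable(hif_ctx, disable_type);
 *	hif_close(hif_ctx);
 */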
1206 
1207 /**
1208  * hif_get_num_active_grp_tasklets() - get the number of active
1209  *		datapath group tasklets pending to be completed.
1210  * @scn: HIF context
1211  *
1212  * Returns: the number of datapath group tasklets which are active
1213  */
1214 static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
1215 {
1216 	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
1217 }
1218 
1219 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1220 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1221 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1222 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1223 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1224 	defined(QCA_WIFI_QCA9574)) || defined(QCA_WIFI_QCA5332)
1225 /**
1226  * hif_get_num_pending_work() - get the number of entries in
1227  *		the workqueue pending to be completed.
1228  * @scn: HIF context
1229  *
1230  * Returns: the number of tasklets which are active
1231  */
1232 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1233 {
1234 	return hal_get_reg_write_pending_work(scn->hal_soc);
1235 }
1236 #else
1237 
1238 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1239 {
1240 	return 0;
1241 }
1242 #endif
1243 
1244 QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
1245 {
1246 	uint32_t task_drain_wait_cnt = 0;
1247 	int tasklet = 0, grp_tasklet = 0, work = 0;
1248 
1249 	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
1250 	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1251 	       (work = hif_get_num_pending_work(scn))) {
1252 		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1253 			hif_err("pending tasklets %d grp tasklets %d work %d",
1254 				tasklet, grp_tasklet, work);
1255 			return QDF_STATUS_E_FAULT;
1256 		}
1257 		hif_info("waiting for tasklets %d grp tasklets %d work %d",
1258 			 tasklet, grp_tasklet, work);
1259 		msleep(10);
1260 	}
1261 
1262 	return QDF_STATUS_SUCCESS;
1263 }
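
/*
 * Usage sketch (illustrative): a bus suspend path can use this to make
 * sure no tasklets or pending reg-write work items are still running
 * before letting the link go down:
 *
 *	if (QDF_IS_STATUS_ERROR(hif_try_complete_tasks(scn)))
 *		return -EBUSY;	// tasks did not drain in time
 */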
1264 
1265 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1266 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1267 {
1268 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1269 	uint32_t work_drain_wait_cnt = 0;
1270 	uint32_t wait_cnt = 0;
1271 	int work = 0;
1272 
1273 	qdf_atomic_set(&scn->dp_ep_vote_access,
1274 		       HIF_EP_VOTE_ACCESS_DISABLE);
1275 	qdf_atomic_set(&scn->ep_vote_access,
1276 		       HIF_EP_VOTE_ACCESS_DISABLE);
1277 
1278 	while ((work = hif_get_num_pending_work(scn))) {
1279 		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1280 			qdf_atomic_set(&scn->dp_ep_vote_access,
1281 				       HIF_EP_VOTE_ACCESS_ENABLE);
1282 			qdf_atomic_set(&scn->ep_vote_access,
1283 				       HIF_EP_VOTE_ACCESS_ENABLE);
1284 			hif_err("timeout waiting for pending work %d", work);
1285 			return QDF_STATUS_E_FAULT;
1286 		}
1287 		qdf_sleep(10);
1288 	}
1289 
1290 	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
1291 		return QDF_STATUS_SUCCESS;
1292 
1293 	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1294 		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
1295 			hif_err("Release EP vote was not processed by FW");
1296 			return QDF_STATUS_E_FAULT;
1297 		}
1298 		qdf_sleep(5);
1299 	}
1300 
1301 	return QDF_STATUS_SUCCESS;
1302 }
1303 
1304 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1305 {
1306 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1307 	uint8_t vote_access;
1308 
1309 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1310 
1311 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1312 		hif_info("EP vote changed from %u to intermediate state",
1313 			 vote_access);
1314 
1315 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1316 		QDF_BUG(0);
1317 
1318 	qdf_atomic_set(&scn->ep_vote_access,
1319 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1320 }
1321 
1322 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1323 {
1324 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1325 
1326 	qdf_atomic_set(&scn->dp_ep_vote_access,
1327 		       HIF_EP_VOTE_ACCESS_ENABLE);
1328 	qdf_atomic_set(&scn->ep_vote_access,
1329 		       HIF_EP_VOTE_ACCESS_ENABLE);
1330 }
1331 
1332 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1333 			    uint8_t type, uint8_t access)
1334 {
1335 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1336 
1337 	if (type == HIF_EP_VOTE_DP_ACCESS)
1338 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1339 	else
1340 		qdf_atomic_set(&scn->ep_vote_access, access);
1341 }
1342 
1343 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1344 			       uint8_t type)
1345 {
1346 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1347 
1348 	if (type == HIF_EP_VOTE_DP_ACCESS)
1349 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1350 	else
1351 		return qdf_atomic_read(&scn->ep_vote_access);
1352 }
1353 #endif
1354 
1355 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1356 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1357 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1358 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1359 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1360 	defined(QCA_WIFI_QCA9574)) || defined(QCA_WIFI_QCA5332)
1361 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1362 {
1363 	if (ce_srng_based(scn)) {
1364 		scn->hal_soc = hal_attach(
1365 					hif_softc_to_hif_opaque_softc(scn),
1366 					scn->qdf_dev);
1367 		if (!scn->hal_soc)
1368 			return QDF_STATUS_E_FAILURE;
1369 	}
1370 
1371 	return QDF_STATUS_SUCCESS;
1372 }
1373 
1374 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1375 {
1376 	if (ce_srng_based(scn)) {
1377 		hal_detach(scn->hal_soc);
1378 		scn->hal_soc = NULL;
1379 	}
1380 
1381 	return QDF_STATUS_SUCCESS;
1382 }
1383 #else
1384 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1385 {
1386 	return QDF_STATUS_SUCCESS;
1387 }
1388 
1389 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1390 {
1391 	return QDF_STATUS_SUCCESS;
1392 }
1393 #endif
1394 
1395 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1396 {
1397 	int ret;
1398 
1399 	switch (bus_type) {
1400 	case QDF_BUS_TYPE_IPCI:
1401 		ret = qdf_set_dma_coherent_mask(dev,
1402 						DMA_COHERENT_MASK_DEFAULT);
1403 		if (ret) {
1404 			hif_err("Failed to set dma mask error = %d", ret);
1405 			return ret;
1406 		}
1407 
1408 		break;
1409 	default:
1410 		/* Follow the existing sequence for other targets */
1411 		break;
1412 	}
1413 
1414 	return 0;
1415 }
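
/*
 * Usage sketch (illustrative): bus probe code is expected to set the
 * DMA mask before enabling the bus:
 *
 *	ret = hif_init_dma_mask(dev, QDF_BUS_TYPE_IPCI);
 *	if (ret)
 *		return ret;
 *	status = hif_enable(hif_ctx, dev, bdev, bid,
 *			    QDF_BUS_TYPE_IPCI, type);
 */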
1416 
1417 /**
1418  * hif_enable(): hif_enable
1419  * @hif_ctx: hif_ctx
1420  * @dev: dev
1421  * @bdev: bus dev
1422  * @bid: bus ID
1423  * @bus_type: bus type
1424  * @type: enable type
1425  *
1426  * Return: QDF_STATUS
1427  */
1428 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1429 					  void *bdev,
1430 					  const struct hif_bus_id *bid,
1431 					  enum qdf_bus_type bus_type,
1432 					  enum hif_enable_type type)
1433 {
1434 	QDF_STATUS status;
1435 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1436 
1437 	if (!scn) {
1438 		hif_err("hif_ctx = NULL");
1439 		return QDF_STATUS_E_NULL_VALUE;
1440 	}
1441 
1442 	status = hif_enable_bus(scn, dev, bdev, bid, type);
1443 	if (status != QDF_STATUS_SUCCESS) {
1444 		hif_err("hif_enable_bus error = %d", status);
1445 		return status;
1446 	}
1447 
1448 	status = hif_hal_attach(scn);
1449 	if (status != QDF_STATUS_SUCCESS) {
1450 		hif_err("hal attach failed");
1451 		goto disable_bus;
1452 	}
1453 
1454 	if (hif_bus_configure(scn)) {
1455 		hif_err("Target probe failed");
1456 		status = QDF_STATUS_E_FAILURE;
1457 		goto hal_detach;
1458 	}
1459 
1460 	hif_ut_suspend_init(scn);
1461 	hif_register_recovery_notifier(scn);
1462 	hif_latency_detect_timer_start(hif_ctx);
1463 
1464 	/*
1465 	 * Flag to avoid a potential unallocated memory access from the
1466 	 * MSI interrupt handler, which could get scheduled as soon as
1467 	 * MSI is enabled; i.e. it takes care of the race where MSI is
1468 	 * enabled before the memory used by the interrupt handlers is
1469 	 * allocated.
1470 	 */
1471 
1472 	scn->hif_init_done = true;
1473 
1474 	hif_debug("OK");
1475 
1476 	return QDF_STATUS_SUCCESS;
1477 
1478 hal_detach:
1479 	hif_hal_detach(scn);
1480 disable_bus:
1481 	hif_disable_bus(scn);
1482 	return status;
1483 }
1484 
1485 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
1486 {
1487 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1488 
1489 	if (!scn)
1490 		return;
1491 
1492 	hif_set_enable_detection(hif_ctx, false);
1493 	hif_latency_detect_timer_stop(hif_ctx);
1494 
1495 	hif_unregister_recovery_notifier(scn);
1496 
1497 	hif_nointrs(scn);
1498 	if (scn->hif_init_done == false)
1499 		hif_shutdown_device(hif_ctx);
1500 	else
1501 		hif_stop(hif_ctx);
1502 
1503 	hif_hal_detach(scn);
1504 
1505 	hif_disable_bus(scn);
1506 
1507 	hif_wlan_disable(scn);
1508 
1509 	scn->notice_send = false;
1510 
1511 	hif_debug("X");
1512 }
1513 
1514 #ifdef CE_TASKLET_DEBUG_ENABLE
1515 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
1516 {
1517 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1518 
1519 	if (!scn)
1520 		return;
1521 
1522 	scn->ce_latency_stats = val;
1523 }
1524 #endif
1525 
1526 void hif_display_stats(struct hif_opaque_softc *hif_ctx)
1527 {
1528 	hif_display_bus_stats(hif_ctx);
1529 }
1530 
1531 qdf_export_symbol(hif_display_stats);
1532 
1533 void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
1534 {
1535 	hif_clear_bus_stats(hif_ctx);
1536 }
1537 
1538 /**
1539  * hif_crash_shutdown_dump_bus_register() - dump bus registers
1540  * @hif_ctx: hif_ctx
1541  *
1542  * Return: n/a
1543  */
1544 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
1545 
1546 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
1547 {
1548 	struct hif_opaque_softc *scn = hif_ctx;
1549 
1550 	if (hif_check_soc_status(scn))
1551 		return;
1552 
1553 	if (hif_dump_registers(scn))
1554 		hif_err("Failed to dump bus registers!");
1555 }
1556 
1557 /**
1558  * hif_crash_shutdown(): hif_crash_shutdown
1559  *
1560  * This function is called by the platform driver to dump CE registers
1561  *
1562  * @hif_ctx: hif_ctx
1563  *
1564  * Return: n/a
1565  */
1566 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
1567 {
1568 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1569 
1570 	if (!hif_ctx)
1571 		return;
1572 
1573 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
1574 		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
1575 		return;
1576 	}
1577 
1578 	if (TARGET_STATUS_RESET == scn->target_status) {
1579 		hif_warn("Target is already asserted, ignore!");
1580 		return;
1581 	}
1582 
1583 	if (hif_is_load_or_unload_in_progress(scn)) {
1584 		hif_err("Load/unload is in progress, ignore!");
1585 		return;
1586 	}
1587 
1588 	hif_crash_shutdown_dump_bus_register(hif_ctx);
1589 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
1590 
1591 	if (ol_copy_ramdump(hif_ctx))
1592 		goto out;
1593 
1594 	hif_info("RAM dump collection completed!");
1595 
1596 out:
1597 	return;
1598 }
1599 #else
1600 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
1601 {
1602 	hif_debug("Collecting target RAM dump disabled");
1603 }
1604 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
1605 
1606 #ifdef QCA_WIFI_3_0
1607 /**
1608  * hif_check_fw_reg(): hif_check_fw_reg
1609  * @scn: scn
1610  *
1611  * Return: int
1612  */
1613 int hif_check_fw_reg(struct hif_opaque_softc *scn)
1614 {
1615 	return 0;
1616 }
1617 #endif
1618 
1619 /**
1620  * hif_read_phy_mem_base(): hif_read_phy_mem_base
1621  * @scn: scn
1622  * @phy_mem_base: physical mem base
1623  *
1624  * Return: n/a
1625  */
1626 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
1627 {
1628 	*phy_mem_base = scn->mem_pa;
1629 }
1630 qdf_export_symbol(hif_read_phy_mem_base);
1631 
1632 /**
1633  * hif_get_device_type(): hif_get_device_type
1634  * @device_id: device_id
1635  * @revision_id: revision_id
1636  * @hif_type: returned hif_type
1637  * @target_type: returned target_type
1638  *
1639  * Return: int
1640  */
1641 int hif_get_device_type(uint32_t device_id,
1642 			uint32_t revision_id,
1643 			uint32_t *hif_type, uint32_t *target_type)
1644 {
1645 	int ret = 0;
1646 
1647 	switch (device_id) {
1648 	case ADRASTEA_DEVICE_ID_P2_E12:
1649 
1650 		*hif_type = HIF_TYPE_ADRASTEA;
1651 		*target_type = TARGET_TYPE_ADRASTEA;
1652 		break;
1653 
1654 	case AR9888_DEVICE_ID:
1655 		*hif_type = HIF_TYPE_AR9888;
1656 		*target_type = TARGET_TYPE_AR9888;
1657 		break;
1658 
1659 	case AR6320_DEVICE_ID:
1660 		switch (revision_id) {
1661 		case AR6320_FW_1_1:
1662 		case AR6320_FW_1_3:
1663 			*hif_type = HIF_TYPE_AR6320;
1664 			*target_type = TARGET_TYPE_AR6320;
1665 			break;
1666 
1667 		case AR6320_FW_2_0:
1668 		case AR6320_FW_3_0:
1669 		case AR6320_FW_3_2:
1670 			*hif_type = HIF_TYPE_AR6320V2;
1671 			*target_type = TARGET_TYPE_AR6320V2;
1672 			break;
1673 
1674 		default:
1675 			hif_err("dev_id = 0x%x, rev_id = 0x%x",
1676 				device_id, revision_id);
1677 			ret = -ENODEV;
1678 			goto end;
1679 		}
1680 		break;
1681 
1682 	case AR9887_DEVICE_ID:
1683 		*hif_type = HIF_TYPE_AR9888;
1684 		*target_type = TARGET_TYPE_AR9888;
1685 		hif_info(" *********** AR9887 **************");
1686 		break;
1687 
1688 	case QCA9984_DEVICE_ID:
1689 		*hif_type = HIF_TYPE_QCA9984;
1690 		*target_type = TARGET_TYPE_QCA9984;
1691 		hif_info(" *********** QCA9984 *************");
1692 		break;
1693 
1694 	case QCA9888_DEVICE_ID:
1695 		*hif_type = HIF_TYPE_QCA9888;
1696 		*target_type = TARGET_TYPE_QCA9888;
1697 		hif_info(" *********** QCA9888 *************");
1698 		break;
1699 
1700 	case AR900B_DEVICE_ID:
1701 		*hif_type = HIF_TYPE_AR900B;
1702 		*target_type = TARGET_TYPE_AR900B;
1703 		hif_info(" *********** AR900B *************");
1704 		break;
1705 
1706 	case QCA8074_DEVICE_ID:
1707 		*hif_type = HIF_TYPE_QCA8074;
1708 		*target_type = TARGET_TYPE_QCA8074;
1709 		hif_info(" *********** QCA8074  *************");
1710 		break;
1711 
1712 	case QCA6290_EMULATION_DEVICE_ID:
1713 	case QCA6290_DEVICE_ID:
1714 		*hif_type = HIF_TYPE_QCA6290;
1715 		*target_type = TARGET_TYPE_QCA6290;
1716 		hif_info(" *********** QCA6290EMU *************");
1717 		break;
1718 
1719 	case QCN9000_DEVICE_ID:
1720 		*hif_type = HIF_TYPE_QCN9000;
1721 		*target_type = TARGET_TYPE_QCN9000;
1722 		hif_info(" *********** QCN9000 *************");
1723 		break;
1724 
1725 	case QCN9224_DEVICE_ID:
1726 		*hif_type = HIF_TYPE_QCN9224;
1727 		*target_type = TARGET_TYPE_QCN9224;
1728 		hif_info(" *********** QCN9224 *************");
1729 		break;
1730 
1731 	case QCN6122_DEVICE_ID:
1732 		*hif_type = HIF_TYPE_QCN6122;
1733 		*target_type = TARGET_TYPE_QCN6122;
1734 		hif_info(" *********** QCN6122 *************");
1735 		break;
1736 
1737 	case QCN9160_DEVICE_ID:
1738 		*hif_type = HIF_TYPE_QCN9160;
1739 		*target_type = TARGET_TYPE_QCN9160;
1740 		hif_info(" *********** QCN9160 *************");
1741 		break;
1742 
1743 	case QCN7605_DEVICE_ID:
1744 	case QCN7605_COMPOSITE:
1745 	case QCN7605_STANDALONE:
1746 	case QCN7605_STANDALONE_V2:
1747 	case QCN7605_COMPOSITE_V2:
1748 		*hif_type = HIF_TYPE_QCN7605;
1749 		*target_type = TARGET_TYPE_QCN7605;
1750 		hif_info(" *********** QCN7605 *************");
1751 		break;
1752 
1753 	case QCA6390_DEVICE_ID:
1754 	case QCA6390_EMULATION_DEVICE_ID:
1755 		*hif_type = HIF_TYPE_QCA6390;
1756 		*target_type = TARGET_TYPE_QCA6390;
1757 		hif_info(" *********** QCA6390 *************");
1758 		break;
1759 
1760 	case QCA6490_DEVICE_ID:
1761 	case QCA6490_EMULATION_DEVICE_ID:
1762 		*hif_type = HIF_TYPE_QCA6490;
1763 		*target_type = TARGET_TYPE_QCA6490;
1764 		hif_info(" *********** QCA6490 *************");
1765 		break;
1766 
1767 	case QCA6750_DEVICE_ID:
1768 	case QCA6750_EMULATION_DEVICE_ID:
1769 		*hif_type = HIF_TYPE_QCA6750;
1770 		*target_type = TARGET_TYPE_QCA6750;
1771 		hif_info(" *********** QCA6750 *************");
1772 		break;
1773 
1774 	case KIWI_DEVICE_ID:
1775 		*hif_type = HIF_TYPE_KIWI;
1776 		*target_type = TARGET_TYPE_KIWI;
1777 		hif_info(" *********** KIWI *************");
1778 		break;
1779 
1780 	case MANGO_DEVICE_ID:
1781 		*hif_type = HIF_TYPE_MANGO;
1782 		*target_type = TARGET_TYPE_MANGO;
1783 		hif_info(" *********** MANGO *************");
1784 		break;
1785 
1786 	case QCA8074V2_DEVICE_ID:
1787 		*hif_type = HIF_TYPE_QCA8074V2;
1788 		*target_type = TARGET_TYPE_QCA8074V2;
1789 		hif_info(" *********** QCA8074V2 *************");
1790 		break;
1791 
1792 	case QCA6018_DEVICE_ID:
1793 	case RUMIM2M_DEVICE_ID_NODE0:
1794 	case RUMIM2M_DEVICE_ID_NODE1:
1795 	case RUMIM2M_DEVICE_ID_NODE2:
1796 	case RUMIM2M_DEVICE_ID_NODE3:
1797 	case RUMIM2M_DEVICE_ID_NODE4:
1798 	case RUMIM2M_DEVICE_ID_NODE5:
1799 		*hif_type = HIF_TYPE_QCA6018;
1800 		*target_type = TARGET_TYPE_QCA6018;
1801 		hif_info(" *********** QCA6018 *************");
1802 		break;
1803 
1804 	case QCA5018_DEVICE_ID:
1805 		*hif_type = HIF_TYPE_QCA5018;
1806 		*target_type = TARGET_TYPE_QCA5018;
1807 		hif_info(" *********** qca5018 *************");
1808 		break;
1809 
1810 	case QCA5332_DEVICE_ID:
1811 		*hif_type = HIF_TYPE_QCA5332;
1812 		*target_type = TARGET_TYPE_QCA5332;
1813 		hif_info(" *********** QCA5332 *************");
1814 		break;
1815 
1816 	case QCA9574_DEVICE_ID:
1817 		*hif_type = HIF_TYPE_QCA9574;
1818 		*target_type = TARGET_TYPE_QCA9574;
1819 		hif_info(" *********** QCA9574 *************");
1820 		break;
1821 
1822 	default:
1823 		hif_err("Unsupported device ID = 0x%x!", device_id);
1824 		ret = -ENODEV;
1825 		break;
1826 	}
1827 
1828 	if (*target_type == TARGET_TYPE_UNKNOWN) {
1829 		hif_err("Unsupported target_type!");
1830 		ret = -ENODEV;
1831 	}
1832 end:
1833 	return ret;
1834 }
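
/*
 * Usage sketch (illustrative): bus probe code maps the PCI IDs to HIF
 * and target types before attaching:
 *
 *	uint32_t hif_type, target_type;
 *	int ret;
 *
 *	ret = hif_get_device_type(device_id, revision_id,
 *				  &hif_type, &target_type);
 *	if (ret)
 *		return ret;	// unsupported device
 */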
1835 
1836 /**
1837  * hif_get_bus_type() - return the bus type
1838  * @hif_hdl: HIF Context
1839  *
1840  * Return: enum qdf_bus_type
1841  */
1842 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
1843 {
1844 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
1845 
1846 	return scn->bus_type;
1847 }
1848 
1849 /*
1850  * Target info and ini parameters are global to the driver.
1851  * Hence these structures are exposed to all the modules in
1852  * the driver, and they don't need to maintain multiple copies
1853  * of the same info; instead, get the handle from hif and
1854  * modify them in hif.
1855  */
1856 
1857 /**
1858  * hif_get_ini_handle() - API to get hif_config_param handle
1859  * @hif_ctx: HIF Context
1860  *
1861  * Return: pointer to hif_config_info
1862  */
1863 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
1864 {
1865 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1866 
1867 	return &sc->hif_config;
1868 }
1869 
1870 /**
1871  * hif_get_target_info_handle() - API to get hif_target_info handle
1872  * @hif_ctx: HIF context
1873  *
1874  * Return: Pointer to hif_target_info
1875  */
1876 struct hif_target_info *hif_get_target_info_handle(
1877 					struct hif_opaque_softc *hif_ctx)
1878 {
1879 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1880 
1881 	return &sc->target_info;
1882 
1883 }
1884 qdf_export_symbol(hif_get_target_info_handle);
1885 
1886 #ifdef RECEIVE_OFFLOAD
1887 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1888 				 void (offld_flush_handler)(void *))
1889 {
1890 	if (hif_napi_enabled(scn, -1))
1891 		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
1892 	else
1893 		hif_err("NAPI not enabled");
1894 }
1895 qdf_export_symbol(hif_offld_flush_cb_register);
1896 
1897 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
1898 {
1899 	if (hif_napi_enabled(scn, -1))
1900 		hif_napi_rx_offld_flush_cb_deregister(scn);
1901 	else
1902 		hif_err("NAPI not enabled");
1903 }
1904 qdf_export_symbol(hif_offld_flush_cb_deregister);
1905 
1906 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
1907 {
1908 	if (hif_napi_enabled(hif_hdl, -1))
1909 		return NAPI_PIPE2ID(ctx_id);
1910 	else
1911 		return ctx_id;
1912 }
1913 #else /* RECEIVE_OFFLOAD */
1914 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
1915 {
1916 	return 0;
1917 }
1918 qdf_export_symbol(hif_get_rx_ctx_id);
1919 #endif /* RECEIVE_OFFLOAD */
1920 
1921 #if defined(FEATURE_LRO)
1922 
1923 /**
1924  * hif_get_lro_info - Returns LRO instance for instance ID
1925  * @ctx_id: LRO instance ID
1926  * @hif_hdl: HIF Context
1927  *
1928  * Return: Pointer to LRO instance.
1929  */
1930 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
1931 {
1932 	void *data;
1933 
1934 	if (hif_napi_enabled(hif_hdl, -1))
1935 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
1936 	else
1937 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
1938 
1939 	return data;
1940 }
1941 #endif
1942 
1943 /**
1944  * hif_get_target_status - API to get target status
1945  * @hif_ctx: HIF Context
1946  *
1947  * Return: enum hif_target_status
1948  */
1949 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
1950 {
1951 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1952 
1953 	return scn->target_status;
1954 }
1955 qdf_export_symbol(hif_get_target_status);
1956 
1957 /**
1958  * hif_set_target_status() - API to set target status
1959  * @hif_ctx: HIF Context
1960  * @status: Target Status
1961  *
1962  * Return: void
1963  */
1964 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1965 			   hif_target_status status)
1966 {
1967 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1968 
1969 	scn->target_status = status;
1970 }
1971 
1972 /**
1973  * hif_init_ini_config() - API to initialize HIF configuration parameters
1974  * @hif_ctx: HIF Context
1975  * @cfg: HIF Configuration
1976  *
1977  * Return: void
1978  */
1979 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1980 			 struct hif_config_info *cfg)
1981 {
1982 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1983 
1984 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1985 }
1986 
1987 /**
1988  * hif_get_conparam() - API to get driver mode in HIF
1989  * @scn: HIF Context
1990  *
1991  * Return: driver mode of operation
1992  */
1993 uint32_t hif_get_conparam(struct hif_softc *scn)
1994 {
1995 	if (!scn)
1996 		return 0;
1997 
1998 	return scn->hif_con_param;
1999 }
2000 
2001 /**
2002  * hif_get_callbacks_handle() - API to get callbacks Handle
2003  * @scn: HIF Context
2004  *
2005  * Return: pointer to HIF Callbacks
2006  */
2007 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
2008 							struct hif_softc *scn)
2009 {
2010 	return &scn->callbacks;
2011 }
2012 
2013 /**
2014  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
2015  * @scn: HIF Context
2016  *
2017  * Return: True/False
2018  */
2019 bool hif_is_driver_unloading(struct hif_softc *scn)
2020 {
2021 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2022 
2023 	if (cbk && cbk->is_driver_unloading)
2024 		return cbk->is_driver_unloading(cbk->context);
2025 
2026 	return false;
2027 }
2028 
2029 /**
2030  * hif_is_load_or_unload_in_progress() - API to query upper layers if
2031  * load/unload in progress
2032  * @scn: HIF Context
2033  *
2034  * Return: True/False
2035  */
2036 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
2037 {
2038 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2039 
2040 	if (cbk && cbk->is_load_unload_in_progress)
2041 		return cbk->is_load_unload_in_progress(cbk->context);
2042 
2043 	return false;
2044 }
2045 
2046 /**
2047  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
2048  * progress
2049  * @scn: HIF Context
2050  *
2051  * Return: True/False
2052  */
2053 bool hif_is_recovery_in_progress(struct hif_softc *scn)
2054 {
2055 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2056 
2057 	if (cbk && cbk->is_recovery_in_progress)
2058 		return cbk->is_recovery_in_progress(cbk->context);
2059 
2060 	return false;
2061 }
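
/*
 * Example (illustrative; hdd_ctx, hdd_is_driver_unloading and
 * hdd_is_recovery_in_progress are hypothetical upper-layer names): the
 * driver state predicates above are supplied through
 * struct hif_driver_state_callbacks when HIF is opened, e.g.:
 *
 *	struct hif_driver_state_callbacks cbk = {
 *		.context = hdd_ctx,
 *		.is_driver_unloading = hdd_is_driver_unloading,
 *		.is_recovery_in_progress = hdd_is_recovery_in_progress,
 *	};
 *
 * Each query then delegates to the registered callback and
 * conservatively returns false when none is registered.
 */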
2062 
2063 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
2064     defined(HIF_IPCI)
2065 
2066 /**
2067  * hif_update_pipe_callback() - API to register pipe specific callbacks
2068  * @osc: Opaque softc
2069  * @pipeid: pipe id
2070  * @callbacks: callbacks to register
2071  *
2072  * Return: void
 */
2075 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
2076 					u_int8_t pipeid,
2077 					struct hif_msg_callbacks *callbacks)
2078 {
2079 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
2080 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2081 	struct HIF_CE_pipe_info *pipe_info;
2082 
2083 	QDF_BUG(pipeid < CE_COUNT_MAX);
2084 
2085 	hif_debug("pipeid: %d", pipeid);
2086 
2087 	pipe_info = &hif_state->pipe_info[pipeid];
2088 
2089 	qdf_mem_copy(&pipe_info->pipe_callbacks,
2090 			callbacks, sizeof(pipe_info->pipe_callbacks));
2091 }
2092 qdf_export_symbol(hif_update_pipe_callback);
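
/*
 * Example (illustrative; the handler names are hypothetical and the
 * hif_msg_callbacks field names are assumptions):
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = htc_ctx,
 *		.rxCompletionHandler = htc_rx_completion_handler,
 *	};
 *
 *	hif_update_pipe_callback(osc, pipe_id, &cbs);
 *
 * The callbacks are copied into the per-pipe state, so cbs need not
 * outlive the call.
 */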
2093 
2094 /**
 * hif_is_target_ready() - API to query if target is in ready state
2097  * @scn: HIF Context
2098  *
2099  * Return: True/False
2100  */
2101 bool hif_is_target_ready(struct hif_softc *scn)
2102 {
2103 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2104 
2105 	if (cbk && cbk->is_target_ready)
2106 		return cbk->is_target_ready(cbk->context);
2107 	/*
	 * If the callback is not registered there is no way to determine
	 * whether the target is ready. In such a case return true to
	 * indicate that the target is ready.
2111 	 */
2112 	return true;
2113 }
2114 qdf_export_symbol(hif_is_target_ready);
2115 
2116 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
2117 {
2118 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
2119 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2120 
2121 	if (cbk && cbk->get_bandwidth_level)
2122 		return cbk->get_bandwidth_level(cbk->context);
2123 
2124 	return 0;
2125 }
qdf_export_symbol(hif_get_bandwidth_level);
2128 
2129 #ifdef DP_MEM_PRE_ALLOC
2130 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
2131 					 qdf_size_t size,
2132 					 qdf_dma_addr_t *paddr,
2133 					 uint32_t ring_type,
2134 					 uint8_t *is_mem_prealloc)
2135 {
2136 	void *vaddr = NULL;
2137 	struct hif_driver_state_callbacks *cbk =
2138 				hif_get_callbacks_handle(scn);
2139 
2140 	*is_mem_prealloc = false;
2141 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
2142 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
2143 								   paddr,
2144 								   ring_type);
2145 		if (vaddr) {
2146 			*is_mem_prealloc = true;
2147 			goto end;
2148 		}
2149 	}
2150 
2151 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
2152 					 scn->qdf_dev->dev,
2153 					 size,
2154 					 paddr);
2155 end:
2156 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
2157 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
2158 		(void *)*paddr, (int)size, ring_type);
2159 
2160 	return vaddr;
2161 }
2162 
2163 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
2164 				       qdf_size_t size,
2165 				       void *vaddr,
2166 				       qdf_dma_addr_t paddr,
2167 				       qdf_dma_context_t memctx,
2168 				       uint8_t is_mem_prealloc)
2169 {
2170 	struct hif_driver_state_callbacks *cbk =
2171 				hif_get_callbacks_handle(scn);
2172 
2173 	if (is_mem_prealloc) {
2174 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
2175 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
2176 		} else {
			dp_warn("prealloc_put_consistent_mem_unaligned NULL");
2178 			QDF_BUG(0);
2179 		}
2180 	} else {
2181 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2182 					size, vaddr, paddr, memctx);
2183 	}
2184 }
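
/*
 * Example (illustrative): the is_mem_prealloc cookie returned by the
 * allocator must be fed back to the matching free so the buffer is
 * returned to the correct pool:
 *
 *	uint8_t prealloc;
 *	qdf_dma_addr_t paddr;
 *	void *va = hif_mem_alloc_consistent_unaligned(scn, size, &paddr,
 *						      ring_type, &prealloc);
 *	...
 *	hif_mem_free_consistent_unaligned(scn, size, va, paddr,
 *					  memctx, prealloc);
 */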
2185 #endif
2186 
2187 /**
2188  * hif_batch_send() - API to access hif specific function
2189  * ce_batch_send.
2190  * @osc: HIF Context
2191  * @msdu: list of msdus to be sent
2192  * @transfer_id: transfer id
2193  * @len: downloaded length
 * @sendhead: send-head indication, passed through to ce_batch_send()
 *
 * Return: list of msdus not sent
2197  */
2198 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2199 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
2200 {
2201 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2202 
2203 	if (!ce_tx_hdl)
2204 		return NULL;
2205 
2206 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2207 			len, sendhead);
2208 }
2209 qdf_export_symbol(hif_batch_send);
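
/*
 * Example (illustrative; requeue_msdus() is a hypothetical caller
 * helper): anything the CE ring could not accept is handed back and
 * should be retried or freed by the caller:
 *
 *	qdf_nbuf_t unsent = hif_batch_send(osc, msdu_list, xfer_id,
 *					   len, sendhead);
 *	if (unsent)
 *		requeue_msdus(unsent);
 */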
2210 
2211 /**
2212  * hif_update_tx_ring() - API to access hif specific function
2213  * ce_update_tx_ring.
2214  * @osc: HIF Context
 * @num_htt_cmpls: number of HTT completions received
2216  *
2217  * Return: void
2218  */
2219 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2220 {
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return;

	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2224 }
2225 qdf_export_symbol(hif_update_tx_ring);
2228 /**
2229  * hif_send_single() - API to access hif specific function
2230  * ce_send_single.
2231  * @osc: HIF Context
 * @msdu: msdu to be sent
 * @transfer_id: transfer id
 * @len: downloaded length
2235  *
2236  * Return: msdu sent status
2237  */
2238 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2239 			   uint32_t transfer_id, u_int32_t len)
2240 {
2241 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2242 
2243 	if (!ce_tx_hdl)
2244 		return QDF_STATUS_E_NULL_VALUE;
2245 
2246 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2247 			len);
2248 }
2249 qdf_export_symbol(hif_send_single);
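
/*
 * Example (illustrative; handle_send_failure() is a hypothetical
 * caller helper): check the send status before releasing the caller's
 * reference to the msdu:
 *
 *	if (QDF_IS_STATUS_ERROR(hif_send_single(osc, msdu, xfer_id, len)))
 *		handle_send_failure(msdu);
 */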
2250 #endif
2251 
2252 /**
2253  * hif_reg_write() - API to access hif specific function
2254  * hif_write32_mb.
 * @hif_ctx: HIF Context
 * @offset: offset at which the value has to be written
 * @value: value to be written
2258  *
2259  * Return: None
2260  */
2261 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2262 		uint32_t value)
2263 {
2264 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2265 
	hif_write32_mb(scn, scn->mem + offset, value);
}
2269 qdf_export_symbol(hif_reg_write);
2270 
2271 /**
2272  * hif_reg_read() - API to access hif specific function
2273  * hif_read32_mb.
 * @hif_ctx: HIF Context
 * @offset: offset from which the value has to be read
2276  *
2277  * Return: Read value
2278  */
2279 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2283 
2284 	return hif_read32_mb(scn, scn->mem + offset);
2285 }
2286 qdf_export_symbol(hif_reg_read);
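
/*
 * Example (illustrative; REG_OFFSET stands in for a real register
 * offset): read-modify-write through the memory-mapped window:
 *
 *	uint32_t val = hif_reg_read(hif_ctx, REG_OFFSET);
 *
 *	hif_reg_write(hif_ctx, REG_OFFSET, val | BIT(0));
 */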
2287 
2288 /**
2289  * hif_ramdump_handler(): generic ramdump handler
2290  * @scn: struct hif_opaque_softc
2291  *
2292  * Return: None
2293  */
2294 void hif_ramdump_handler(struct hif_opaque_softc *scn)
2295 {
2296 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2297 		hif_usb_ramdump_handler(scn);
2298 }
2299 
2300 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
2301 {
2302 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2303 
2304 	return scn->wake_irq_type;
2305 }
2306 
2307 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
2308 {
2309 	struct hif_softc *scn = context;
2310 
2311 	hif_info("wake interrupt received on irq %d", irq);
2312 
2313 	hif_rtpm_set_monitor_wake_intr(0);
2314 	hif_rtpm_request_resume();
2315 
2316 	if (scn->initial_wakeup_cb)
2317 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
2318 
2319 	if (hif_is_ut_suspended(scn))
2320 		hif_ut_fw_resume(scn);
2321 
2322 	qdf_pm_system_wakeup();
2323 
2324 	return IRQ_HANDLED;
2325 }
2326 
2327 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2328 			       void (*callback)(void *),
2329 			       void *priv)
2330 {
2331 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2332 
2333 	scn->initial_wakeup_cb = callback;
2334 	scn->initial_wakeup_priv = priv;
2335 }
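
/*
 * Example (illustrative; hdd_wakeup_cb and hdd_ctx are hypothetical):
 *
 *	hif_set_initial_wakeup_cb(hif_ctx, hdd_wakeup_cb, hdd_ctx);
 *
 * The registered callback is invoked from hif_wake_interrupt_handler()
 * when the wake interrupt fires, so it must be safe to run in
 * interrupt context.
 */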
2336 
2337 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2338 				       uint32_t ce_service_max_yield_time)
2339 {
2340 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2341 
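	/*
	 * Scale by 1000: the CE service loop tracks yield deadlines on
	 * a finer-grained clock than the configured value, which is
	 * assumed here to be in microseconds.
	 */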
2342 	hif_ctx->ce_service_max_yield_time =
2343 		ce_service_max_yield_time * 1000;
2344 }
2345 
2346 unsigned long long
2347 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
2348 {
2349 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2350 
2351 	return hif_ctx->ce_service_max_yield_time;
2352 }
2353 
2354 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2355 				       uint8_t ce_service_max_rx_ind_flush)
2356 {
2357 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2358 
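	/* clamp invalid values (0 or above MSG_FLUSH_NUM) to the maximum */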
2359 	if (ce_service_max_rx_ind_flush == 0 ||
2360 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
2361 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2362 	else
2363 		hif_ctx->ce_service_max_rx_ind_flush =
2364 						ce_service_max_rx_ind_flush;
2365 }
2366 
2367 #ifdef SYSTEM_PM_CHECK
2368 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2369 			       enum hif_system_pm_state state)
2370 {
2371 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2372 
2373 	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
2374 }
2375 
2376 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
2377 {
2378 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2379 
2380 	return qdf_atomic_read(&hif_ctx->sys_pm_state);
2381 }
2382 
2383 int hif_system_pm_state_check(struct hif_opaque_softc *hif)
2384 {
2385 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2386 	int32_t sys_pm_state;
2387 
2388 	if (!hif_ctx) {
2389 		hif_err("hif context is null");
2390 		return -EFAULT;
2391 	}
2392 
2393 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
2394 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
2395 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
2396 		hif_info("Triggering system wakeup");
2397 		qdf_pm_system_wakeup();
2398 		return -EAGAIN;
2399 	}
2400 
2401 	return 0;
2402 }
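
/*
 * Example (illustrative): bus code can gate register access on the
 * system PM state; -EAGAIN means a wakeup was just triggered and the
 * access should be retried once resume completes:
 *
 *	if (hif_system_pm_state_check(hif) == -EAGAIN)
 *		return QDF_STATUS_E_AGAIN;
 */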
2403 #endif
2404