1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "targcfg.h"
21 #include "qdf_lock.h"
22 #include "qdf_status.h"
24 #include <qdf_atomic.h>         /* qdf_atomic_read */
25 #include <targaddrs.h>
26 #include "hif_io32.h"
27 #include <hif.h>
28 #include <target_type.h>
29 #include "regtable.h"
30 #define ATH_MODULE_NAME hif
31 #include <a_debug.h>
32 #include "hif_main.h"
33 #include "hif_hw_version.h"
34 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
35      defined(HIF_IPCI))
36 #include "ce_tasklet.h"
37 #include "ce_api.h"
38 #endif
39 #include "qdf_trace.h"
41 #include "hif_debug.h"
42 #include "mp_dev.h"
43 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
44 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
45 	defined(QCA_WIFI_QCA5332)
46 #include "hal_api.h"
47 #endif
48 #include "hif_napi.h"
49 #include "hif_unit_test_suspend_i.h"
50 #include "qdf_module.h"
51 #ifdef HIF_CE_LOG_INFO
52 #include <qdf_notifier.h>
53 #include <qdf_hang_event_notifier.h>
54 #endif
55 #include <linux/cpumask.h>
56 
57 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
58 #include <pld_common.h>
59 #endif
60 
61 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
62 {
63 	hif_trigger_dump(hif_ctx, cmd_id, start);
64 }
65 
66 /**
67  * hif_get_target_id(): get the target id
68  * @scn: hif context
69  *
70  * Returns the virtual memory base address of the target
71  * to the caller.
72  *
73  * Return: A_target_id_t
74  */
75 A_target_id_t hif_get_target_id(struct hif_softc *scn)
76 {
77 	return scn->mem;
78 }
79 
80 /**
81  * hif_get_targetdef(): get the target definition table
82  * @hif_ctx: hif context
83  *
84  * Return: void *
85  */
86 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
87 {
88 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
89 
90 	return scn->targetdef;
91 }
92 
93 #ifdef FORCE_WAKE
94 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
95 			 bool init_phase)
96 {
97 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
98 
99 	if (ce_srng_based(scn))
100 		hal_set_init_phase(scn->hal_soc, init_phase);
101 }
102 #endif /* FORCE_WAKE */
103 
104 #ifdef HIF_IPCI
105 void hif_shutdown_notifier_cb(void *hif_ctx)
106 {
107 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
108 
109 	scn->recovery = true;
110 }
111 #endif
112 
113 /**
114  * hif_vote_link_down(): unvote for link up
115  * @hif_ctx: hif context
116  *
117  * Call hif_vote_link_down to release a previous hif_vote_link_up
118  * request, and only after a matching hif_vote_link_up; otherwise
119  * you could negate a vote from another source. When no votes are
120  * present, hif will not guarantee the link state after hif_bus_suspend.
121  *
122  * SYNCHRONIZE WITH hif_vote_link_up by only calling from the MC
123  * thread and the initialization/deinitialization sequences.
124  *
125  * Return: n/a
126  */
127 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
128 {
129 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
130 
131 	QDF_BUG(scn);
132 	if (scn->linkstate_vote == 0)
133 		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
134 				scn->linkstate_vote);
135 
136 	scn->linkstate_vote--;
137 	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
138 	if (scn->linkstate_vote == 0)
139 		hif_bus_prevent_linkdown(scn, false);
140 }
141 
142 /**
143  * hif_vote_link_up(): vote to prevent bus from suspending
144  * @hif_ctx: hif context
145  *
146  * Makes hif guarantee that fw can message the host normally
147  * during suspend. SYNCHRONIZE WITH hif_vote_link_down by only
148  * calling from the MC thread and the initialization/
149  * deinitialization sequences.
150  *
151  * Return: n/a
152  */
153 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
154 {
155 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
156 
157 	QDF_BUG(scn);
158 	scn->linkstate_vote++;
159 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
160 	if (scn->linkstate_vote == 1)
161 		hif_bus_prevent_linkdown(scn, true);
162 }
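
/*
 * Example (editor's illustrative sketch, not driver code): callers
 * pair the votes around any exchange that must be able to message
 * the fw across a bus suspend:
 *
 *	hif_vote_link_up(hif_ctx);
 *	... fw messaging that must survive bus suspend ...
 *	hif_vote_link_down(hif_ctx);
 */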
163 
164 /**
165  * hif_can_suspend_link(): query if hif is permitted to suspend the link
166  * @hif_ctx: hif context
167  *
168  * Hif won't suspend the link if the upper layers don't want it to.
169  *
170  * SYNCHRONIZATION: the MC thread is stopped before bus suspend, so
171  * no extra locking is needed to ensure the votes don't change while
172  * we are in the process of suspending or resuming.
173  *
174  * Return: false if hif will guarantee link up during suspend.
175  */
176 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
177 {
178 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
179 
180 	QDF_BUG(scn);
181 	return scn->linkstate_vote == 0;
182 }
183 
184 /**
185  * hif_hia_item_address(): get the address of a host interest item
186  * @target_type: target type
187  * @item_offset: offset of the item within the host interest area
188  *
189  * Return: the target address of the host interest item
190  */
191 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
192 {
193 	switch (target_type) {
194 	case TARGET_TYPE_AR6002:
195 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
196 	case TARGET_TYPE_AR6003:
197 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
198 	case TARGET_TYPE_AR6004:
199 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
200 	case TARGET_TYPE_AR6006:
201 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
202 	case TARGET_TYPE_AR9888:
203 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
204 	case TARGET_TYPE_AR6320:
205 	case TARGET_TYPE_AR6320V2:
206 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
207 	case TARGET_TYPE_ADRASTEA:
208 		/* ADRASTEA doesn't have a host interest address */
209 		ASSERT(0);
210 		return 0;
211 	case TARGET_TYPE_AR900B:
212 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
213 	case TARGET_TYPE_QCA9984:
214 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
215 	case TARGET_TYPE_QCA9888:
216 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
217 
218 	default:
219 		ASSERT(0);
220 		return 0;
221 	}
222 }
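
/*
 * Example (editor's illustrative sketch): computing the target-side
 * address of a host-interest item. The struct host_interest_s layout
 * with a hi_board_data member is assumed from targaddrs.h:
 *
 *	uint32_t addr;
 *
 *	addr = hif_hia_item_address(TARGET_TYPE_AR9888,
 *				    offsetof(struct host_interest_s,
 *					     hi_board_data));
 */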
223 
224 /**
225  * hif_max_num_receives_reached() - check if max receive count is reached
226  * @scn: HIF Context
227  * @count: number of messages processed so far in the current loop
228  *
229  * Used to bound the work done in one receive completion pass.
230  *
231  * Return: true if the receive limit has been reached
232  */
233 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
234 {
235 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
236 		return count > 120;
237 	else
238 		return count > MAX_NUM_OF_RECEIVES;
239 }
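
/*
 * Example (editor's sketch; the helpers named here are hypothetical):
 * a completion loop bounding its work with this check:
 *
 *	unsigned int count = 0;
 *
 *	while (more_messages_pending(pipe)) {
 *		process_one_message(pipe);
 *		if (hif_max_num_receives_reached(scn, ++count))
 *			break;
 *	}
 */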
240 
241 /**
242  * init_buffer_count() - initial buffer count
243  * @maxSize: maximum buffer count supported
244  *
245  * Routine to modify the initial buffer count allocated on a per-OS
246  * platform basis. Platform owners should modify this as needed.
247  *
248  * Return: qdf_size_t
249  */
250 qdf_size_t init_buffer_count(qdf_size_t maxSize)
251 {
252 	return maxSize;
253 }
254 
255 /**
256  * hif_save_htc_htt_config_endpoint() - save the HTT TX endpoint
257  * @hif_ctx: hif context
258  * @htc_htt_tx_endpoint: HTC endpoint id used for HTT TX
259  *
260  * Return: void
261  */
262 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
263 							int htc_htt_tx_endpoint)
264 {
265 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
266 
267 	if (!scn) {
268 		hif_err("scn is NULL!");
269 		return;
270 	}
271 
272 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
273 }
274 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
275 
276 static const struct qwlan_hw qwlan_hw_list[] = {
277 	{
278 		.id = AR6320_REV1_VERSION,
279 		.subid = 0,
280 		.name = "QCA6174_REV1",
281 	},
282 	{
283 		.id = AR6320_REV1_1_VERSION,
284 		.subid = 0x1,
285 		.name = "QCA6174_REV1_1",
286 	},
287 	{
288 		.id = AR6320_REV1_3_VERSION,
289 		.subid = 0x2,
290 		.name = "QCA6174_REV1_3",
291 	},
292 	{
293 		.id = AR6320_REV2_1_VERSION,
294 		.subid = 0x4,
295 		.name = "QCA6174_REV2_1",
296 	},
297 	{
298 		.id = AR6320_REV2_1_VERSION,
299 		.subid = 0x5,
300 		.name = "QCA6174_REV2_2",
301 	},
302 	{
303 		.id = AR6320_REV3_VERSION,
304 		.subid = 0x6,
305 		.name = "QCA6174_REV2.3",
306 	},
307 	{
308 		.id = AR6320_REV3_VERSION,
309 		.subid = 0x8,
310 		.name = "QCA6174_REV3",
311 	},
312 	{
313 		.id = AR6320_REV3_VERSION,
314 		.subid = 0x9,
315 		.name = "QCA6174_REV3_1",
316 	},
317 	{
318 		.id = AR6320_REV3_2_VERSION,
319 		.subid = 0xA,
320 		.name = "AR6320_REV3_2_VERSION",
321 	},
322 	{
323 		.id = QCA6390_V1,
324 		.subid = 0x0,
325 		.name = "QCA6390_V1",
326 	},
327 	{
328 		.id = QCA6490_V1,
329 		.subid = 0x0,
330 		.name = "QCA6490_V1",
331 	},
332 	{
333 		.id = WCN3990_v1,
334 		.subid = 0x0,
335 		.name = "WCN3990_V1",
336 	},
337 	{
338 		.id = WCN3990_v2,
339 		.subid = 0x0,
340 		.name = "WCN3990_V2",
341 	},
342 	{
343 		.id = WCN3990_v2_1,
344 		.subid = 0x0,
345 		.name = "WCN3990_V2.1",
346 	},
347 	{
348 		.id = WCN3998,
349 		.subid = 0x0,
350 		.name = "WCN3998",
351 	},
352 	{
353 		.id = QCA9379_REV1_VERSION,
354 		.subid = 0xC,
355 		.name = "QCA9379_REV1",
356 	},
357 	{
358 		.id = QCA9379_REV1_VERSION,
359 		.subid = 0xD,
360 		.name = "QCA9379_REV1_1",
361 	},
362 	{
363 		.id = KIWI_V1,
364 		.subid = 0xE,
365 		.name = "KIWI_V1",
366 	},
367 	{
368 		.id = MANGO_V1,
369 		.subid = 0xF,
370 		.name = "MANGO_V1",
371 	}
372 };
373 
374 /**
375  * hif_get_hw_name(): get a human readable name for the hardware
376  * @info: Target Info
377  *
378  * Return: human readable name for the underlying wifi hardware.
379  */
380 static const char *hif_get_hw_name(struct hif_target_info *info)
381 {
382 	int i;
383 
384 	if (info->hw_name)
385 		return info->hw_name;
386 
387 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
388 		if (info->target_version == qwlan_hw_list[i].id &&
389 		    info->target_revision == qwlan_hw_list[i].subid) {
390 			return qwlan_hw_list[i].name;
391 		}
392 	}
393 
394 	info->hw_name = qdf_mem_malloc(64);
395 	if (!info->hw_name)
396 		return "Unknown Device (nomem)";
397 
398 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
399 			info->target_version);
400 	if (i < 0)
401 		return "Unknown Device (snprintf failure)";
402 	else
403 		return info->hw_name;
404 }
405 
406 /**
407  * hif_get_hw_info(): query hardware version, revision and name
408  * @scn: hif context
409  * @version: buffer for the target version
410  * @revision: buffer for the target revision
411  * @target_name: buffer for a human readable hardware name
412  * Return: n/a
413  */
414 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
415 			const char **target_name)
416 {
417 	struct hif_target_info *info = hif_get_target_info_handle(scn);
418 	struct hif_softc *sc = HIF_GET_SOFTC(scn);
419 
420 	if (sc->bus_type == QDF_BUS_TYPE_USB)
421 		hif_usb_get_hw_info(sc);
422 
423 	*version = info->target_version;
424 	*revision = info->target_revision;
425 	*target_name = hif_get_hw_name(info);
426 }
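
/*
 * Example (editor's sketch): a caller fetching the hardware identity
 * once the target info is populated:
 *
 *	u32 version, revision;
 *	const char *name;
 *
 *	hif_get_hw_info(hif_ctx, &version, &revision, &name);
 *	hif_info("wifi hw: %s (0x%x rev 0x%x)", name, version, revision);
 */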
427 
428 /**
429  * hif_get_dev_ba(): API to get the device base address.
430  * @hif_handle: hif handle
431  *
432  * Return: device virtual memory base address
433  */
436 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
437 {
438 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
439 
440 	return scn->mem;
441 }
442 qdf_export_symbol(hif_get_dev_ba);
443 
444 /**
445  * hif_get_dev_ba_ce(): API to get the device CE base address.
446  * @hif_handle: hif handle
447  *
448  * Return: dev mem base address for CE
449  */
450 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
451 {
452 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
453 
454 	return scn->mem_ce;
455 }
456 
457 qdf_export_symbol(hif_get_dev_ba_ce);
458 
459 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
460 {
461 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
462 
463 	return scn->target_info.soc_version;
464 }
465 
466 qdf_export_symbol(hif_get_soc_version);
467 
468 #ifdef FEATURE_RUNTIME_PM
469 void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
470 {
471 	if (is_get)
472 		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
473 	else
474 		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
475 }
476 
477 static inline
478 void hif_rtpm_lock_init(struct hif_softc *scn)
479 {
480 	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
481 }
482 
483 static inline
484 void hif_rtpm_lock_deinit(struct hif_softc *scn)
485 {
486 	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
487 }
488 #else
489 static inline
490 void hif_rtpm_lock_init(struct hif_softc *scn)
491 {
492 }
493 
494 static inline
495 void hif_rtpm_lock_deinit(struct hif_softc *scn)
496 {
497 }
498 #endif
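
/*
 * Example (editor's illustrative sketch, FEATURE_RUNTIME_PM builds):
 * a caller brackets a critical section with the prevent-linkdown
 * runtime vote:
 *
 *	hif_runtime_prevent_linkdown(scn, true);
 *	... bus access that requires the link to stay up ...
 *	hif_runtime_prevent_linkdown(scn, false);
 */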
499 
500 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
501 /**
502  * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
503  * @scn: hif context
504  * @psoc: psoc objmgr handle
505  *
506  * Return: None
507  */
508 static inline
509 void hif_get_cfg_from_psoc(struct hif_softc *scn,
510 			   struct wlan_objmgr_psoc *psoc)
511 {
512 	if (psoc) {
513 		scn->ini_cfg.ce_status_ring_timer_threshold =
514 			cfg_get(psoc,
515 				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
516 		scn->ini_cfg.ce_status_ring_batch_count_threshold =
517 			cfg_get(psoc,
518 				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
519 	}
520 }
521 #else
522 static inline
523 void hif_get_cfg_from_psoc(struct hif_softc *scn,
524 			   struct wlan_objmgr_psoc *psoc)
525 {
526 }
527 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
528 
529 #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
530 /**
531  * hif_recovery_notifier_cb - Recovery notifier callback to log
532  *  hang event data
533  * @block: notifier block
534  * @state: notifier state
535  * @data: notifier data
536  *
537  * Return: status
538  */
539 static
540 int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
541 			     void *data)
542 {
543 	struct qdf_notifer_data *notif_data = data;
544 	qdf_notif_block *notif_block;
545 	struct hif_softc *hif_handle;
546 	bool bus_id_invalid;
547 
548 	if (!data || !block)
549 		return -EINVAL;
550 
551 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
552 
553 	hif_handle = notif_block->priv_data;
554 	if (!hif_handle)
555 		return -EINVAL;
556 
557 	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
558 					  &notif_data->offset);
559 	if (bus_id_invalid)
560 		return NOTIFY_STOP_MASK;
561 
562 	hif_log_ce_info(hif_handle, notif_data->hang_data,
563 			&notif_data->offset);
564 
565 	return 0;
566 }
567 
568 /**
569  * hif_register_recovery_notifier - Register hif recovery notifier
570  * @hif_handle: hif handle
571  *
572  * Return: status
573  */
574 static
575 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
576 {
577 	qdf_notif_block *hif_notifier;
578 
579 	if (!hif_handle)
580 		return QDF_STATUS_E_FAILURE;
581 
582 	hif_notifier = &hif_handle->hif_recovery_notifier;
583 
584 	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
585 	hif_notifier->priv_data = hif_handle;
586 	return qdf_hang_event_register_notifier(hif_notifier);
587 }
588 
589 /**
590  * hif_unregister_recovery_notifier - Un-register hif recovery notifier
591  * @hif_handle: hif handle
592  *
593  * Return: status
594  */
595 static
596 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
597 {
598 	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
599 
600 	return qdf_hang_event_unregister_notifier(hif_notifier);
601 }
602 #else
603 static inline
604 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
605 {
606 	return QDF_STATUS_SUCCESS;
607 }
608 
609 static inline
610 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
611 {
612 	return QDF_STATUS_SUCCESS;
613 }
614 #endif
615 
616 #ifdef HIF_CPU_PERF_AFFINE_MASK
617 /**
618  * __hif_cpu_hotplug_notify() - CPU hotplug event handler
619  * @context: HIF context
620  * @cpu: CPU Id of the CPU generating the event
621  * @cpu_up: true if the CPU is online
622  * Return: None
623  */
624 static void __hif_cpu_hotplug_notify(void *context,
625 				     uint32_t cpu, bool cpu_up)
626 {
627 	struct hif_softc *scn = context;
628 
629 	if (!scn)
630 		return;
631 	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
632 		return;
633 
634 	if (cpu_up) {
635 		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
636 		hif_debug("Setting affinity for online CPU: %d", cpu);
637 	} else {
638 		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
639 	}
640 }
641 
642 /**
643  * hif_cpu_hotplug_notify() - CPU core up/down notification handler
644  * @context: HIF context
645  * @cpu: CPU generating the event
646  * @cpu_up: true if the CPU is online
647  *
648  * Return: None
649  */
650 static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
651 {
652 	struct qdf_op_sync *op_sync;
653 
654 	if (qdf_op_protect(&op_sync))
655 		return;
656 
657 	__hif_cpu_hotplug_notify(context, cpu, cpu_up);
658 
659 	qdf_op_unprotect(op_sync);
660 }
661 
662 static void hif_cpu_online_cb(void *context, uint32_t cpu)
663 {
664 	hif_cpu_hotplug_notify(context, cpu, true);
665 }
666 
667 static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
668 {
669 	hif_cpu_hotplug_notify(context, cpu, false);
670 }
671 
672 static void hif_cpuhp_register(struct hif_softc *scn)
673 {
674 	if (!scn) {
675 		hif_info_high("cannot register hotplug notifiers");
676 		return;
677 	}
678 	qdf_cpuhp_register(&scn->cpuhp_event_handle,
679 			   scn,
680 			   hif_cpu_online_cb,
681 			   hif_cpu_before_offline_cb);
682 }
683 
684 static void hif_cpuhp_unregister(struct hif_softc *scn)
685 {
686 	if (!scn) {
687 		hif_info_high("cannot unregister hotplug notifiers");
688 		return;
689 	}
690 	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
691 }
692 
693 #else
694 static void hif_cpuhp_register(struct hif_softc *scn)
695 {
696 }
697 
698 static void hif_cpuhp_unregister(struct hif_softc *scn)
699 {
700 }
701 #endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
702 
703 #ifdef HIF_DETECTION_LATENCY_ENABLE
704 
705 void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
706 {
707 	qdf_time_t ce2_tasklet_sched_time =
708 		scn->latency_detect.ce2_tasklet_sched_time;
709 	qdf_time_t ce2_tasklet_exec_time =
710 		scn->latency_detect.ce2_tasklet_exec_time;
711 	qdf_time_t curr_jiffies = qdf_system_ticks();
712 	uint32_t detect_latency_threshold =
713 		scn->latency_detect.detect_latency_threshold;
714 	int cpu_id = qdf_get_cpu();
715 
716 	/* Two kinds of check here.
717 	 * from_timer==true:  check if the tasklet has stalled
718 	 * from_timer==false: check if the tasklet executed late
719 	 */
720 
721 	if ((from_timer ?
722 	    qdf_system_time_after(ce2_tasklet_sched_time,
723 				  ce2_tasklet_exec_time) :
724 	    qdf_system_time_after(ce2_tasklet_exec_time,
725 				  ce2_tasklet_sched_time)) &&
726 	    qdf_system_time_after(
727 		curr_jiffies,
728 		ce2_tasklet_sched_time +
729 		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
730 		hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu, ce2_tasklet_exec_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
731 			from_timer, curr_jiffies, ce2_tasklet_sched_time,
732 			ce2_tasklet_exec_time, detect_latency_threshold,
733 			scn->latency_detect.detect_latency_timer_timeout,
734 			cpu_id, (void *)_RET_IP_);
735 		goto latency;
736 	}
737 	return;
738 
739 latency:
740 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
741 }
742 
743 void hif_credit_latency(struct hif_softc *scn, bool from_timer)
744 {
745 	qdf_time_t credit_request_time =
746 		scn->latency_detect.credit_request_time;
747 	qdf_time_t credit_report_time =
748 		scn->latency_detect.credit_report_time;
749 	qdf_time_t curr_jiffies = qdf_system_ticks();
750 	uint32_t detect_latency_threshold =
751 		scn->latency_detect.detect_latency_threshold;
752 	int cpu_id = qdf_get_cpu();
753 
754 	/* Two kinds of check here.
755 	 * from_timer==true:  check if the credit report has stalled
756 	 * from_timer==false: check if the credit report arrived late
757 	 */
758 
759 	if ((from_timer ?
760 	    qdf_system_time_after(credit_request_time,
761 				  credit_report_time) :
762 	    qdf_system_time_after(credit_report_time,
763 				  credit_request_time)) &&
764 	    qdf_system_time_after(
765 		curr_jiffies,
766 		credit_request_time +
767 		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
768 		hif_err("credit report latency: from_timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
769 			from_timer, curr_jiffies, credit_request_time,
770 			credit_report_time, detect_latency_threshold,
771 			scn->latency_detect.detect_latency_timer_timeout,
772 			cpu_id, (void *)_RET_IP_);
773 		goto latency;
774 	}
775 	return;
776 
777 latency:
778 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
779 }
780 
781 /**
782  * hif_check_detection_latency(): check for tasklet/credit latency
783  *
784  * @scn: hif context
785  * @from_timer: true if called from the timer handler
786  * @bitmap_type: bitmap of which checks to run (tasklet, credit or both)
787  *
788  * Return: none
789  */
790 void hif_check_detection_latency(struct hif_softc *scn,
791 				 bool from_timer,
792 				 uint32_t bitmap_type)
793 {
794 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
795 		return;
796 
797 	if (!scn->latency_detect.enable_detection)
798 		return;
799 
800 	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
801 		hif_tasklet_latency(scn, from_timer);
802 
803 	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
804 		hif_credit_latency(scn, from_timer);
805 }
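
/*
 * Example (editor's sketch): the credit path below records its
 * timestamps and then runs only the credit check, outside of the
 * timer context:
 *
 *	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
 */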
806 
807 static void hif_latency_detect_timeout_handler(void *arg)
808 {
809 	struct hif_softc *scn = (struct hif_softc *)arg;
810 	int next_cpu;
811 
812 	hif_check_detection_latency(scn, true,
813 				    BIT(HIF_DETECT_TASKLET) |
814 				    BIT(HIF_DETECT_CREDIT));
815 
816 	/* We need to make sure the timer starts on a different CPU,
817 	 * so it can detect a tasklet schedule stall. There is still a
818 	 * chance that, after the timer has been started, the
819 	 * irq/tasklet lands on the same CPU; the tasklet then
820 	 * executes before the softirq timer, and if that tasklet
821 	 * stalls, the timer can't detect it. We accept this as a
822 	 * limitation: if the tasklet stalls, some other path will
823 	 * detect it, just a little later.
824 	 */
825 	next_cpu = cpumask_any_but(
826 			cpu_active_mask,
827 			scn->latency_detect.ce2_tasklet_sched_cpuid);
828 
829 	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
830 		hif_debug("start timer on local");
831 		/* no other available CPU was found, start on the local CPU */
832 		qdf_timer_mod(
833 			&scn->latency_detect.detect_latency_timer,
834 			scn->latency_detect.detect_latency_timer_timeout);
835 	} else {
836 		qdf_timer_start_on(
837 			&scn->latency_detect.detect_latency_timer,
838 			scn->latency_detect.detect_latency_timer_timeout,
839 			next_cpu);
840 	}
841 }
842 
843 static void hif_latency_detect_timer_init(struct hif_softc *scn)
844 {
845 	if (!scn) {
846 		hif_info_high("scn is null");
847 		return;
848 	}
849 
850 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
851 		return;
852 
853 	scn->latency_detect.detect_latency_timer_timeout =
854 		DETECTION_TIMER_TIMEOUT;
855 	scn->latency_detect.detect_latency_threshold =
856 		DETECTION_LATENCY_THRESHOLD;
857 
858 	hif_info("timer timeout %u, latency threshold %u",
859 		 scn->latency_detect.detect_latency_timer_timeout,
860 		 scn->latency_detect.detect_latency_threshold);
861 
862 	scn->latency_detect.is_timer_started = false;
863 
864 	qdf_timer_init(NULL,
865 		       &scn->latency_detect.detect_latency_timer,
866 		       &hif_latency_detect_timeout_handler,
867 		       scn,
868 		       QDF_TIMER_TYPE_SW_SPIN);
869 }
870 
871 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
872 {
873 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
874 		return;
875 
876 	hif_info("deinit timer");
877 	qdf_timer_free(&scn->latency_detect.detect_latency_timer);
878 }
879 
880 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
881 {
882 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
883 
884 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
885 		return;
886 
887 	hif_debug_rl("start timer");
888 	if (scn->latency_detect.is_timer_started) {
889 		hif_info("timer has been started");
890 		return;
891 	}
892 
893 	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
894 			scn->latency_detect.detect_latency_timer_timeout);
895 	scn->latency_detect.is_timer_started = true;
896 }
897 
898 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
899 {
900 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
901 
902 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
903 		return;
904 
905 	hif_debug_rl("stop timer");
906 
907 	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
908 	scn->latency_detect.is_timer_started = false;
909 }
910 
911 void hif_latency_detect_credit_record_time(
912 	enum hif_credit_exchange_type type,
913 	struct hif_opaque_softc *hif_ctx)
914 {
915 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
916 
917 	if (!scn) {
918 		hif_err("scn is null");
919 		return;
920 	}
921 
922 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
923 		return;
924 
925 	if (HIF_REQUEST_CREDIT == type)
926 		scn->latency_detect.credit_request_time = qdf_system_ticks();
927 	else if (HIF_PROCESS_CREDIT_REPORT == type)
928 		scn->latency_detect.credit_report_time = qdf_system_ticks();
929 
930 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
931 }
932 
933 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
934 {
935 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
936 
937 	if (!scn) {
938 		hif_err("scn is null");
939 		return;
940 	}
941 
942 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
943 		return;
944 
945 	scn->latency_detect.enable_detection = value;
946 }
947 #else
948 static void hif_latency_detect_timer_init(struct hif_softc *scn)
949 {}
950 
951 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
952 {}
953 #endif
954 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
955 				  uint32_t mode,
956 				  enum qdf_bus_type bus_type,
957 				  struct hif_driver_state_callbacks *cbk,
958 				  struct wlan_objmgr_psoc *psoc)
959 {
960 	struct hif_softc *scn;
961 	QDF_STATUS status = QDF_STATUS_SUCCESS;
962 	int bus_context_size = hif_bus_get_context_size(bus_type);
963 
964 	if (bus_context_size == 0) {
965 		hif_err("context size 0 not allowed");
966 		return NULL;
967 	}
968 
969 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
970 	if (!scn)
971 		return GET_HIF_OPAQUE_HDL(scn);
972 
973 	scn->qdf_dev = qdf_ctx;
974 	scn->hif_con_param = mode;
975 	qdf_atomic_init(&scn->active_tasklet_cnt);
976 
977 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
978 	qdf_atomic_init(&scn->link_suspended);
979 	qdf_atomic_init(&scn->tasklet_from_intr);
980 	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
981 	qdf_mem_copy(&scn->callbacks, cbk,
982 		     sizeof(struct hif_driver_state_callbacks));
983 	scn->bus_type  = bus_type;
984 
985 	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
986 	hif_get_cfg_from_psoc(scn, psoc);
987 
988 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
989 	status = hif_bus_open(scn, bus_type);
990 	if (status != QDF_STATUS_SUCCESS) {
991 		hif_err("hif_bus_open error = %d, bus_type = %d",
992 			status, bus_type);
993 		qdf_mem_free(scn);
994 		scn = NULL;
995 		goto out;
996 	}
997 
998 	hif_rtpm_lock_init(scn);
999 
1000 	hif_cpuhp_register(scn);
1001 	hif_latency_detect_timer_init(scn);
1002 
1003 out:
1004 	return GET_HIF_OPAQUE_HDL(scn);
1005 }
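
/*
 * Example (editor's illustrative sketch): the expected open/enable
 * lifecycle as driven by the upper layer; error handling trimmed,
 * and the PCI bus type is just one possible choice:
 *
 *	struct hif_opaque_softc *hif_ctx;
 *
 *	hif_ctx = hif_open(qdf_ctx, QDF_GLOBAL_MISSION_MODE,
 *			   QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (!hif_ctx)
 *		return QDF_STATUS_E_NOMEM;
 *	if (QDF_IS_STATUS_ERROR(hif_enable(hif_ctx, dev, bdev, bid,
 *					   QDF_BUS_TYPE_PCI,
 *					   HIF_ENABLE_TYPE_PROBE)))
 *		hif_close(hif_ctx);
 */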
1006 
1007 #ifdef ADRASTEA_RRI_ON_DDR
1008 /**
1009  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
1010  * @scn: hif context
1011  *
1012  * Return: none
1013  */
1014 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
1015 {
1016 	if (scn->vaddr_rri_on_ddr)
1017 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1018 					(CE_COUNT * sizeof(uint32_t)),
1019 					scn->vaddr_rri_on_ddr,
1020 					scn->paddr_rri_on_ddr, 0);
1021 	scn->vaddr_rri_on_ddr = NULL;
1022 }
1023 #endif
1024 
1025 /**
1026  * hif_close(): destroy the hif context created by hif_open()
1027  * @hif_ctx: hif context
1028  *
1029  * Return: n/a
1030  */
1031 void hif_close(struct hif_opaque_softc *hif_ctx)
1032 {
1033 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1034 
1035 	if (!scn) {
1036 		hif_err("hif_opaque_softc is NULL");
1037 		return;
1038 	}
1039 
1040 	hif_latency_detect_timer_deinit(scn);
1041 
1042 	if (scn->athdiag_procfs_inited) {
1043 		athdiag_procfs_remove();
1044 		scn->athdiag_procfs_inited = false;
1045 	}
1046 
1047 	if (scn->target_info.hw_name) {
1048 		char *hw_name = scn->target_info.hw_name;
1049 
1050 		scn->target_info.hw_name = "ErrUnloading";
1051 		qdf_mem_free(hw_name);
1052 	}
1053 
1054 	hif_uninit_rri_on_ddr(scn);
1055 	hif_cleanup_static_buf_to_target(scn);
1056 	hif_cpuhp_unregister(scn);
1057 	hif_rtpm_lock_deinit(scn);
1058 
1059 	hif_bus_close(scn);
1060 
1061 	qdf_mem_free(scn);
1062 }
1063 
1064 /**
1065  * hif_get_num_active_grp_tasklets() - get the number of active
1066  *		datapath group tasklets pending to be completed.
1067  * @scn: HIF context
1068  *
1069  * Returns: the number of datapath group tasklets which are active
1070  */
1071 static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
1072 {
1073 	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
1074 }
1075 
1076 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1077 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1078 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1079 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1080 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1081 	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
1082 /**
1083  * hif_get_num_pending_work() - get the number of entries in
1084  *		the workqueue pending to be completed.
1085  * @scn: HIF context
1086  *
1087  * Returns: the number of pending register write work items
1088  */
1089 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1090 {
1091 	return hal_get_reg_write_pending_work(scn->hal_soc);
1092 }
1093 #else
1094 
1095 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1096 {
1097 	return 0;
1098 }
1099 #endif
1100 
1101 QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
1102 {
1103 	uint32_t task_drain_wait_cnt = 0;
1104 	int tasklet = 0, grp_tasklet = 0, work = 0;
1105 
1106 	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
1107 	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1108 	       (work = hif_get_num_pending_work(scn))) {
1109 		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1110 			hif_err("pending tasklets %d grp tasklets %d work %d",
1111 				tasklet, grp_tasklet, work);
1112 			return QDF_STATUS_E_FAULT;
1113 		}
1114 		hif_info("waiting for tasklets %d grp tasklets %d work %d",
1115 			 tasklet, grp_tasklet, work);
1116 		msleep(10);
1117 	}
1118 
1119 	return QDF_STATUS_SUCCESS;
1120 }
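
/*
 * Example (editor's sketch): a suspend path drains tasklets and
 * pending register-write work before powering the bus down:
 *
 *	if (QDF_IS_STATUS_ERROR(hif_try_complete_tasks(scn)))
 *		return -EBUSY;
 */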
1121 
1122 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1123 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1124 {
1125 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1126 	uint32_t work_drain_wait_cnt = 0;
1127 	uint32_t wait_cnt = 0;
1128 	int work = 0;
1129 
1130 	qdf_atomic_set(&scn->dp_ep_vote_access,
1131 		       HIF_EP_VOTE_ACCESS_DISABLE);
1132 	qdf_atomic_set(&scn->ep_vote_access,
1133 		       HIF_EP_VOTE_ACCESS_DISABLE);
1134 
1135 	while ((work = hif_get_num_pending_work(scn))) {
1136 		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1137 			qdf_atomic_set(&scn->dp_ep_vote_access,
1138 				       HIF_EP_VOTE_ACCESS_ENABLE);
1139 			qdf_atomic_set(&scn->ep_vote_access,
1140 				       HIF_EP_VOTE_ACCESS_ENABLE);
1141 			hif_err("timeout waiting for pending work %d", work);
1142 			return QDF_STATUS_E_FAULT;
1143 		}
1144 		qdf_sleep(10);
1145 	}
1146 
1147 	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
1148 		return QDF_STATUS_SUCCESS;
1149 
1150 	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1151 		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
1152 			hif_err("Release of EP vote was not processed by FW");
1153 			return QDF_STATUS_E_FAULT;
1154 		}
1155 		qdf_sleep(5);
1156 	}
1157 
1158 	return QDF_STATUS_SUCCESS;
1159 }
1160 
1161 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1162 {
1163 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1164 	uint8_t vote_access;
1165 
1166 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1167 
1168 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1169 		hif_info("EP vote changed from:%u to intermediate state",
1170 			 vote_access);
1171 
1172 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1173 		QDF_BUG(0);
1174 
1175 	qdf_atomic_set(&scn->ep_vote_access,
1176 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1177 }
1178 
1179 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1180 {
1181 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1182 
1183 	qdf_atomic_set(&scn->dp_ep_vote_access,
1184 		       HIF_EP_VOTE_ACCESS_ENABLE);
1185 	qdf_atomic_set(&scn->ep_vote_access,
1186 		       HIF_EP_VOTE_ACCESS_ENABLE);
1187 }
1188 
1189 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1190 			    uint8_t type, uint8_t access)
1191 {
1192 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1193 
1194 	if (type == HIF_EP_VOTE_DP_ACCESS)
1195 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1196 	else
1197 		qdf_atomic_set(&scn->ep_vote_access, access);
1198 }
1199 
1200 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1201 			       uint8_t type)
1202 {
1203 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1204 
1205 	if (type == HIF_EP_VOTE_DP_ACCESS)
1206 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1207 	else
1208 		return qdf_atomic_read(&scn->ep_vote_access);
1209 }
1210 #endif
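
/*
 * Example (editor's sketch, IPCI with delayed register writes only):
 * the datapath can gate register access on the endpoint vote state:
 *
 *	if (hif_get_ep_vote_access(hif_ctx, HIF_EP_VOTE_DP_ACCESS) ==
 *	    HIF_EP_VOTE_ACCESS_DISABLE)
 *		return;
 */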
1211 
1212 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1213 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1214 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1215 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1216 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1217 	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
1218 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1219 {
1220 	if (ce_srng_based(scn)) {
1221 		scn->hal_soc = hal_attach(
1222 					hif_softc_to_hif_opaque_softc(scn),
1223 					scn->qdf_dev);
1224 		if (!scn->hal_soc)
1225 			return QDF_STATUS_E_FAILURE;
1226 	}
1227 
1228 	return QDF_STATUS_SUCCESS;
1229 }
1230 
1231 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1232 {
1233 	if (ce_srng_based(scn)) {
1234 		hal_detach(scn->hal_soc);
1235 		scn->hal_soc = NULL;
1236 	}
1237 
1238 	return QDF_STATUS_SUCCESS;
1239 }
1240 #else
1241 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1242 {
1243 	return QDF_STATUS_SUCCESS;
1244 }
1245 
1246 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1247 {
1248 	return QDF_STATUS_SUCCESS;
1249 }
1250 #endif
1251 
1252 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1253 {
1254 	int ret;
1255 
1256 	switch (bus_type) {
1257 	case QDF_BUS_TYPE_IPCI:
1258 		ret = qdf_set_dma_coherent_mask(dev,
1259 						DMA_COHERENT_MASK_DEFAULT);
1260 		if (ret) {
1261 			hif_err("Failed to set dma mask error = %d", ret);
1262 			return ret;
1263 		}
1264 
1265 		break;
1266 	default:
1267 		/* Follow the existing sequence for other targets */
1268 		break;
1269 	}
1270 
1271 	return 0;
1272 }
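
/*
 * Example (editor's sketch): bus probe code sets the DMA mask before
 * any descriptor memory is allocated:
 *
 *	if (hif_init_dma_mask(dev, QDF_BUS_TYPE_IPCI))
 *		return QDF_STATUS_E_FAILURE;
 */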
1273 
1274 /**
1275  * hif_enable(): hif_enable
1276  * @hif_ctx: hif_ctx
1277  * @dev: dev
1278  * @bdev: bus dev
1279  * @bid: bus ID
1280  * @bus_type: bus type
1281  * @type: enable type
1282  *
1283  * Return: QDF_STATUS
1284  */
1285 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1286 					  void *bdev,
1287 					  const struct hif_bus_id *bid,
1288 					  enum qdf_bus_type bus_type,
1289 					  enum hif_enable_type type)
1290 {
1291 	QDF_STATUS status;
1292 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1293 
1294 	if (!scn) {
1295 		hif_err("hif_ctx = NULL");
1296 		return QDF_STATUS_E_NULL_VALUE;
1297 	}
1298 
1299 	status = hif_enable_bus(scn, dev, bdev, bid, type);
1300 	if (status != QDF_STATUS_SUCCESS) {
1301 		hif_err("hif_enable_bus error = %d", status);
1302 		return status;
1303 	}
1304 
1305 	status = hif_hal_attach(scn);
1306 	if (status != QDF_STATUS_SUCCESS) {
1307 		hif_err("hal attach failed");
1308 		goto disable_bus;
1309 	}
1310 
1311 	if (hif_bus_configure(scn)) {
1312 		hif_err("Target probe failed");
1313 		status = QDF_STATUS_E_FAILURE;
1314 		goto hal_detach;
1315 	}
1316 
1317 	hif_ut_suspend_init(scn);
1318 	hif_register_recovery_notifier(scn);
1319 	hif_latency_detect_timer_start(hif_ctx);
1320 
1321 	/*
1322 	 * Flag to avoid potential unallocated memory access from MSI
1323 	 * interrupt handler which could get scheduled as soon as MSI
1324 	 * is enabled, i.e. to take care of the race where MSI is
1325 	 * enabled before the memory used by the interrupt handlers
1326 	 * has been allocated.
1327 	 */
1328 
1329 	scn->hif_init_done = true;
1330 
1331 	hif_debug("OK");
1332 
1333 	return QDF_STATUS_SUCCESS;
1334 
1335 hal_detach:
1336 	hif_hal_detach(scn);
1337 disable_bus:
1338 	hif_disable_bus(scn);
1339 	return status;
1340 }
1341 
1342 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
1343 {
1344 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1345 
1346 	if (!scn)
1347 		return;
1348 
1349 	hif_set_enable_detection(hif_ctx, false);
1350 	hif_latency_detect_timer_stop(hif_ctx);
1351 
1352 	hif_unregister_recovery_notifier(scn);
1353 
1354 	hif_nointrs(scn);
1355 	if (scn->hif_init_done == false)
1356 		hif_shutdown_device(hif_ctx);
1357 	else
1358 		hif_stop(hif_ctx);
1359 
1360 	hif_hal_detach(scn);
1361 
1362 	hif_disable_bus(scn);
1363 
1364 	hif_wlan_disable(scn);
1365 
1366 	scn->notice_send = false;
1367 
1368 	hif_debug("X");
1369 }
1370 
1371 #ifdef CE_TASKLET_DEBUG_ENABLE
1372 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
1373 {
1374 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1375 
1376 	if (!scn)
1377 		return;
1378 
1379 	scn->ce_latency_stats = val;
1380 }
1381 #endif
1382 
1383 void hif_display_stats(struct hif_opaque_softc *hif_ctx)
1384 {
1385 	hif_display_bus_stats(hif_ctx);
1386 }
1387 
1388 qdf_export_symbol(hif_display_stats);
1389 
1390 void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
1391 {
1392 	hif_clear_bus_stats(hif_ctx);
1393 }
1394 
1395 /**
1396  * hif_crash_shutdown_dump_bus_register() - dump bus registers
1397  * @hif_ctx: hif_ctx
1398  *
1399  * Return: n/a
1400  */
1401 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
1402 
1403 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
1404 {
1405 	struct hif_opaque_softc *scn = hif_ctx;
1406 
1407 	if (hif_check_soc_status(scn))
1408 		return;
1409 
1410 	if (hif_dump_registers(scn))
1411 		hif_err("Failed to dump bus registers!");
1412 }
1413 
1414 /**
1415  * hif_crash_shutdown(): dump CE registers on a target crash
1416  * @hif_ctx: hif context
1417  *
1418  * This function is called by the platform driver to dump CE
1419  * registers when the target asserts.
1420  *
1421  * Return: n/a
1422  */
1423 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
1424 {
1425 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1426 
1427 	if (!hif_ctx)
1428 		return;
1429 
1430 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
1431 		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
1432 		return;
1433 	}
1434 
1435 	if (TARGET_STATUS_RESET == scn->target_status) {
1436 		hif_warn("Target is already asserted, ignore!");
1437 		return;
1438 	}
1439 
1440 	if (hif_is_load_or_unload_in_progress(scn)) {
1441 		hif_err("Load/unload is in progress, ignore!");
1442 		return;
1443 	}
1444 
1445 	hif_crash_shutdown_dump_bus_register(hif_ctx);
1446 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
1447 
1448 	if (ol_copy_ramdump(hif_ctx))
1449 		goto out;
1450 
1451 	hif_info("RAM dump collection completed!");
1452 
1453 out:
1454 	return;
1455 }
1456 #else
1457 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
1458 {
1459 	hif_debug("Collecting target RAM dump disabled");
1460 }
1461 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC && WLAN_FEATURE_BMI */
1462 
1463 #ifdef QCA_WIFI_3_0
1464 /**
1465  * hif_check_fw_reg(): check the firmware register state
1466  * @scn: hif context
1467  *
1468  * No-op for QCA_WIFI_3_0 targets; always reports success.
1469  * Return: int
1470  */
1471 int hif_check_fw_reg(struct hif_opaque_softc *scn)
1472 {
1473 	return 0;
1474 }
1475 #endif
1476 
1477 /**
1478  * hif_read_phy_mem_base(): hif_read_phy_mem_base
1479  * @scn: scn
1480  * @phy_mem_base: physical mem base
1481  *
1482  * Return: n/a
1483  */
1484 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
1485 {
1486 	*phy_mem_base = scn->mem_pa;
1487 }
1488 qdf_export_symbol(hif_read_phy_mem_base);
1489 
1490 /**
1491  * hif_get_device_type(): hif_get_device_type
1492  * @device_id: device_id
1493  * @revision_id: revision_id
1494  * @hif_type: returned hif_type
1495  * @target_type: returned target_type
1496  *
1497  * Return: int
1498  */
1499 int hif_get_device_type(uint32_t device_id,
1500 			uint32_t revision_id,
1501 			uint32_t *hif_type, uint32_t *target_type)
1502 {
1503 	int ret = 0;
1504 
1505 	switch (device_id) {
1506 	case ADRASTEA_DEVICE_ID_P2_E12:
1507 
1508 		*hif_type = HIF_TYPE_ADRASTEA;
1509 		*target_type = TARGET_TYPE_ADRASTEA;
1510 		break;
1511 
1512 	case AR9888_DEVICE_ID:
1513 		*hif_type = HIF_TYPE_AR9888;
1514 		*target_type = TARGET_TYPE_AR9888;
1515 		break;
1516 
1517 	case AR6320_DEVICE_ID:
1518 		switch (revision_id) {
1519 		case AR6320_FW_1_1:
1520 		case AR6320_FW_1_3:
1521 			*hif_type = HIF_TYPE_AR6320;
1522 			*target_type = TARGET_TYPE_AR6320;
1523 			break;
1524 
1525 		case AR6320_FW_2_0:
1526 		case AR6320_FW_3_0:
1527 		case AR6320_FW_3_2:
1528 			*hif_type = HIF_TYPE_AR6320V2;
1529 			*target_type = TARGET_TYPE_AR6320V2;
1530 			break;
1531 
1532 		default:
1533 			hif_err("dev_id = 0x%x, rev_id = 0x%x",
1534 				device_id, revision_id);
1535 			ret = -ENODEV;
1536 			goto end;
1537 		}
1538 		break;
1539 
1540 	case AR9887_DEVICE_ID:
1541 		*hif_type = HIF_TYPE_AR9888;
1542 		*target_type = TARGET_TYPE_AR9888;
1543 		hif_info(" *********** AR9887 **************");
1544 		break;
1545 
1546 	case QCA9984_DEVICE_ID:
1547 		*hif_type = HIF_TYPE_QCA9984;
1548 		*target_type = TARGET_TYPE_QCA9984;
1549 		hif_info(" *********** QCA9984 *************");
1550 		break;
1551 
1552 	case QCA9888_DEVICE_ID:
1553 		*hif_type = HIF_TYPE_QCA9888;
1554 		*target_type = TARGET_TYPE_QCA9888;
1555 		hif_info(" *********** QCA9888 *************");
1556 		break;
1557 
1558 	case AR900B_DEVICE_ID:
1559 		*hif_type = HIF_TYPE_AR900B;
1560 		*target_type = TARGET_TYPE_AR900B;
1561 		hif_info(" *********** AR900B *************");
1562 		break;
1563 
1564 	case QCA8074_DEVICE_ID:
1565 		*hif_type = HIF_TYPE_QCA8074;
1566 		*target_type = TARGET_TYPE_QCA8074;
1567 		hif_info(" *********** QCA8074  *************");
1568 		break;
1569 
1570 	case QCA6290_EMULATION_DEVICE_ID:
1571 	case QCA6290_DEVICE_ID:
1572 		*hif_type = HIF_TYPE_QCA6290;
1573 		*target_type = TARGET_TYPE_QCA6290;
1574 		hif_info(" *********** QCA6290EMU *************");
1575 		break;
1576 
1577 	case QCN9000_DEVICE_ID:
1578 		*hif_type = HIF_TYPE_QCN9000;
1579 		*target_type = TARGET_TYPE_QCN9000;
1580 		hif_info(" *********** QCN9000 *************");
1581 		break;
1582 
1583 	case QCN9224_DEVICE_ID:
1584 		*hif_type = HIF_TYPE_QCN9224;
1585 		*target_type = TARGET_TYPE_QCN9224;
1586 		hif_info(" *********** QCN9224 *************");
1587 		break;
1588 
1589 	case QCN6122_DEVICE_ID:
1590 		*hif_type = HIF_TYPE_QCN6122;
1591 		*target_type = TARGET_TYPE_QCN6122;
1592 		hif_info(" *********** QCN6122 *************");
1593 		break;
1594 
1595 	case QCN7605_DEVICE_ID:
1596 	case QCN7605_COMPOSITE:
1597 	case QCN7605_STANDALONE:
1598 	case QCN7605_STANDALONE_V2:
1599 	case QCN7605_COMPOSITE_V2:
1600 		*hif_type = HIF_TYPE_QCN7605;
1601 		*target_type = TARGET_TYPE_QCN7605;
1602 		hif_info(" *********** QCN7605 *************");
1603 		break;
1604 
1605 	case QCA6390_DEVICE_ID:
1606 	case QCA6390_EMULATION_DEVICE_ID:
1607 		*hif_type = HIF_TYPE_QCA6390;
1608 		*target_type = TARGET_TYPE_QCA6390;
1609 		hif_info(" *********** QCA6390 *************");
1610 		break;
1611 
1612 	case QCA6490_DEVICE_ID:
1613 	case QCA6490_EMULATION_DEVICE_ID:
1614 		*hif_type = HIF_TYPE_QCA6490;
1615 		*target_type = TARGET_TYPE_QCA6490;
1616 		hif_info(" *********** QCA6490 *************");
1617 		break;
1618 
1619 	case QCA6750_DEVICE_ID:
1620 	case QCA6750_EMULATION_DEVICE_ID:
1621 		*hif_type = HIF_TYPE_QCA6750;
1622 		*target_type = TARGET_TYPE_QCA6750;
1623 		hif_info(" *********** QCA6750 *************");
1624 		break;
1625 
1626 	case KIWI_DEVICE_ID:
1627 		*hif_type = HIF_TYPE_KIWI;
1628 		*target_type = TARGET_TYPE_KIWI;
1629 		hif_info(" *********** KIWI *************");
1630 		break;
1631 
1632 	case MANGO_DEVICE_ID:
1633 		*hif_type = HIF_TYPE_MANGO;
1634 		*target_type = TARGET_TYPE_MANGO;
1635 		hif_info(" *********** MANGO *************");
1636 		break;
1637 
1638 	case QCA8074V2_DEVICE_ID:
1639 		*hif_type = HIF_TYPE_QCA8074V2;
1640 		*target_type = TARGET_TYPE_QCA8074V2;
1641 		hif_info(" *********** QCA8074V2 *************");
1642 		break;
1643 
1644 	case QCA6018_DEVICE_ID:
1645 	case RUMIM2M_DEVICE_ID_NODE0:
1646 	case RUMIM2M_DEVICE_ID_NODE1:
1647 	case RUMIM2M_DEVICE_ID_NODE2:
1648 	case RUMIM2M_DEVICE_ID_NODE3:
1649 	case RUMIM2M_DEVICE_ID_NODE4:
1650 	case RUMIM2M_DEVICE_ID_NODE5:
1651 		*hif_type = HIF_TYPE_QCA6018;
1652 		*target_type = TARGET_TYPE_QCA6018;
1653 		hif_info(" *********** QCA6018 *************");
1654 		break;
1655 
1656 	case QCA5018_DEVICE_ID:
1657 		*hif_type = HIF_TYPE_QCA5018;
1658 		*target_type = TARGET_TYPE_QCA5018;
1659 		hif_info(" *********** qca5018 *************");
1660 		break;
1661 
1662 	case QCA5332_DEVICE_ID:
1663 		*hif_type = HIF_TYPE_QCA5332;
1664 		*target_type = TARGET_TYPE_QCA5332;
1665 		hif_info(" *********** QCA5332 *************");
1666 		break;
1667 
1668 	case QCA9574_DEVICE_ID:
1669 		*hif_type = HIF_TYPE_QCA9574;
1670 		*target_type = TARGET_TYPE_QCA9574;
1671 		hif_info(" *********** QCA9574 *************");
1672 		break;
1673 
1674 	default:
1675 		hif_err("Unsupported device ID = 0x%x!", device_id);
1676 		ret = -ENODEV;
1677 		break;
1678 	}
1679 
1680 	if (*target_type == TARGET_TYPE_UNKNOWN) {
1681 		hif_err("Unsupported target_type!");
1682 		ret = -ENODEV;
1683 	}
1684 end:
1685 	return ret;
1686 }
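
/*
 * Example (editor's illustrative sketch): PCI probe code resolving
 * the HIF and target types from the PCI IDs before attaching:
 *
 *	uint32_t hif_type, target_type;
 *
 *	if (hif_get_device_type(pdev->device, pdev->revision,
 *				&hif_type, &target_type))
 *		return -ENODEV;
 */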
1687 
1688 /**
1689  * hif_get_bus_type() - return the bus type
1690  * @hif_hdl: HIF Context
1691  * Return: enum qdf_bus_type
1692  */
1693 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
1694 {
1695 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
1696 
1697 	return scn->bus_type;
1698 }
1699 
1700 /*
1701  * Target info and ini parameters are global to the driver.
1702  * Hence these structures are exposed to all the modules in
1703  * the driver; they don't need to maintain multiple copies
1704  * of the same info. Instead, get the handle from hif and
1705  * modify them in hif.
1706  */
1707 
1708 /**
1709  * hif_get_ini_handle() - API to get hif_config_param handle
1710  * @hif_ctx: HIF Context
1711  *
1712  * Return: pointer to hif_config_info
1713  */
1714 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
1715 {
1716 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1717 
1718 	return &sc->hif_config;
1719 }
1720 
1721 /**
1722  * hif_get_target_info_handle() - API to get hif_target_info handle
1723  * @hif_ctx: HIF context
1724  *
1725  * Return: Pointer to hif_target_info
1726  */
1727 struct hif_target_info *hif_get_target_info_handle(
1728 					struct hif_opaque_softc *hif_ctx)
1729 {
1730 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1731 
1732 	return &sc->target_info;
1733 
1734 }
1735 qdf_export_symbol(hif_get_target_info_handle);
1736 
1737 #ifdef RECEIVE_OFFLOAD
1738 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1739 				 void (offld_flush_handler)(void *))
1740 {
1741 	if (hif_napi_enabled(scn, -1))
1742 		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
1743 	else
1744 		hif_err("NAPI not enabled");
1745 }
1746 qdf_export_symbol(hif_offld_flush_cb_register);
1747 
1748 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
1749 {
1750 	if (hif_napi_enabled(scn, -1))
1751 		hif_napi_rx_offld_flush_cb_deregister(scn);
1752 	else
1753 		hif_err("NAPI not enabled");
1754 }
1755 qdf_export_symbol(hif_offld_flush_cb_deregister);
1756 
1757 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
1758 {
1759 	if (hif_napi_enabled(hif_hdl, -1))
1760 		return NAPI_PIPE2ID(ctx_id);
1761 	else
1762 		return ctx_id;
1763 }
1764 #else /* RECEIVE_OFFLOAD */
1765 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
1766 {
1767 	return 0;
1768 }
1769 qdf_export_symbol(hif_get_rx_ctx_id);
1770 #endif /* RECEIVE_OFFLOAD */
1771 
1772 #if defined(FEATURE_LRO)
1773 
1774 /**
1775  * hif_get_lro_info - Returns LRO instance for instance ID
1776  * @ctx_id: LRO instance ID
1777  * @hif_hdl: HIF Context
1778  *
1779  * Return: Pointer to LRO instance.
1780  */
1781 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
1782 {
1783 	void *data;
1784 
1785 	if (hif_napi_enabled(hif_hdl, -1))
1786 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
1787 	else
1788 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
1789 
1790 	return data;
1791 }
1792 #endif
1793 
1794 /**
1795  * hif_get_target_status - API to get target status
1796  * @hif_ctx: HIF Context
1797  *
1798  * Return: enum hif_target_status
1799  */
1800 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
1801 {
1802 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1803 
1804 	return scn->target_status;
1805 }
1806 qdf_export_symbol(hif_get_target_status);
1807 
1808 /**
1809  * hif_set_target_status() - API to set target status
1810  * @hif_ctx: HIF Context
1811  * @status: Target Status
1812  *
1813  * Return: void
1814  */
1815 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1816 			   hif_target_status status)
1817 {
1818 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1819 
1820 	scn->target_status = status;
1821 }
1822 
1823 /**
1824  * hif_init_ini_config() - API to initialize HIF configuration parameters
1825  * @hif_ctx: HIF Context
1826  * @cfg: HIF Configuration
1827  *
1828  * Return: void
1829  */
1830 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1831 			 struct hif_config_info *cfg)
1832 {
1833 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1834 
1835 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1836 }
1837 
1838 /**
1839  * hif_get_conparam() - API to get driver mode in HIF
1840  * @scn: HIF Context
1841  *
1842  * Return: driver mode of operation
1843  */
1844 uint32_t hif_get_conparam(struct hif_softc *scn)
1845 {
1846 	if (!scn)
1847 		return 0;
1848 
1849 	return scn->hif_con_param;
1850 }
1851 
1852 /**
1853  * hif_get_callbacks_handle() - API to get callbacks Handle
1854  * @scn: HIF Context
1855  *
1856  * Return: pointer to HIF Callbacks
1857  */
1858 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
1859 							struct hif_softc *scn)
1860 {
1861 	return &scn->callbacks;
1862 }
1863 
1864 /**
1865  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1866  * @scn: HIF Context
1867  *
1868  * Return: True/False
1869  */
1870 bool hif_is_driver_unloading(struct hif_softc *scn)
1871 {
1872 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1873 
1874 	if (cbk && cbk->is_driver_unloading)
1875 		return cbk->is_driver_unloading(cbk->context);
1876 
1877 	return false;
1878 }
1879 
1880 /**
1881  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1882  * load/unload in progress
1883  * @scn: HIF Context
1884  *
1885  * Return: True/False
1886  */
1887 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1888 {
1889 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1890 
1891 	if (cbk && cbk->is_load_unload_in_progress)
1892 		return cbk->is_load_unload_in_progress(cbk->context);
1893 
1894 	return false;
1895 }
1896 
1897 /**
1898  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1899  * progress
1900  * @scn: HIF Context
1901  *
1902  * Return: True/False
1903  */
1904 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1905 {
1906 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1907 
1908 	if (cbk && cbk->is_recovery_in_progress)
1909 		return cbk->is_recovery_in_progress(cbk->context);
1910 
1911 	return false;
1912 }
1913 
1914 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
1915     defined(HIF_IPCI)
1916 
1917 /**
1918  * hif_update_pipe_callback() - API to register pipe specific callbacks
1919  * @osc: Opaque softc
1920  * @pipeid: pipe id
1921  * @callbacks: callbacks to register
1922  *
1923  * Return: void
1924  */
1926 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1927 					u_int8_t pipeid,
1928 					struct hif_msg_callbacks *callbacks)
1929 {
1930 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
1931 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1932 	struct HIF_CE_pipe_info *pipe_info;
1933 
1934 	QDF_BUG(pipeid < CE_COUNT_MAX);
1935 
1936 	hif_debug("pipeid: %d", pipeid);
1937 
1938 	pipe_info = &hif_state->pipe_info[pipeid];
1939 
1940 	qdf_mem_copy(&pipe_info->pipe_callbacks,
1941 			callbacks, sizeof(pipe_info->pipe_callbacks));
1942 }
1943 qdf_export_symbol(hif_update_pipe_callback);
1944 
1945 /**
1946  * hif_is_target_ready() - API to query if the target is in the
1947  * ready state
1948  * @scn: HIF Context
1949  *
1950  * Return: True/False
1951  */
1952 bool hif_is_target_ready(struct hif_softc *scn)
1953 {
1954 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1955 
1956 	if (cbk && cbk->is_target_ready)
1957 		return cbk->is_target_ready(cbk->context);
1958 	/*
1959 	 * if the callback is not registered then there is no way to
1960 	 * determine if the target is ready. In such a case, return
1961 	 * true to indicate that the target is ready.
1962 	 */
1963 	return true;
1964 }
1965 qdf_export_symbol(hif_is_target_ready);
1966 
1967 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
1968 {
1969 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1970 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1971 
1972 	if (cbk && cbk->get_bandwidth_level)
1973 		return cbk->get_bandwidth_level(cbk->context);
1974 
1975 	return 0;
1976 }
1977 
1978 qdf_export_symbol(hif_get_bandwidth_level);

#ifdef DP_MEM_PRE_ALLOC
void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
					 qdf_size_t size,
					 qdf_dma_addr_t *paddr,
					 uint32_t ring_type,
					 uint8_t *is_mem_prealloc)
{
	void *vaddr = NULL;
	struct hif_driver_state_callbacks *cbk =
				hif_get_callbacks_handle(scn);

	*is_mem_prealloc = false;
	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
								   paddr,
								   ring_type);
		if (vaddr) {
			*is_mem_prealloc = true;
			goto end;
		}
	}

	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
					 scn->qdf_dev->dev,
					 size,
					 paddr);
end:
	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
		(void *)*paddr, (int)size, ring_type);

	return vaddr;
}

void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
				       qdf_size_t size,
				       void *vaddr,
				       qdf_dma_addr_t paddr,
				       qdf_dma_context_t memctx,
				       uint8_t is_mem_prealloc)
{
	struct hif_driver_state_callbacks *cbk =
				hif_get_callbacks_handle(scn);

	if (is_mem_prealloc) {
		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
		} else {
			dp_warn("dp_prealloc_put_consistent_unaligned NULL");
			QDF_BUG(0);
		}
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					size, vaddr, paddr, memctx);
	}
}
#endif
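
/*
 * Pairing sketch (illustrative): callers must feed the is_mem_prealloc
 * flag produced by the allocator back into the matching free, so a
 * pre-allocated chunk is returned to the pool instead of the DMA
 * allocator. ring_size, ring_type, and memctx below are placeholders.
 *
 *	uint8_t prealloc;
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = hif_mem_alloc_consistent_unaligned(scn, ring_size,
 *						   &paddr, ring_type,
 *						   &prealloc);
 *	...
 *	hif_mem_free_consistent_unaligned(scn, ring_size, vaddr, paddr,
 *					  memctx, prealloc);
 */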

/**
 * hif_batch_send() - API to access hif specific function
 * ce_batch_send.
 * @osc: HIF Context
 * @msdu: list of msdus to be sent
 * @transfer_id: transfer id
 * @len: downloaded length
 * @sendhead: sendhead flag, passed through to ce_batch_send
 *
 * Return: list of msdus not sent
 */
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return NULL;

	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			len, sendhead);
}
qdf_export_symbol(hif_batch_send);
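
/*
 * Caller-side sketch (assumptions flagged): hif_batch_send() returns the
 * sub-list of msdus that could not be queued on the HTT tx copy engine,
 * so a caller would typically retry or drop that remainder. The variable
 * names below are hypothetical.
 *
 *	qdf_nbuf_t remainder;
 *
 *	remainder = hif_batch_send(osc, msdu_list, xfer_id, download_len,
 *				   sendhead);
 *	if (remainder)
 *		; // re-queue or free the unsent msdus
 */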

/**
 * hif_update_tx_ring() - API to access hif specific function
 * ce_update_tx_ring.
 * @osc: HIF Context
 * @num_htt_cmpls: number of HTT completions received
 *
 * Return: void
 */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
}
qdf_export_symbol(hif_update_tx_ring);

/**
 * hif_send_single() - API to access hif specific function
 * ce_send_single.
 * @osc: HIF Context
 * @msdu: msdu to be sent
 * @transfer_id: transfer id
 * @len: downloaded length
 *
 * Return: msdu sent status
 */
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			   uint32_t transfer_id, u_int32_t len)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return QDF_STATUS_E_NULL_VALUE;

	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			len);
}
qdf_export_symbol(hif_send_single);
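
/*
 * Caller-side sketch (illustrative only): a single-msdu download through
 * the HTT tx copy engine. The error branch is an assumption; ownership of
 * the nbuf on failure follows the transport's own rules.
 *
 *	QDF_STATUS status;
 *
 *	status = hif_send_single(osc, nbuf, xfer_id, download_len);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		; // CE handle missing or send failed; handle nbuf per
 *		  // the transport's ownership rules
 */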
#endif

/**
 * hif_reg_write() - API to access hif specific function
 * hif_write32_mb.
 * @hif_ctx: HIF Context
 * @offset: offset at which the value has to be written
 * @value: value to be written
 *
 * Return: None
 */
void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		uint32_t value)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_write32_mb(scn, scn->mem + offset, value);
}
qdf_export_symbol(hif_reg_write);

/**
 * hif_reg_read() - API to access hif specific function
 * hif_read32_mb.
 * @hif_ctx: HIF Context
 * @offset: offset from which value has to be read
 *
 * Return: value read
 */
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return hif_read32_mb(scn, scn->mem + offset);
}
qdf_export_symbol(hif_reg_read);
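
/*
 * Read-modify-write sketch built from the two accessors above. Offsets
 * are relative to scn->mem, since both helpers add the base internally;
 * MY_REG_OFFSET and MY_REG_ENABLE_BIT are placeholders.
 *
 *	uint32_t val;
 *
 *	val = hif_reg_read(hif_ctx, MY_REG_OFFSET);
 *	val |= MY_REG_ENABLE_BIT;
 *	hif_reg_write(hif_ctx, MY_REG_OFFSET, val);
 */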

/**
 * hif_ramdump_handler(): generic ramdump handler
 * @scn: struct hif_opaque_softc
 *
 * Return: None
 */
void hif_ramdump_handler(struct hif_opaque_softc *scn)
{
	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
		hif_usb_ramdump_handler(scn);
}

hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->wake_irq_type;
}

irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
{
	struct hif_softc *scn = context;

	hif_info("wake interrupt received on irq %d", irq);

	hif_rtpm_set_monitor_wake_intr(0);
	hif_rtpm_request_resume();

	if (scn->initial_wakeup_cb)
		scn->initial_wakeup_cb(scn->initial_wakeup_priv);

	if (hif_is_ut_suspended(scn))
		hif_ut_fw_resume(scn);

	qdf_pm_system_wakeup();

	return IRQ_HANDLED;
}

void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->initial_wakeup_cb = callback;
	scn->initial_wakeup_priv = priv;
}
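
/*
 * Registration sketch (illustrative): the callback installed here is the
 * one invoked from hif_wake_interrupt_handler() above when a wake
 * interrupt fires. The handler below is hypothetical.
 *
 *	static void my_initial_wakeup(void *priv)
 *	{
 *		// note the wake reason, kick deferred resume work, etc.
 *	}
 *
 *	hif_set_initial_wakeup_cb(hif_ctx, my_initial_wakeup, my_priv);
 */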

void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	hif_ctx->ce_service_max_yield_time =
		ce_service_max_yield_time * 1000;
}

unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return hif_ctx->ce_service_max_yield_time;
}
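
/*
 * Unit note and sketch: the setter stores its argument multiplied by
 * 1000, so the getter returns a value 1000x finer-grained than what was
 * passed in. The x1000 conversion suggests a microsecond argument kept
 * as nanoseconds for comparison against scheduler-clock timestamps, but
 * the exact units are an assumption drawn from the conversion alone.
 *
 *	hif_set_ce_service_max_yield_time(hif, 500);
 *	// hif_get_ce_service_max_yield_time(hif) now returns 500000
 */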

void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	if (ce_service_max_rx_ind_flush == 0 ||
	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
	else
		hif_ctx->ce_service_max_rx_ind_flush =
						ce_service_max_rx_ind_flush;
}
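
/*
 * Clamping sketch: a zero or out-of-range request falls back to
 * MSG_FLUSH_NUM (assuming MSG_FLUSH_NUM >= 1).
 *
 *	hif_set_ce_service_max_rx_ind_flush(hif, 0);  // stored: MSG_FLUSH_NUM
 *	hif_set_ce_service_max_rx_ind_flush(hif, 1);  // stored: 1
 */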

#ifdef SYSTEM_PM_CHECK
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
}

int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return qdf_atomic_read(&hif_ctx->sys_pm_state);
}

int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
	int32_t sys_pm_state;

	if (!hif_ctx) {
		hif_err("hif context is null");
		return -EFAULT;
	}

	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
		hif_info("Triggering system wakeup");
		qdf_pm_system_wakeup();
		return -EAGAIN;
	}

	return 0;
}
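
/*
 * Caller pattern sketch (illustrative): -EAGAIN means the bus is
 * suspending or suspended and a system wakeup has been triggered, so
 * callers typically abort the access and retry after resume.
 *
 *	if (hif_system_pm_state_check(hif) == -EAGAIN)
 *		return -EAGAIN;	// defer the register access until resume
 */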
#endif