xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision 7630cc90f02e8e853426e72adcbd746fb48d2d89)
/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "hif_hw_version.h"
#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
     defined(HIF_IPCI))
#include "ce_tasklet.h"
#include "ce_api.h"
#endif
#include "qdf_trace.h"
#include "hif_debug.h"
#include "mp_dev.h"
#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
#include "hal_api.h"
#endif
#include "hif_napi.h"
#include "hif_unit_test_suspend_i.h"
#include "qdf_module.h"
#ifdef HIF_CE_LOG_INFO
#include <qdf_notifier.h>
#include <qdf_hang_event_notifier.h>
#endif
#include <linux/cpumask.h>

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
#include <pld_common.h>
#endif

void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}

/**
 * hif_get_target_id(): get the target id
 * @scn: hif_softc
 *
 * Return the virtual memory base address to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}

/**
 * hif_get_targetdef(): get the target definition
 * @hif_ctx: hif context
 *
 * Return: void *
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}

#ifdef FORCE_WAKE
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn))
		hal_set_init_phase(scn->hal_soc, init_phase);
}
#endif /* FORCE_WAKE */

#ifdef HIF_IPCI
void hif_shutdown_notifier_cb(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->recovery = true;
}
#endif

/**
 * hif_vote_link_down(): unvote for link up
 * @hif_ctx: hif context
 *
 * Call hif_vote_link_down to release a previous request made using
 * hif_vote_link_up. A hif_vote_link_down call should only be made
 * after a corresponding hif_vote_link_up, otherwise you could be
 * negating a vote from another source. When no votes are present
 * hif will not guarantee the linkstate after hif_bus_suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	if (scn->linkstate_vote == 0)
		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
				scn->linkstate_vote);

	scn->linkstate_vote--;
	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 0)
		hif_bus_prevent_linkdown(scn, false);
}

/**
 * hif_vote_link_up(): vote to prevent bus from suspending
 * @hif_ctx: hif context
 *
 * Makes hif guarantee that fw can message the host normally
 * during suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote++;
	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 1)
		hif_bus_prevent_linkdown(scn, true);
}

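/*
 * Example (illustrative sketch only, not part of this driver): a caller
 * that must be able to exchange messages with fw across a suspend
 * window brackets the critical section with a balanced vote pair. The
 * exchange helper named here is hypothetical.
 *
 *	hif_vote_link_up(hif_ctx);
 *	status = do_suspend_sensitive_exchange(hif_ctx);
 *	hif_vote_link_down(hif_ctx);
 *
 * Per the notes above, the pair must be issued from the MC thread or
 * from init/deinit paths so the vote count cannot race.
 */
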
/**
 * hif_can_suspend_link(): query if hif is permitted to suspend the link
 * @hif_ctx: hif context
 *
 * Hif will ensure that the link won't be suspended if the upper layers
 * don't want it to.
 *
 * SYNCHRONIZATION: MC thread is stopped before bus suspend, thus
 * we don't need extra locking to ensure votes don't change while
 * we are in the process of suspending or resuming.
 *
 * Return: false if hif will guarantee link up during suspend.
 */
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	return scn->linkstate_vote == 0;
}

/**
 * hif_hia_item_address(): get host interest area item address
 * @target_type: target_type
 * @item_offset: item_offset
 *
 * Return: host interest area address of the item, or 0 for targets
 * that have no host interest area
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	default:
		ASSERT(0);
		return 0;
	}
}

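/*
 * Example (illustrative only): resolving the host interest item that
 * holds the board data address on an AR6320-class target. The
 * host_interest_s layout comes from targaddrs.h; treat the exact field
 * name here as an assumption.
 *
 *	uint32_t addr;
 *
 *	addr = hif_hia_item_address(TARGET_TYPE_AR6320,
 *				    offsetof(struct host_interest_s,
 *					     hi_board_data));
 *	// addr == AR6320_HOST_INTEREST_ADDRESS + the item offset
 */
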
/**
 * hif_max_num_receives_reached() - check if max receive count is reached
 * @scn: HIF Context
 * @count: number of messages received so far
 *
 * Return: true if count exceeds the allowed number of receives
 */
bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
{
	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
		return count > 120;
	else
		return count > MAX_NUM_OF_RECEIVES;
}

/**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum number of buffers
 *
 * Routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed.
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}

/**
 * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
 * @hif_ctx: hif context
 * @htc_htt_tx_endpoint: htt_tx_endpoint
 *
 * Return: void
 */
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is NULL!");
		return;
	}

	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
}
qdf_export_symbol(hif_save_htc_htt_config_endpoint);

static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	},
	{
		.id = KIWI_V1,
		.subid = 0xE,
		.name = "KIWI_V1",
	},
	{
		.id = MANGO_V1,
		.subid = 0xF,
		.name = "MANGO_V1",
	}
};

/**
 * hif_get_hw_name(): get a human readable name for the hardware
 * @info: Target Info
 *
 * Return: human readable name for the underlying wifi hardware.
 */
static const char *hif_get_hw_name(struct hif_target_info *info)
{
	int i;

	if (info->hw_name)
		return info->hw_name;

	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
		if (info->target_version == qwlan_hw_list[i].id &&
		    info->target_revision == qwlan_hw_list[i].subid) {
			return qwlan_hw_list[i].name;
		}
	}

	info->hw_name = qdf_mem_malloc(64);
	if (!info->hw_name)
		return "Unknown Device (nomem)";

	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
			info->target_version);
	if (i < 0)
		return "Unknown Device (snprintf failure)";
	else
		return info->hw_name;
}

/**
 * hif_get_hw_info(): get hw info
 * @scn: scn
 * @version: version
 * @revision: revision
 * @target_name: returned human readable name of the hardware
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
		     const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}

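/*
 * Example (illustrative only): a probe path logging the detected
 * hardware via the getter above.
 *
 *	u32 version, revision;
 *	const char *name;
 *
 *	hif_get_hw_info(hif_ctx, &version, &revision, &name);
 *	hif_info("wifi hw: %s (version 0x%x revision 0x%x)",
 *		 name, version, revision);
 */
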
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);

/**
 * hif_get_dev_ba_ce(): API to get device CE base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CE
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_ce;
}

qdf_export_symbol(hif_get_dev_ba_ce);

uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->target_info.soc_version;
}

qdf_export_symbol(hif_get_soc_version);

#ifdef FEATURE_RUNTIME_PM
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
	if (is_get)
		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
}
#else
static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
}
#endif

#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.ce_status_ring_timer_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
		scn->ini_cfg.ce_status_ring_batch_count_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
	}
}
#else
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */

#if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
/**
 * hif_recovery_notifier_cb - Recovery notifier callback to log
 *  hang event data
 * @block: notifier block
 * @state: state
 * @data: notifier data
 *
 * Return: status
 */
static
int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
			     void *data)
{
	struct qdf_notifer_data *notif_data = data;
	qdf_notif_block *notif_block;
	struct hif_softc *hif_handle;
	bool bus_id_invalid;

	if (!data || !block)
		return -EINVAL;

	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);

	hif_handle = notif_block->priv_data;
	if (!hif_handle)
		return -EINVAL;

	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
					  &notif_data->offset);
	if (bus_id_invalid)
		return NOTIFY_STOP_MASK;

	hif_log_ce_info(hif_handle, notif_data->hang_data,
			&notif_data->offset);

	return 0;
}

/**
 * hif_register_recovery_notifier - Register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier;

	if (!hif_handle)
		return QDF_STATUS_E_FAILURE;

	hif_notifier = &hif_handle->hif_recovery_notifier;

	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
	hif_notifier->priv_data = hif_handle;
	return qdf_hang_event_register_notifier(hif_notifier);
}

/**
 * hif_unregister_recovery_notifier - Un-register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;

	return qdf_hang_event_unregister_notifier(hif_notifier);
}
#else
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * __hif_cpu_hotplug_notify() - CPU hotplug event handler
 * @context: HIF context
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void __hif_cpu_hotplug_notify(void *context,
				     uint32_t cpu, bool cpu_up)
{
	struct hif_softc *scn = context;

	if (!scn)
		return;
	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
		return;

	if (cpu_up) {
		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
		hif_debug("Setting affinity for online CPU: %d", cpu);
	} else {
		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
	}
}

/**
 * hif_cpu_hotplug_notify - cpu core up/down notification
 * handler
 * @context: HIF context
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}

static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}

static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}

static void hif_cpuhp_register(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot register hotplug notifiers");
		return;
	}
	qdf_cpuhp_register(&scn->cpuhp_event_handle,
			   scn,
			   hif_cpu_online_cb,
			   hif_cpu_before_offline_cb);
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot unregister hotplug notifiers");
		return;
	}
	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
}

#else
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */

#ifdef HIF_DETECTION_LATENCY_ENABLE

void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t ce2_tasklet_sched_time =
		scn->latency_detect.ce2_tasklet_sched_time;
	qdf_time_t ce2_tasklet_exec_time =
		scn->latency_detect.ce2_tasklet_exec_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/* Two kinds of check here.
	 * from_timer==true:  check if the tasklet has stalled
	 * from_timer==false: check if the tasklet executed late
	 */

	if ((from_timer ?
	    qdf_system_time_after(ce2_tasklet_sched_time,
				  ce2_tasklet_exec_time) :
	    qdf_system_time_after(ce2_tasklet_exec_time,
				  ce2_tasklet_sched_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		ce2_tasklet_sched_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu, ce2_tasklet_exec_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, ce2_tasklet_sched_time,
			ce2_tasklet_exec_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}

void hif_credit_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t credit_request_time =
		scn->latency_detect.credit_request_time;
	qdf_time_t credit_report_time =
		scn->latency_detect.credit_report_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/* Two kinds of check here.
	 * from_timer==true:  check if the credit report has stalled
	 * from_timer==false: check if the credit report came late
	 */

	if ((from_timer ?
	    qdf_system_time_after(credit_request_time,
				  credit_report_time) :
	    qdf_system_time_after(credit_report_time,
				  credit_request_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		credit_request_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("credit report latency: from_timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, credit_request_time,
			credit_report_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}

/**
 * hif_check_detection_latency(): check for tasklet/credit latency
 * @scn: hif context
 * @from_timer: true if called from the timer handler
 * @bitmap_type: bitmap indicating whether to check tasklet, credit or both
 *
 * Return: none
 */
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (!scn->latency_detect.enable_detection)
		return;

	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
		hif_tasklet_latency(scn, from_timer);

	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
		hif_credit_latency(scn, from_timer);
}

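/*
 * Example (illustrative only): the two styles of call site differ only
 * in the from_timer flag. The periodic detect timer polls for stalls,
 * while inline callers check for late completion at the moment an
 * event is recorded, as hif_latency_detect_credit_record_time() does
 * below.
 *
 *	// from the detect timer handler:
 *	hif_check_detection_latency(scn, true,
 *				    BIT(HIF_DETECT_TASKLET) |
 *				    BIT(HIF_DETECT_CREDIT));
 *	// inline, right after recording a credit report:
 *	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
 */
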
static void hif_latency_detect_timeout_handler(void *arg)
{
	struct hif_softc *scn = (struct hif_softc *)arg;
	int next_cpu;

	hif_check_detection_latency(scn, true,
				    BIT(HIF_DETECT_TASKLET) |
				    BIT(HIF_DETECT_CREDIT));

	/* The timer must be restarted on a different cpu so it can
	 * detect a tasklet schedule stall on the current one. There is
	 * still a chance that, after the timer has been started, an
	 * irq/tasklet lands on the same cpu and the tasklet executes
	 * before the softirq timer; if that tasklet stalls, this timer
	 * cannot detect it. We accept this as a limitation: a stalled
	 * tasklet will be detected elsewhere anyway, just a little
	 * later.
	 */
	next_cpu = cpumask_any_but(
			cpu_active_mask,
			scn->latency_detect.ce2_tasklet_sched_cpuid);

	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
		hif_debug("start timer on local");
		/* no available cpu was found, start on the local cpu */
		qdf_timer_mod(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	} else {
		qdf_timer_start_on(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout,
			next_cpu);
	}
}

static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.detect_latency_timer_timeout =
		DETECTION_TIMER_TIMEOUT;
	scn->latency_detect.detect_latency_threshold =
		DETECTION_LATENCY_THRESHOLD;

	hif_info("timer timeout %u, latency threshold %u",
		 scn->latency_detect.detect_latency_timer_timeout,
		 scn->latency_detect.detect_latency_threshold);

	scn->latency_detect.is_timer_started = false;

	qdf_timer_init(NULL,
		       &scn->latency_detect.detect_latency_timer,
		       &hif_latency_detect_timeout_handler,
		       scn,
		       QDF_TIMER_TYPE_SW_SPIN);
}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info("deinit timer");
	qdf_timer_free(&scn->latency_detect.detect_latency_timer);
}

void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("start timer");
	if (scn->latency_detect.is_timer_started) {
		hif_info("timer has been started");
		return;
	}

	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	scn->latency_detect.is_timer_started = true;
}

void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("stop timer");

	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
	scn->latency_detect.is_timer_started = false;
}

void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (HIF_REQUEST_CREDIT == type)
		scn->latency_detect.credit_request_time = qdf_system_ticks();
	else if (HIF_PROCESS_CREDIT_REPORT == type)
		scn->latency_detect.credit_report_time = qdf_system_ticks();

	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
}

void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.enable_detection = value;
}
#else
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{}
#endif

struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		hif_err("context size 0 not allowed");
		return NULL;
	}

	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn)
		return GET_HIF_OPAQUE_HDL(scn);

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);

	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type = bus_type;

	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
	hif_get_cfg_from_psoc(scn, psoc);

	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_bus_open error = %d, bus_type = %d",
			status, bus_type);
		qdf_mem_free(scn);
		scn = NULL;
		goto out;
	}

	hif_rtpm_lock_init(scn);

	hif_cpuhp_register(scn);
	hif_latency_detect_timer_init(scn);

out:
	return GET_HIF_OPAQUE_HDL(scn);
}
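
/*
 * Example (illustrative sketch, not a definitive bring-up sequence): a
 * PCI attach path pairing hif_open() with hif_enable(), assuming the
 * caller has already populated cbk, psoc and the bus arguments.
 *
 *	struct hif_opaque_softc *hif_ctx;
 *
 *	hif_ctx = hif_open(qdf_ctx, QDF_GLOBAL_MISSION_MODE,
 *			   QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (!hif_ctx)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	if (QDF_IS_STATUS_ERROR(hif_enable(hif_ctx, dev, bdev, bid,
 *					   QDF_BUS_TYPE_PCI,
 *					   HIF_ENABLE_TYPE_PROBE))) {
 *		hif_close(hif_ctx);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 */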

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					(CE_COUNT * sizeof(uint32_t)),
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);
	scn->vaddr_rri_on_ddr = NULL;
}
#endif

/**
 * hif_close(): close and free the hif context
 * @hif_ctx: hif context to free
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_opaque_softc is NULL");
		return;
	}

	hif_latency_detect_timer_deinit(scn);

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cleanup_static_buf_to_target(scn);
	hif_cpuhp_unregister(scn);
	hif_rtpm_lock_deinit(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}

/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 *		datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Return: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
	defined(QCA_WIFI_QCA9574))
/**
 * hif_get_num_pending_work() - get the number of entries in
 *		the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Return: the number of delayed register write work items still pending
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
#else

static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
#endif

QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint32_t work_drain_wait_cnt = 0;
	uint32_t wait_cnt = 0;
	int work = 0;

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);

	while ((work = hif_get_num_pending_work(scn))) {
		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
			qdf_atomic_set(&scn->dp_ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			qdf_atomic_set(&scn->ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			hif_err("timeout waiting for pending work %d", work);
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(10);
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return QDF_STATUS_SUCCESS;

	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
			hif_err("Release EP vote not processed by FW");
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(5);
	}

	return QDF_STATUS_SUCCESS;
}

void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint8_t vote_access;

	vote_access = qdf_atomic_read(&scn->ep_vote_access);

	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
		hif_info("EP vote changed from %u to intermediate state",
			 vote_access);

	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
		QDF_BUG(0);

	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
}

void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
}

void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (type == HIF_EP_VOTE_DP_ACCESS)
		qdf_atomic_set(&scn->dp_ep_vote_access, access);
	else
		qdf_atomic_set(&scn->ep_vote_access, access);
}

uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (type == HIF_EP_VOTE_DP_ACCESS)
		return qdf_atomic_read(&scn->dp_ep_vote_access);
	else
		return qdf_atomic_read(&scn->ep_vote_access);
}
#endif

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
	defined(QCA_WIFI_QCA9574))
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		scn->hal_soc = hal_attach(
					hif_softc_to_hif_opaque_softc(scn),
					scn->qdf_dev);
		if (!scn->hal_soc)
			return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		hal_detach(scn->hal_soc);
		scn->hal_soc = NULL;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
#endif

int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
{
	int ret;

	switch (bus_type) {
	case QDF_BUS_TYPE_IPCI:
		ret = qdf_set_dma_coherent_mask(dev,
						DMA_COHERENT_MASK_DEFAULT);
		if (ret) {
			hif_err("Failed to set dma mask error = %d", ret);
			return ret;
		}

		break;
	default:
		/* Follow the existing sequence for other targets */
		break;
	}

	return 0;
}

/**
 * hif_enable(): hif_enable
 * @hif_ctx: hif_ctx
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev,
		      const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_ctx = NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_enable_bus error = %d", status);
		return status;
	}

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hal attach failed");
		goto disable_bus;
	}

	if (hif_bus_configure(scn)) {
		hif_err("Target probe failed");
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);
	hif_register_recovery_notifier(scn);
	hif_latency_detect_timer_start(hif_ctx);

	/*
	 * Flag to avoid a potential unallocated memory access from the
	 * MSI interrupt handler, which could get scheduled as soon as
	 * MSI is enabled. It takes care of the race where MSI is
	 * enabled before the memory used by the interrupt handlers is
	 * allocated.
	 */

	scn->hif_init_done = true;

	hif_debug("OK");

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}

void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_set_enable_detection(hif_ctx, false);
	hif_latency_detect_timer_stop(hif_ctx);

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	hif_debug("X");
}

#ifdef CE_TASKLET_DEBUG_ENABLE
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	scn->ce_latency_stats = val;
}
#endif

void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}

qdf_export_symbol(hif_display_stats);

void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}

/**
 * hif_crash_shutdown_dump_bus_register() - dump bus registers
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)

static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		hif_err("Failed to dump bus registers!");
}

/**
 * hif_crash_shutdown(): hif_crash_shutdown
 *
 * This function is called by the platform driver to dump CE registers
 *
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_ctx)
		return;

	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
		return;
	}

	if (TARGET_STATUS_RESET == scn->target_status) {
		hif_warn("Target is already asserted, ignore!");
		return;
	}

	if (hif_is_load_or_unload_in_progress(scn)) {
		hif_err("Load/unload is in progress, ignore!");
		return;
	}

	hif_crash_shutdown_dump_bus_register(hif_ctx);
	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);

	if (ol_copy_ramdump(hif_ctx))
		goto out;

	hif_info("RAM dump collection completed!");

out:
	return;
}
#else
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	hif_debug("Collecting target RAM dump disabled");
}
#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */

#ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg(): check fw registers
 * @scn: scn
 *
 * Return: int
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
#endif

/**
 * hif_read_phy_mem_base(): hif_read_phy_mem_base
 * @scn: scn
 * @phy_mem_base: physical mem base
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}
qdf_export_symbol(hif_read_phy_mem_base);

/**
 * hif_get_device_type(): map device and revision IDs to hif/target types
 * @device_id: device_id
 * @revision_id: revision_id
 * @hif_type: returned hif_type
 * @target_type: returned target_type
 *
 * Return: 0 on success, -ENODEV for an unsupported device
 */
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type)
{
	int ret = 0;

	switch (device_id) {
	case ADRASTEA_DEVICE_ID_P2_E12:

		*hif_type = HIF_TYPE_ADRASTEA;
		*target_type = TARGET_TYPE_ADRASTEA;
		break;

	case AR9888_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		break;

	case AR6320_DEVICE_ID:
		switch (revision_id) {
		case AR6320_FW_1_1:
		case AR6320_FW_1_3:
			*hif_type = HIF_TYPE_AR6320;
			*target_type = TARGET_TYPE_AR6320;
			break;

		case AR6320_FW_2_0:
		case AR6320_FW_3_0:
		case AR6320_FW_3_2:
			*hif_type = HIF_TYPE_AR6320V2;
			*target_type = TARGET_TYPE_AR6320V2;
			break;

		default:
			hif_err("dev_id = 0x%x, rev_id = 0x%x",
				device_id, revision_id);
			ret = -ENODEV;
			goto end;
		}
		break;

	case AR9887_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		hif_info(" *********** AR9887 **************");
		break;

	case QCA9984_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9984;
		*target_type = TARGET_TYPE_QCA9984;
		hif_info(" *********** QCA9984 *************");
		break;

	case QCA9888_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9888;
		*target_type = TARGET_TYPE_QCA9888;
		hif_info(" *********** QCA9888 *************");
		break;

	case AR900B_DEVICE_ID:
		*hif_type = HIF_TYPE_AR900B;
		*target_type = TARGET_TYPE_AR900B;
		hif_info(" *********** AR900B *************");
		break;

	case QCA8074_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074;
		*target_type = TARGET_TYPE_QCA8074;
		hif_info(" *********** QCA8074  *************");
		break;

	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6290;
		*target_type = TARGET_TYPE_QCA6290;
		hif_info(" *********** QCA6290EMU *************");
		break;

	case QCN9000_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9000;
		*target_type = TARGET_TYPE_QCN9000;
		hif_info(" *********** QCN9000 *************");
		break;

	case QCN9224_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9224;
		*target_type = TARGET_TYPE_QCN9224;
		hif_info(" *********** QCN9224 *************");
		break;

	case QCN6122_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN6122;
		*target_type = TARGET_TYPE_QCN6122;
		hif_info(" *********** QCN6122 *************");
		break;

	case QCN7605_DEVICE_ID:
	case QCN7605_COMPOSITE:
	case QCN7605_STANDALONE:
	case QCN7605_STANDALONE_V2:
	case QCN7605_COMPOSITE_V2:
		*hif_type = HIF_TYPE_QCN7605;
		*target_type = TARGET_TYPE_QCN7605;
		hif_info(" *********** QCN7605 *************");
		break;

	case QCA6390_DEVICE_ID:
	case QCA6390_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6390;
		*target_type = TARGET_TYPE_QCA6390;
		hif_info(" *********** QCA6390 *************");
		break;

	case QCA6490_DEVICE_ID:
	case QCA6490_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6490;
		*target_type = TARGET_TYPE_QCA6490;
		hif_info(" *********** QCA6490 *************");
		break;

	case QCA6750_DEVICE_ID:
	case QCA6750_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6750;
		*target_type = TARGET_TYPE_QCA6750;
		hif_info(" *********** QCA6750 *************");
		break;

	case KIWI_DEVICE_ID:
		*hif_type = HIF_TYPE_KIWI;
		*target_type = TARGET_TYPE_KIWI;
		hif_info(" *********** KIWI *************");
		break;

	case MANGO_DEVICE_ID:
		*hif_type = HIF_TYPE_MANGO;
		*target_type = TARGET_TYPE_MANGO;
		hif_info(" *********** MANGO *************");
		break;

	case QCA8074V2_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074V2;
		*target_type = TARGET_TYPE_QCA8074V2;
		hif_info(" *********** QCA8074V2 *************");
		break;

	case QCA6018_DEVICE_ID:
	case RUMIM2M_DEVICE_ID_NODE0:
	case RUMIM2M_DEVICE_ID_NODE1:
	case RUMIM2M_DEVICE_ID_NODE2:
	case RUMIM2M_DEVICE_ID_NODE3:
	case RUMIM2M_DEVICE_ID_NODE4:
	case RUMIM2M_DEVICE_ID_NODE5:
		*hif_type = HIF_TYPE_QCA6018;
		*target_type = TARGET_TYPE_QCA6018;
		hif_info(" *********** QCA6018 *************");
		break;

	case QCA5018_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5018;
		*target_type = TARGET_TYPE_QCA5018;
		hif_info(" *********** qca5018 *************");
		break;

	case QCA9574_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9574;
		*target_type = TARGET_TYPE_QCA9574;
		hif_info(" *********** QCA9574 *************");
		break;

	default:
		hif_err("Unsupported device ID = 0x%x!", device_id);
		ret = -ENODEV;
		break;
	}

	if (*target_type == TARGET_TYPE_UNKNOWN) {
		hif_err("Unsupported target_type!");
		ret = -ENODEV;
	}
end:
	return ret;
}

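/*
 * Example (illustrative only): decoding the IDs reported by a PCI
 * probe; revision_id here stands for the value read from config space.
 *
 *	uint32_t hif_type, target_type;
 *
 *	if (hif_get_device_type(QCA6490_DEVICE_ID, revision_id,
 *				&hif_type, &target_type))
 *		return -ENODEV;
 *	// hif_type == HIF_TYPE_QCA6490, target_type == TARGET_TYPE_QCA6490
 */
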
/**
 * hif_get_bus_type() - return the bus type
 * @hif_hdl: HIF Context
 *
 * Return: enum qdf_bus_type
 */
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	return scn->bus_type;
}

1694  * Target info and ini parameters are global to the driver
1695  * Hence these structures are exposed to all the modules in
1696  * the driver and they don't need to maintains multiple copies
1697  * of the same info, instead get the handle from hif and
1698  * modify them in hif
1699  */
1700 
1701 /**
1702  * hif_get_ini_handle() - API to get hif_config_param handle
1703  * @hif_ctx: HIF Context
1704  *
1705  * Return: pointer to hif_config_info
1706  */
1707 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
1708 {
1709 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1710 
1711 	return &sc->hif_config;
1712 }
1713 
1714 /**
1715  * hif_get_target_info_handle() - API to get hif_target_info handle
1716  * @hif_ctx: HIF context
1717  *
1718  * Return: Pointer to hif_target_info
1719  */
1720 struct hif_target_info *hif_get_target_info_handle(
1721 					struct hif_opaque_softc *hif_ctx)
1722 {
1723 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1724 
1725 	return &sc->target_info;
1726 
1727 }
1728 qdf_export_symbol(hif_get_target_info_handle);
1729 
#ifdef RECEIVE_OFFLOAD
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
	else
		hif_err("NAPI not enabled");
}
qdf_export_symbol(hif_offld_flush_cb_register);

void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_deregister(scn);
	else
		hif_err("NAPI not enabled");
}
qdf_export_symbol(hif_offld_flush_cb_deregister);

int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return NAPI_PIPE2ID(ctx_id);
	else
		return ctx_id;
}
#else /* RECEIVE_OFFLOAD */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
#endif /* RECEIVE_OFFLOAD */

#if defined(FEATURE_LRO)

/**
 * hif_get_lro_info - Returns LRO instance for instance ID
 * @ctx_id: LRO instance ID
 * @hif_hdl: HIF Context
 *
 * Return: Pointer to LRO instance.
 */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	void *data;

	if (hif_napi_enabled(hif_hdl, -1))
		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
	else
		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);

	return data;
}
#endif

/**
 * hif_get_target_status - API to get target status
 * @hif_ctx: HIF Context
 *
 * Return: enum hif_target_status
 */
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->target_status;
}
qdf_export_symbol(hif_get_target_status);

/**
 * hif_set_target_status() - API to set target status
 * @hif_ctx: HIF Context
 * @status: Target Status
 *
 * Return: void
 */
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status status)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->target_status = status;
}

/**
 * hif_init_ini_config() - API to initialize HIF configuration parameters
 * @hif_ctx: HIF Context
 * @cfg: HIF Configuration
 *
 * Return: void
 */
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
}

/**
 * hif_get_conparam() - API to get driver mode in HIF
 * @scn: HIF Context
 *
 * Return: driver mode of operation
 */
uint32_t hif_get_conparam(struct hif_softc *scn)
{
	if (!scn)
		return 0;

	return scn->hif_con_param;
}

/**
 * hif_get_callbacks_handle() - API to get callbacks Handle
 * @scn: HIF Context
 *
 * Return: pointer to HIF Callbacks
 */
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn)
{
	return &scn->callbacks;
}

/**
 * hif_is_driver_unloading() - API to query upper layers if driver is unloading
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_driver_unloading(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_driver_unloading)
		return cbk->is_driver_unloading(cbk->context);

	return false;
}

/**
 * hif_is_load_or_unload_in_progress() - API to query upper layers if
 * load/unload is in progress
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_load_unload_in_progress)
		return cbk->is_load_unload_in_progress(cbk->context);

	return false;
}

/**
 * hif_is_recovery_in_progress() - API to query upper layers if recovery is in
 * progress
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_recovery_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_recovery_in_progress)
		return cbk->is_recovery_in_progress(cbk->context);

	return false;
}

#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
    defined(HIF_IPCI)

/**
 * hif_update_pipe_callback() - API to register pipe specific callbacks
 * @osc: Opaque softc
 * @pipeid: pipe id
 * @callbacks: callbacks to register
 *
 * Return: void
 */

void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	QDF_BUG(pipeid < CE_COUNT_MAX);

	hif_debug("pipeid: %d", pipeid);

	pipe_info = &hif_state->pipe_info[pipeid];

	qdf_mem_copy(&pipe_info->pipe_callbacks,
		     callbacks, sizeof(pipe_info->pipe_callbacks));
}
qdf_export_symbol(hif_update_pipe_callback);

/**
 * hif_is_target_ready() - API to query if target is in ready state
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_target_ready(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_target_ready)
		return cbk->is_target_ready(cbk->context);
	/*
	 * If the callback is not registered then there is no way to
	 * determine if the target is ready. In such a case return true
	 * to indicate that the target is ready.
	 */
	return true;
}
qdf_export_symbol(hif_is_target_ready);

int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->get_bandwidth_level)
		return cbk->get_bandwidth_level(cbk->context);

	return 0;
}

qdf_export_symbol(hif_get_bandwidth_level);

1973 #ifdef DP_MEM_PRE_ALLOC
1974 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
1975 					 qdf_size_t size,
1976 					 qdf_dma_addr_t *paddr,
1977 					 uint32_t ring_type,
1978 					 uint8_t *is_mem_prealloc)
1979 {
1980 	void *vaddr = NULL;
1981 	struct hif_driver_state_callbacks *cbk =
1982 				hif_get_callbacks_handle(scn);
1983 
1984 	*is_mem_prealloc = false;
1985 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
1986 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
1987 								   paddr,
1988 								   ring_type);
1989 		if (vaddr) {
1990 			*is_mem_prealloc = true;
1991 			goto end;
1992 		}
1993 	}
1994 
1995 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
1996 					 scn->qdf_dev->dev,
1997 					 size,
1998 					 paddr);
1999 end:
2000 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
2001 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
2002 		(void *)*paddr, (int)size, ring_type);
2003 
2004 	return vaddr;
2005 }
2006 
2007 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
2008 				       qdf_size_t size,
2009 				       void *vaddr,
2010 				       qdf_dma_addr_t paddr,
2011 				       qdf_dma_context_t memctx,
2012 				       uint8_t is_mem_prealloc)
2013 {
2014 	struct hif_driver_state_callbacks *cbk =
2015 				hif_get_callbacks_handle(scn);
2016 
2017 	if (is_mem_prealloc) {
2018 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
2019 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
2020 		} else {
			dp_warn("prealloc_put_consistent_mem_unaligned cb is NULL");
2022 			QDF_BUG(0);
2023 		}
2024 	} else {
2025 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2026 					size, vaddr, paddr, memctx);
2027 	}
2028 }
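
/*
 * Example (illustrative): a ring allocation pairs the two wrappers and
 * carries the is_mem_prealloc flag from alloc to free so the buffer is
 * returned to the right pool. @memctx stands in for the caller's DMA
 * memory context:
 *
 *	uint8_t pre = 0;
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = hif_mem_alloc_consistent_unaligned(scn, size, &paddr,
 *						   ring_type, &pre);
 *	...
 *	hif_mem_free_consistent_unaligned(scn, size, vaddr, paddr,
 *					  memctx, pre);
 */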
#endif /* DP_MEM_PRE_ALLOC */
2030 
2031 /**
2032  * hif_batch_send() - API to access hif specific function
2033  * ce_batch_send.
2034  * @osc: HIF Context
2035  * @msdu : list of msdus to be sent
2036  * @transfer_id : transfer id
 * @len : downloaded length
 * @sendhead : send-head indication, passed through to ce_batch_send
 *
 * Return: list of msdus not sent
2040  */
2041 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2042 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
2043 {
2044 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2045 
2046 	if (!ce_tx_hdl)
2047 		return NULL;
2048 
2049 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2050 			len, sendhead);
2051 }
2052 qdf_export_symbol(hif_batch_send);
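
/*
 * Example (illustrative): any msdus that ce_batch_send could not queue
 * are handed back and remain the caller's responsibility:
 *
 *	qdf_nbuf_t unsent;
 *
 *	unsent = hif_batch_send(osc, msdu_list, transfer_id, len,
 *				sendhead);
 *	if (unsent)
 *		... requeue or drop the remaining msdus ...
 */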
2053 
2054 /**
2055  * hif_update_tx_ring() - API to access hif specific function
2056  * ce_update_tx_ring.
2057  * @osc: HIF Context
 * @num_htt_cmpls : number of HTT completions received
2059  *
2060  * Return: void
2061  */
2062 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2063 {
2064 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2065 
2066 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2067 }
2068 qdf_export_symbol(hif_update_tx_ring);
2069 
2071 /**
2072  * hif_send_single() - API to access hif specific function
2073  * ce_send_single.
2074  * @osc: HIF Context
2075  * @msdu : msdu to be sent
2076  * @transfer_id: transfer id
2077  * @len : downloaded length
2078  *
2079  * Return: msdu sent status
2080  */
2081 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2082 			   uint32_t transfer_id, u_int32_t len)
2083 {
2084 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2085 
2086 	if (!ce_tx_hdl)
2087 		return QDF_STATUS_E_NULL_VALUE;
2088 
2089 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2090 			len);
2091 }
2092 qdf_export_symbol(hif_send_single);
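
/*
 * Example (illustrative): checking the send status; how a failed msdu
 * is handled (requeue vs. free) follows the caller's ownership rules:
 *
 *	QDF_STATUS status;
 *
 *	status = hif_send_single(osc, msdu, transfer_id, len);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		... handle the unsent msdu ...
 */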
#endif /* HIF_PCI || HIF_SNOC || HIF_AHB || HIF_IPCI */
2094 
2095 /**
2096  * hif_reg_write() - API to access hif specific function
2097  * hif_write32_mb.
2098  * @hif_ctx : HIF Context
2099  * @offset : offset on which value has to be written
2100  * @value : value to be written
2101  *
2102  * Return: None
2103  */
2104 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2105 		uint32_t value)
2106 {
2107 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2108 
2109 	hif_write32_mb(scn, scn->mem + offset, value);
2111 }
2112 qdf_export_symbol(hif_reg_write);
2113 
2114 /**
2115  * hif_reg_read() - API to access hif specific function
2116  * hif_read32_mb.
2117  * @hif_ctx : HIF Context
2118  * @offset : offset from which value has to be read
2119  *
2120  * Return: Read value
2121  */
2122 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
2123 {
2125 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2126 
2127 	return hif_read32_mb(scn, scn->mem + offset);
2128 }
2129 qdf_export_symbol(hif_reg_read);
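
/*
 * Example (illustrative): the two accessors compose into a
 * read-modify-write, here setting bit 0 of a register at @offset:
 *
 *	uint32_t val = hif_reg_read(hif_ctx, offset);
 *
 *	hif_reg_write(hif_ctx, offset, val | 0x1);
 */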
2130 
2131 /**
2132  * hif_ramdump_handler(): generic ramdump handler
 * @scn: HIF opaque context (struct hif_opaque_softc)
2134  *
2135  * Return: None
2136  */
2137 void hif_ramdump_handler(struct hif_opaque_softc *scn)
2138 {
2139 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2140 		hif_usb_ramdump_handler(scn);
2141 }
2142 
2143 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
2144 {
2145 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2146 
2147 	return scn->wake_irq_type;
2148 }
2149 
2150 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
2151 {
2152 	struct hif_softc *scn = context;
2153 
2154 	hif_info("wake interrupt received on irq %d", irq);
2155 
2156 	hif_rtpm_set_monitor_wake_intr(0);
2157 	hif_rtpm_request_resume();
2158 
2159 	if (scn->initial_wakeup_cb)
2160 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
2161 
2162 	if (hif_is_ut_suspended(scn))
2163 		hif_ut_fw_resume(scn);
2164 
2165 	qdf_pm_system_wakeup();
2166 
2167 	return IRQ_HANDLED;
2168 }
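
/*
 * Example (illustrative, bus-specific details vary): the bus layer is
 * expected to register this handler for its designated wake IRQ, along
 * the lines of:
 *
 *	ret = request_irq(wake_irq, hif_wake_interrupt_handler,
 *			  IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
 */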
2169 
2170 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2171 			       void (*callback)(void *),
2172 			       void *priv)
2173 {
2174 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2175 
2176 	scn->initial_wakeup_cb = callback;
2177 	scn->initial_wakeup_priv = priv;
2178 }
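
/*
 * Example (illustrative): a caller hooks the first wake interrupt,
 * where my_wakeup_cb()/my_priv are caller-defined:
 *
 *	hif_set_initial_wakeup_cb(hif_ctx, my_wakeup_cb, my_priv);
 */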
2179 
2180 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2181 				       uint32_t ce_service_max_yield_time)
2182 {
2183 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2184 
2185 	hif_ctx->ce_service_max_yield_time =
2186 		ce_service_max_yield_time * 1000;
2187 }
2188 
2189 unsigned long long
2190 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
2191 {
2192 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2193 
2194 	return hif_ctx->ce_service_max_yield_time;
2195 }
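
/*
 * Example (illustrative): the setter stores the caller's value
 * multiplied by 1000, so a configured value of 10 reads back as 10000:
 *
 *	hif_set_ce_service_max_yield_time(hif, 10);
 *	yield = hif_get_ce_service_max_yield_time(hif);	 // 10000
 */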
2196 
2197 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2198 				       uint8_t ce_service_max_rx_ind_flush)
2199 {
2200 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2201 
2202 	if (ce_service_max_rx_ind_flush == 0 ||
2203 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
2204 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2205 	else
2206 		hif_ctx->ce_service_max_rx_ind_flush =
2207 						ce_service_max_rx_ind_flush;
2208 }
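
/*
 * Example: assuming MSG_FLUSH_NUM were 32, requests of 0 or 100 would
 * both be clamped to 32, while 16 would be stored unchanged.
 */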
2209 
2210 #ifdef SYSTEM_PM_CHECK
2211 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2212 			       enum hif_system_pm_state state)
2213 {
2214 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2215 
2216 	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
2217 }
2218 
2219 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
2220 {
2221 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2222 
2223 	return qdf_atomic_read(&hif_ctx->sys_pm_state);
2224 }
2225 
2226 int hif_system_pm_state_check(struct hif_opaque_softc *hif)
2227 {
2228 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2229 	int32_t sys_pm_state;
2230 
2231 	if (!hif_ctx) {
2232 		hif_err("hif context is null");
2233 		return -EFAULT;
2234 	}
2235 
2236 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
2237 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
2238 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
2239 		hif_info("Triggering system wakeup");
2240 		qdf_pm_system_wakeup();
2241 		return -EAGAIN;
2242 	}
2243 
2244 	return 0;
2245 }
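
/*
 * Example (illustrative): bus access paths can gate register traffic
 * on the system PM state; -EAGAIN means a wakeup was triggered and the
 * access should be retried after resume:
 *
 *	if (hif_system_pm_state_check(hif) == -EAGAIN)
 *		return -EAGAIN;
 */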
#endif /* SYSTEM_PM_CHECK */
2247