xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision ec13c2ea397bc11464d596ff86f4da9fdc154da8)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "targcfg.h"
21 #include "qdf_lock.h"
22 #include "qdf_status.h"
23 #include "qdf_status.h"
24 #include <qdf_atomic.h>         /* qdf_atomic_read */
25 #include <targaddrs.h>
26 #include "hif_io32.h"
27 #include <hif.h>
28 #include <target_type.h>
29 #include "regtable.h"
30 #define ATH_MODULE_NAME hif
31 #include <a_debug.h>
32 #include "hif_main.h"
33 #include "hif_hw_version.h"
34 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
35      defined(HIF_IPCI))
36 #include "ce_tasklet.h"
37 #include "ce_api.h"
38 #endif
39 #include "qdf_trace.h"
40 #include "qdf_status.h"
41 #include "hif_debug.h"
42 #include "mp_dev.h"
43 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
44 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
45 #include "hal_api.h"
46 #endif
47 #include "hif_napi.h"
48 #include "hif_unit_test_suspend_i.h"
49 #include "qdf_module.h"
50 #ifdef HIF_CE_LOG_INFO
51 #include <qdf_notifier.h>
52 #include <qdf_hang_event_notifier.h>
53 #endif
54 #include <linux/cpumask.h>
55 
56 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
57 #include <pld_common.h>
58 #endif
59 
/**
 * hif_dump(): trigger a hif dump command
 * @hif_ctx: opaque hif context
 * @cmd_id: id of the dump command
 * @start: true to start the dump, false to stop it
 *
 * Thin wrapper forwarding to the bus-specific hif_trigger_dump().
 *
 * Return: n/a
 */
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}
64 
/**
 * hif_get_target_id(): hif_get_target_id
 * @scn: hif_softc
 *
 * Return the virtual memory base address (scn->mem) to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}
78 
/**
 * hif_get_targetdef(): get the target definition handle
 * @hif_ctx: opaque hif context
 *
 * Return: void * - the targetdef stored in the underlying hif_softc
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}
91 
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase(): propagate SRNG init-phase state to HAL
 * @hif_ctx: opaque hif context
 * @init_phase: true while SRNG initialization is in progress
 *
 * Only applies to CE-SRNG based targets; no-op otherwise.
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn))
		hal_set_init_phase(scn->hal_soc, init_phase);
}
#endif /* FORCE_WAKE */
102 
#ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb(): shutdown notifier callback
 * @hif_ctx: opaque hif context (passed as void * by the notifier chain)
 *
 * Marks hif as being in recovery so in-flight paths can bail out early.
 */
void hif_shutdown_notifier_cb(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->recovery = true;
}
#endif
111 
112 /**
113  * hif_vote_link_down(): unvote for link up
114  *
115  * Call hif_vote_link_down to release a previous request made using
116  * hif_vote_link_up. A hif_vote_link_down call should only be made
117  * after a corresponding hif_vote_link_up, otherwise you could be
118  * negating a vote from another source. When no votes are present
119  * hif will not guarantee the linkstate after hif_bus_suspend.
120  *
121  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
122  * and initialization deinitialization sequencences.
123  *
124  * Return: n/a
125  */
126 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
127 {
128 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
129 
130 	QDF_BUG(scn);
131 	if (scn->linkstate_vote == 0)
132 		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
133 				scn->linkstate_vote);
134 
135 	scn->linkstate_vote--;
136 	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
137 	if (scn->linkstate_vote == 0)
138 		hif_bus_prevent_linkdown(scn, false);
139 }
140 
141 /**
142  * hif_vote_link_up(): vote to prevent bus from suspending
143  *
144  * Makes hif guarantee that fw can message the host normally
145  * durring suspend.
146  *
147  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
148  * and initialization deinitialization sequencences.
149  *
150  * Return: n/a
151  */
152 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
153 {
154 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
155 
156 	QDF_BUG(scn);
157 	scn->linkstate_vote++;
158 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
159 	if (scn->linkstate_vote == 1)
160 		hif_bus_prevent_linkdown(scn, true);
161 }
162 
163 /**
164  * hif_can_suspend_link(): query if hif is permitted to suspend the link
165  *
166  * Hif will ensure that the link won't be suspended if the upperlayers
167  * don't want it to.
168  *
169  * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
170  * we don't need extra locking to ensure votes dont change while
171  * we are in the process of suspending or resuming.
172  *
173  * Return: false if hif will guarantee link up durring suspend.
174  */
175 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
176 {
177 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
178 
179 	QDF_BUG(scn);
180 	return scn->linkstate_vote == 0;
181 }
182 
183 /**
184  * hif_hia_item_address(): hif_hia_item_address
185  * @target_type: target_type
186  * @item_offset: item_offset
187  *
188  * Return: n/a
189  */
190 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
191 {
192 	switch (target_type) {
193 	case TARGET_TYPE_AR6002:
194 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
195 	case TARGET_TYPE_AR6003:
196 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
197 	case TARGET_TYPE_AR6004:
198 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
199 	case TARGET_TYPE_AR6006:
200 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
201 	case TARGET_TYPE_AR9888:
202 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
203 	case TARGET_TYPE_AR6320:
204 	case TARGET_TYPE_AR6320V2:
205 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
206 	case TARGET_TYPE_ADRASTEA:
207 		/* ADRASTEA doesn't have a host interest address */
208 		ASSERT(0);
209 		return 0;
210 	case TARGET_TYPE_AR900B:
211 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
212 	case TARGET_TYPE_QCA9984:
213 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
214 	case TARGET_TYPE_QCA9888:
215 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
216 
217 	default:
218 		ASSERT(0);
219 		return 0;
220 	}
221 }
222 
223 /**
224  * hif_max_num_receives_reached() - check max receive is reached
225  * @scn: HIF Context
226  * @count: unsigned int.
227  *
228  * Output check status as bool
229  *
230  * Return: bool
231  */
232 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
233 {
234 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
235 		return count > 120;
236 	else
237 		return count > MAX_NUM_OF_RECEIVES;
238 }
239 
/**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum buffer count requested
 *
 * Routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed.
 * Currently an identity function (returns the request unchanged).
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}
253 
254 /**
255  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
256  * @hif_ctx: hif context
257  * @htc_htt_tx_endpoint: htt_tx_endpoint
258  *
259  * Return: void
260  */
261 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
262 							int htc_htt_tx_endpoint)
263 {
264 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
265 
266 	if (!scn) {
267 		hif_err("scn or scn->hif_sc is NULL!");
268 		return;
269 	}
270 
271 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
272 }
273 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
274 
/*
 * Table mapping (hw version id, sub id) pairs to human readable hardware
 * names; consulted by hif_get_hw_name() before falling back to a generated
 * "HW_VERSION=%x." string.
 */
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	},
	{
		.id = KIWI_V1,
		.subid = 0xE,
		.name = "KIWI_V1",
	},
	{
		.id = MANGO_V1,
		.subid = 0xF,
		.name = "MANGO_V1",
	}
};
372 
373 /**
374  * hif_get_hw_name(): get a human readable name for the hardware
375  * @info: Target Info
376  *
377  * Return: human readable name for the underlying wifi hardware.
378  */
379 static const char *hif_get_hw_name(struct hif_target_info *info)
380 {
381 	int i;
382 
383 	if (info->hw_name)
384 		return info->hw_name;
385 
386 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
387 		if (info->target_version == qwlan_hw_list[i].id &&
388 		    info->target_revision == qwlan_hw_list[i].subid) {
389 			return qwlan_hw_list[i].name;
390 		}
391 	}
392 
393 	info->hw_name = qdf_mem_malloc(64);
394 	if (!info->hw_name)
395 		return "Unknown Device (nomem)";
396 
397 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
398 			info->target_version);
399 	if (i < 0)
400 		return "Unknown Device (snprintf failure)";
401 	else
402 		return info->hw_name;
403 }
404 
/**
 * hif_get_hw_info(): query hardware version/revision/name
 * @scn: opaque hif context
 * @version: out - target version
 * @revision: out - target revision
 * @target_name: out - human readable hardware name (see hif_get_hw_name())
 *
 * For USB targets the info is (re)read from the device first.
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
			const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
426 
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: opaque hif handle
 *
 * Return: device memory base address (scn->mem)
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);
442 
/**
 * hif_get_dev_ba_ce(): API to get device ce base address.
 * @hif_handle: opaque hif handle
 *
 * Return: dev mem base address for CE (scn->mem_ce)
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_ce;
}

qdf_export_symbol(hif_get_dev_ba_ce);
457 
#ifdef FEATURE_RUNTIME_PM
/**
 * hif_runtime_prevent_linkdown() - take or release the runtime PM
 *  prevent-linkdown vote
 * @scn: hif context
 * @is_get: true to prevent runtime suspend, false to allow it again
 */
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
	if (is_get)
		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
}

/* initialize the runtime lock used by hif_runtime_prevent_linkdown() */
static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
}

/* free the runtime lock used by hif_runtime_prevent_linkdown() */
static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
}
#else
/* no-op stubs when runtime PM support is compiled out */
static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
}
#endif
489 
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle (may be NULL; defaults are then left untouched)
 *
 * Populates the CE status ring timer/batch-count thresholds from the
 * psoc INI configuration.
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.ce_status_ring_timer_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
		scn->ini_cfg.ce_status_ring_batch_count_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
	}
}
#else
/* no-op when CE interrupt threshold configuration is compiled out */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
518 
519 #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
520 /**
521  * hif_recovery_notifier_cb - Recovery notifier callback to log
522  *  hang event data
523  * @block: notifier block
524  * @state: state
525  * @data: notifier data
526  *
527  * Return: status
528  */
529 static
530 int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
531 			     void *data)
532 {
533 	struct qdf_notifer_data *notif_data = data;
534 	qdf_notif_block *notif_block;
535 	struct hif_softc *hif_handle;
536 	bool bus_id_invalid;
537 
538 	if (!data || !block)
539 		return -EINVAL;
540 
541 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
542 
543 	hif_handle = notif_block->priv_data;
544 	if (!hif_handle)
545 		return -EINVAL;
546 
547 	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
548 					  &notif_data->offset);
549 	if (bus_id_invalid)
550 		return NOTIFY_STOP_MASK;
551 
552 	hif_log_ce_info(hif_handle, notif_data->hang_data,
553 			&notif_data->offset);
554 
555 	return 0;
556 }
557 
558 /**
559  * hif_register_recovery_notifier - Register hif recovery notifier
560  * @hif_handle: hif handle
561  *
562  * Return: status
563  */
564 static
565 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
566 {
567 	qdf_notif_block *hif_notifier;
568 
569 	if (!hif_handle)
570 		return QDF_STATUS_E_FAILURE;
571 
572 	hif_notifier = &hif_handle->hif_recovery_notifier;
573 
574 	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
575 	hif_notifier->priv_data = hif_handle;
576 	return qdf_hang_event_register_notifier(hif_notifier);
577 }
578 
579 /**
580  * hif_unregister_recovery_notifier - Un-register hif recovery notifier
581  * @hif_handle: hif handle
582  *
583  * Return: status
584  */
585 static
586 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
587 {
588 	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
589 
590 	return qdf_hang_event_unregister_notifier(hif_notifier);
591 }
#else
/* no-op stubs when hang-event logging is compiled out */
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
605 
#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * __hif_cpu_hotplug_notify() - CPU hotplug event handler
 * @context: hif context (struct hif_softc *)
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Re-applies the perf IRQ affinity hint when a CPU comes online; skipped
 * during driver unload or recovery.
 *
 * Return: None
 */
static void __hif_cpu_hotplug_notify(void *context,
				     uint32_t cpu, bool cpu_up)
{
	struct hif_softc *scn = context;

	if (!scn)
		return;
	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
		return;

	if (cpu_up) {
		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
		hif_debug("Setting affinity for online CPU: %d", cpu);
	} else {
		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
	}
}
631 
/**
 * hif_cpu_hotplug_notify - cpu core up/down notification
 * handler
 * @context: hif context (struct hif_softc *)
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Wraps __hif_cpu_hotplug_notify() in a qdf_op_protect/unprotect pair so
 * the handler does not race with driver unload.
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}
651 
/* qdf_cpuhp callback: a CPU has come online */
static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}

/* qdf_cpuhp callback: a CPU is about to go offline */
static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}
661 
/* register the CPU hotplug callbacks above with the qdf cpuhp framework */
static void hif_cpuhp_register(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot register hotplug notifiers");
		return;
	}
	qdf_cpuhp_register(&scn->cpuhp_event_handle,
			   scn,
			   hif_cpu_online_cb,
			   hif_cpu_before_offline_cb);
}

/* unregister the CPU hotplug callbacks registered in hif_cpuhp_register() */
static void hif_cpuhp_unregister(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot unregister hotplug notifiers");
		return;
	}
	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
}
682 
#else
/* no-op stubs when perf CPU affinity support is compiled out */
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
692 
693 #ifdef HIF_DETECTION_LATENCY_ENABLE
694 
/**
 * hif_tasklet_latency() - detect ce2 tasklet scheduling/execution latency
 * @scn: hif context
 * @from_timer: true when invoked from the detection timer, false when
 *              invoked from the tasklet path
 *
 * Triggers self recovery when the ce2 tasklet has been pending for more
 * than detect_latency_threshold ms.
 */
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t ce2_tasklet_sched_time =
		scn->latency_detect.ce2_tasklet_sched_time;
	qdf_time_t ce2_tasklet_exec_time =
		scn->latency_detect.ce2_tasklet_exec_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/* 2 kinds of check here.
	 * from_timer==true:  check if tasklet stall
	 * from_timer==false: check tasklet execute comes late
	 *
	 * Either way, latency is flagged only if the pending state has
	 * outlived the threshold relative to the schedule timestamp.
	 */

	if ((from_timer ?
	    qdf_system_time_after(ce2_tasklet_sched_time,
				  ce2_tasklet_exec_time) :
	    qdf_system_time_after(ce2_tasklet_exec_time,
				  ce2_tasklet_sched_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		ce2_tasklet_sched_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu,ce2_tasklet_exec_time %lu, detect_latency_threshold %ums detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, ce2_tasklet_sched_time,
			ce2_tasklet_exec_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}
732 
/**
 * hif_credit_latency() - detect credit-report latency
 * @scn: hif context
 * @from_timer: true when invoked from the detection timer, false when
 *              invoked from the credit-report path
 *
 * Triggers self recovery when a credit report has been outstanding for
 * more than detect_latency_threshold ms after the request.
 */
void hif_credit_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t credit_request_time =
		scn->latency_detect.credit_request_time;
	qdf_time_t credit_report_time =
		scn->latency_detect.credit_report_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/* 2 kinds of check here.
	 * from_timer==true:  check if credit report stall
	 * from_timer==false: check credit report comes late
	 *
	 * Latency is flagged only if the outstanding request has outlived
	 * the threshold.
	 */

	if ((from_timer ?
	    qdf_system_time_after(credit_request_time,
				  credit_report_time) :
	    qdf_system_time_after(credit_report_time,
				  credit_request_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		credit_request_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu,credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, credit_request_time,
			credit_report_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}
770 
771 /**
772  * hif_check_detection_latency(): to check if latency for tasklet/credit
773  *
774  * @scn: hif context
775  * @from_timer: if called from timer handler
776  * @bitmap_type: indicate if check tasklet or credit
777  *
778  * Return: none
779  */
780 void hif_check_detection_latency(struct hif_softc *scn,
781 				 bool from_timer,
782 				 uint32_t bitmap_type)
783 {
784 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
785 		return;
786 
787 	if (!scn->latency_detect.enable_detection)
788 		return;
789 
790 	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
791 		hif_tasklet_latency(scn, from_timer);
792 
793 	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
794 		hif_credit_latency(scn, from_timer);
795 }
796 
/* timer callback: run both latency checks, then re-arm on another CPU */
static void hif_latency_detect_timeout_handler(void *arg)
{
	struct hif_softc *scn = (struct hif_softc *)arg;
	int next_cpu;

	hif_check_detection_latency(scn, true,
				    BIT(HIF_DETECT_TASKLET) |
				    BIT(HIF_DETECT_CREDIT));

	/* The timer needs to restart on a different cpu so it can detect a
	 * tasklet schedule stall on the current one. There is still a chance
	 * that, after the timer has been started, the irq/tasklet lands on
	 * that same cpu; the tasklet then executes before the softirq timer,
	 * and if that tasklet stalls this timer cannot detect it. Accept this
	 * as a limitation: a stalled tasklet is detected elsewhere anyway,
	 * just a little later.
	 */
	next_cpu = cpumask_any_but(
			cpu_active_mask,
			scn->latency_detect.ce2_tasklet_sched_cpuid);

	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
		hif_debug("start timer on local");
		/* no other cpu is available, start on the local cpu */
		qdf_timer_mod(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	} else {
		qdf_timer_start_on(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout,
			next_cpu);
	}
}
832 
/* initialize (but do not start) the latency detection timer; mission mode
 * only. Thresholds come from DETECTION_TIMER_TIMEOUT and
 * DETECTION_LATENCY_THRESHOLD.
 */
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.detect_latency_timer_timeout =
		DETECTION_TIMER_TIMEOUT;
	scn->latency_detect.detect_latency_threshold =
		DETECTION_LATENCY_THRESHOLD;

	hif_info("timer timeout %u, latency threshold %u",
		 scn->latency_detect.detect_latency_timer_timeout,
		 scn->latency_detect.detect_latency_threshold);

	scn->latency_detect.is_timer_started = false;

	qdf_timer_init(NULL,
		       &scn->latency_detect.detect_latency_timer,
		       &hif_latency_detect_timeout_handler,
		       scn,
		       QDF_TIMER_TYPE_SW_SPIN);
}
860 
/* free the latency detection timer initialized above; mission mode only */
static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info("deinit timer");
	qdf_timer_free(&scn->latency_detect.detect_latency_timer);
}
869 
/**
 * hif_latency_detect_timer_start() - start the latency detection timer
 * @hif_ctx: opaque hif context
 *
 * Mission mode only; a no-op if the timer is already running.
 */
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("start timer");
	if (scn->latency_detect.is_timer_started) {
		hif_info("timer has been started");
		return;
	}

	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	scn->latency_detect.is_timer_started = true;
}
887 
/**
 * hif_latency_detect_timer_stop() - stop the latency detection timer
 * @hif_ctx: opaque hif context
 *
 * Mission mode only; synchronously cancels the timer.
 */
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("stop timer");

	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
	scn->latency_detect.is_timer_started = false;
}
900 
901 void hif_latency_detect_credit_record_time(
902 	enum hif_credit_exchange_type type,
903 	struct hif_opaque_softc *hif_ctx)
904 {
905 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
906 
907 	if (!scn) {
908 		hif_err("Could not do runtime put, scn is null");
909 		return;
910 	}
911 
912 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
913 		return;
914 
915 	if (HIF_REQUEST_CREDIT == type)
916 		scn->latency_detect.credit_request_time = qdf_system_ticks();
917 	else if (HIF_PROCESS_CREDIT_REPORT == type)
918 		scn->latency_detect.credit_report_time = qdf_system_ticks();
919 
920 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
921 }
922 
923 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
924 {
925 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
926 
927 	if (!scn) {
928 		hif_err("Could not do runtime put, scn is null");
929 		return;
930 	}
931 
932 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
933 		return;
934 
935 	scn->latency_detect.enable_detection = value;
936 }
#else
/* no-op stubs when latency detection is compiled out */
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{}
#endif
/**
 * hif_open() - allocate and initialize a hif context
 * @qdf_ctx: qdf device context
 * @mode: configuration mode (conparam)
 * @bus_type: underlying bus type; also determines the context size
 * @cbk: driver state callbacks, copied into the context
 * @psoc: psoc objmgr handle used to fetch ini configuration
 *
 * Return: opaque hif handle on success, NULL on allocation or bus-open
 *         failure
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	/* the bus-specific softc embeds hif_softc as its first member */
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		hif_err("context size 0 not allowed");
		return NULL;
	}

	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn)
		return GET_HIF_OPAQUE_HDL(scn);

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);

	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type  = bus_type;

	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
	hif_get_cfg_from_psoc(scn, psoc);

	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_bus_open error = %d, bus_type = %d",
			status, bus_type);
		qdf_mem_free(scn);
		scn = NULL;
		goto out;
	}

	hif_rtpm_lock_init(scn);

	hif_cpuhp_register(scn);
	hif_latency_detect_timer_init(scn);

out:
	return GET_HIF_OPAQUE_HDL(scn);
}
996 
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Safe to call when no rri memory was allocated; clears the cached
 * virtual address either way.
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					(CE_COUNT * sizeof(uint32_t)),
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);
	scn->vaddr_rri_on_ddr = NULL;
}
#endif
1014 
/**
 * hif_close(): tear down and free a hif context created by hif_open()
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_opaque_softc is NULL");
		return;
	}

	hif_latency_detect_timer_deinit(scn);

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		/* swap in a static marker before freeing so a racing reader
		 * never dereferences the freed buffer
		 */
		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cleanup_static_buf_to_target(scn);
	hif_cpuhp_unregister(scn);
	hif_rtpm_lock_deinit(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}
1053 
/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 *		datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}
1065 
#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
	defined(QCA_WIFI_QCA9574))
/**
 * hif_get_num_pending_work() - get the number of entries in
 *		the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of HAL delayed register-write work entries pending
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
#else
/* targets without HAL delayed register writes never have pending work */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
#endif
1090 
/**
 * hif_try_complete_tasks() - wait for active tasklets and pending work
 *  to drain
 * @scn: HIF context
 *
 * Polls in 10 ms steps until no CE tasklets, group tasklets or delayed
 * register-write work remain, giving up after HIF_TASK_DRAIN_WAIT_CNT
 * iterations.
 *
 * Return: QDF_STATUS_SUCCESS when fully drained, QDF_STATUS_E_FAULT on
 *         timeout
 */
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
1111 
1112 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1113 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1114 {
1115 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1116 	uint32_t work_drain_wait_cnt = 0;
1117 	uint32_t wait_cnt = 0;
1118 	int work = 0;
1119 
1120 	qdf_atomic_set(&scn->dp_ep_vote_access,
1121 		       HIF_EP_VOTE_ACCESS_DISABLE);
1122 	qdf_atomic_set(&scn->ep_vote_access,
1123 		       HIF_EP_VOTE_ACCESS_DISABLE);
1124 
1125 	while ((work = hif_get_num_pending_work(scn))) {
1126 		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1127 			qdf_atomic_set(&scn->dp_ep_vote_access,
1128 				       HIF_EP_VOTE_ACCESS_ENABLE);
1129 			qdf_atomic_set(&scn->ep_vote_access,
1130 				       HIF_EP_VOTE_ACCESS_ENABLE);
1131 			hif_err("timeout wait for pending work %d ", work);
1132 			return QDF_STATUS_E_FAULT;
1133 		}
1134 		qdf_sleep(10);
1135 	}
1136 
1137 	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
1138 	return QDF_STATUS_SUCCESS;
1139 
1140 	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1141 		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
1142 			hif_err("Release EP vote is not proceed by Fw");
1143 			return QDF_STATUS_E_FAULT;
1144 		}
1145 		qdf_sleep(5);
1146 	}
1147 
1148 	return QDF_STATUS_SUCCESS;
1149 }
1150 
1151 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1152 {
1153 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1154 	uint8_t vote_access;
1155 
1156 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1157 
1158 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1159 		hif_info("EP vote changed from:%u to intermediate state",
1160 			 vote_access);
1161 
1162 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1163 		QDF_BUG(0);
1164 
1165 	qdf_atomic_set(&scn->ep_vote_access,
1166 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1167 }
1168 
1169 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1170 {
1171 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1172 
1173 	qdf_atomic_set(&scn->dp_ep_vote_access,
1174 		       HIF_EP_VOTE_ACCESS_ENABLE);
1175 	qdf_atomic_set(&scn->ep_vote_access,
1176 		       HIF_EP_VOTE_ACCESS_ENABLE);
1177 }
1178 
1179 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1180 			    uint8_t type, uint8_t access)
1181 {
1182 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1183 
1184 	if (type == HIF_EP_VOTE_DP_ACCESS)
1185 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1186 	else
1187 		qdf_atomic_set(&scn->ep_vote_access, access);
1188 }
1189 
1190 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1191 			       uint8_t type)
1192 {
1193 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1194 
1195 	if (type == HIF_EP_VOTE_DP_ACCESS)
1196 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1197 	else
1198 		return qdf_atomic_read(&scn->ep_vote_access);
1199 }
1200 #endif
1201 
1202 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1203 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1204 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1205 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1206 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1207 	defined(QCA_WIFI_QCA9574))
1208 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1209 {
1210 	if (ce_srng_based(scn)) {
1211 		scn->hal_soc = hal_attach(
1212 					hif_softc_to_hif_opaque_softc(scn),
1213 					scn->qdf_dev);
1214 		if (!scn->hal_soc)
1215 			return QDF_STATUS_E_FAILURE;
1216 	}
1217 
1218 	return QDF_STATUS_SUCCESS;
1219 }
1220 
1221 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1222 {
1223 	if (ce_srng_based(scn)) {
1224 		hal_detach(scn->hal_soc);
1225 		scn->hal_soc = NULL;
1226 	}
1227 
1228 	return QDF_STATUS_SUCCESS;
1229 }
1230 #else
/* No SRNG-capable target compiled in: nothing to attach. */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
1235 
/* No SRNG-capable target compiled in: nothing to detach. */
static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
1240 #endif
1241 
1242 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1243 {
1244 	int ret;
1245 
1246 	switch (bus_type) {
1247 	case QDF_BUS_TYPE_IPCI:
1248 		ret = qdf_set_dma_coherent_mask(dev,
1249 						DMA_COHERENT_MASK_DEFAULT);
1250 		if (ret) {
1251 			hif_err("Failed to set dma mask error = %d", ret);
1252 			return ret;
1253 		}
1254 
1255 		break;
1256 	default:
1257 		/* Follow the existing sequence for other targets */
1258 		break;
1259 	}
1260 
1261 	return 0;
1262 }
1263 
1264 /**
1265  * hif_enable(): hif_enable
1266  * @hif_ctx: hif_ctx
1267  * @dev: dev
1268  * @bdev: bus dev
1269  * @bid: bus ID
1270  * @bus_type: bus type
1271  * @type: enable type
1272  *
1273  * Return: QDF_STATUS
1274  */
1275 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1276 					  void *bdev,
1277 					  const struct hif_bus_id *bid,
1278 					  enum qdf_bus_type bus_type,
1279 					  enum hif_enable_type type)
1280 {
1281 	QDF_STATUS status;
1282 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1283 
1284 	if (!scn) {
1285 		hif_err("hif_ctx = NULL");
1286 		return QDF_STATUS_E_NULL_VALUE;
1287 	}
1288 
1289 	status = hif_enable_bus(scn, dev, bdev, bid, type);
1290 	if (status != QDF_STATUS_SUCCESS) {
1291 		hif_err("hif_enable_bus error = %d", status);
1292 		return status;
1293 	}
1294 
1295 	status = hif_hal_attach(scn);
1296 	if (status != QDF_STATUS_SUCCESS) {
1297 		hif_err("hal attach failed");
1298 		goto disable_bus;
1299 	}
1300 
1301 	if (hif_bus_configure(scn)) {
1302 		hif_err("Target probe failed");
1303 		status = QDF_STATUS_E_FAILURE;
1304 		goto hal_detach;
1305 	}
1306 
1307 	hif_ut_suspend_init(scn);
1308 	hif_register_recovery_notifier(scn);
1309 	hif_latency_detect_timer_start(hif_ctx);
1310 
1311 	/*
1312 	 * Flag to avoid potential unallocated memory access from MSI
1313 	 * interrupt handler which could get scheduled as soon as MSI
1314 	 * is enabled, i.e to take care of the race due to the order
1315 	 * in where MSI is enabled before the memory, that will be
1316 	 * in interrupt handlers, is allocated.
1317 	 */
1318 
1319 	scn->hif_init_done = true;
1320 
1321 	hif_debug("OK");
1322 
1323 	return QDF_STATUS_SUCCESS;
1324 
1325 hal_detach:
1326 	hif_hal_detach(scn);
1327 disable_bus:
1328 	hif_disable_bus(scn);
1329 	return status;
1330 }
1331 
/**
 * hif_disable(): tear down the HIF layer (reverse of hif_enable)
 * @hif_ctx: HIF opaque context
 * @type: disable type
 *
 * Stops latency detection, unregisters the recovery notifier, quiesces
 * interrupts, stops or shuts down the device depending on how far init
 * progressed, then detaches the HAL and disables the bus.
 *
 * Return: n/a
 */
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_set_enable_detection(hif_ctx, false);
	hif_latency_detect_timer_stop(hif_ctx);

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	/* if init never completed, use the harsher shutdown path */
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	hif_debug("X");
}
1360 
1361 #ifdef CE_TASKLET_DEBUG_ENABLE
1362 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
1363 {
1364 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1365 
1366 	if (!scn)
1367 		return;
1368 
1369 	scn->ce_latency_stats = val;
1370 }
1371 #endif
1372 
/**
 * hif_display_stats() - display HIF statistics
 * @hif_ctx: HIF opaque context
 *
 * Thin wrapper that forwards to the bus-specific stats dump.
 *
 * Return: none
 */
void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}

qdf_export_symbol(hif_display_stats);
1379 
/**
 * hif_clear_stats() - clear HIF statistics
 * @hif_ctx: HIF opaque context
 *
 * Thin wrapper that forwards to the bus-specific stats clear.
 *
 * Return: none
 */
void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
1384 
1385 /**
1386  * hif_crash_shutdown_dump_bus_register() - dump bus registers
1387  * @hif_ctx: hif_ctx
1388  *
1389  * Return: n/a
1390  */
1391 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
1392 
static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	/* skip the dump when the SoC is not accessible */
	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		hif_err("Failed to dump bus registers!");
}
1403 
1404 /**
1405  * hif_crash_shutdown(): hif_crash_shutdown
1406  *
1407  * This function is called by the platform driver to dump CE registers
1408  *
1409  * @hif_ctx: hif_ctx
1410  *
1411  * Return: n/a
1412  */
1413 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
1414 {
1415 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1416 
1417 	if (!hif_ctx)
1418 		return;
1419 
1420 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
1421 		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
1422 		return;
1423 	}
1424 
1425 	if (TARGET_STATUS_RESET == scn->target_status) {
1426 		hif_warn("Target is already asserted, ignore!");
1427 		return;
1428 	}
1429 
1430 	if (hif_is_load_or_unload_in_progress(scn)) {
1431 		hif_err("Load/unload is in progress, ignore!");
1432 		return;
1433 	}
1434 
1435 	hif_crash_shutdown_dump_bus_register(hif_ctx);
1436 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
1437 
1438 	if (ol_copy_ramdump(hif_ctx))
1439 		goto out;
1440 
1441 	hif_info("RAM dump collecting completed!");
1442 
1443 out:
1444 	return;
1445 }
1446 #else
/* Ramdump collection on crash is compiled out in this configuration */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	hif_debug("Collecting target RAM dump disabled");
}
1451 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
1452 
1453 #ifdef QCA_WIFI_3_0
1454 /**
1455  * hif_check_fw_reg(): hif_check_fw_reg
1456  * @scn: scn
1457  * @state:
1458  *
1459  * Return: int
1460  */
1461 int hif_check_fw_reg(struct hif_opaque_softc *scn)
1462 {
1463 	return 0;
1464 }
1465 #endif
1466 
1467 /**
1468  * hif_read_phy_mem_base(): hif_read_phy_mem_base
1469  * @scn: scn
1470  * @phy_mem_base: physical mem base
1471  *
1472  * Return: n/a
1473  */
1474 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
1475 {
1476 	*phy_mem_base = scn->mem_pa;
1477 }
1478 qdf_export_symbol(hif_read_phy_mem_base);
1479 
1480 /**
1481  * hif_get_device_type(): hif_get_device_type
1482  * @device_id: device_id
1483  * @revision_id: revision_id
1484  * @hif_type: returned hif_type
1485  * @target_type: returned target_type
1486  *
1487  * Return: int
1488  */
1489 int hif_get_device_type(uint32_t device_id,
1490 			uint32_t revision_id,
1491 			uint32_t *hif_type, uint32_t *target_type)
1492 {
1493 	int ret = 0;
1494 
1495 	switch (device_id) {
1496 	case ADRASTEA_DEVICE_ID_P2_E12:
1497 
1498 		*hif_type = HIF_TYPE_ADRASTEA;
1499 		*target_type = TARGET_TYPE_ADRASTEA;
1500 		break;
1501 
1502 	case AR9888_DEVICE_ID:
1503 		*hif_type = HIF_TYPE_AR9888;
1504 		*target_type = TARGET_TYPE_AR9888;
1505 		break;
1506 
1507 	case AR6320_DEVICE_ID:
1508 		switch (revision_id) {
1509 		case AR6320_FW_1_1:
1510 		case AR6320_FW_1_3:
1511 			*hif_type = HIF_TYPE_AR6320;
1512 			*target_type = TARGET_TYPE_AR6320;
1513 			break;
1514 
1515 		case AR6320_FW_2_0:
1516 		case AR6320_FW_3_0:
1517 		case AR6320_FW_3_2:
1518 			*hif_type = HIF_TYPE_AR6320V2;
1519 			*target_type = TARGET_TYPE_AR6320V2;
1520 			break;
1521 
1522 		default:
1523 			hif_err("dev_id = 0x%x, rev_id = 0x%x",
1524 				device_id, revision_id);
1525 			ret = -ENODEV;
1526 			goto end;
1527 		}
1528 		break;
1529 
1530 	case AR9887_DEVICE_ID:
1531 		*hif_type = HIF_TYPE_AR9888;
1532 		*target_type = TARGET_TYPE_AR9888;
1533 		hif_info(" *********** AR9887 **************");
1534 		break;
1535 
1536 	case QCA9984_DEVICE_ID:
1537 		*hif_type = HIF_TYPE_QCA9984;
1538 		*target_type = TARGET_TYPE_QCA9984;
1539 		hif_info(" *********** QCA9984 *************");
1540 		break;
1541 
1542 	case QCA9888_DEVICE_ID:
1543 		*hif_type = HIF_TYPE_QCA9888;
1544 		*target_type = TARGET_TYPE_QCA9888;
1545 		hif_info(" *********** QCA9888 *************");
1546 		break;
1547 
1548 	case AR900B_DEVICE_ID:
1549 		*hif_type = HIF_TYPE_AR900B;
1550 		*target_type = TARGET_TYPE_AR900B;
1551 		hif_info(" *********** AR900B *************");
1552 		break;
1553 
1554 	case QCA8074_DEVICE_ID:
1555 		*hif_type = HIF_TYPE_QCA8074;
1556 		*target_type = TARGET_TYPE_QCA8074;
1557 		hif_info(" *********** QCA8074  *************");
1558 		break;
1559 
1560 	case QCA6290_EMULATION_DEVICE_ID:
1561 	case QCA6290_DEVICE_ID:
1562 		*hif_type = HIF_TYPE_QCA6290;
1563 		*target_type = TARGET_TYPE_QCA6290;
1564 		hif_info(" *********** QCA6290EMU *************");
1565 		break;
1566 
1567 	case QCN9000_DEVICE_ID:
1568 		*hif_type = HIF_TYPE_QCN9000;
1569 		*target_type = TARGET_TYPE_QCN9000;
1570 		hif_info(" *********** QCN9000 *************");
1571 		break;
1572 
1573 	case QCN9224_DEVICE_ID:
1574 		*hif_type = HIF_TYPE_QCN9224;
1575 		*target_type = TARGET_TYPE_QCN9224;
1576 		hif_info(" *********** QCN9224 *************");
1577 		break;
1578 
1579 	case QCN6122_DEVICE_ID:
1580 		*hif_type = HIF_TYPE_QCN6122;
1581 		*target_type = TARGET_TYPE_QCN6122;
1582 		hif_info(" *********** QCN6122 *************");
1583 		break;
1584 
1585 	case QCN7605_DEVICE_ID:
1586 	case QCN7605_COMPOSITE:
1587 	case QCN7605_STANDALONE:
1588 	case QCN7605_STANDALONE_V2:
1589 	case QCN7605_COMPOSITE_V2:
1590 		*hif_type = HIF_TYPE_QCN7605;
1591 		*target_type = TARGET_TYPE_QCN7605;
1592 		hif_info(" *********** QCN7605 *************");
1593 		break;
1594 
1595 	case QCA6390_DEVICE_ID:
1596 	case QCA6390_EMULATION_DEVICE_ID:
1597 		*hif_type = HIF_TYPE_QCA6390;
1598 		*target_type = TARGET_TYPE_QCA6390;
1599 		hif_info(" *********** QCA6390 *************");
1600 		break;
1601 
1602 	case QCA6490_DEVICE_ID:
1603 	case QCA6490_EMULATION_DEVICE_ID:
1604 		*hif_type = HIF_TYPE_QCA6490;
1605 		*target_type = TARGET_TYPE_QCA6490;
1606 		hif_info(" *********** QCA6490 *************");
1607 		break;
1608 
1609 	case QCA6750_DEVICE_ID:
1610 	case QCA6750_EMULATION_DEVICE_ID:
1611 		*hif_type = HIF_TYPE_QCA6750;
1612 		*target_type = TARGET_TYPE_QCA6750;
1613 		hif_info(" *********** QCA6750 *************");
1614 		break;
1615 
1616 	case KIWI_DEVICE_ID:
1617 		*hif_type = HIF_TYPE_KIWI;
1618 		*target_type = TARGET_TYPE_KIWI;
1619 		hif_info(" *********** KIWI *************");
1620 		break;
1621 
1622 	case MANGO_DEVICE_ID:
1623 		*hif_type = HIF_TYPE_MANGO;
1624 		*target_type = TARGET_TYPE_MANGO;
1625 		hif_info(" *********** MANGO *************");
1626 		break;
1627 
1628 	case QCA8074V2_DEVICE_ID:
1629 		*hif_type = HIF_TYPE_QCA8074V2;
1630 		*target_type = TARGET_TYPE_QCA8074V2;
1631 		hif_info(" *********** QCA8074V2 *************");
1632 		break;
1633 
1634 	case QCA6018_DEVICE_ID:
1635 	case RUMIM2M_DEVICE_ID_NODE0:
1636 	case RUMIM2M_DEVICE_ID_NODE1:
1637 	case RUMIM2M_DEVICE_ID_NODE2:
1638 	case RUMIM2M_DEVICE_ID_NODE3:
1639 	case RUMIM2M_DEVICE_ID_NODE4:
1640 	case RUMIM2M_DEVICE_ID_NODE5:
1641 		*hif_type = HIF_TYPE_QCA6018;
1642 		*target_type = TARGET_TYPE_QCA6018;
1643 		hif_info(" *********** QCA6018 *************");
1644 		break;
1645 
1646 	case QCA5018_DEVICE_ID:
1647 		*hif_type = HIF_TYPE_QCA5018;
1648 		*target_type = TARGET_TYPE_QCA5018;
1649 		hif_info(" *********** qca5018 *************");
1650 		break;
1651 
1652 	case QCA9574_DEVICE_ID:
1653 		*hif_type = HIF_TYPE_QCA9574;
1654 		*target_type = TARGET_TYPE_QCA9574;
1655 		hif_info(" *********** QCA9574 *************");
1656 		break;
1657 
1658 	default:
1659 		hif_err("Unsupported device ID = 0x%x!", device_id);
1660 		ret = -ENODEV;
1661 		break;
1662 	}
1663 
1664 	if (*target_type == TARGET_TYPE_UNKNOWN) {
1665 		hif_err("Unsupported target_type!");
1666 		ret = -ENODEV;
1667 	}
1668 end:
1669 	return ret;
1670 }
1671 
1672 /**
1673  * hif_get_bus_type() - return the bus type
1674  *
1675  * Return: enum qdf_bus_type
1676  */
1677 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
1678 {
1679 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
1680 
1681 	return scn->bus_type;
1682 }
1683 
1684 /**
1685  * Target info and ini parameters are global to the driver
1686  * Hence these structures are exposed to all the modules in
1687  * the driver and they don't need to maintains multiple copies
1688  * of the same info, instead get the handle from hif and
1689  * modify them in hif
1690  */
1691 
1692 /**
1693  * hif_get_ini_handle() - API to get hif_config_param handle
1694  * @hif_ctx: HIF Context
1695  *
1696  * Return: pointer to hif_config_info
1697  */
1698 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
1699 {
1700 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1701 
1702 	return &sc->hif_config;
1703 }
1704 
1705 /**
1706  * hif_get_target_info_handle() - API to get hif_target_info handle
1707  * @hif_ctx: HIF context
1708  *
1709  * Return: Pointer to hif_target_info
1710  */
1711 struct hif_target_info *hif_get_target_info_handle(
1712 					struct hif_opaque_softc *hif_ctx)
1713 {
1714 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1715 
1716 	return &sc->target_info;
1717 
1718 }
1719 qdf_export_symbol(hif_get_target_info_handle);
1720 
1721 #ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - register the RX offload flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: callback invoked to flush offloaded RX
 *
 * Only takes effect when NAPI is enabled; logs an error otherwise.
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (!hif_napi_enabled(scn, -1)) {
		hif_err("NAPI not enabled");
		return;
	}

	hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
}
qdf_export_symbol(hif_offld_flush_cb_register);
1731 
/**
 * hif_offld_flush_cb_deregister() - deregister the RX offload flush callback
 * @scn: HIF opaque context
 *
 * Only takes effect when NAPI is enabled; logs an error otherwise.
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (!hif_napi_enabled(scn, -1)) {
		hif_err("NAPI not enabled");
		return;
	}

	hif_napi_rx_offld_flush_cb_deregister(scn);
}
qdf_export_symbol(hif_offld_flush_cb_deregister);
1740 
/* Map a CE pipe id to a NAPI id when NAPI is enabled; pass through
 * unchanged otherwise.
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return hif_napi_enabled(hif_hdl, -1) ? NAPI_PIPE2ID(ctx_id) : ctx_id;
}
1748 #else /* RECEIVE_OFFLOAD */
/* Without RECEIVE_OFFLOAD there is no NAPI pipe mapping; always 0. */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
1754 #endif /* RECEIVE_OFFLOAD */
1755 
1756 #if defined(FEATURE_LRO)
1757 
1758 /**
1759  * hif_get_lro_info - Returns LRO instance for instance ID
1760  * @ctx_id: LRO instance ID
1761  * @hif_hdl: HIF Context
1762  *
1763  * Return: Pointer to LRO instance.
1764  */
1765 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
1766 {
1767 	void *data;
1768 
1769 	if (hif_napi_enabled(hif_hdl, -1))
1770 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
1771 	else
1772 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
1773 
1774 	return data;
1775 }
1776 #endif
1777 
1778 /**
1779  * hif_get_target_status - API to get target status
1780  * @hif_ctx: HIF Context
1781  *
1782  * Return: enum hif_target_status
1783  */
1784 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
1785 {
1786 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1787 
1788 	return scn->target_status;
1789 }
1790 qdf_export_symbol(hif_get_target_status);
1791 
1792 /**
1793  * hif_set_target_status() - API to set target status
1794  * @hif_ctx: HIF Context
1795  * @status: Target Status
1796  *
1797  * Return: void
1798  */
1799 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1800 			   hif_target_status status)
1801 {
1802 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1803 
1804 	scn->target_status = status;
1805 }
1806 
1807 /**
1808  * hif_init_ini_config() - API to initialize HIF configuration parameters
1809  * @hif_ctx: HIF Context
1810  * @cfg: HIF Configuration
1811  *
1812  * Return: void
1813  */
1814 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1815 			 struct hif_config_info *cfg)
1816 {
1817 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1818 
1819 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1820 }
1821 
1822 /**
1823  * hif_get_conparam() - API to get driver mode in HIF
1824  * @scn: HIF Context
1825  *
1826  * Return: driver mode of operation
1827  */
1828 uint32_t hif_get_conparam(struct hif_softc *scn)
1829 {
1830 	if (!scn)
1831 		return 0;
1832 
1833 	return scn->hif_con_param;
1834 }
1835 
1836 /**
1837  * hif_get_callbacks_handle() - API to get callbacks Handle
1838  * @scn: HIF Context
1839  *
1840  * Return: pointer to HIF Callbacks
1841  */
1842 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
1843 							struct hif_softc *scn)
1844 {
1845 	return &scn->callbacks;
1846 }
1847 
1848 /**
1849  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1850  * @scn: HIF Context
1851  *
1852  * Return: True/False
1853  */
1854 bool hif_is_driver_unloading(struct hif_softc *scn)
1855 {
1856 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1857 
1858 	if (cbk && cbk->is_driver_unloading)
1859 		return cbk->is_driver_unloading(cbk->context);
1860 
1861 	return false;
1862 }
1863 
1864 /**
1865  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1866  * load/unload in progress
1867  * @scn: HIF Context
1868  *
1869  * Return: True/False
1870  */
1871 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1872 {
1873 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1874 
1875 	if (cbk && cbk->is_load_unload_in_progress)
1876 		return cbk->is_load_unload_in_progress(cbk->context);
1877 
1878 	return false;
1879 }
1880 
1881 /**
1882  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1883  * progress
1884  * @scn: HIF Context
1885  *
1886  * Return: True/False
1887  */
1888 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1889 {
1890 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1891 
1892 	if (cbk && cbk->is_recovery_in_progress)
1893 		return cbk->is_recovery_in_progress(cbk->context);
1894 
1895 	return false;
1896 }
1897 
1898 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
1899     defined(HIF_IPCI)
1900 
1901 /**
1902  * hif_update_pipe_callback() - API to register pipe specific callbacks
1903  * @osc: Opaque softc
1904  * @pipeid: pipe id
1905  * @callbacks: callbacks to register
1906  *
1907  * Return: void
1908  */
1909 
1910 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1911 					u_int8_t pipeid,
1912 					struct hif_msg_callbacks *callbacks)
1913 {
1914 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
1915 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1916 	struct HIF_CE_pipe_info *pipe_info;
1917 
1918 	QDF_BUG(pipeid < CE_COUNT_MAX);
1919 
1920 	hif_debug("pipeid: %d", pipeid);
1921 
1922 	pipe_info = &hif_state->pipe_info[pipeid];
1923 
1924 	qdf_mem_copy(&pipe_info->pipe_callbacks,
1925 			callbacks, sizeof(pipe_info->pipe_callbacks));
1926 }
1927 qdf_export_symbol(hif_update_pipe_callback);
1928 
1929 /**
1930  * hif_is_target_ready() - API to query if target is in ready state
1931  * progress
1932  * @scn: HIF Context
1933  *
1934  * Return: True/False
1935  */
1936 bool hif_is_target_ready(struct hif_softc *scn)
1937 {
1938 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1939 
1940 	if (cbk && cbk->is_target_ready)
1941 		return cbk->is_target_ready(cbk->context);
1942 	/*
1943 	 * if callback is not registered then there is no way to determine
1944 	 * if target is ready. In-such case return true to indicate that
1945 	 * target is ready.
1946 	 */
1947 	return true;
1948 }
1949 qdf_export_symbol(hif_is_target_ready);
1950 
1951 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
1952 {
1953 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1954 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1955 
1956 	if (cbk && cbk->get_bandwidth_level)
1957 		return cbk->get_bandwidth_level(cbk->context);
1958 
1959 	return 0;
1960 }
1961 
1962 qdf_export_symbol(hif_get_bandwidth_level);
1963 
1964 #ifdef DP_MEM_PRE_ALLOC
1965 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
1966 					 qdf_size_t size,
1967 					 qdf_dma_addr_t *paddr,
1968 					 uint32_t ring_type,
1969 					 uint8_t *is_mem_prealloc)
1970 {
1971 	void *vaddr = NULL;
1972 	struct hif_driver_state_callbacks *cbk =
1973 				hif_get_callbacks_handle(scn);
1974 
1975 	*is_mem_prealloc = false;
1976 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
1977 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
1978 								   paddr,
1979 								   ring_type);
1980 		if (vaddr) {
1981 			*is_mem_prealloc = true;
1982 			goto end;
1983 		}
1984 	}
1985 
1986 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
1987 					 scn->qdf_dev->dev,
1988 					 size,
1989 					 paddr);
1990 end:
1991 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
1992 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
1993 		(void *)*paddr, (int)size, ring_type);
1994 
1995 	return vaddr;
1996 }
1997 
1998 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
1999 				       qdf_size_t size,
2000 				       void *vaddr,
2001 				       qdf_dma_addr_t paddr,
2002 				       qdf_dma_context_t memctx,
2003 				       uint8_t is_mem_prealloc)
2004 {
2005 	struct hif_driver_state_callbacks *cbk =
2006 				hif_get_callbacks_handle(scn);
2007 
2008 	if (is_mem_prealloc) {
2009 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
2010 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
2011 		} else {
2012 			dp_warn("dp_prealloc_put_consistent_unligned NULL");
2013 			QDF_BUG(0);
2014 		}
2015 	} else {
2016 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2017 					size, vaddr, paddr, memctx);
2018 	}
2019 }
2020 #endif
2021 
2022 /**
2023  * hif_batch_send() - API to access hif specific function
2024  * ce_batch_send.
2025  * @osc: HIF Context
2026  * @msdu : list of msdus to be sent
2027  * @transfer_id : transfer id
2028  * @len : donwloaded length
2029  *
2030  * Return: list of msds not sent
2031  */
2032 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2033 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
2034 {
2035 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2036 
2037 	if (!ce_tx_hdl)
2038 		return NULL;
2039 
2040 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2041 			len, sendhead);
2042 }
2043 qdf_export_symbol(hif_batch_send);
2044 
2045 /**
2046  * hif_update_tx_ring() - API to access hif specific function
2047  * ce_update_tx_ring.
2048  * @osc: HIF Context
2049  * @num_htt_cmpls : number of htt compl received.
2050  *
2051  * Return: void
2052  */
2053 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2054 {
2055 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2056 
2057 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2058 }
2059 qdf_export_symbol(hif_update_tx_ring);
2060 
2061 
2062 /**
2063  * hif_send_single() - API to access hif specific function
2064  * ce_send_single.
2065  * @osc: HIF Context
2066  * @msdu : msdu to be sent
2067  * @transfer_id: transfer id
2068  * @len : downloaded length
2069  *
2070  * Return: msdu sent status
2071  */
2072 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2073 			   uint32_t transfer_id, u_int32_t len)
2074 {
2075 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2076 
2077 	if (!ce_tx_hdl)
2078 		return QDF_STATUS_E_NULL_VALUE;
2079 
2080 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2081 			len);
2082 }
2083 qdf_export_symbol(hif_send_single);
2084 #endif
2085 
2086 /**
2087  * hif_reg_write() - API to access hif specific function
2088  * hif_write32_mb.
2089  * @hif_ctx : HIF Context
2090  * @offset : offset on which value has to be written
2091  * @value : value to be written
2092  *
2093  * Return: None
2094  */
2095 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2096 		uint32_t value)
2097 {
2098 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2099 
2100 	hif_write32_mb(scn, scn->mem + offset, value);
2101 
2102 }
2103 qdf_export_symbol(hif_reg_write);
2104 
2105 /**
2106  * hif_reg_read() - API to access hif specific function
2107  * hif_read32_mb.
2108  * @hif_ctx : HIF Context
2109  * @offset : offset from which value has to be read
2110  *
2111  * Return: Read value
2112  */
2113 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
2114 {
2115 
2116 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2117 
2118 	return hif_read32_mb(scn, scn->mem + offset);
2119 }
2120 qdf_export_symbol(hif_reg_read);
2121 
2122 /**
2123  * hif_ramdump_handler(): generic ramdump handler
2124  * @scn: struct hif_opaque_softc
2125  *
2126  * Return: None
2127  */
2128 void hif_ramdump_handler(struct hif_opaque_softc *scn)
2129 {
2130 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2131 		hif_usb_ramdump_handler(scn);
2132 }
2133 
/**
 * hif_pm_get_wake_irq_type() - return the wake IRQ type recorded for
 *		this HIF instance
 * @hif_ctx: HIF opaque context
 *
 * Return: hif_pm_wake_irq_type
 */
hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->wake_irq_type;
}
2140 
/**
 * hif_wake_interrupt_handler() - handler for the wake IRQ
 * @irq: IRQ number that fired
 * @context: struct hif_softc pointer registered with the IRQ
 *
 * Requests a runtime-PM resume, invokes the registered initial wakeup
 * callback (if any), resumes a unit-test suspend when one is active and
 * wakes the system PM framework.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
{
	struct hif_softc *scn = context;

	hif_info("wake interrupt received on irq %d", irq);

	/* stop monitoring wake interrupts and kick off a resume */
	hif_rtpm_set_monitor_wake_intr(0);
	hif_rtpm_request_resume();

	if (scn->initial_wakeup_cb)
		scn->initial_wakeup_cb(scn->initial_wakeup_priv);

	if (hif_is_ut_suspended(scn))
		hif_ut_fw_resume(scn);

	qdf_pm_system_wakeup();

	return IRQ_HANDLED;
}
2160 
/**
 * hif_set_initial_wakeup_cb() - register the callback run on the first
 *		wake interrupt
 * @hif_ctx: HIF opaque context
 * @callback: function invoked from hif_wake_interrupt_handler()
 * @priv: opaque argument passed to @callback
 *
 * Return: none
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->initial_wakeup_cb = callback;
	scn->initial_wakeup_priv = priv;
}
2170 
/**
 * hif_set_ce_service_max_yield_time() - set the maximum CE service yield
 *		time
 * @hif: HIF opaque context
 * @ce_service_max_yield_time: maximum yield time; stored internally
 *		scaled by 1000 (presumably ms in, us stored — TODO confirm
 *		units against callers)
 *
 * Return: none
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	hif_ctx->ce_service_max_yield_time =
		ce_service_max_yield_time * 1000;
}
2179 
/**
 * hif_get_ce_service_max_yield_time() - read back the stored (scaled)
 *		maximum CE service yield time
 * @hif: HIF opaque context
 *
 * Return: value stored by hif_set_ce_service_max_yield_time()
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return hif_ctx->ce_service_max_yield_time;
}
2187 
2188 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2189 				       uint8_t ce_service_max_rx_ind_flush)
2190 {
2191 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2192 
2193 	if (ce_service_max_rx_ind_flush == 0 ||
2194 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
2195 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2196 	else
2197 		hif_ctx->ce_service_max_rx_ind_flush =
2198 						ce_service_max_rx_ind_flush;
2199 }
2200 
2201 #ifdef SYSTEM_PM_CHECK
/**
 * __hif_system_pm_set_state() - record the current system PM state
 * @hif: HIF opaque context
 * @state: new system PM state
 *
 * Return: none
 */
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
}
2209 
/**
 * hif_system_pm_get_state() - read the recorded system PM state
 * @hif: HIF opaque context
 *
 * Return: system PM state as stored by __hif_system_pm_set_state()
 */
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return qdf_atomic_read(&hif_ctx->sys_pm_state);
}
2216 
2217 int hif_system_pm_state_check(struct hif_opaque_softc *hif)
2218 {
2219 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2220 	int32_t sys_pm_state;
2221 
2222 	if (!hif_ctx) {
2223 		hif_err("hif context is null");
2224 		return -EFAULT;
2225 	}
2226 
2227 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
2228 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
2229 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
2230 		hif_info("Triggering system wakeup");
2231 		qdf_pm_system_wakeup();
2232 		return -EAGAIN;
2233 	}
2234 
2235 	return 0;
2236 }
2237 #endif
2238