xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision ccf6794c7efeda37a9772e5eb4d4dab2ab5af07a)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "hif_hw_version.h"
33 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
34      defined(HIF_IPCI))
35 #include "ce_tasklet.h"
36 #include "ce_api.h"
37 #endif
38 #include "qdf_trace.h"
39 #include "qdf_status.h"
40 #include "hif_debug.h"
41 #include "mp_dev.h"
42 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
43 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
44 #include "hal_api.h"
45 #endif
46 #include "hif_napi.h"
47 #include "hif_unit_test_suspend_i.h"
48 #include "qdf_module.h"
49 #ifdef HIF_CE_LOG_INFO
50 #include <qdf_notifier.h>
51 #include <qdf_hang_event_notifier.h>
52 #endif
53 #include <linux/cpumask.h>
54 
55 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
56 #include <pld_common.h>
57 #endif
58 
/**
 * hif_dump() - trigger a debug dump via the bus layer
 * @hif_ctx: hif opaque context
 * @cmd_id: dump command identifier, passed through unchanged
 * @start: true to start the dump, false to stop it
 *
 * Thin wrapper around hif_trigger_dump().
 *
 * Return: n/a
 */
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}
63 
/**
 * hif_get_target_id(): get the target id
 * @scn: hif context
 *
 * Return the virtual memory base address (scn->mem) to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}
77 
/**
 * hif_get_targetdef(): get the target register table definition
 * @hif_ctx: hif opaque context
 *
 * Return: opaque pointer to the target definitions (scn->targetdef)
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}
90 
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase() - inform HAL whether SRNG setup is in init phase
 * @hif_ctx: hif opaque context
 * @init_phase: true while rings are being configured
 *
 * Only forwarded to HAL on srng-based (CE srng) targets; no-op otherwise.
 *
 * Return: n/a
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn))
		hal_set_init_phase(scn->hal_soc, init_phase);
}
#endif /* FORCE_WAKE */
101 
#ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb() - shutdown notifier callback
 * @hif_ctx: hif context
 *
 * Marks hif as being in recovery so in-flight operations can bail out
 * during shutdown.
 *
 * Return: n/a
 */
void hif_shutdown_notifier_cb(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->recovery = true;
}
#endif
110 
111 /**
112  * hif_vote_link_down(): unvote for link up
113  *
114  * Call hif_vote_link_down to release a previous request made using
115  * hif_vote_link_up. A hif_vote_link_down call should only be made
116  * after a corresponding hif_vote_link_up, otherwise you could be
117  * negating a vote from another source. When no votes are present
118  * hif will not guarantee the linkstate after hif_bus_suspend.
119  *
120  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
121  * and initialization deinitialization sequencences.
122  *
123  * Return: n/a
124  */
125 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
126 {
127 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
128 
129 	QDF_BUG(scn);
130 	if (scn->linkstate_vote == 0)
131 		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
132 				scn->linkstate_vote);
133 
134 	scn->linkstate_vote--;
135 	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
136 	if (scn->linkstate_vote == 0)
137 		hif_bus_prevent_linkdown(scn, false);
138 }
139 
140 /**
141  * hif_vote_link_up(): vote to prevent bus from suspending
142  *
143  * Makes hif guarantee that fw can message the host normally
144  * durring suspend.
145  *
146  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
147  * and initialization deinitialization sequencences.
148  *
149  * Return: n/a
150  */
151 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
152 {
153 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
154 
155 	QDF_BUG(scn);
156 	scn->linkstate_vote++;
157 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
158 	if (scn->linkstate_vote == 1)
159 		hif_bus_prevent_linkdown(scn, true);
160 }
161 
162 /**
163  * hif_can_suspend_link(): query if hif is permitted to suspend the link
164  *
165  * Hif will ensure that the link won't be suspended if the upperlayers
166  * don't want it to.
167  *
168  * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
169  * we don't need extra locking to ensure votes dont change while
170  * we are in the process of suspending or resuming.
171  *
172  * Return: false if hif will guarantee link up durring suspend.
173  */
174 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
175 {
176 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
177 
178 	QDF_BUG(scn);
179 	return scn->linkstate_vote == 0;
180 }
181 
/**
 * hif_hia_item_address(): resolve a host-interest-area item address
 * @target_type: target type (TARGET_TYPE_*)
 * @item_offset: byte offset of the item within the host interest area
 *
 * Return: target address of the host interest item; 0 (after asserting)
 * for target types that have no host interest area
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_IPQ4019:
		return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;

	default:
		ASSERT(0);
		return 0;
	}
}
223 
224 /**
225  * hif_max_num_receives_reached() - check max receive is reached
226  * @scn: HIF Context
227  * @count: unsigned int.
228  *
229  * Output check status as bool
230  *
231  * Return: bool
232  */
233 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
234 {
235 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
236 		return count > 120;
237 	else
238 		return count > MAX_NUM_OF_RECEIVES;
239 }
240 
/**
 * init_buffer_count() - initial buffer count
 * @maxSize: requested maximum buffer count
 *
 * Routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed.
 * The default implementation passes the request through unchanged.
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}
254 
255 /**
256  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
257  * @hif_ctx: hif context
258  * @htc_htt_tx_endpoint: htt_tx_endpoint
259  *
260  * Return: void
261  */
262 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
263 							int htc_htt_tx_endpoint)
264 {
265 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
266 
267 	if (!scn) {
268 		hif_err("scn or scn->hif_sc is NULL!");
269 		return;
270 	}
271 
272 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
273 }
274 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
275 
/*
 * Table mapping (target_version, target_revision) pairs to human readable
 * hardware names; consumed by hif_get_hw_name().
 */
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	},
	{
		.id = WCN7850_V1,
		.subid = 0xE,
		.name = "WCN7850_V1",
	}
};
368 
369 /**
370  * hif_get_hw_name(): get a human readable name for the hardware
371  * @info: Target Info
372  *
373  * Return: human readable name for the underlying wifi hardware.
374  */
375 static const char *hif_get_hw_name(struct hif_target_info *info)
376 {
377 	int i;
378 
379 	if (info->hw_name)
380 		return info->hw_name;
381 
382 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
383 		if (info->target_version == qwlan_hw_list[i].id &&
384 		    info->target_revision == qwlan_hw_list[i].subid) {
385 			return qwlan_hw_list[i].name;
386 		}
387 	}
388 
389 	info->hw_name = qdf_mem_malloc(64);
390 	if (!info->hw_name)
391 		return "Unknown Device (nomem)";
392 
393 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
394 			info->target_version);
395 	if (i < 0)
396 		return "Unknown Device (snprintf failure)";
397 	else
398 		return info->hw_name;
399 }
400 
/**
 * hif_get_hw_info(): retrieve hardware version/revision/name
 * @scn: hif opaque context
 * @version: output, target version
 * @revision: output, target revision
 * @target_name: output, human readable hardware name
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
			const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	/* USB targets populate target info lazily; refresh it first */
	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
422 
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: hif opaque context
 *
 * Return: mapped device memory base address (scn->mem)
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);
438 
/**
 * hif_get_dev_ba_ce(): API to get device CE base address.
 * @hif_handle: hif opaque context
 *
 * Return: dev mem base address for CE (scn->mem_ce)
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_ce;
}

qdf_export_symbol(hif_get_dev_ba_ce);
453 
454 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
455 /**
456  * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
457  * @scn: hif context
458  * @psoc: psoc objmgr handle
459  *
460  * Return: None
461  */
462 static inline
463 void hif_get_cfg_from_psoc(struct hif_softc *scn,
464 			   struct wlan_objmgr_psoc *psoc)
465 {
466 	if (psoc) {
467 		scn->ini_cfg.ce_status_ring_timer_threshold =
468 			cfg_get(psoc,
469 				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
470 		scn->ini_cfg.ce_status_ring_batch_count_threshold =
471 			cfg_get(psoc,
472 				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
473 	}
474 }
475 #else
/* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG disabled: no ini thresholds to fetch */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
}
481 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
482 
483 #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
484 /**
485  * hif_recovery_notifier_cb - Recovery notifier callback to log
486  *  hang event data
487  * @block: notifier block
488  * @state: state
489  * @data: notifier data
490  *
491  * Return: status
492  */
493 static
494 int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
495 			     void *data)
496 {
497 	struct qdf_notifer_data *notif_data = data;
498 	qdf_notif_block *notif_block;
499 	struct hif_softc *hif_handle;
500 	bool bus_id_invalid;
501 
502 	if (!data || !block)
503 		return -EINVAL;
504 
505 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
506 
507 	hif_handle = notif_block->priv_data;
508 	if (!hif_handle)
509 		return -EINVAL;
510 
511 	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
512 					  &notif_data->offset);
513 	if (bus_id_invalid)
514 		return NOTIFY_STOP_MASK;
515 
516 	hif_log_ce_info(hif_handle, notif_data->hang_data,
517 			&notif_data->offset);
518 
519 	return 0;
520 }
521 
522 /**
523  * hif_register_recovery_notifier - Register hif recovery notifier
524  * @hif_handle: hif handle
525  *
526  * Return: status
527  */
528 static
529 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
530 {
531 	qdf_notif_block *hif_notifier;
532 
533 	if (!hif_handle)
534 		return QDF_STATUS_E_FAILURE;
535 
536 	hif_notifier = &hif_handle->hif_recovery_notifier;
537 
538 	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
539 	hif_notifier->priv_data = hif_handle;
540 	return qdf_hang_event_register_notifier(hif_notifier);
541 }
542 
543 /**
544  * hif_unregister_recovery_notifier - Un-register hif recovery notifier
545  * @hif_handle: hif handle
546  *
547  * Return: status
548  */
549 static
550 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
551 {
552 	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
553 
554 	return qdf_hang_event_unregister_notifier(hif_notifier);
555 }
#else
/* HIF_CE_LOG_INFO/HIF_BUS_LOG_INFO disabled: recovery notifier is a no-op */
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
569 
570 #ifdef HIF_CPU_PERF_AFFINE_MASK
571 /**
572  * __hif_cpu_hotplug_notify() - CPU hotplug event handler
573  * @cpu: CPU Id of the CPU generating the event
574  * @cpu_up: true if the CPU is online
575  *
576  * Return: None
577  */
578 static void __hif_cpu_hotplug_notify(void *context,
579 				     uint32_t cpu, bool cpu_up)
580 {
581 	struct hif_softc *scn = context;
582 
583 	if (!scn)
584 		return;
585 	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
586 		return;
587 
588 	if (cpu_up) {
589 		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
590 		hif_debug("Setting affinity for online CPU: %d", cpu);
591 	} else {
592 		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
593 	}
594 }
595 
/**
 * hif_cpu_hotplug_notify - cpu core up/down notification
 * handler
 * @context: hif context (struct hif_softc *)
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Wraps __hif_cpu_hotplug_notify() in qdf op protection so the handler
 * does not run while the driver is transitioning.
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}
615 
/* qdf_cpuhp callback: a CPU has come online */
static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}

/* qdf_cpuhp callback: a CPU is about to go offline */
static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}
625 
626 static void hif_cpuhp_register(struct hif_softc *scn)
627 {
628 	if (!scn) {
629 		hif_info_high("cannot register hotplug notifiers");
630 		return;
631 	}
632 	qdf_cpuhp_register(&scn->cpuhp_event_handle,
633 			   scn,
634 			   hif_cpu_online_cb,
635 			   hif_cpu_before_offline_cb);
636 }
637 
/**
 * hif_cpuhp_unregister() - unregister CPU hotplug callbacks from qdf
 * @scn: hif context
 *
 * Return: None
 */
static void hif_cpuhp_unregister(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot unregister hotplug notifiers");
		return;
	}
	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
}
646 
#else
/* HIF_CPU_PERF_AFFINE_MASK disabled: CPU hotplug tracking not needed */
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
656 
657 #ifdef HIF_DETECTION_LATENCY_ENABLE
658 
/**
 * hif_tasklet_latency() - check for ce2 tasklet scheduling/execution latency
 * @scn: hif context
 * @from_timer: true when invoked from the detection timer, false when
 *              invoked from the tasklet execution path
 *
 * Triggers self recovery when the ce2 tasklet has been pending (or late)
 * longer than detect_latency_threshold ms.
 *
 * Return: none
 */
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t ce2_tasklet_sched_time =
		scn->latency_detect.ce2_tasklet_sched_time;
	qdf_time_t ce2_tasklet_exec_time =
		scn->latency_detect.ce2_tasklet_exec_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/* 2 kinds of check here.
	 * from_timer==true:  check if tasklet stall
	 * from_timer==false: check tasklet execute comes late
	 */

	if ((from_timer ?
	    qdf_system_time_after(ce2_tasklet_sched_time,
				  ce2_tasklet_exec_time) :
	    qdf_system_time_after(ce2_tasklet_exec_time,
				  ce2_tasklet_sched_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		ce2_tasklet_sched_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu,ce2_tasklet_exec_time %lu, detect_latency_threshold %ums detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, ce2_tasklet_sched_time,
			ce2_tasklet_exec_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}
696 
/**
 * hif_credit_latency() - check for credit report latency
 * @scn: hif context
 * @from_timer: true when invoked from the detection timer, false when
 *              invoked from the credit report path
 *
 * Triggers self recovery when a credit report has been outstanding (or
 * late) longer than detect_latency_threshold ms after the request.
 *
 * Return: none
 */
void hif_credit_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t credit_request_time =
		scn->latency_detect.credit_request_time;
	qdf_time_t credit_report_time =
		scn->latency_detect.credit_report_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/* 2 kinds of check here.
	 * from_timer==true:  check if credit report stall
	 * from_timer==false: check credit report comes late
	 */

	if ((from_timer ?
	    qdf_system_time_after(credit_request_time,
				  credit_report_time) :
	    qdf_system_time_after(credit_report_time,
				  credit_request_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		credit_request_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu,credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, credit_request_time,
			credit_report_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}
734 
735 /**
736  * hif_check_detection_latency(): to check if latency for tasklet/credit
737  *
738  * @scn: hif context
739  * @from_timer: if called from timer handler
740  * @bitmap_type: indicate if check tasklet or credit
741  *
742  * Return: none
743  */
744 void hif_check_detection_latency(struct hif_softc *scn,
745 				 bool from_timer,
746 				 uint32_t bitmap_type)
747 {
748 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
749 		return;
750 
751 	if (!scn->latency_detect.enable_detection)
752 		return;
753 
754 	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
755 		hif_tasklet_latency(scn, from_timer);
756 
757 	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
758 		hif_credit_latency(scn, from_timer);
759 }
760 
/**
 * hif_latency_detect_timeout_handler() - detection timer expiry handler
 * @arg: hif context (struct hif_softc *)
 *
 * Runs the latency checks, then re-arms the timer on a CPU other than the
 * one that last scheduled the ce2 tasklet, so a stalled tasklet can still
 * be observed.
 *
 * Return: none
 */
static void hif_latency_detect_timeout_handler(void *arg)
{
	struct hif_softc *scn = (struct hif_softc *)arg;
	int next_cpu;

	hif_check_detection_latency(scn, true,
				    BIT(HIF_DETECT_TASKLET) |
				    BIT(HIF_DETECT_CREDIT));

	/* the timer needs to start on a different cpu, so it can detect
	 * a tasklet schedule stall; there is still a chance that, after
	 * the timer has been started, irq/tasklet happens on the same
	 * cpu, then the tasklet will execute before the softirq timer,
	 * and if that tasklet stalls, the timer can't detect it. We
	 * accept this as a limitation: if the tasklet stalls, another
	 * place will detect it, just a little later.
	 */
	next_cpu = cpumask_any_but(
			cpu_active_mask,
			scn->latency_detect.ce2_tasklet_sched_cpuid);

	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
		hif_debug("start timer on local");
		/* no other available cpu found, start on the local cpu */
		qdf_timer_mod(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	} else {
		qdf_timer_start_on(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout,
			next_cpu);
	}
}
796 
/**
 * hif_latency_detect_timer_init() - set up the latency detection timer
 * @scn: hif context
 *
 * Initializes thresholds and the SW-spin timer; only in mission mode.
 * The timer is not started here (see hif_latency_detect_timer_start()).
 *
 * Return: none
 */
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.detect_latency_timer_timeout =
		DETECTION_TIMER_TIMEOUT;
	scn->latency_detect.detect_latency_threshold =
		DETECTION_LATENCY_THRESHOLD;

	hif_info("timer timeout %u, latency threshold %u",
		 scn->latency_detect.detect_latency_timer_timeout,
		 scn->latency_detect.detect_latency_threshold);

	scn->latency_detect.is_timer_started = false;

	qdf_timer_init(NULL,
		       &scn->latency_detect.detect_latency_timer,
		       &hif_latency_detect_timeout_handler,
		       scn,
		       QDF_TIMER_TYPE_SW_SPIN);
}
824 
/**
 * hif_latency_detect_timer_deinit() - free the latency detection timer
 * @scn: hif context
 *
 * Return: none
 */
static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info("deinit timer");
	qdf_timer_free(&scn->latency_detect.detect_latency_timer);
}
833 
/**
 * hif_latency_detect_timer_start() - start the latency detection timer
 * @hif_ctx: hif opaque context
 *
 * No-op outside mission mode or when the timer is already running.
 *
 * Return: none
 */
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info_rl("start timer");
	if (scn->latency_detect.is_timer_started) {
		hif_info("timer has been started");
		return;
	}

	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	scn->latency_detect.is_timer_started = true;
}
851 
/**
 * hif_latency_detect_timer_stop() - stop the latency detection timer
 * @hif_ctx: hif opaque context
 *
 * Synchronously cancels the timer; only in mission mode.
 *
 * Return: none
 */
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info_rl("stop timer");

	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
	scn->latency_detect.is_timer_started = false;
}
864 
865 void hif_latency_detect_credit_record_time(
866 	enum hif_credit_exchange_type type,
867 	struct hif_opaque_softc *hif_ctx)
868 {
869 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
870 
871 	if (!scn) {
872 		hif_err("Could not do runtime put, scn is null");
873 		return;
874 	}
875 
876 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
877 		return;
878 
879 	if (HIF_REQUEST_CREDIT == type)
880 		scn->latency_detect.credit_request_time = qdf_system_ticks();
881 	else if (HIF_PROCESS_CREDIT_REPORT == type)
882 		scn->latency_detect.credit_report_time = qdf_system_ticks();
883 
884 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
885 }
886 
887 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
888 {
889 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
890 
891 	if (!scn) {
892 		hif_err("Could not do runtime put, scn is null");
893 		return;
894 	}
895 
896 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
897 		return;
898 
899 	scn->latency_detect.enable_detection = value;
900 }
#else
/* HIF_DETECTION_LATENCY_ENABLE disabled: latency timer is a no-op */
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{}
#endif
/**
 * hif_open() - allocate and initialize a hif context
 * @qdf_ctx: qdf device context
 * @mode: driver mode (mission/epping/...)
 * @bus_type: bus type (PCI/SNOC/USB/...)
 * @cbk: driver state callbacks, copied into the context
 * @psoc: psoc objmgr handle used to read ini configuration (may be NULL)
 *
 * Allocates the bus-specific softc, initializes atomics/state, opens the
 * bus, and registers CPU hotplug + latency detection support.
 *
 * Return: opaque hif handle on success, NULL on allocation or bus-open
 * failure
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		hif_err("context size 0 not allowed");
		return NULL;
	}

	/* allocation is bus-context sized; hif_softc is its first member */
	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn)
		return GET_HIF_OPAQUE_HDL(scn);

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);

	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type  = bus_type;

	hif_pm_set_link_state(GET_HIF_OPAQUE_HDL(scn), HIF_PM_LINK_STATE_DOWN);
	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
	hif_get_cfg_from_psoc(scn, psoc);

	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_bus_open error = %d, bus_type = %d",
			status, bus_type);
		qdf_mem_free(scn);
		scn = NULL;
		goto out;
	}

	hif_cpuhp_register(scn);
	hif_latency_detect_timer_init(scn);

out:
	return GET_HIF_OPAQUE_HDL(scn);
}
959 
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Safe to call when no rri memory was allocated; always clears the
 * cached virtual address.
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					(CE_COUNT * sizeof(uint32_t)),
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);
	scn->vaddr_rri_on_ddr = NULL;
}
#endif
977 
/**
 * hif_close(): tear down and free a hif context created by hif_open()
 * @hif_ctx: hif opaque context
 *
 * Stops latency detection, removes procfs entries, frees the cached hw
 * name, releases rri/static buffers, unregisters CPU hotplug callbacks,
 * closes the bus, and finally frees the context.
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_opaque_softc is NULL");
		return;
	}

	hif_latency_detect_timer_deinit(scn);

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		/* park a static marker so a racing reader never sees the
		 * freed buffer
		 */
		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cleanup_static_buf_to_target(scn);
	hif_cpuhp_unregister(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}
1015 
/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 *		datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Return: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}
1027 
1028 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1029 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1030 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1031 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1032 	defined(QCA_WIFI_WCN7850) || defined(QCA_WIFI_QCN9224) || \
1033 	defined(QCA_WIFI_QCA9574))
/**
 * hif_get_num_pending_work() - get the number of entries in
 *		the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Return: the number of delayed register write work entries still pending
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
#else
/* targets without delayed register writes never have pending work */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
#endif
1052 
/**
 * hif_try_complete_tasks() - wait for active tasklets and pending work
 * @scn: HIF context
 *
 * Polls (10 ms steps) until all CE tasklets, datapath group tasklets and
 * delayed register write work have drained, up to
 * HIF_TASK_DRAIN_WAIT_CNT iterations.
 *
 * Return: QDF_STATUS_SUCCESS when drained, QDF_STATUS_E_FAULT on timeout
 */
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
1073 
1074 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1075 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1076 {
1077 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1078 	uint32_t work_drain_wait_cnt = 0;
1079 	uint32_t wait_cnt = 0;
1080 	int work = 0;
1081 
1082 	qdf_atomic_set(&scn->dp_ep_vote_access,
1083 		       HIF_EP_VOTE_ACCESS_DISABLE);
1084 	qdf_atomic_set(&scn->ep_vote_access,
1085 		       HIF_EP_VOTE_ACCESS_DISABLE);
1086 
1087 	while ((work = hif_get_num_pending_work(scn))) {
1088 		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1089 			qdf_atomic_set(&scn->dp_ep_vote_access,
1090 				       HIF_EP_VOTE_ACCESS_ENABLE);
1091 			qdf_atomic_set(&scn->ep_vote_access,
1092 				       HIF_EP_VOTE_ACCESS_ENABLE);
1093 			hif_err("timeout wait for pending work %d ", work);
1094 			return QDF_STATUS_E_FAULT;
1095 		}
1096 		qdf_sleep(10);
1097 	}
1098 
1099 	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
1100 	return QDF_STATUS_SUCCESS;
1101 
1102 	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1103 		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
1104 			hif_err("Release EP vote is not proceed by Fw");
1105 			return QDF_STATUS_E_FAULT;
1106 		}
1107 		qdf_sleep(5);
1108 	}
1109 
1110 	return QDF_STATUS_SUCCESS;
1111 }
1112 
1113 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1114 {
1115 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1116 	uint8_t vote_access;
1117 
1118 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1119 
1120 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1121 		hif_info("EP vote changed from:%u to intermediate state",
1122 			 vote_access);
1123 
1124 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1125 		QDF_BUG(0);
1126 
1127 	qdf_atomic_set(&scn->ep_vote_access,
1128 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1129 }
1130 
1131 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1132 {
1133 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1134 
1135 	qdf_atomic_set(&scn->dp_ep_vote_access,
1136 		       HIF_EP_VOTE_ACCESS_ENABLE);
1137 	qdf_atomic_set(&scn->ep_vote_access,
1138 		       HIF_EP_VOTE_ACCESS_ENABLE);
1139 }
1140 
1141 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1142 			    uint8_t type, uint8_t access)
1143 {
1144 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1145 
1146 	if (type == HIF_EP_VOTE_DP_ACCESS)
1147 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1148 	else
1149 		qdf_atomic_set(&scn->ep_vote_access, access);
1150 }
1151 
1152 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1153 			       uint8_t type)
1154 {
1155 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1156 
1157 	if (type == HIF_EP_VOTE_DP_ACCESS)
1158 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1159 	else
1160 		return qdf_atomic_read(&scn->ep_vote_access);
1161 }
1162 #endif
1163 
1164 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1165 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1166 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1167 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1168 	defined(QCA_WIFI_WCN7850) || defined(QCA_WIFI_QCN9224) || \
1169 	defined(QCA_WIFI_QCA9574))
1170 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1171 {
1172 	if (ce_srng_based(scn)) {
1173 		scn->hal_soc = hal_attach(
1174 					hif_softc_to_hif_opaque_softc(scn),
1175 					scn->qdf_dev);
1176 		if (!scn->hal_soc)
1177 			return QDF_STATUS_E_FAILURE;
1178 	}
1179 
1180 	return QDF_STATUS_SUCCESS;
1181 }
1182 
1183 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1184 {
1185 	if (ce_srng_based(scn)) {
1186 		hal_detach(scn->hal_soc);
1187 		scn->hal_soc = NULL;
1188 	}
1189 
1190 	return QDF_STATUS_SUCCESS;
1191 }
1192 #else
/* HAL is not compiled in for this target family: attach is a no-op */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

/* Matching no-op detach for targets without HAL support */
static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
1202 #endif
1203 
1204 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1205 {
1206 	int ret;
1207 
1208 	switch (bus_type) {
1209 	case QDF_BUS_TYPE_IPCI:
1210 		ret = qdf_set_dma_coherent_mask(dev,
1211 						DMA_COHERENT_MASK_DEFAULT);
1212 		if (ret) {
1213 			hif_err("Failed to set dma mask error = %d", ret);
1214 			return ret;
1215 		}
1216 
1217 		break;
1218 	default:
1219 		/* Follow the existing sequence for other targets */
1220 		break;
1221 	}
1222 
1223 	return 0;
1224 }
1225 
1226 /**
1227  * hif_enable(): hif_enable
1228  * @hif_ctx: hif_ctx
1229  * @dev: dev
1230  * @bdev: bus dev
1231  * @bid: bus ID
1232  * @bus_type: bus type
1233  * @type: enable type
1234  *
1235  * Return: QDF_STATUS
1236  */
1237 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1238 					  void *bdev,
1239 					  const struct hif_bus_id *bid,
1240 					  enum qdf_bus_type bus_type,
1241 					  enum hif_enable_type type)
1242 {
1243 	QDF_STATUS status;
1244 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1245 
1246 	if (!scn) {
1247 		hif_err("hif_ctx = NULL");
1248 		return QDF_STATUS_E_NULL_VALUE;
1249 	}
1250 
1251 	status = hif_enable_bus(scn, dev, bdev, bid, type);
1252 	if (status != QDF_STATUS_SUCCESS) {
1253 		hif_err("hif_enable_bus error = %d", status);
1254 		return status;
1255 	}
1256 
1257 	hif_pm_set_link_state(GET_HIF_OPAQUE_HDL(scn), HIF_PM_LINK_STATE_UP);
1258 	status = hif_hal_attach(scn);
1259 	if (status != QDF_STATUS_SUCCESS) {
1260 		hif_err("hal attach failed");
1261 		goto disable_bus;
1262 	}
1263 
1264 	if (hif_bus_configure(scn)) {
1265 		hif_err("Target probe failed");
1266 		status = QDF_STATUS_E_FAILURE;
1267 		goto hal_detach;
1268 	}
1269 
1270 	hif_ut_suspend_init(scn);
1271 	hif_register_recovery_notifier(scn);
1272 	hif_latency_detect_timer_start(hif_ctx);
1273 
1274 	/*
1275 	 * Flag to avoid potential unallocated memory access from MSI
1276 	 * interrupt handler which could get scheduled as soon as MSI
1277 	 * is enabled, i.e to take care of the race due to the order
1278 	 * in where MSI is enabled before the memory, that will be
1279 	 * in interrupt handlers, is allocated.
1280 	 */
1281 
1282 	scn->hif_init_done = true;
1283 
1284 	hif_debug("OK");
1285 
1286 	return QDF_STATUS_SUCCESS;
1287 
1288 hal_detach:
1289 	hif_hal_detach(scn);
1290 disable_bus:
1291 	hif_disable_bus(scn);
1292 	return status;
1293 }
1294 
/*
 * hif_disable() - tear down the HIF layer
 * @hif_ctx: HIF opaque context
 * @type: disable type
 *
 * Stops latency detection, unregisters the recovery notifier, quiesces
 * interrupts, stops or shuts down the device (depending on whether init
 * completed), detaches HAL, takes the PM link down and disables the bus.
 */
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_set_enable_detection(hif_ctx, false);
	hif_latency_detect_timer_stop(hif_ctx);

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	/* If init never completed, a full stop is not possible: shut down */
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_pm_set_link_state(hif_ctx, HIF_PM_LINK_STATE_DOWN);
	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	hif_debug("X");
}
1324 
1325 #ifdef CE_TASKLET_DEBUG_ENABLE
1326 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
1327 {
1328 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1329 
1330 	if (!scn)
1331 		return;
1332 
1333 	scn->ce_latency_stats = val;
1334 }
1335 #endif
1336 
/* Dump bus-level statistics to the log (thin wrapper). */
void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}

qdf_export_symbol(hif_display_stats);
1343 
/* Reset bus-level statistics counters (thin wrapper). */
void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
1348 
1349 /**
1350  * hif_crash_shutdown_dump_bus_register() - dump bus registers
1351  * @hif_ctx: hif_ctx
1352  *
1353  * Return: n/a
1354  */
1355 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
1356 
static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	/* Skip the dump entirely when the SoC is not accessible */
	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		hif_err("Failed to dump bus registers!");
}
1367 
1368 /**
1369  * hif_crash_shutdown(): hif_crash_shutdown
1370  *
1371  * This function is called by the platform driver to dump CE registers
1372  *
1373  * @hif_ctx: hif_ctx
1374  *
1375  * Return: n/a
1376  */
1377 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
1378 {
1379 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1380 
1381 	if (!hif_ctx)
1382 		return;
1383 
1384 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
1385 		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
1386 		return;
1387 	}
1388 
1389 	if (TARGET_STATUS_RESET == scn->target_status) {
1390 		hif_warn("Target is already asserted, ignore!");
1391 		return;
1392 	}
1393 
1394 	if (hif_is_load_or_unload_in_progress(scn)) {
1395 		hif_err("Load/unload is in progress, ignore!");
1396 		return;
1397 	}
1398 
1399 	hif_crash_shutdown_dump_bus_register(hif_ctx);
1400 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
1401 
1402 	if (ol_copy_ramdump(hif_ctx))
1403 		goto out;
1404 
1405 	hif_info("RAM dump collecting completed!");
1406 
1407 out:
1408 	return;
1409 }
1410 #else
/* Stub: target RAM dump collection is compiled out for this build */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	hif_debug("Collecting target RAM dump disabled");
}
1415 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
1416 
1417 #ifdef QCA_WIFI_3_0
1418 /**
1419  * hif_check_fw_reg(): hif_check_fw_reg
1420  * @scn: scn
1421  * @state:
1422  *
1423  * Return: int
1424  */
1425 int hif_check_fw_reg(struct hif_opaque_softc *scn)
1426 {
1427 	return 0;
1428 }
1429 #endif
1430 
1431 /**
1432  * hif_read_phy_mem_base(): hif_read_phy_mem_base
1433  * @scn: scn
1434  * @phy_mem_base: physical mem base
1435  *
1436  * Return: n/a
1437  */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	/* Report the bus-mapped physical base address of target memory */
	*phy_mem_base = scn->mem_pa;
}
qdf_export_symbol(hif_read_phy_mem_base);
1443 
1444 /**
1445  * hif_get_device_type(): hif_get_device_type
1446  * @device_id: device_id
1447  * @revision_id: revision_id
1448  * @hif_type: returned hif_type
1449  * @target_type: returned target_type
1450  *
1451  * Return: int
1452  */
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type)
{
	int ret = 0;

	switch (device_id) {
	case ADRASTEA_DEVICE_ID_P2_E12:

		*hif_type = HIF_TYPE_ADRASTEA;
		*target_type = TARGET_TYPE_ADRASTEA;
		break;

	case AR9888_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		break;

	case AR6320_DEVICE_ID:
		/* AR6320 hif/target type depends on the firmware revision */
		switch (revision_id) {
		case AR6320_FW_1_1:
		case AR6320_FW_1_3:
			*hif_type = HIF_TYPE_AR6320;
			*target_type = TARGET_TYPE_AR6320;
			break;

		case AR6320_FW_2_0:
		case AR6320_FW_3_0:
		case AR6320_FW_3_2:
			*hif_type = HIF_TYPE_AR6320V2;
			*target_type = TARGET_TYPE_AR6320V2;
			break;

		default:
			/* Unknown revision: skip the trailing target check */
			hif_err("dev_id = 0x%x, rev_id = 0x%x",
				device_id, revision_id);
			ret = -ENODEV;
			goto end;
		}
		break;

	case AR9887_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		hif_info(" *********** AR9887 **************");
		break;

	case QCA9984_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9984;
		*target_type = TARGET_TYPE_QCA9984;
		hif_info(" *********** QCA9984 *************");
		break;

	case QCA9888_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9888;
		*target_type = TARGET_TYPE_QCA9888;
		hif_info(" *********** QCA9888 *************");
		break;

	case AR900B_DEVICE_ID:
		*hif_type = HIF_TYPE_AR900B;
		*target_type = TARGET_TYPE_AR900B;
		hif_info(" *********** AR900B *************");
		break;

	case IPQ4019_DEVICE_ID:
		*hif_type = HIF_TYPE_IPQ4019;
		*target_type = TARGET_TYPE_IPQ4019;
		hif_info(" *********** IPQ4019  *************");
		break;

	case QCA8074_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074;
		*target_type = TARGET_TYPE_QCA8074;
		hif_info(" *********** QCA8074  *************");
		break;

	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6290;
		*target_type = TARGET_TYPE_QCA6290;
		hif_info(" *********** QCA6290EMU *************");
		break;

	case QCN9000_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9000;
		*target_type = TARGET_TYPE_QCN9000;
		hif_info(" *********** QCN9000 *************");
		break;

	case QCN9224_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9224;
		*target_type = TARGET_TYPE_QCN9224;
		hif_info(" *********** QCN9224 *************");
		break;

	case QCN6122_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN6122;
		*target_type = TARGET_TYPE_QCN6122;
		hif_info(" *********** QCN6122 *************");
		break;

	case QCN7605_DEVICE_ID:
	case QCN7605_COMPOSITE:
	case QCN7605_STANDALONE:
	case QCN7605_STANDALONE_V2:
	case QCN7605_COMPOSITE_V2:
		*hif_type = HIF_TYPE_QCN7605;
		*target_type = TARGET_TYPE_QCN7605;
		hif_info(" *********** QCN7605 *************");
		break;

	case QCA6390_DEVICE_ID:
	case QCA6390_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6390;
		*target_type = TARGET_TYPE_QCA6390;
		hif_info(" *********** QCA6390 *************");
		break;

	case QCA6490_DEVICE_ID:
	case QCA6490_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6490;
		*target_type = TARGET_TYPE_QCA6490;
		hif_info(" *********** QCA6490 *************");
		break;

	case QCA6750_DEVICE_ID:
	case QCA6750_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6750;
		*target_type = TARGET_TYPE_QCA6750;
		hif_info(" *********** QCA6750 *************");
		break;

	case WCN7850_DEVICE_ID:
		*hif_type = HIF_TYPE_WCN7850;
		*target_type = TARGET_TYPE_WCN7850;
		hif_info(" *********** WCN7850 *************");
		break;

	case QCA8074V2_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074V2;
		*target_type = TARGET_TYPE_QCA8074V2;
		hif_info(" *********** QCA8074V2 *************");
		break;

	case QCA6018_DEVICE_ID:
	case RUMIM2M_DEVICE_ID_NODE0:
	case RUMIM2M_DEVICE_ID_NODE1:
	case RUMIM2M_DEVICE_ID_NODE2:
	case RUMIM2M_DEVICE_ID_NODE3:
	case RUMIM2M_DEVICE_ID_NODE4:
	case RUMIM2M_DEVICE_ID_NODE5:
		*hif_type = HIF_TYPE_QCA6018;
		*target_type = TARGET_TYPE_QCA6018;
		hif_info(" *********** QCA6018 *************");
		break;

	case QCA5018_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5018;
		*target_type = TARGET_TYPE_QCA5018;
		hif_info(" *********** qca5018 *************");
		break;

	case QCA9574_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9574;
		*target_type = TARGET_TYPE_QCA9574;
		hif_info(" *********** QCA9574 *************");
		break;

	default:
		/*
		 * NOTE(review): this path leaves *target_type unwritten, so
		 * the check below reads whatever the caller passed in —
		 * presumably callers pre-initialize it to
		 * TARGET_TYPE_UNKNOWN; confirm.
		 */
		hif_err("Unsupported device ID = 0x%x!", device_id);
		ret = -ENODEV;
		break;
	}

	if (*target_type == TARGET_TYPE_UNKNOWN) {
		hif_err("Unsupported target_type!");
		ret = -ENODEV;
	}
end:
	return ret;
}
1635 
1636 /**
1637  * hif_get_bus_type() - return the bus type
1638  *
1639  * Return: enum qdf_bus_type
1640  */
1641 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
1642 {
1643 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
1644 
1645 	return scn->bus_type;
1646 }
1647 
1648 /**
1649  * Target info and ini parameters are global to the driver
1650  * Hence these structures are exposed to all the modules in
1651  * the driver and they don't need to maintains multiple copies
1652  * of the same info, instead get the handle from hif and
1653  * modify them in hif
1654  */
1655 
1656 /**
1657  * hif_get_ini_handle() - API to get hif_config_param handle
1658  * @hif_ctx: HIF Context
1659  *
1660  * Return: pointer to hif_config_info
1661  */
1662 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
1663 {
1664 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1665 
1666 	return &sc->hif_config;
1667 }
1668 
1669 /**
1670  * hif_get_target_info_handle() - API to get hif_target_info handle
1671  * @hif_ctx: HIF context
1672  *
1673  * Return: Pointer to hif_target_info
1674  */
1675 struct hif_target_info *hif_get_target_info_handle(
1676 					struct hif_opaque_softc *hif_ctx)
1677 {
1678 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1679 
1680 	return &sc->target_info;
1681 
1682 }
1683 qdf_export_symbol(hif_get_target_info_handle);
1684 
1685 #ifdef RECEIVE_OFFLOAD
/* Register the rx offload flush callback; requires NAPI to be enabled. */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (!hif_napi_enabled(scn, -1)) {
		hif_err("NAPI not enabled");
		return;
	}

	hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
}
qdf_export_symbol(hif_offld_flush_cb_register);
1695 
/* Deregister the rx offload flush callback; requires NAPI to be enabled. */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (!hif_napi_enabled(scn, -1)) {
		hif_err("NAPI not enabled");
		return;
	}

	hif_napi_rx_offld_flush_cb_deregister(scn);
}
qdf_export_symbol(hif_offld_flush_cb_deregister);
1704 
/* Map a pipe id to its rx context id (NAPI id when NAPI is enabled). */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return hif_napi_enabled(hif_hdl, -1) ? NAPI_PIPE2ID(ctx_id) : ctx_id;
}
1712 #else /* RECEIVE_OFFLOAD */
/* Without RECEIVE_OFFLOAD there is a single rx context: always id 0 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
1718 #endif /* RECEIVE_OFFLOAD */
1719 
1720 #if defined(FEATURE_LRO)
1721 
1722 /**
1723  * hif_get_lro_info - Returns LRO instance for instance ID
1724  * @ctx_id: LRO instance ID
1725  * @hif_hdl: HIF Context
1726  *
1727  * Return: Pointer to LRO instance.
1728  */
/*
 * hif_get_lro_info() - return the LRO instance for an instance ID
 * @ctx_id: LRO instance ID
 * @hif_hdl: HIF context
 *
 * The LRO context lives with NAPI when NAPI is enabled, otherwise with
 * the copy engine.
 *
 * Return: pointer to the LRO instance
 */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return hif_napi_get_lro_info(hif_hdl, ctx_id);

	return hif_ce_get_lro_ctx(hif_hdl, ctx_id);
}
1740 #endif
1741 
1742 /**
1743  * hif_get_target_status - API to get target status
1744  * @hif_ctx: HIF Context
1745  *
1746  * Return: enum hif_target_status
1747  */
1748 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
1749 {
1750 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1751 
1752 	return scn->target_status;
1753 }
1754 qdf_export_symbol(hif_get_target_status);
1755 
1756 /**
1757  * hif_set_target_status() - API to set target status
1758  * @hif_ctx: HIF Context
1759  * @status: Target Status
1760  *
1761  * Return: void
1762  */
1763 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1764 			   hif_target_status status)
1765 {
1766 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1767 
1768 	scn->target_status = status;
1769 }
1770 
1771 /**
1772  * hif_init_ini_config() - API to initialize HIF configuration parameters
1773  * @hif_ctx: HIF Context
1774  * @cfg: HIF Configuration
1775  *
1776  * Return: void
1777  */
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	/* Take a private copy; the caller's cfg need not outlive this call */
	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
}
1785 
1786 /**
1787  * hif_get_conparam() - API to get driver mode in HIF
1788  * @scn: HIF Context
1789  *
1790  * Return: driver mode of operation
1791  */
1792 uint32_t hif_get_conparam(struct hif_softc *scn)
1793 {
1794 	if (!scn)
1795 		return 0;
1796 
1797 	return scn->hif_con_param;
1798 }
1799 
1800 /**
1801  * hif_get_callbacks_handle() - API to get callbacks Handle
1802  * @scn: HIF Context
1803  *
1804  * Return: pointer to HIF Callbacks
1805  */
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn)
{
	/* Callbacks are embedded in the softc; scn must be non-NULL here */
	return &scn->callbacks;
}
1811 
1812 /**
1813  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1814  * @scn: HIF Context
1815  *
1816  * Return: True/False
1817  */
1818 bool hif_is_driver_unloading(struct hif_softc *scn)
1819 {
1820 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1821 
1822 	if (cbk && cbk->is_driver_unloading)
1823 		return cbk->is_driver_unloading(cbk->context);
1824 
1825 	return false;
1826 }
1827 
1828 /**
1829  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1830  * load/unload in progress
1831  * @scn: HIF Context
1832  *
1833  * Return: True/False
1834  */
1835 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1836 {
1837 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1838 
1839 	if (cbk && cbk->is_load_unload_in_progress)
1840 		return cbk->is_load_unload_in_progress(cbk->context);
1841 
1842 	return false;
1843 }
1844 
1845 /**
1846  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1847  * progress
1848  * @scn: HIF Context
1849  *
1850  * Return: True/False
1851  */
1852 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1853 {
1854 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1855 
1856 	if (cbk && cbk->is_recovery_in_progress)
1857 		return cbk->is_recovery_in_progress(cbk->context);
1858 
1859 	return false;
1860 }
1861 
1862 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
1863     defined(HIF_IPCI)
1864 
1865 /**
1866  * hif_update_pipe_callback() - API to register pipe specific callbacks
1867  * @osc: Opaque softc
1868  * @pipeid: pipe id
1869  * @callbacks: callbacks to register
1870  *
1871  * Return: void
1872  */
1873 
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
					u_int8_t pipeid,
					struct hif_msg_callbacks *callbacks)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	/* pipeid indexes hif_state->pipe_info[]; out of range is fatal */
	QDF_BUG(pipeid < CE_COUNT_MAX);

	hif_debug("pipeid: %d", pipeid);

	pipe_info = &hif_state->pipe_info[pipeid];

	/* Copy by value; the caller's callbacks struct need not persist */
	qdf_mem_copy(&pipe_info->pipe_callbacks,
			callbacks, sizeof(pipe_info->pipe_callbacks));
}
qdf_export_symbol(hif_update_pipe_callback);
1892 
1893 /**
1894  * hif_is_target_ready() - API to query if target is in ready state
1895  * progress
1896  * @scn: HIF Context
1897  *
1898  * Return: True/False
1899  */
1900 bool hif_is_target_ready(struct hif_softc *scn)
1901 {
1902 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1903 
1904 	if (cbk && cbk->is_target_ready)
1905 		return cbk->is_target_ready(cbk->context);
1906 	/*
1907 	 * if callback is not registered then there is no way to determine
1908 	 * if target is ready. In-such case return true to indicate that
1909 	 * target is ready.
1910 	 */
1911 	return true;
1912 }
1913 qdf_export_symbol(hif_is_target_ready);
1914 
1915 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
1916 {
1917 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1918 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1919 
1920 	if (cbk && cbk->get_bandwidth_level)
1921 		return cbk->get_bandwidth_level(cbk->context);
1922 
1923 	return 0;
1924 }
1925 
1926 qdf_export_symbol(hif_get_bandwidth_level);
1927 
1928 #ifdef DP_MEM_PRE_ALLOC
/*
 * hif_mem_alloc_consistent_unaligned() - allocate DMA-coherent memory,
 * preferring the pre-allocated pool when a pre-alloc callback exists.
 * @scn: HIF context
 * @size: requested size in bytes
 * @paddr: out param, physical (DMA) address of the allocation
 * @ring_type: ring type, forwarded to the pre-alloc pool
 * @is_mem_prealloc: out param, set true when the buffer came from the pool
 *
 * Falls back to qdf_mem_alloc_consistent() when the pool callback is
 * absent or returns no buffer.
 */
void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
					 qdf_size_t size,
					 qdf_dma_addr_t *paddr,
					 uint32_t ring_type,
					 uint8_t *is_mem_prealloc)
{
	void *vaddr = NULL;
	struct hif_driver_state_callbacks *cbk =
				hif_get_callbacks_handle(scn);

	*is_mem_prealloc = false;
	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
								   paddr,
								   ring_type);
		if (vaddr) {
			*is_mem_prealloc = true;
			goto end;
		}
	}

	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
					 scn->qdf_dev->dev,
					 size,
					 paddr);
end:
	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
		(void *)*paddr, (int)size, ring_type);

	return vaddr;
}
1961 
1962 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
1963 				       qdf_size_t size,
1964 				       void *vaddr,
1965 				       qdf_dma_addr_t paddr,
1966 				       qdf_dma_context_t memctx,
1967 				       uint8_t is_mem_prealloc)
1968 {
1969 	struct hif_driver_state_callbacks *cbk =
1970 				hif_get_callbacks_handle(scn);
1971 
1972 	if (is_mem_prealloc) {
1973 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
1974 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
1975 		} else {
1976 			dp_warn("dp_prealloc_put_consistent_unligned NULL");
1977 			QDF_BUG(0);
1978 		}
1979 	} else {
1980 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1981 					size, vaddr, paddr, memctx);
1982 	}
1983 }
1984 #endif
1985 
1986 /**
1987  * hif_batch_send() - API to access hif specific function
1988  * ce_batch_send.
1989  * @osc: HIF Context
1990  * @msdu : list of msdus to be sent
1991  * @transfer_id : transfer id
1992  * @len : donwloaded length
1993  *
1994  * Return: list of msds not sent
1995  */
1996 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1997 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
1998 {
1999 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2000 
2001 	if (!ce_tx_hdl)
2002 		return NULL;
2003 
2004 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2005 			len, sendhead);
2006 }
2007 qdf_export_symbol(hif_batch_send);
2008 
2009 /**
2010  * hif_update_tx_ring() - API to access hif specific function
2011  * ce_update_tx_ring.
2012  * @osc: HIF Context
2013  * @num_htt_cmpls : number of htt compl received.
2014  *
2015  * Return: void
2016  */
2017 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2018 {
2019 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2020 
2021 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2022 }
2023 qdf_export_symbol(hif_update_tx_ring);
2024 
2025 
2026 /**
2027  * hif_send_single() - API to access hif specific function
2028  * ce_send_single.
2029  * @osc: HIF Context
2030  * @msdu : msdu to be sent
2031  * @transfer_id: transfer id
2032  * @len : downloaded length
2033  *
2034  * Return: msdu sent status
2035  */
2036 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2037 			   uint32_t transfer_id, u_int32_t len)
2038 {
2039 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2040 
2041 	if (!ce_tx_hdl)
2042 		return QDF_STATUS_E_NULL_VALUE;
2043 
2044 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2045 			len);
2046 }
2047 qdf_export_symbol(hif_send_single);
2048 #endif
2049 
2050 /**
2051  * hif_reg_write() - API to access hif specific function
2052  * hif_write32_mb.
2053  * @hif_ctx : HIF Context
2054  * @offset : offset on which value has to be written
2055  * @value : value to be written
2056  *
2057  * Return: None
2058  */
2059 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2060 		uint32_t value)
2061 {
2062 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2063 
2064 	hif_write32_mb(scn, scn->mem + offset, value);
2065 
2066 }
2067 qdf_export_symbol(hif_reg_write);
2068 
2069 /**
2070  * hif_reg_read() - API to access hif specific function
2071  * hif_read32_mb.
2072  * @hif_ctx : HIF Context
2073  * @offset : offset from which value has to be read
2074  *
2075  * Return: Read value
2076  */
2077 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
2078 {
2079 
2080 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2081 
2082 	return hif_read32_mb(scn, scn->mem + offset);
2083 }
2084 qdf_export_symbol(hif_reg_read);
2085 
2086 /**
2087  * hif_ramdump_handler(): generic ramdump handler
2088  * @scn: struct hif_opaque_softc
2089  *
2090  * Return: None
2091  */
void hif_ramdump_handler(struct hif_opaque_softc *scn)
{
	/* Only the USB bus has a dedicated ramdump path */
	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
		hif_usb_ramdump_handler(scn);
}
2097 
2098 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
2099 {
2100 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2101 
2102 	return scn->wake_irq_type;
2103 }
2104 
/*
 * hif_wake_interrupt_handler() - wake IRQ handler
 * @irq: irq number that fired
 * @context: struct hif_softc pointer registered with the irq
 *
 * Requests a runtime resume, invokes the registered initial-wakeup
 * callback, resumes a unit-test-suspended FW if needed, and wakes the
 * system PM framework.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
{
	struct hif_softc *scn = context;
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);

	hif_info("wake interrupt received on irq %d", irq);

	/* Stop monitoring wake interrupts and kick off a runtime resume */
	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_pm_runtime_request_resume(hif_ctx, RTPM_ID_WAKE_INTR_HANDLER);

	if (scn->initial_wakeup_cb)
		scn->initial_wakeup_cb(scn->initial_wakeup_priv);

	if (hif_is_ut_suspended(scn))
		hif_ut_fw_resume(scn);

	qdf_pm_system_wakeup();

	return IRQ_HANDLED;
}
2125 
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	/* Invoked from hif_wake_interrupt_handler() on a wake interrupt */
	scn->initial_wakeup_cb = callback;
	scn->initial_wakeup_priv = priv;
}
2135 
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	/* Stored scaled by 1000 — presumably ms in, us kept; confirm units */
	hif_ctx->ce_service_max_yield_time =
		ce_service_max_yield_time * 1000;
}
2144 
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	/* Returns the value exactly as stored by the setter (x1000 scaled) */
	return hif_ctx->ce_service_max_yield_time;
}
2152 
2153 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2154 				       uint8_t ce_service_max_rx_ind_flush)
2155 {
2156 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2157 
2158 	if (ce_service_max_rx_ind_flush == 0 ||
2159 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
2160 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2161 	else
2162 		hif_ctx->ce_service_max_rx_ind_flush =
2163 						ce_service_max_rx_ind_flush;
2164 }
2165 
2166 #ifdef SYSTEM_PM_CHECK
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	/* Atomically publish the new system PM state */
	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
}
2174 
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	/* Atomic read of the system PM state set by __hif_system_pm_set_state */
	return qdf_atomic_read(&hif_ctx->sys_pm_state);
}
2181 
2182 int hif_system_pm_state_check(struct hif_opaque_softc *hif)
2183 {
2184 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2185 	int32_t sys_pm_state;
2186 
2187 	if (!hif_ctx) {
2188 		hif_err("hif context is null");
2189 		return -EFAULT;
2190 	}
2191 
2192 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
2193 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
2194 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
2195 		hif_info("Triggering system wakeup");
2196 		qdf_pm_system_wakeup();
2197 		return -EAGAIN;
2198 	}
2199 
2200 	return 0;
2201 }
2202 #endif
2203