1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "targcfg.h"
21 #include "qdf_lock.h"
22 #include "qdf_status.h"
23 #include "qdf_status.h"
24 #include <qdf_atomic.h>         /* qdf_atomic_read */
25 #include <targaddrs.h>
26 #include "hif_io32.h"
27 #include <hif.h>
28 #include <target_type.h>
29 #include "regtable.h"
30 #define ATH_MODULE_NAME hif
31 #include <a_debug.h>
32 #include "hif_main.h"
33 #include "hif_hw_version.h"
34 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
35      defined(HIF_IPCI))
36 #include "ce_tasklet.h"
37 #include "ce_api.h"
38 #endif
39 #include "qdf_trace.h"
40 #include "qdf_status.h"
41 #include "hif_debug.h"
42 #include "mp_dev.h"
43 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
44 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
45 #include "hal_api.h"
46 #endif
47 #include "hif_napi.h"
48 #include "hif_unit_test_suspend_i.h"
49 #include "qdf_module.h"
50 #ifdef HIF_CE_LOG_INFO
51 #include <qdf_notifier.h>
52 #include <qdf_hang_event_notifier.h>
53 #endif
54 #include <linux/cpumask.h>
55 
56 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
57 #include <pld_common.h>
58 #endif
59 
60 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
61 {
62 	hif_trigger_dump(hif_ctx, cmd_id, start);
63 }
64 
65 /**
66  * hif_get_target_id(): get the target id
67  * @scn: hif_softc context
68  *
69  * Returns the virtual memory base address of the target to the caller.
71  *
72  * Return: A_target_id_t
73  */
74 A_target_id_t hif_get_target_id(struct hif_softc *scn)
75 {
76 	return scn->mem;
77 }
78 
79 /**
80  * hif_get_targetdef(): get the target definitions
81  * @hif_ctx: hif context
82  *
83  * Return: void *
84  */
85 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
86 {
87 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
88 
89 	return scn->targetdef;
90 }
91 
92 #ifdef FORCE_WAKE
93 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
94 			 bool init_phase)
95 {
96 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
97 
98 	if (ce_srng_based(scn))
99 		hal_set_init_phase(scn->hal_soc, init_phase);
100 }
101 #endif /* FORCE_WAKE */
102 
103 #ifdef HIF_IPCI
104 void hif_shutdown_notifier_cb(void *hif_ctx)
105 {
106 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
107 
108 	scn->recovery = true;
109 }
110 #endif
111 
112 /**
113  * hif_vote_link_down(): unvote for link up
114  *
115  * Call hif_vote_link_down to release a previous request made using
116  * hif_vote_link_up. A hif_vote_link_down call should only be made
117  * after a corresponding hif_vote_link_up, otherwise you could be
118  * negating a vote from another source. When no votes are present
119  * hif will not guarantee the linkstate after hif_bus_suspend.
120  *
121  * SYNCHRONIZE WITH hif_vote_link_up by only calling in the MC thread
122  * and in initialization/deinitialization sequences.
123  *
124  * Return: n/a
125  */
126 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
127 {
128 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
129 
130 	QDF_BUG(scn);
131 	if (scn->linkstate_vote == 0)
132 		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
133 				scn->linkstate_vote);
134 
135 	scn->linkstate_vote--;
136 	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
137 	if (scn->linkstate_vote == 0)
138 		hif_bus_prevent_linkdown(scn, false);
139 }
140 
141 /**
142  * hif_vote_link_up(): vote to prevent bus from suspending
143  *
144  * Makes hif guarantee that fw can message the host normally
145  * during suspend.
146  *
147  * SYNCHRONIZE WITH hif_vote_link_down by only calling in the MC thread
148  * and in initialization/deinitialization sequences.
149  *
150  * Return: n/a
151  */
152 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
153 {
154 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
155 
156 	QDF_BUG(scn);
157 	scn->linkstate_vote++;
158 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
159 	if (scn->linkstate_vote == 1)
160 		hif_bus_prevent_linkdown(scn, true);
161 }
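
/*
 * Usage sketch (illustrative only; example_fw_exchange() is a
 * hypothetical caller, not part of this file): clients that need the
 * bus to stay up while talking to firmware bracket the exchange with a
 * matched vote pair.
 *
 *	static void example_fw_exchange(struct hif_opaque_softc *hif_ctx)
 *	{
 *		hif_vote_link_up(hif_ctx);
 *		... exchange messages; fw can message the host ...
 *		hif_vote_link_down(hif_ctx);
 *	}
 */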
162 
163 /**
164  * hif_can_suspend_link(): query if hif is permitted to suspend the link
165  *
166  * Hif will ensure that the link won't be suspended if the upperlayers
167  * don't want it to.
168  *
169  * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
170  * we don't need extra locking to ensure votes don't change while
171  * we are in the process of suspending or resuming.
172  *
173  * Return: false if hif will guarantee link up during suspend.
174  */
175 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
176 {
177 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
178 
179 	QDF_BUG(scn);
180 	return scn->linkstate_vote == 0;
181 }
182 
183 /**
184  * hif_hia_item_address(): get host interest item address
185  * @target_type: target_type
186  * @item_offset: item_offset
187  *
188  * Return: address of the host interest item for the given target
189  */
190 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
191 {
192 	switch (target_type) {
193 	case TARGET_TYPE_AR6002:
194 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
195 	case TARGET_TYPE_AR6003:
196 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
197 	case TARGET_TYPE_AR6004:
198 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
199 	case TARGET_TYPE_AR6006:
200 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
201 	case TARGET_TYPE_AR9888:
202 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
203 	case TARGET_TYPE_AR6320:
204 	case TARGET_TYPE_AR6320V2:
205 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
206 	case TARGET_TYPE_ADRASTEA:
207 		/* ADRASTEA doesn't have a host interest address */
208 		ASSERT(0);
209 		return 0;
210 	case TARGET_TYPE_AR900B:
211 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
212 	case TARGET_TYPE_QCA9984:
213 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
214 	case TARGET_TYPE_QCA9888:
215 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
216 
217 	default:
218 		ASSERT(0);
219 		return 0;
220 	}
221 }
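
/*
 * Usage sketch (illustrative; assumes the HI_ITEM() offsetof helper
 * from targaddrs.h): diagnostic code derives the absolute target
 * address of a host interest item from the item's structure offset.
 *
 *	uint32_t addr = hif_hia_item_address(TARGET_TYPE_AR9888,
 *					     HI_ITEM(hi_board_data));
 */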
222 
223 /**
224  * hif_max_num_receives_reached() - check if the receive limit is reached
225  * @scn: HIF Context
226  * @count: number of messages received so far
227  *
228  * Return: true if count exceeds the receive limit for the current mode
231  */
232 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
233 {
234 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
235 		return count > 120;
236 	else
237 		return count > MAX_NUM_OF_RECEIVES;
238 }
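
/*
 * Usage sketch (illustrative; the example_* loop helpers are
 * hypothetical): a CE completion handler uses this as its yield check
 * so one burst of receives cannot monopolize the CPU; on break, the
 * tasklet is rescheduled and the remaining work is picked up later.
 *
 *	unsigned int count = 0;
 *
 *	while (example_more_completions(pipe)) {
 *		example_process_one(pipe);
 *		if (hif_max_num_receives_reached(scn, ++count))
 *			break;
 *	}
 */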
239 
240 /**
241  * init_buffer_count() - initial buffer count
242  * @maxSize: maximum buffer count requested
243  *
244  * Routine to modify the initial buffer count to be allocated on a per
245  * OS/platform basis. Platform owners will need to modify this as needed.
246  *
247  * Return: qdf_size_t
248  */
249 qdf_size_t init_buffer_count(qdf_size_t maxSize)
250 {
251 	return maxSize;
252 }
253 
254 /**
255  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
256  * @hif_ctx: hif context
257  * @htc_htt_tx_endpoint: htt_tx_endpoint
258  *
259  * Return: void
260  */
261 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
262 							int htc_htt_tx_endpoint)
263 {
264 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
265 
266 	if (!scn) {
267 		hif_err("scn or scn->hif_sc is NULL!");
268 		return;
269 	}
270 
271 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
272 }
273 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
274 
275 static const struct qwlan_hw qwlan_hw_list[] = {
276 	{
277 		.id = AR6320_REV1_VERSION,
278 		.subid = 0,
279 		.name = "QCA6174_REV1",
280 	},
281 	{
282 		.id = AR6320_REV1_1_VERSION,
283 		.subid = 0x1,
284 		.name = "QCA6174_REV1_1",
285 	},
286 	{
287 		.id = AR6320_REV1_3_VERSION,
288 		.subid = 0x2,
289 		.name = "QCA6174_REV1_3",
290 	},
291 	{
292 		.id = AR6320_REV2_1_VERSION,
293 		.subid = 0x4,
294 		.name = "QCA6174_REV2_1",
295 	},
296 	{
297 		.id = AR6320_REV2_1_VERSION,
298 		.subid = 0x5,
299 		.name = "QCA6174_REV2_2",
300 	},
301 	{
302 		.id = AR6320_REV3_VERSION,
303 		.subid = 0x6,
304 		.name = "QCA6174_REV2.3",
305 	},
306 	{
307 		.id = AR6320_REV3_VERSION,
308 		.subid = 0x8,
309 		.name = "QCA6174_REV3",
310 	},
311 	{
312 		.id = AR6320_REV3_VERSION,
313 		.subid = 0x9,
314 		.name = "QCA6174_REV3_1",
315 	},
316 	{
317 		.id = AR6320_REV3_2_VERSION,
318 		.subid = 0xA,
319 		.name = "AR6320_REV3_2_VERSION",
320 	},
321 	{
322 		.id = QCA6390_V1,
323 		.subid = 0x0,
324 		.name = "QCA6390_V1",
325 	},
326 	{
327 		.id = QCA6490_V1,
328 		.subid = 0x0,
329 		.name = "QCA6490_V1",
330 	},
331 	{
332 		.id = WCN3990_v1,
333 		.subid = 0x0,
334 		.name = "WCN3990_V1",
335 	},
336 	{
337 		.id = WCN3990_v2,
338 		.subid = 0x0,
339 		.name = "WCN3990_V2",
340 	},
341 	{
342 		.id = WCN3990_v2_1,
343 		.subid = 0x0,
344 		.name = "WCN3990_V2.1",
345 	},
346 	{
347 		.id = WCN3998,
348 		.subid = 0x0,
349 		.name = "WCN3998",
350 	},
351 	{
352 		.id = QCA9379_REV1_VERSION,
353 		.subid = 0xC,
354 		.name = "QCA9379_REV1",
355 	},
356 	{
357 		.id = QCA9379_REV1_VERSION,
358 		.subid = 0xD,
359 		.name = "QCA9379_REV1_1",
360 	},
361 	{
362 		.id = KIWI_V1,
363 		.subid = 0xE,
364 		.name = "KIWI_V1",
365 	}
366 };
367 
368 /**
369  * hif_get_hw_name(): get a human readable name for the hardware
370  * @info: Target Info
371  *
372  * Return: human readable name for the underlying wifi hardware.
373  */
374 static const char *hif_get_hw_name(struct hif_target_info *info)
375 {
376 	int i;
377 
378 	if (info->hw_name)
379 		return info->hw_name;
380 
381 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
382 		if (info->target_version == qwlan_hw_list[i].id &&
383 		    info->target_revision == qwlan_hw_list[i].subid) {
384 			return qwlan_hw_list[i].name;
385 		}
386 	}
387 
388 	info->hw_name = qdf_mem_malloc(64);
389 	if (!info->hw_name)
390 		return "Unknown Device (nomem)";
391 
392 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
393 			info->target_version);
394 	if (i < 0)
395 		return "Unknown Device (snprintf failure)";
396 	else
397 		return info->hw_name;
398 }
399 
400 /**
401  * hif_get_hw_info(): get hw version, revision and name
402  * @scn: scn
403  * @version: version
404  * @revision: revision
 * @target_name: filled with a human readable hardware name
405  *
406  * Return: n/a
407  */
408 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
409 			const char **target_name)
410 {
411 	struct hif_target_info *info = hif_get_target_info_handle(scn);
412 	struct hif_softc *sc = HIF_GET_SOFTC(scn);
413 
414 	if (sc->bus_type == QDF_BUS_TYPE_USB)
415 		hif_usb_get_hw_info(sc);
416 
417 	*version = info->target_version;
418 	*revision = info->target_revision;
419 	*target_name = hif_get_hw_name(info);
420 }
421 
422 /**
423  * hif_get_dev_ba(): API to get device base address.
424  * @hif_handle: hif handle
425  *
426  * Return: device base address
429  */
430 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
431 {
432 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
433 
434 	return scn->mem;
435 }
436 qdf_export_symbol(hif_get_dev_ba);
437 
438 /**
439  * hif_get_dev_ba_ce(): API to get device ce base address.
440  * @scn: scn
441  *
442  * Return: dev mem base address for CE
443  */
444 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
445 {
446 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
447 
448 	return scn->mem_ce;
449 }
450 
451 qdf_export_symbol(hif_get_dev_ba_ce);
452 
453 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
454 /**
455  * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
456  * @scn: hif context
457  * @psoc: psoc objmgr handle
458  *
459  * Return: None
460  */
461 static inline
462 void hif_get_cfg_from_psoc(struct hif_softc *scn,
463 			   struct wlan_objmgr_psoc *psoc)
464 {
465 	if (psoc) {
466 		scn->ini_cfg.ce_status_ring_timer_threshold =
467 			cfg_get(psoc,
468 				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
469 		scn->ini_cfg.ce_status_ring_batch_count_threshold =
470 			cfg_get(psoc,
471 				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
472 	}
473 }
474 #else
475 static inline
476 void hif_get_cfg_from_psoc(struct hif_softc *scn,
477 			   struct wlan_objmgr_psoc *psoc)
478 {
479 }
480 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
481 
482 #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
483 /**
484  * hif_recovery_notifier_cb - Recovery notifier callback to log
485  *  hang event data
486  * @block: notifier block
487  * @state: state
488  * @data: notifier data
489  *
490  * Return: status
491  */
492 static
493 int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
494 			     void *data)
495 {
496 	struct qdf_notifer_data *notif_data = data;
497 	qdf_notif_block *notif_block;
498 	struct hif_softc *hif_handle;
499 	bool bus_id_invalid;
500 
501 	if (!data || !block)
502 		return -EINVAL;
503 
504 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
505 
506 	hif_handle = notif_block->priv_data;
507 	if (!hif_handle)
508 		return -EINVAL;
509 
510 	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
511 					  &notif_data->offset);
512 	if (bus_id_invalid)
513 		return NOTIFY_STOP_MASK;
514 
515 	hif_log_ce_info(hif_handle, notif_data->hang_data,
516 			&notif_data->offset);
517 
518 	return 0;
519 }
520 
521 /**
522  * hif_register_recovery_notifier - Register hif recovery notifier
523  * @hif_handle: hif handle
524  *
525  * Return: status
526  */
527 static
528 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
529 {
530 	qdf_notif_block *hif_notifier;
531 
532 	if (!hif_handle)
533 		return QDF_STATUS_E_FAILURE;
534 
535 	hif_notifier = &hif_handle->hif_recovery_notifier;
536 
537 	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
538 	hif_notifier->priv_data = hif_handle;
539 	return qdf_hang_event_register_notifier(hif_notifier);
540 }
541 
542 /**
543  * hif_unregister_recovery_notifier - Un-register hif recovery notifier
544  * @hif_handle: hif handle
545  *
546  * Return: status
547  */
548 static
549 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
550 {
551 	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
552 
553 	return qdf_hang_event_unregister_notifier(hif_notifier);
554 }
555 #else
556 static inline
557 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
558 {
559 	return QDF_STATUS_SUCCESS;
560 }
561 
562 static inline
563 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
564 {
565 	return QDF_STATUS_SUCCESS;
566 }
567 #endif
568 
569 #ifdef HIF_CPU_PERF_AFFINE_MASK
570 /**
571  * __hif_cpu_hotplug_notify() - CPU hotplug event handler
 * @context: HIF context
572  * @cpu: CPU Id of the CPU generating the event
573  * @cpu_up: true if the CPU is online
574  *
575  * Return: None
576  */
577 static void __hif_cpu_hotplug_notify(void *context,
578 				     uint32_t cpu, bool cpu_up)
579 {
580 	struct hif_softc *scn = context;
581 
582 	if (!scn)
583 		return;
584 	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
585 		return;
586 
587 	if (cpu_up) {
588 		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
589 		hif_debug("Setting affinity for online CPU: %d", cpu);
590 	} else {
591 		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
592 	}
593 }
594 
595 /**
596  * hif_cpu_hotplug_notify() - cpu core up/down notification handler
 * @context: HIF context
597  * @cpu: CPU generating the event
599  * @cpu_up: true if the CPU is online
600  *
601  * Return: None
602  */
603 static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
604 {
605 	struct qdf_op_sync *op_sync;
606 
607 	if (qdf_op_protect(&op_sync))
608 		return;
609 
610 	__hif_cpu_hotplug_notify(context, cpu, cpu_up);
611 
612 	qdf_op_unprotect(op_sync);
613 }
614 
615 static void hif_cpu_online_cb(void *context, uint32_t cpu)
616 {
617 	hif_cpu_hotplug_notify(context, cpu, true);
618 }
619 
620 static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
621 {
622 	hif_cpu_hotplug_notify(context, cpu, false);
623 }
624 
625 static void hif_cpuhp_register(struct hif_softc *scn)
626 {
627 	if (!scn) {
628 		hif_info_high("cannot register hotplug notifiers");
629 		return;
630 	}
631 	qdf_cpuhp_register(&scn->cpuhp_event_handle,
632 			   scn,
633 			   hif_cpu_online_cb,
634 			   hif_cpu_before_offline_cb);
635 }
636 
637 static void hif_cpuhp_unregister(struct hif_softc *scn)
638 {
639 	if (!scn) {
640 		hif_info_high("cannot unregister hotplug notifiers");
641 		return;
642 	}
643 	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
644 }
645 
646 #else
647 static void hif_cpuhp_register(struct hif_softc *scn)
648 {
649 }
650 
651 static void hif_cpuhp_unregister(struct hif_softc *scn)
652 {
653 }
654 #endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
655 
656 #ifdef HIF_DETECTION_LATENCY_ENABLE
657 
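/**
 * hif_tasklet_latency() - check the ce2 tasklet for scheduling latency
 * @scn: hif context
 * @from_timer: true when called from the detection timer to check for a
 *	stalled tasklet; false to check whether the tasklet executed late
 *
 * Return: none
 */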
658 void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
659 {
660 	qdf_time_t ce2_tasklet_sched_time =
661 		scn->latency_detect.ce2_tasklet_sched_time;
662 	qdf_time_t ce2_tasklet_exec_time =
663 		scn->latency_detect.ce2_tasklet_exec_time;
664 	qdf_time_t curr_jiffies = qdf_system_ticks();
665 	uint32_t detect_latency_threshold =
666 		scn->latency_detect.detect_latency_threshold;
667 	int cpu_id = qdf_get_cpu();
668 
669 	/* Two kinds of checks here.
670 	 * from_timer==true:  check whether the tasklet has stalled
671 	 * from_timer==false: check whether the tasklet executed late
672 	 */
673 
674 	if ((from_timer ?
675 	    qdf_system_time_after(ce2_tasklet_sched_time,
676 				  ce2_tasklet_exec_time) :
677 	    qdf_system_time_after(ce2_tasklet_exec_time,
678 				  ce2_tasklet_sched_time)) &&
679 	    qdf_system_time_after(
680 		curr_jiffies,
681 		ce2_tasklet_sched_time +
682 		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
683 		hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu,ce2_tasklet_exec_time %lu, detect_latency_threshold %ums detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
684 			from_timer, curr_jiffies, ce2_tasklet_sched_time,
685 			ce2_tasklet_exec_time, detect_latency_threshold,
686 			scn->latency_detect.detect_latency_timer_timeout,
687 			cpu_id, (void *)_RET_IP_);
688 		goto latency;
689 	}
690 	return;
691 
692 latency:
693 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
694 }
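
/*
 * Worked example of the condition above (illustrative numbers,
 * assuming HZ=250 so a 1000 ms threshold is 250 jiffies): with
 * ce2_tasklet_sched_time=1000, ce2_tasklet_exec_time=990 and
 * curr_jiffies=1400, the timer path (from_timer==true) sees the tasklet
 * scheduled after its last execution and more than 250 jiffies ago
 * (1400 > 1000 + 250), so it declares a stall and triggers recovery.
 */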
695 
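/**
 * hif_credit_latency() - check for credit report latency
 * @scn: hif context
 * @from_timer: true when called from the detection timer to check for a
 *	stalled credit report; false to check whether the report came late
 *
 * Return: none
 */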
696 void hif_credit_latency(struct hif_softc *scn, bool from_timer)
697 {
698 	qdf_time_t credit_request_time =
699 		scn->latency_detect.credit_request_time;
700 	qdf_time_t credit_report_time =
701 		scn->latency_detect.credit_report_time;
702 	qdf_time_t curr_jiffies = qdf_system_ticks();
703 	uint32_t detect_latency_threshold =
704 		scn->latency_detect.detect_latency_threshold;
705 	int cpu_id = qdf_get_cpu();
706 
707 	/* Two kinds of checks here.
708 	 * from_timer==true:  check whether the credit report has stalled
709 	 * from_timer==false: check whether the credit report came late
710 	 */
711 
712 	if ((from_timer ?
713 	    qdf_system_time_after(credit_request_time,
714 				  credit_report_time) :
715 	    qdf_system_time_after(credit_report_time,
716 				  credit_request_time)) &&
717 	    qdf_system_time_after(
718 		curr_jiffies,
719 		credit_request_time +
720 		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
721 		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu,credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
722 			from_timer, curr_jiffies, credit_request_time,
723 			credit_report_time, detect_latency_threshold,
724 			scn->latency_detect.detect_latency_timer_timeout,
725 			cpu_id, (void *)_RET_IP_);
726 		goto latency;
727 	}
728 	return;
729 
730 latency:
731 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
732 }
733 
734 /**
735  * hif_check_detection_latency(): check for tasklet/credit latency
736  *
737  * @scn: hif context
738  * @from_timer: if called from timer handler
739  * @bitmap_type: indicate if check tasklet or credit
740  *
741  * Return: none
742  */
743 void hif_check_detection_latency(struct hif_softc *scn,
744 				 bool from_timer,
745 				 uint32_t bitmap_type)
746 {
747 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
748 		return;
749 
750 	if (!scn->latency_detect.enable_detection)
751 		return;
752 
753 	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
754 		hif_tasklet_latency(scn, from_timer);
755 
756 	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
757 		hif_credit_latency(scn, from_timer);
758 }
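
/*
 * Usage sketch (illustrative): a completion path that has just recorded
 * its execution time checks only its own bit, while the detection timer
 * handler below checks both bits.
 *
 *	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_TASKLET));
 */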
759 
760 static void hif_latency_detect_timeout_handler(void *arg)
761 {
762 	struct hif_softc *scn = (struct hif_softc *)arg;
763 	int next_cpu;
764 
765 	hif_check_detection_latency(scn, true,
766 				    BIT(HIF_DETECT_TASKLET) |
767 				    BIT(HIF_DETECT_CREDIT));
768 
769 	/* The timer needs to start on a different cpu so it can detect a
770 	 * stalled tasklet schedule. There is still a chance that, after the
771 	 * timer has been started, the irq/tasklet lands on the same cpu;
772 	 * the tasklet then executes before the softirq timer, and if that
773 	 * tasklet stalls, the timer cannot detect it. We accept this as a
774 	 * limitation: if the tasklet stalls, another path will detect it,
775 	 * just a little later.
777 	 */
778 	next_cpu = cpumask_any_but(
779 			cpu_active_mask,
780 			scn->latency_detect.ce2_tasklet_sched_cpuid);
781 
782 	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
783 		hif_debug("start timer on local");
784 		/* it doesn't found a available cpu, start on local cpu*/
785 		qdf_timer_mod(
786 			&scn->latency_detect.detect_latency_timer,
787 			scn->latency_detect.detect_latency_timer_timeout);
788 	} else {
789 		qdf_timer_start_on(
790 			&scn->latency_detect.detect_latency_timer,
791 			scn->latency_detect.detect_latency_timer_timeout,
792 			next_cpu);
793 	}
794 }
795 
796 static void hif_latency_detect_timer_init(struct hif_softc *scn)
797 {
798 	if (!scn) {
799 		hif_info_high("scn is null");
800 		return;
801 	}
802 
803 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
804 		return;
805 
806 	scn->latency_detect.detect_latency_timer_timeout =
807 		DETECTION_TIMER_TIMEOUT;
808 	scn->latency_detect.detect_latency_threshold =
809 		DETECTION_LATENCY_THRESHOLD;
810 
811 	hif_info("timer timeout %u, latency threshold %u",
812 		 scn->latency_detect.detect_latency_timer_timeout,
813 		 scn->latency_detect.detect_latency_threshold);
814 
815 	scn->latency_detect.is_timer_started = false;
816 
817 	qdf_timer_init(NULL,
818 		       &scn->latency_detect.detect_latency_timer,
819 		       &hif_latency_detect_timeout_handler,
820 		       scn,
821 		       QDF_TIMER_TYPE_SW_SPIN);
822 }
823 
824 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
825 {
826 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
827 		return;
828 
829 	hif_info("deinit timer");
830 	qdf_timer_free(&scn->latency_detect.detect_latency_timer);
831 }
832 
833 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
834 {
835 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
836 
837 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
838 		return;
839 
840 	hif_info_rl("start timer");
841 	if (scn->latency_detect.is_timer_started) {
842 		hif_info("timer has been started");
843 		return;
844 	}
845 
846 	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
847 			scn->latency_detect.detect_latency_timer_timeout);
848 	scn->latency_detect.is_timer_started = true;
849 }
850 
851 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
852 {
853 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
854 
855 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
856 		return;
857 
858 	hif_info_rl("stop timer");
859 
860 	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
861 	scn->latency_detect.is_timer_started = false;
862 }
863 
864 void hif_latency_detect_credit_record_time(
865 	enum hif_credit_exchange_type type,
866 	struct hif_opaque_softc *hif_ctx)
867 {
868 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
869 
870 	if (!scn) {
871 		hif_err("Could not do runtime put, scn is null");
872 		return;
873 	}
874 
875 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
876 		return;
877 
878 	if (HIF_REQUEST_CREDIT == type)
879 		scn->latency_detect.credit_request_time = qdf_system_ticks();
880 	else if (HIF_PROCESS_CREDIT_REPORT == type)
881 		scn->latency_detect.credit_report_time = qdf_system_ticks();
882 
883 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
884 }
885 
886 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
887 {
888 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
889 
890 	if (!scn) {
891 		hif_err("Could not do runtime put, scn is null");
892 		return;
893 	}
894 
895 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
896 		return;
897 
898 	scn->latency_detect.enable_detection = value;
899 }
900 #else
901 static void hif_latency_detect_timer_init(struct hif_softc *scn)
902 {}
903 
904 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
905 {}
906 #endif
907 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
908 				  uint32_t mode,
909 				  enum qdf_bus_type bus_type,
910 				  struct hif_driver_state_callbacks *cbk,
911 				  struct wlan_objmgr_psoc *psoc)
912 {
913 	struct hif_softc *scn;
914 	QDF_STATUS status = QDF_STATUS_SUCCESS;
915 	int bus_context_size = hif_bus_get_context_size(bus_type);
916 
917 	if (bus_context_size == 0) {
918 		hif_err("context size 0 not allowed");
919 		return NULL;
920 	}
921 
922 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
923 	if (!scn)
924 		return GET_HIF_OPAQUE_HDL(scn);
925 
926 	scn->qdf_dev = qdf_ctx;
927 	scn->hif_con_param = mode;
928 	qdf_atomic_init(&scn->active_tasklet_cnt);
929 
930 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
931 	qdf_atomic_init(&scn->link_suspended);
932 	qdf_atomic_init(&scn->tasklet_from_intr);
933 	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
934 	qdf_mem_copy(&scn->callbacks, cbk,
935 		     sizeof(struct hif_driver_state_callbacks));
936 	scn->bus_type  = bus_type;
937 
938 	hif_pm_set_link_state(GET_HIF_OPAQUE_HDL(scn), HIF_PM_LINK_STATE_DOWN);
939 	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
940 	hif_get_cfg_from_psoc(scn, psoc);
941 
942 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
943 	status = hif_bus_open(scn, bus_type);
944 	if (status != QDF_STATUS_SUCCESS) {
945 		hif_err("hif_bus_open error = %d, bus_type = %d",
946 			status, bus_type);
947 		qdf_mem_free(scn);
948 		scn = NULL;
949 		goto out;
950 	}
951 
952 	hif_cpuhp_register(scn);
953 	hif_latency_detect_timer_init(scn);
954 
955 out:
956 	return GET_HIF_OPAQUE_HDL(scn);
957 }
958 
959 #ifdef ADRASTEA_RRI_ON_DDR
960 /**
961  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
962  * @scn: hif context
963  *
964  * Return: none
965  */
966 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
967 {
968 	if (scn->vaddr_rri_on_ddr)
969 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
970 					(CE_COUNT * sizeof(uint32_t)),
971 					scn->vaddr_rri_on_ddr,
972 					scn->paddr_rri_on_ddr, 0);
973 	scn->vaddr_rri_on_ddr = NULL;
974 }
975 #endif
976 
977 /**
978  * hif_close(): hif_close
979  * @hif_ctx: hif_ctx
980  *
981  * Return: n/a
982  */
983 void hif_close(struct hif_opaque_softc *hif_ctx)
984 {
985 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
986 
987 	if (!scn) {
988 		hif_err("hif_opaque_softc is NULL");
989 		return;
990 	}
991 
992 	hif_latency_detect_timer_deinit(scn);
993 
994 	if (scn->athdiag_procfs_inited) {
995 		athdiag_procfs_remove();
996 		scn->athdiag_procfs_inited = false;
997 	}
998 
999 	if (scn->target_info.hw_name) {
1000 		char *hw_name = scn->target_info.hw_name;
1001 
1002 		scn->target_info.hw_name = "ErrUnloading";
1003 		qdf_mem_free(hw_name);
1004 	}
1005 
1006 	hif_uninit_rri_on_ddr(scn);
1007 	hif_cleanup_static_buf_to_target(scn);
1008 	hif_cpuhp_unregister(scn);
1009 
1010 	hif_bus_close(scn);
1011 
1012 	qdf_mem_free(scn);
1013 }
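
/*
 * Lifecycle sketch (illustrative; assumes the HIF_ENABLE_TYPE_PROBE and
 * HIF_DISABLE_TYPE_REMOVE enumerators from hif.h): a bus driver pairs
 * hif_open()/hif_close() around hif_enable()/hif_disable(), defined
 * further below.
 *
 *	hif_hdl = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, cbk, psoc);
 *	status = hif_enable(hif_hdl, dev, bdev, bid, QDF_BUS_TYPE_PCI,
 *			    HIF_ENABLE_TYPE_PROBE);
 *	...
 *	hif_disable(hif_hdl, HIF_DISABLE_TYPE_REMOVE);
 *	hif_close(hif_hdl);
 */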
1014 
1015 /**
1016  * hif_get_num_active_grp_tasklets() - get the number of active
1017  *		datapath group tasklets pending to be completed.
1018  * @scn: HIF context
1019  *
1020  * Returns: the number of datapath group tasklets which are active
1021  */
1022 static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
1023 {
1024 	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
1025 }
1026 
1027 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1028 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1029 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1030 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1031 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1032 	defined(QCA_WIFI_QCA9574))
1033 /**
1034  * hif_get_num_pending_work() - get the number of entries in
1035  *		the workqueue pending to be completed.
1036  * @scn: HIF context
1037  *
1038  * Returns: the number of tasklets which are active
1039  */
1040 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1041 {
1042 	return hal_get_reg_write_pending_work(scn->hal_soc);
1043 }
1044 #else
1045 
1046 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1047 {
1048 	return 0;
1049 }
1050 #endif
1051 
1052 QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
1053 {
1054 	uint32_t task_drain_wait_cnt = 0;
1055 	int tasklet = 0, grp_tasklet = 0, work = 0;
1056 
1057 	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
1058 	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1059 	       (work = hif_get_num_pending_work(scn))) {
1060 		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1061 			hif_err("pending tasklets %d grp tasklets %d work %d",
1062 				tasklet, grp_tasklet, work);
1063 			return QDF_STATUS_E_FAULT;
1064 		}
1065 		hif_info("waiting for tasklets %d grp tasklets %d work %d",
1066 			 tasklet, grp_tasklet, work);
1067 		msleep(10);
1068 	}
1069 
1070 	return QDF_STATUS_SUCCESS;
1071 }
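
/*
 * Usage sketch (illustrative): suspend/stop paths call this before
 * letting the link go down and abort if datapath work will not drain.
 *
 *	if (QDF_IS_STATUS_ERROR(hif_try_complete_tasks(scn)))
 *		return -EBUSY;
 */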
1072 
1073 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1074 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1075 {
1076 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1077 	uint32_t work_drain_wait_cnt = 0;
1078 	uint32_t wait_cnt = 0;
1079 	int work = 0;
1080 
1081 	qdf_atomic_set(&scn->dp_ep_vote_access,
1082 		       HIF_EP_VOTE_ACCESS_DISABLE);
1083 	qdf_atomic_set(&scn->ep_vote_access,
1084 		       HIF_EP_VOTE_ACCESS_DISABLE);
1085 
1086 	while ((work = hif_get_num_pending_work(scn))) {
1087 		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1088 			qdf_atomic_set(&scn->dp_ep_vote_access,
1089 				       HIF_EP_VOTE_ACCESS_ENABLE);
1090 			qdf_atomic_set(&scn->ep_vote_access,
1091 				       HIF_EP_VOTE_ACCESS_ENABLE);
1092 			hif_err("timeout wait for pending work %d ", work);
1093 			return QDF_STATUS_E_FAULT;
1094 		}
1095 		qdf_sleep(10);
1096 	}
1097 
1098 	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
1099 		return QDF_STATUS_SUCCESS;
1100 
1101 	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1102 		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
1103 			hif_err("Release EP vote is not proceed by Fw");
1104 			return QDF_STATUS_E_FAULT;
1105 		}
1106 		qdf_sleep(5);
1107 	}
1108 
1109 	return QDF_STATUS_SUCCESS;
1110 }
1111 
1112 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1113 {
1114 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1115 	uint8_t vote_access;
1116 
1117 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1118 
1119 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1120 		hif_info("EP vote changed from:%u to intermediate state",
1121 			 vote_access);
1122 
1123 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1124 		QDF_BUG(0);
1125 
1126 	qdf_atomic_set(&scn->ep_vote_access,
1127 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1128 }
1129 
1130 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1131 {
1132 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1133 
1134 	qdf_atomic_set(&scn->dp_ep_vote_access,
1135 		       HIF_EP_VOTE_ACCESS_ENABLE);
1136 	qdf_atomic_set(&scn->ep_vote_access,
1137 		       HIF_EP_VOTE_ACCESS_ENABLE);
1138 }
1139 
1140 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1141 			    uint8_t type, uint8_t access)
1142 {
1143 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1144 
1145 	if (type == HIF_EP_VOTE_DP_ACCESS)
1146 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1147 	else
1148 		qdf_atomic_set(&scn->ep_vote_access, access);
1149 }
1150 
1151 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1152 			       uint8_t type)
1153 {
1154 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1155 
1156 	if (type == HIF_EP_VOTE_DP_ACCESS)
1157 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1158 	else
1159 		return qdf_atomic_read(&scn->ep_vote_access);
1160 }
1161 #endif
1162 
1163 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1164 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1165 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1166 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1167 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1168 	defined(QCA_WIFI_QCA9574))
1169 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1170 {
1171 	if (ce_srng_based(scn)) {
1172 		scn->hal_soc = hal_attach(
1173 					hif_softc_to_hif_opaque_softc(scn),
1174 					scn->qdf_dev);
1175 		if (!scn->hal_soc)
1176 			return QDF_STATUS_E_FAILURE;
1177 	}
1178 
1179 	return QDF_STATUS_SUCCESS;
1180 }
1181 
1182 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1183 {
1184 	if (ce_srng_based(scn)) {
1185 		hal_detach(scn->hal_soc);
1186 		scn->hal_soc = NULL;
1187 	}
1188 
1189 	return QDF_STATUS_SUCCESS;
1190 }
1191 #else
1192 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1193 {
1194 	return QDF_STATUS_SUCCESS;
1195 }
1196 
1197 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1198 {
1199 	return QDF_STATUS_SUCCESS;
1200 }
1201 #endif
1202 
1203 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1204 {
1205 	int ret;
1206 
1207 	switch (bus_type) {
1208 	case QDF_BUS_TYPE_IPCI:
1209 		ret = qdf_set_dma_coherent_mask(dev,
1210 						DMA_COHERENT_MASK_DEFAULT);
1211 		if (ret) {
1212 			hif_err("Failed to set dma mask error = %d", ret);
1213 			return ret;
1214 		}
1215 
1216 		break;
1217 	default:
1218 		/* Follow the existing sequence for other targets */
1219 		break;
1220 	}
1221 
1222 	return 0;
1223 }
1224 
1225 /**
1226  * hif_enable(): hif_enable
1227  * @hif_ctx: hif_ctx
1228  * @dev: dev
1229  * @bdev: bus dev
1230  * @bid: bus ID
1231  * @bus_type: bus type
1232  * @type: enable type
1233  *
1234  * Return: QDF_STATUS
1235  */
1236 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1237 					  void *bdev,
1238 					  const struct hif_bus_id *bid,
1239 					  enum qdf_bus_type bus_type,
1240 					  enum hif_enable_type type)
1241 {
1242 	QDF_STATUS status;
1243 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1244 
1245 	if (!scn) {
1246 		hif_err("hif_ctx = NULL");
1247 		return QDF_STATUS_E_NULL_VALUE;
1248 	}
1249 
1250 	status = hif_enable_bus(scn, dev, bdev, bid, type);
1251 	if (status != QDF_STATUS_SUCCESS) {
1252 		hif_err("hif_enable_bus error = %d", status);
1253 		return status;
1254 	}
1255 
1256 	hif_pm_set_link_state(GET_HIF_OPAQUE_HDL(scn), HIF_PM_LINK_STATE_UP);
1257 	status = hif_hal_attach(scn);
1258 	if (status != QDF_STATUS_SUCCESS) {
1259 		hif_err("hal attach failed");
1260 		goto disable_bus;
1261 	}
1262 
1263 	if (hif_bus_configure(scn)) {
1264 		hif_err("Target probe failed");
1265 		status = QDF_STATUS_E_FAILURE;
1266 		goto hal_detach;
1267 	}
1268 
1269 	hif_ut_suspend_init(scn);
1270 	hif_register_recovery_notifier(scn);
1271 	hif_latency_detect_timer_start(hif_ctx);
1272 
1273 	/*
1274 	 * Flag to avoid potential unallocated memory access from the MSI
1275 	 * interrupt handler, which could get scheduled as soon as MSI is
1276 	 * enabled; i.e. it takes care of the race where MSI is enabled
1277 	 * before the memory used by the interrupt handlers is allocated.
1279 	 */
1280 
1281 	scn->hif_init_done = true;
1282 
1283 	hif_debug("OK");
1284 
1285 	return QDF_STATUS_SUCCESS;
1286 
1287 hal_detach:
1288 	hif_hal_detach(scn);
1289 disable_bus:
1290 	hif_disable_bus(scn);
1291 	return status;
1292 }
1293 
1294 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
1295 {
1296 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1297 
1298 	if (!scn)
1299 		return;
1300 
1301 	hif_set_enable_detection(hif_ctx, false);
1302 	hif_latency_detect_timer_stop(hif_ctx);
1303 
1304 	hif_unregister_recovery_notifier(scn);
1305 
1306 	hif_nointrs(scn);
1307 	if (scn->hif_init_done == false)
1308 		hif_shutdown_device(hif_ctx);
1309 	else
1310 		hif_stop(hif_ctx);
1311 
1312 	hif_hal_detach(scn);
1313 
1314 	hif_pm_set_link_state(hif_ctx, HIF_PM_LINK_STATE_DOWN);
1315 	hif_disable_bus(scn);
1316 
1317 	hif_wlan_disable(scn);
1318 
1319 	scn->notice_send = false;
1320 
1321 	hif_debug("X");
1322 }
1323 
1324 #ifdef CE_TASKLET_DEBUG_ENABLE
1325 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
1326 {
1327 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1328 
1329 	if (!scn)
1330 		return;
1331 
1332 	scn->ce_latency_stats = val;
1333 }
1334 #endif
1335 
1336 void hif_display_stats(struct hif_opaque_softc *hif_ctx)
1337 {
1338 	hif_display_bus_stats(hif_ctx);
1339 }
1340 
1341 qdf_export_symbol(hif_display_stats);
1342 
1343 void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
1344 {
1345 	hif_clear_bus_stats(hif_ctx);
1346 }
1347 
1348 /**
1349  * hif_crash_shutdown_dump_bus_register() - dump bus registers
1350  * @hif_ctx: hif_ctx
1351  *
1352  * Return: n/a
1353  */
1354 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
1355 
1356 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
1357 {
1358 	struct hif_opaque_softc *scn = hif_ctx;
1359 
1360 	if (hif_check_soc_status(scn))
1361 		return;
1362 
1363 	if (hif_dump_registers(scn))
1364 		hif_err("Failed to dump bus registers!");
1365 }
1366 
1367 /**
1368  * hif_crash_shutdown(): hif_crash_shutdown
1369  * @hif_ctx: hif_ctx
1370  *
1371  * This function is called by the platform driver to dump CE registers
1373  *
1374  * Return: n/a
1375  */
1376 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
1377 {
1378 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1379 
1380 	if (!hif_ctx)
1381 		return;
1382 
1383 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
1384 		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
1385 		return;
1386 	}
1387 
1388 	if (TARGET_STATUS_RESET == scn->target_status) {
1389 		hif_warn("Target is already asserted, ignore!");
1390 		return;
1391 	}
1392 
1393 	if (hif_is_load_or_unload_in_progress(scn)) {
1394 		hif_err("Load/unload is in progress, ignore!");
1395 		return;
1396 	}
1397 
1398 	hif_crash_shutdown_dump_bus_register(hif_ctx);
1399 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
1400 
1401 	if (ol_copy_ramdump(hif_ctx))
1402 		goto out;
1403 
1404 	hif_info("RAM dump collecting completed!");
1405 
1406 out:
1407 	return;
1408 }
1409 #else
1410 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
1411 {
1412 	hif_debug("Collecting target RAM dump disabled");
1413 }
1414 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
1415 
1416 #ifdef QCA_WIFI_3_0
1417 /**
1418  * hif_check_fw_reg(): check firmware register
1419  * @scn: scn
1421  *
1422  * Return: int
1423  */
1424 int hif_check_fw_reg(struct hif_opaque_softc *scn)
1425 {
1426 	return 0;
1427 }
1428 #endif
1429 
1430 /**
1431  * hif_read_phy_mem_base(): hif_read_phy_mem_base
1432  * @scn: scn
1433  * @phy_mem_base: physical mem base
1434  *
1435  * Return: n/a
1436  */
1437 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
1438 {
1439 	*phy_mem_base = scn->mem_pa;
1440 }
1441 qdf_export_symbol(hif_read_phy_mem_base);
1442 
1443 /**
1444  * hif_get_device_type(): hif_get_device_type
1445  * @device_id: device_id
1446  * @revision_id: revision_id
1447  * @hif_type: returned hif_type
1448  * @target_type: returned target_type
1449  *
1450  * Return: int
1451  */
1452 int hif_get_device_type(uint32_t device_id,
1453 			uint32_t revision_id,
1454 			uint32_t *hif_type, uint32_t *target_type)
1455 {
1456 	int ret = 0;
1457 
1458 	switch (device_id) {
1459 	case ADRASTEA_DEVICE_ID_P2_E12:
1460 
1461 		*hif_type = HIF_TYPE_ADRASTEA;
1462 		*target_type = TARGET_TYPE_ADRASTEA;
1463 		break;
1464 
1465 	case AR9888_DEVICE_ID:
1466 		*hif_type = HIF_TYPE_AR9888;
1467 		*target_type = TARGET_TYPE_AR9888;
1468 		break;
1469 
1470 	case AR6320_DEVICE_ID:
1471 		switch (revision_id) {
1472 		case AR6320_FW_1_1:
1473 		case AR6320_FW_1_3:
1474 			*hif_type = HIF_TYPE_AR6320;
1475 			*target_type = TARGET_TYPE_AR6320;
1476 			break;
1477 
1478 		case AR6320_FW_2_0:
1479 		case AR6320_FW_3_0:
1480 		case AR6320_FW_3_2:
1481 			*hif_type = HIF_TYPE_AR6320V2;
1482 			*target_type = TARGET_TYPE_AR6320V2;
1483 			break;
1484 
1485 		default:
1486 			hif_err("dev_id = 0x%x, rev_id = 0x%x",
1487 				device_id, revision_id);
1488 			ret = -ENODEV;
1489 			goto end;
1490 		}
1491 		break;
1492 
1493 	case AR9887_DEVICE_ID:
1494 		*hif_type = HIF_TYPE_AR9888;
1495 		*target_type = TARGET_TYPE_AR9888;
1496 		hif_info(" *********** AR9887 **************");
1497 		break;
1498 
1499 	case QCA9984_DEVICE_ID:
1500 		*hif_type = HIF_TYPE_QCA9984;
1501 		*target_type = TARGET_TYPE_QCA9984;
1502 		hif_info(" *********** QCA9984 *************");
1503 		break;
1504 
1505 	case QCA9888_DEVICE_ID:
1506 		*hif_type = HIF_TYPE_QCA9888;
1507 		*target_type = TARGET_TYPE_QCA9888;
1508 		hif_info(" *********** QCA9888 *************");
1509 		break;
1510 
1511 	case AR900B_DEVICE_ID:
1512 		*hif_type = HIF_TYPE_AR900B;
1513 		*target_type = TARGET_TYPE_AR900B;
1514 		hif_info(" *********** AR900B *************");
1515 		break;
1516 
1517 	case QCA8074_DEVICE_ID:
1518 		*hif_type = HIF_TYPE_QCA8074;
1519 		*target_type = TARGET_TYPE_QCA8074;
1520 		hif_info(" *********** QCA8074  *************");
1521 		break;
1522 
1523 	case QCA6290_EMULATION_DEVICE_ID:
1524 	case QCA6290_DEVICE_ID:
1525 		*hif_type = HIF_TYPE_QCA6290;
1526 		*target_type = TARGET_TYPE_QCA6290;
1527 		hif_info(" *********** QCA6290EMU *************");
1528 		break;
1529 
1530 	case QCN9000_DEVICE_ID:
1531 		*hif_type = HIF_TYPE_QCN9000;
1532 		*target_type = TARGET_TYPE_QCN9000;
1533 		hif_info(" *********** QCN9000 *************");
1534 		break;
1535 
1536 	case QCN9224_DEVICE_ID:
1537 		*hif_type = HIF_TYPE_QCN9224;
1538 		*target_type = TARGET_TYPE_QCN9224;
1539 		hif_info(" *********** QCN9224 *************");
1540 		break;
1541 
1542 	case QCN6122_DEVICE_ID:
1543 		*hif_type = HIF_TYPE_QCN6122;
1544 		*target_type = TARGET_TYPE_QCN6122;
1545 		hif_info(" *********** QCN6122 *************");
1546 		break;
1547 
1548 	case QCN7605_DEVICE_ID:
1549 	case QCN7605_COMPOSITE:
1550 	case QCN7605_STANDALONE:
1551 	case QCN7605_STANDALONE_V2:
1552 	case QCN7605_COMPOSITE_V2:
1553 		*hif_type = HIF_TYPE_QCN7605;
1554 		*target_type = TARGET_TYPE_QCN7605;
1555 		hif_info(" *********** QCN7605 *************");
1556 		break;
1557 
1558 	case QCA6390_DEVICE_ID:
1559 	case QCA6390_EMULATION_DEVICE_ID:
1560 		*hif_type = HIF_TYPE_QCA6390;
1561 		*target_type = TARGET_TYPE_QCA6390;
1562 		hif_info(" *********** QCA6390 *************");
1563 		break;
1564 
1565 	case QCA6490_DEVICE_ID:
1566 	case QCA6490_EMULATION_DEVICE_ID:
1567 		*hif_type = HIF_TYPE_QCA6490;
1568 		*target_type = TARGET_TYPE_QCA6490;
1569 		hif_info(" *********** QCA6490 *************");
1570 		break;
1571 
1572 	case QCA6750_DEVICE_ID:
1573 	case QCA6750_EMULATION_DEVICE_ID:
1574 		*hif_type = HIF_TYPE_QCA6750;
1575 		*target_type = TARGET_TYPE_QCA6750;
1576 		hif_info(" *********** QCA6750 *************");
1577 		break;
1578 
1579 	case KIWI_DEVICE_ID:
1580 		*hif_type = HIF_TYPE_KIWI;
1581 		*target_type = TARGET_TYPE_KIWI;
1582 		hif_info(" *********** KIWI *************");
1583 		break;
1584 
1585 	case QCA8074V2_DEVICE_ID:
1586 		*hif_type = HIF_TYPE_QCA8074V2;
1587 		*target_type = TARGET_TYPE_QCA8074V2;
1588 		hif_info(" *********** QCA8074V2 *************");
1589 		break;
1590 
1591 	case QCA6018_DEVICE_ID:
1592 	case RUMIM2M_DEVICE_ID_NODE0:
1593 	case RUMIM2M_DEVICE_ID_NODE1:
1594 	case RUMIM2M_DEVICE_ID_NODE2:
1595 	case RUMIM2M_DEVICE_ID_NODE3:
1596 	case RUMIM2M_DEVICE_ID_NODE4:
1597 	case RUMIM2M_DEVICE_ID_NODE5:
1598 		*hif_type = HIF_TYPE_QCA6018;
1599 		*target_type = TARGET_TYPE_QCA6018;
1600 		hif_info(" *********** QCA6018 *************");
1601 		break;
1602 
1603 	case QCA5018_DEVICE_ID:
1604 		*hif_type = HIF_TYPE_QCA5018;
1605 		*target_type = TARGET_TYPE_QCA5018;
1606 		hif_info(" *********** qca5018 *************");
1607 		break;
1608 
1609 	case QCA9574_DEVICE_ID:
1610 		*hif_type = HIF_TYPE_QCA9574;
1611 		*target_type = TARGET_TYPE_QCA9574;
1612 		hif_info(" *********** QCA9574 *************");
1613 		break;
1614 
1615 	default:
1616 		hif_err("Unsupported device ID = 0x%x!", device_id);
1617 		ret = -ENODEV;
1618 		break;
1619 	}
1620 
1621 	if (*target_type == TARGET_TYPE_UNKNOWN) {
1622 		hif_err("Unsupported target_type!");
1623 		ret = -ENODEV;
1624 	}
1625 end:
1626 	return ret;
1627 }
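
/*
 * Usage sketch (illustrative): a bus probe translates the PCI IDs it
 * discovered into HIF and target types before attaching.
 *
 *	uint32_t hif_type, target_type;
 *
 *	if (hif_get_device_type(AR6320_DEVICE_ID, AR6320_FW_3_0,
 *				&hif_type, &target_type))
 *		return -ENODEV;
 */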
1628 
1629 /**
1630  * hif_get_bus_type() - return the bus type
1631  *
1632  * Return: enum qdf_bus_type
1633  */
1634 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
1635 {
1636 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
1637 
1638 	return scn->bus_type;
1639 }
1640 
1641 /*
1642  * Target info and ini parameters are global to the driver.
1643  * Hence these structures are exposed to all the modules in
1644  * the driver and they don't need to maintain multiple copies
1645  * of the same info; instead, get the handle from hif and
1646  * modify them in hif.
1647  */
1648 
1649 /**
1650  * hif_get_ini_handle() - API to get hif_config_param handle
1651  * @hif_ctx: HIF Context
1652  *
1653  * Return: pointer to hif_config_info
1654  */
1655 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
1656 {
1657 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1658 
1659 	return &sc->hif_config;
1660 }
1661 
1662 /**
1663  * hif_get_target_info_handle() - API to get hif_target_info handle
1664  * @hif_ctx: HIF context
1665  *
1666  * Return: Pointer to hif_target_info
1667  */
1668 struct hif_target_info *hif_get_target_info_handle(
1669 					struct hif_opaque_softc *hif_ctx)
1670 {
1671 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1672 
1673 	return &sc->target_info;
1674 
1675 }
1676 qdf_export_symbol(hif_get_target_info_handle);
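
/*
 * Usage sketch (illustrative): as the comment above notes, modules read
 * target data through the shared handle instead of caching a copy.
 *
 *	struct hif_target_info *tgt_info =
 *				hif_get_target_info_handle(hif_ctx);
 *
 *	hif_info("version 0x%x", tgt_info->target_version);
 */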
1677 
1678 #ifdef RECEIVE_OFFLOAD
1679 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1680 				 void (offld_flush_handler)(void *))
1681 {
1682 	if (hif_napi_enabled(scn, -1))
1683 		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
1684 	else
1685 		hif_err("NAPI not enabled");
1686 }
1687 qdf_export_symbol(hif_offld_flush_cb_register);
1688 
1689 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
1690 {
1691 	if (hif_napi_enabled(scn, -1))
1692 		hif_napi_rx_offld_flush_cb_deregister(scn);
1693 	else
1694 		hif_err("NAPI not enabled");
1695 }
1696 qdf_export_symbol(hif_offld_flush_cb_deregister);
1697 
1698 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
1699 {
1700 	if (hif_napi_enabled(hif_hdl, -1))
1701 		return NAPI_PIPE2ID(ctx_id);
1702 	else
1703 		return ctx_id;
1704 }
1705 #else /* RECEIVE_OFFLOAD */
1706 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
1707 {
1708 	return 0;
1709 }
1710 qdf_export_symbol(hif_get_rx_ctx_id);
1711 #endif /* RECEIVE_OFFLOAD */
1712 
1713 #if defined(FEATURE_LRO)
1714 
1715 /**
1716  * hif_get_lro_info - Returns LRO instance for instance ID
1717  * @ctx_id: LRO instance ID
1718  * @hif_hdl: HIF Context
1719  *
1720  * Return: Pointer to LRO instance.
1721  */
1722 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
1723 {
1724 	void *data;
1725 
1726 	if (hif_napi_enabled(hif_hdl, -1))
1727 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
1728 	else
1729 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
1730 
1731 	return data;
1732 }
1733 #endif
1734 
1735 /**
1736  * hif_get_target_status - API to get target status
1737  * @hif_ctx: HIF Context
1738  *
1739  * Return: enum hif_target_status
1740  */
1741 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
1742 {
1743 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1744 
1745 	return scn->target_status;
1746 }
1747 qdf_export_symbol(hif_get_target_status);
1748 
1749 /**
1750  * hif_set_target_status() - API to set target status
1751  * @hif_ctx: HIF Context
1752  * @status: Target Status
1753  *
1754  * Return: void
1755  */
1756 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1757 			   hif_target_status status)
1758 {
1759 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1760 
1761 	scn->target_status = status;
1762 }
1763 
1764 /**
1765  * hif_init_ini_config() - API to initialize HIF configuration parameters
1766  * @hif_ctx: HIF Context
1767  * @cfg: HIF Configuration
1768  *
1769  * Return: void
1770  */
1771 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1772 			 struct hif_config_info *cfg)
1773 {
1774 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1775 
1776 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1777 }
1778 
1779 /**
1780  * hif_get_conparam() - API to get driver mode in HIF
1781  * @scn: HIF Context
1782  *
1783  * Return: driver mode of operation
1784  */
1785 uint32_t hif_get_conparam(struct hif_softc *scn)
1786 {
1787 	if (!scn)
1788 		return 0;
1789 
1790 	return scn->hif_con_param;
1791 }
1792 
1793 /**
1794  * hif_get_callbacks_handle() - API to get callbacks Handle
1795  * @scn: HIF Context
1796  *
1797  * Return: pointer to HIF Callbacks
1798  */
1799 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
1800 							struct hif_softc *scn)
1801 {
1802 	return &scn->callbacks;
1803 }
1804 
1805 /**
1806  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1807  * @scn: HIF Context
1808  *
1809  * Return: True/False
1810  */
1811 bool hif_is_driver_unloading(struct hif_softc *scn)
1812 {
1813 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1814 
1815 	if (cbk && cbk->is_driver_unloading)
1816 		return cbk->is_driver_unloading(cbk->context);
1817 
1818 	return false;
1819 }
1820 
1821 /**
1822  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1823  * load/unload in progress
1824  * @scn: HIF Context
1825  *
1826  * Return: True/False
1827  */
1828 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1829 {
1830 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1831 
1832 	if (cbk && cbk->is_load_unload_in_progress)
1833 		return cbk->is_load_unload_in_progress(cbk->context);
1834 
1835 	return false;
1836 }
1837 
1838 /**
1839  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1840  * progress
1841  * @scn: HIF Context
1842  *
1843  * Return: True/False
1844  */
1845 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1846 {
1847 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1848 
1849 	if (cbk && cbk->is_recovery_in_progress)
1850 		return cbk->is_recovery_in_progress(cbk->context);
1851 
1852 	return false;
1853 }
1854 
1855 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
1856     defined(HIF_IPCI)
1857 
1858 /**
1859  * hif_update_pipe_callback() - API to register pipe specific callbacks
1860  * @osc: Opaque softc
1861  * @pipeid: pipe id
1862  * @callbacks: callbacks to register
1863  *
1864  * Return: void
1865  */
1866 
1867 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1868 					u_int8_t pipeid,
1869 					struct hif_msg_callbacks *callbacks)
1870 {
1871 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
1872 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1873 	struct HIF_CE_pipe_info *pipe_info;
1874 
1875 	QDF_BUG(pipeid < CE_COUNT_MAX);
1876 
1877 	hif_debug("pipeid: %d", pipeid);
1878 
1879 	pipe_info = &hif_state->pipe_info[pipeid];
1880 
1881 	qdf_mem_copy(&pipe_info->pipe_callbacks,
1882 			callbacks, sizeof(pipe_info->pipe_callbacks));
1883 }
1884 qdf_export_symbol(hif_update_pipe_callback);
1885 
1886 /**
1887  * hif_is_target_ready() - API to query if target is in ready state
1889  * @scn: HIF Context
1890  *
1891  * Return: True/False
1892  */
1893 bool hif_is_target_ready(struct hif_softc *scn)
1894 {
1895 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1896 
1897 	if (cbk && cbk->is_target_ready)
1898 		return cbk->is_target_ready(cbk->context);
1899 	/*
1900 	 * if callback is not registered then there is no way to determine
1901 	 * if target is ready. In such a case, return true to indicate that
1902 	 * target is ready.
1903 	 */
1904 	return true;
1905 }
1906 qdf_export_symbol(hif_is_target_ready);
1907 
1908 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
1909 {
1910 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1911 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1912 
1913 	if (cbk && cbk->get_bandwidth_level)
1914 		return cbk->get_bandwidth_level(cbk->context);
1915 
1916 	return 0;
1917 }
1918 
1919 qdf_export_symbol(hif_get_bandwidth_level);
1920 
1921 #ifdef DP_MEM_PRE_ALLOC
1922 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
1923 					 qdf_size_t size,
1924 					 qdf_dma_addr_t *paddr,
1925 					 uint32_t ring_type,
1926 					 uint8_t *is_mem_prealloc)
1927 {
1928 	void *vaddr = NULL;
1929 	struct hif_driver_state_callbacks *cbk =
1930 				hif_get_callbacks_handle(scn);
1931 
1932 	*is_mem_prealloc = false;
1933 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
1934 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
1935 								   paddr,
1936 								   ring_type);
1937 		if (vaddr) {
1938 			*is_mem_prealloc = true;
1939 			goto end;
1940 		}
1941 	}
1942 
1943 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
1944 					 scn->qdf_dev->dev,
1945 					 size,
1946 					 paddr);
1947 end:
1948 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
1949 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
1950 		(void *)*paddr, (int)size, ring_type);
1951 
1952 	return vaddr;
1953 }
1954 
1955 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
1956 				       qdf_size_t size,
1957 				       void *vaddr,
1958 				       qdf_dma_addr_t paddr,
1959 				       qdf_dma_context_t memctx,
1960 				       uint8_t is_mem_prealloc)
1961 {
1962 	struct hif_driver_state_callbacks *cbk =
1963 				hif_get_callbacks_handle(scn);
1964 
1965 	if (is_mem_prealloc) {
1966 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
1967 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
1968 		} else {
1969 			dp_warn("dp_prealloc_put_consistent_unligned NULL");
1970 			QDF_BUG(0);
1971 		}
1972 	} else {
1973 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1974 					size, vaddr, paddr, memctx);
1975 	}
1976 }
1977 #endif
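
/*
 * Usage sketch (illustrative; 0 stands in for the caller's memctx): the
 * is_mem_prealloc flag returned at allocation time must be carried to
 * the free so pre-allocated memory goes back to the pool rather than to
 * qdf_mem_free_consistent().
 *
 *	uint8_t is_prealloc;
 *	void *va = hif_mem_alloc_consistent_unaligned(scn, size, &pa,
 *						      ring_type,
 *						      &is_prealloc);
 *	...
 *	hif_mem_free_consistent_unaligned(scn, size, va, pa, 0,
 *					  is_prealloc);
 */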
1978 
1979 /**
1980  * hif_batch_send() - API to access hif specific function
1981  * ce_batch_send.
1982  * @osc: HIF Context
1983  * @msdu : list of msdus to be sent
1984  * @transfer_id : transfer id
1985  * @len : donwloaded length
1986  *
1987  * Return: list of msds not sent
1988  */
1989 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1990 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
1991 {
1992 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1993 
1994 	if (!ce_tx_hdl)
1995 		return NULL;
1996 
1997 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1998 			len, sendhead);
1999 }
2000 qdf_export_symbol(hif_batch_send);
2001 
2002 /**
2003  * hif_update_tx_ring() - API to access hif specific function
2004  * ce_update_tx_ring.
2005  * @osc: HIF Context
2006  * @num_htt_cmpls : number of HTT completions received.
2007  *
2008  * Return: void
2009  */
2010 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2011 {
2012 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2013 
2014 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2015 }
2016 qdf_export_symbol(hif_update_tx_ring);
2017 
2018 
2019 /**
2020  * hif_send_single() - API to access hif specific function
2021  * ce_send_single.
2022  * @osc: HIF Context
2023  * @msdu : msdu to be sent
2024  * @transfer_id: transfer id
2025  * @len : downloaded length
2026  *
2027  * Return: msdu sent status
2028  */
2029 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2030 			   uint32_t transfer_id, u_int32_t len)
2031 {
2032 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2033 
2034 	if (!ce_tx_hdl)
2035 		return QDF_STATUS_E_NULL_VALUE;
2036 
2037 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2038 			len);
2039 }
2040 qdf_export_symbol(hif_send_single);
2041 #endif
2042 
2043 /**
2044  * hif_reg_write() - API to access hif specific function
2045  * hif_write32_mb.
2046  * @hif_ctx : HIF Context
2047  * @offset : offset on which value has to be written
2048  * @value : value to be written
2049  *
2050  * Return: None
2051  */
2052 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2053 		uint32_t value)
2054 {
2055 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2056 
2057 	hif_write32_mb(scn, scn->mem + offset, value);
2059 }
2060 qdf_export_symbol(hif_reg_write);
2061 
2062 /**
2063  * hif_reg_read() - API to access hif specific function
2064  * hif_read32_mb.
2065  * @hif_ctx : HIF Context
2066  * @offset : offset from which value has to be read
2067  *
2068  * Return: Read value
2069  */
2070 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
2071 {
2073 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2074 
2075 	return hif_read32_mb(scn, scn->mem + offset);
2076 }
2077 qdf_export_symbol(hif_reg_read);
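
/*
 * Usage sketch (illustrative; EXAMPLE_REG_OFFSET and EXAMPLE_BIT are
 * hypothetical): the two accessors compose into a read-modify-write of
 * a target register.
 *
 *	uint32_t val = hif_reg_read(hif_ctx, EXAMPLE_REG_OFFSET);
 *
 *	hif_reg_write(hif_ctx, EXAMPLE_REG_OFFSET, val | EXAMPLE_BIT);
 */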
2078 
2079 /**
2080  * hif_ramdump_handler(): generic ramdump handler
2081  * @scn: struct hif_opaque_softc
2082  *
2083  * Return: None
2084  */
2085 void hif_ramdump_handler(struct hif_opaque_softc *scn)
2086 {
2087 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2088 		hif_usb_ramdump_handler(scn);
2089 }
2090 
2091 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
2092 {
2093 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2094 
2095 	return scn->wake_irq_type;
2096 }
2097 
2098 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
2099 {
2100 	struct hif_softc *scn = context;
2101 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
2102 
2103 	hif_info("wake interrupt received on irq %d", irq);
2104 
2105 	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
2106 	hif_pm_runtime_request_resume(hif_ctx, RTPM_ID_WAKE_INTR_HANDLER);
2107 
2108 	if (scn->initial_wakeup_cb)
2109 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
2110 
2111 	if (hif_is_ut_suspended(scn))
2112 		hif_ut_fw_resume(scn);
2113 
2114 	qdf_pm_system_wakeup();
2115 
2116 	return IRQ_HANDLED;
2117 }
2118 
2119 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2120 			       void (*callback)(void *),
2121 			       void *priv)
2122 {
2123 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2124 
2125 	scn->initial_wakeup_cb = callback;
2126 	scn->initial_wakeup_priv = priv;
2127 }
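
/*
 * Usage sketch (illustrative; example_wake_cb() is hypothetical): a PM
 * client registers a callback that the wake interrupt handler above
 * invokes when a wake interrupt arrives.
 *
 *	static void example_wake_cb(void *priv)
 *	{
 *		... resume-side bookkeeping ...
 *	}
 *
 *	hif_set_initial_wakeup_cb(hif_ctx, example_wake_cb, priv);
 */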
2128 
2129 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2130 				       uint32_t ce_service_max_yield_time)
2131 {
2132 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2133 
2134 	hif_ctx->ce_service_max_yield_time =
2135 		ce_service_max_yield_time * 1000;
2136 }
2137 
2138 unsigned long long
2139 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
2140 {
2141 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2142 
2143 	return hif_ctx->ce_service_max_yield_time;
2144 }
2145 
2146 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2147 				       uint8_t ce_service_max_rx_ind_flush)
2148 {
2149 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2150 
2151 	if (ce_service_max_rx_ind_flush == 0 ||
2152 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
2153 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2154 	else
2155 		hif_ctx->ce_service_max_rx_ind_flush =
2156 						ce_service_max_rx_ind_flush;
2157 }
2158 
2159 #ifdef SYSTEM_PM_CHECK
2160 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2161 			       enum hif_system_pm_state state)
2162 {
2163 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2164 
2165 	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
2166 }
2167 
2168 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
2169 {
2170 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2171 
2172 	return qdf_atomic_read(&hif_ctx->sys_pm_state);
2173 }
2174 
2175 int hif_system_pm_state_check(struct hif_opaque_softc *hif)
2176 {
2177 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2178 	int32_t sys_pm_state;
2179 
2180 	if (!hif_ctx) {
2181 		hif_err("hif context is null");
2182 		return -EFAULT;
2183 	}
2184 
2185 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
2186 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
2187 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
2188 		hif_info("Triggering system wakeup");
2189 		qdf_pm_system_wakeup();
2190 		return -EAGAIN;
2191 	}
2192 
2193 	return 0;
2194 }
2195 #endif
2196