xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision c96d5d25926d2a81a5d1800dffa4ef543a4a54fb)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "hif_hw_version.h"
33 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
34      defined(HIF_IPCI))
35 #include "ce_tasklet.h"
36 #include "ce_api.h"
37 #endif
38 #include "qdf_trace.h"
39 #include "qdf_status.h"
40 #include "hif_debug.h"
41 #include "mp_dev.h"
42 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
43 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
44 #include "hal_api.h"
45 #endif
46 #include "hif_napi.h"
47 #include "hif_unit_test_suspend_i.h"
48 #include "qdf_module.h"
49 #ifdef HIF_CE_LOG_INFO
50 #include <qdf_notifier.h>
51 #include <qdf_hang_event_notifier.h>
52 #endif
53 #include <linux/cpumask.h>
54 
55 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
56 #include <pld_common.h>
57 #endif
58 
/**
 * hif_dump() - trigger a bus-specific target dump
 * @hif_ctx: opaque hif context
 * @cmd_id: dump command identifier, passed through to the bus layer
 * @start: true to start the dump, false to stop it
 *
 * Thin wrapper around hif_trigger_dump().
 *
 * Return: none
 */
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}
63 
/**
 * hif_get_target_id(): hif_get_target_id
 * @scn: hif_softc
 *
 * Return the virtual memory base address to the caller.
 * NOTE(review): despite the A_target_id_t return type this hands back
 * scn->mem (the mapped register base), not a numeric target id.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}
77 
78 /**
79  * hif_get_targetdef(): hif_get_targetdef
80  * @scn: scn
81  *
82  * Return: void *
83  */
84 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
85 {
86 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
87 
88 	return scn->targetdef;
89 }
90 
91 #ifdef FORCE_WAKE
92 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
93 			 bool init_phase)
94 {
95 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
96 
97 	if (ce_srng_based(scn))
98 		hal_set_init_phase(scn->hal_soc, init_phase);
99 }
100 #endif /* FORCE_WAKE */
101 
102 #ifdef HIF_IPCI
103 void hif_shutdown_notifier_cb(void *hif_ctx)
104 {
105 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
106 
107 	scn->recovery = true;
108 }
109 #endif
110 
/**
 * hif_vote_link_down(): unvote for link up
 * @hif_ctx: opaque hif context
 *
 * Call hif_vote_link_down to release a previous request made using
 * hif_vote_link_up. A hif_vote_link_down call should only be made
 * after a corresponding hif_vote_link_up, otherwise you could be
 * negating a vote from another source. When no votes are present
 * hif will not guarantee the linkstate after hif_bus_suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	/* no underflow guard: an unbalanced call drives the count negative
	 * and the final "== 0" transition is never seen again
	 */
	scn->linkstate_vote--;
	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 0)
		hif_bus_prevent_linkdown(scn, false);
}
135 
/**
 * hif_vote_link_up(): vote to prevent bus from suspending
 * @hif_ctx: opaque hif context
 *
 * Makes hif guarantee that fw can message the host normally
 * during suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote++;
	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
	/* first vote: ask the bus layer to hold the link up */
	if (scn->linkstate_vote == 1)
		hif_bus_prevent_linkdown(scn, true);
}
157 
158 /**
159  * hif_can_suspend_link(): query if hif is permitted to suspend the link
160  *
161  * Hif will ensure that the link won't be suspended if the upperlayers
162  * don't want it to.
163  *
164  * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
165  * we don't need extra locking to ensure votes dont change while
166  * we are in the process of suspending or resuming.
167  *
168  * Return: false if hif will guarantee link up durring suspend.
169  */
170 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
171 {
172 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
173 
174 	QDF_BUG(scn);
175 	return scn->linkstate_vote == 0;
176 }
177 
/**
 * hif_hia_item_address(): compute the address of a host-interest item
 * @target_type: TARGET_TYPE_* constant identifying the chip family
 * @item_offset: byte offset of the item within the host interest area
 *
 * Return: absolute target address of the item; asserts and returns 0
 *         for targets with no host interest area or unknown types
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_IPQ4019:
		return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;

	default:
		ASSERT(0);
		return 0;
	}
}
219 
220 /**
221  * hif_max_num_receives_reached() - check max receive is reached
222  * @scn: HIF Context
223  * @count: unsigned int.
224  *
225  * Output check status as bool
226  *
227  * Return: bool
228  */
229 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
230 {
231 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
232 		return count > 120;
233 	else
234 		return count > MAX_NUM_OF_RECEIVES;
235 }
236 
/**
 * init_buffer_count() - initial buffer count
 * @maxSize: requested buffer count
 *
 * Routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed.
 * Default behaviour: pass the requested count through unchanged.
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}
250 
251 /**
252  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
253  * @hif_ctx: hif context
254  * @htc_htt_tx_endpoint: htt_tx_endpoint
255  *
256  * Return: void
257  */
258 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
259 							int htc_htt_tx_endpoint)
260 {
261 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
262 
263 	if (!scn) {
264 		hif_err("scn or scn->hif_sc is NULL!");
265 		return;
266 	}
267 
268 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
269 }
270 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
271 
/*
 * Table mapping (target_version, target_revision) pairs to human
 * readable hardware names; consumed by hif_get_hw_name(). Several
 * entries share an id and are distinguished only by subid.
 */
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	},
	{
		.id = WCN7850_V1,
		.subid = 0xE,
		.name = "WCN7850_V1",
	}
};
364 
365 /**
366  * hif_get_hw_name(): get a human readable name for the hardware
367  * @info: Target Info
368  *
369  * Return: human readable name for the underlying wifi hardware.
370  */
371 static const char *hif_get_hw_name(struct hif_target_info *info)
372 {
373 	int i;
374 
375 	if (info->hw_name)
376 		return info->hw_name;
377 
378 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
379 		if (info->target_version == qwlan_hw_list[i].id &&
380 		    info->target_revision == qwlan_hw_list[i].subid) {
381 			return qwlan_hw_list[i].name;
382 		}
383 	}
384 
385 	info->hw_name = qdf_mem_malloc(64);
386 	if (!info->hw_name)
387 		return "Unknown Device (nomem)";
388 
389 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
390 			info->target_version);
391 	if (i < 0)
392 		return "Unknown Device (snprintf failure)";
393 	else
394 		return info->hw_name;
395 }
396 
/**
 * hif_get_hw_info(): retrieve version, revision and name of the hardware
 * @scn: opaque hif context
 * @version: output, target version
 * @revision: output, target revision
 * @target_name: output, human readable hardware name
 *
 * For USB targets the hardware info is (re)read from the device first.
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
			const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
418 
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: opaque hif handle
 *
 * Return: virtual device memory base address (scn->mem)
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);
434 
435 /**
436  * hif_get_dev_ba_ce(): API to get device ce base address.
437  * @scn: scn
438  *
439  * Return: dev mem base address for CE
440  */
441 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
442 {
443 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
444 
445 	return scn->mem_ce;
446 }
447 
448 qdf_export_symbol(hif_get_dev_ba_ce);
449 
450 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
451 /**
452  * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
453  * @scn: hif context
454  * @psoc: psoc objmgr handle
455  *
456  * Return: None
457  */
458 static inline
459 void hif_get_cfg_from_psoc(struct hif_softc *scn,
460 			   struct wlan_objmgr_psoc *psoc)
461 {
462 	if (psoc) {
463 		scn->ini_cfg.ce_status_ring_timer_threshold =
464 			cfg_get(psoc,
465 				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
466 		scn->ini_cfg.ce_status_ring_batch_count_threshold =
467 			cfg_get(psoc,
468 				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
469 	}
470 }
471 #else
/* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG disabled: no ini thresholds to read */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
}
477 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
478 
479 #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
480 /**
481  * hif_recovery_notifier_cb - Recovery notifier callback to log
482  *  hang event data
483  * @block: notifier block
484  * @state: state
485  * @data: notifier data
486  *
487  * Return: status
488  */
489 static
490 int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
491 			     void *data)
492 {
493 	struct qdf_notifer_data *notif_data = data;
494 	qdf_notif_block *notif_block;
495 	struct hif_softc *hif_handle;
496 	bool bus_id_invalid;
497 
498 	if (!data || !block)
499 		return -EINVAL;
500 
501 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
502 
503 	hif_handle = notif_block->priv_data;
504 	if (!hif_handle)
505 		return -EINVAL;
506 
507 	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
508 					  &notif_data->offset);
509 	if (bus_id_invalid)
510 		return NOTIFY_STOP_MASK;
511 
512 	hif_log_ce_info(hif_handle, notif_data->hang_data,
513 			&notif_data->offset);
514 
515 	return 0;
516 }
517 
518 /**
519  * hif_register_recovery_notifier - Register hif recovery notifier
520  * @hif_handle: hif handle
521  *
522  * Return: status
523  */
524 static
525 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
526 {
527 	qdf_notif_block *hif_notifier;
528 
529 	if (!hif_handle)
530 		return QDF_STATUS_E_FAILURE;
531 
532 	hif_notifier = &hif_handle->hif_recovery_notifier;
533 
534 	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
535 	hif_notifier->priv_data = hif_handle;
536 	return qdf_hang_event_register_notifier(hif_notifier);
537 }
538 
539 /**
540  * hif_unregister_recovery_notifier - Un-register hif recovery notifier
541  * @hif_handle: hif handle
542  *
543  * Return: status
544  */
545 static
546 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
547 {
548 	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
549 
550 	return qdf_hang_event_unregister_notifier(hif_notifier);
551 }
552 #else
/* hang-event logging disabled: registration is a successful no-op */
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
558 
/* hang-event logging disabled: unregistration is a successful no-op */
static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
564 #endif
565 
566 #ifdef HIF_CPU_PERF_AFFINE_MASK
567 /**
568  * __hif_cpu_hotplug_notify() - CPU hotplug event handler
569  * @cpu: CPU Id of the CPU generating the event
570  * @cpu_up: true if the CPU is online
571  *
572  * Return: None
573  */
574 static void __hif_cpu_hotplug_notify(void *context,
575 				     uint32_t cpu, bool cpu_up)
576 {
577 	struct hif_softc *scn = context;
578 
579 	if (!scn)
580 		return;
581 	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
582 		return;
583 
584 	if (cpu_up) {
585 		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
586 		hif_debug("Setting affinity for online CPU: %d", cpu);
587 	} else {
588 		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
589 	}
590 }
591 
/**
 * hif_cpu_hotplug_notify - cpu core up/down notification
 * handler
 * @context: hif context passed through to __hif_cpu_hotplug_notify()
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Wraps the real handler in an operation-protection section so the
 * driver cannot be torn down underneath it.
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}
611 
/* qdf_cpuhp callback: CPU came online */
static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}
616 
/* qdf_cpuhp callback: CPU is about to go offline */
static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}
621 
622 static void hif_cpuhp_register(struct hif_softc *scn)
623 {
624 	if (!scn) {
625 		hif_info_high("cannot register hotplug notifiers");
626 		return;
627 	}
628 	qdf_cpuhp_register(&scn->cpuhp_event_handle,
629 			   scn,
630 			   hif_cpu_online_cb,
631 			   hif_cpu_before_offline_cb);
632 }
633 
634 static void hif_cpuhp_unregister(struct hif_softc *scn)
635 {
636 	if (!scn) {
637 		hif_info_high("cannot unregister hotplug notifiers");
638 		return;
639 	}
640 	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
641 }
642 
643 #else
/* HIF_CPU_PERF_AFFINE_MASK disabled: hotplug tracking is a no-op */
static void hif_cpuhp_register(struct hif_softc *scn)
{
}
647 
/* HIF_CPU_PERF_AFFINE_MASK disabled: hotplug tracking is a no-op */
static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
651 #endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
652 
653 #ifdef HIF_DETECTION_LATENCY_ENABLE
654 
/**
 * hif_tasklet_latency() - detect a stalled or late ce2 tasklet
 * @scn: hif context
 * @from_timer: true when invoked from the detection timer, false when
 *              invoked from the tasklet execution path
 *
 * Triggers self-recovery when the sched/exec timestamps show the
 * tasklet has not run within detect_latency_threshold ms of being
 * scheduled.
 *
 * Return: none
 */
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t ce2_tasklet_sched_time =
		scn->latency_detect.ce2_tasklet_sched_time;
	qdf_time_t ce2_tasklet_exec_time =
		scn->latency_detect.ce2_tasklet_exec_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/* 2 kinds of check here.
	 * from_timer==true:  check if tasklet stall
	 * from_timer==false: check tasklet execute comes late
	 *
	 * i.e. the timestamp ordering tells which event is pending, and
	 * the threshold test checks how long it has been pending.
	 */

	if ((from_timer ?
	    qdf_system_time_after(ce2_tasklet_sched_time,
				  ce2_tasklet_exec_time) :
	    qdf_system_time_after(ce2_tasklet_exec_time,
				  ce2_tasklet_sched_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		ce2_tasklet_sched_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu,ce2_tasklet_exec_time %lu, detect_latency_threshold %ums detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, ce2_tasklet_sched_time,
			ce2_tasklet_exec_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}
692 
/**
 * hif_credit_latency() - detect a stalled or late credit report
 * @scn: hif context
 * @from_timer: true when invoked from the detection timer, false when
 *              invoked from the credit report path
 *
 * Mirrors hif_tasklet_latency() for the credit request/report pair:
 * triggers self-recovery when a credit report has not arrived within
 * detect_latency_threshold ms of the request.
 *
 * Return: none
 */
void hif_credit_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t credit_request_time =
		scn->latency_detect.credit_request_time;
	qdf_time_t credit_report_time =
		scn->latency_detect.credit_report_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/* 2 kinds of check here.
	 * from_timer==true:  check if credit report stall
	 * from_timer==false: check credit report comes late
	 */

	if ((from_timer ?
	    qdf_system_time_after(credit_request_time,
				  credit_report_time) :
	    qdf_system_time_after(credit_report_time,
				  credit_request_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		credit_request_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu,credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, credit_request_time,
			credit_report_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}
730 
/**
 * hif_check_detection_latency(): to check if latency for tasklet/credit
 * @scn: hif context
 * @from_timer: if called from timer handler
 * @bitmap_type: bitmap of HIF_DETECT_* selecting tasklet and/or credit
 *               checks
 *
 * Only active in mission mode and when detection has been enabled via
 * hif_set_enable_detection().
 *
 * Return: none
 */
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (!scn->latency_detect.enable_detection)
		return;

	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
		hif_tasklet_latency(scn, from_timer);

	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
		hif_credit_latency(scn, from_timer);
}
756 
/**
 * hif_latency_detect_timeout_handler() - periodic latency-detection timer
 * @arg: hif context (struct hif_softc *)
 *
 * Runs both latency checks, then re-arms the timer, preferring a CPU
 * other than the one the ce2 tasklet was scheduled on.
 *
 * Return: none
 */
static void hif_latency_detect_timeout_handler(void *arg)
{
	struct hif_softc *scn = (struct hif_softc *)arg;
	int next_cpu;

	hif_check_detection_latency(scn, true,
				    BIT(HIF_DETECT_TASKLET) |
				    BIT(HIF_DETECT_CREDIT));

	/* it needs to make sure the timer starts on a different cpu,
	 * so it can detect the tasklet schedule stall, but there
	 * is still a chance that, after the timer has been started,
	 * irq/tasklet happens on the same cpu, then the tasklet will
	 * execute before the softirq timer; if this tasklet stalls, the
	 * timer can't detect it. We accept this as a limitation: if the
	 * tasklet stalls, some other place will detect it, just a
	 * little later.
	 */
	next_cpu = cpumask_any_but(
			cpu_active_mask,
			scn->latency_detect.ce2_tasklet_sched_cpuid);

	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
		hif_debug("start timer on local");
		/* no other active cpu available, start on the local cpu */
		qdf_timer_mod(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	} else {
		qdf_timer_start_on(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout,
			next_cpu);
	}
}
792 
/**
 * hif_latency_detect_timer_init() - set up the latency-detection timer
 * @scn: hif context, may be NULL (logged and ignored)
 *
 * Mission mode only. Initializes thresholds and the SW-spin timer but
 * does not start it; see hif_latency_detect_timer_start().
 *
 * Return: none
 */
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.detect_latency_timer_timeout =
		DETECTION_TIMER_TIMEOUT;
	scn->latency_detect.detect_latency_threshold =
		DETECTION_LATENCY_THRESHOLD;

	hif_info("timer timeout %u, latency threshold %u",
		 scn->latency_detect.detect_latency_timer_timeout,
		 scn->latency_detect.detect_latency_threshold);

	scn->latency_detect.is_timer_started = false;

	qdf_timer_init(NULL,
		       &scn->latency_detect.detect_latency_timer,
		       &hif_latency_detect_timeout_handler,
		       scn,
		       QDF_TIMER_TYPE_SW_SPIN);
}
820 
/**
 * hif_latency_detect_timer_deinit() - free the latency-detection timer
 * @scn: hif context
 *
 * Mission mode only; the timer must already be stopped by the caller.
 *
 * Return: none
 */
static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info("deinit timer");
	qdf_timer_free(&scn->latency_detect.detect_latency_timer);
}
829 
/**
 * hif_latency_detect_timer_start() - start the latency-detection timer
 * @hif_ctx: opaque hif context
 *
 * Mission mode only; idempotent — a second start while the timer is
 * running is logged and ignored.
 *
 * Return: none
 */
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info_rl("start timer");
	if (scn->latency_detect.is_timer_started) {
		hif_info("timer has been started");
		return;
	}

	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	scn->latency_detect.is_timer_started = true;
}
847 
/**
 * hif_latency_detect_timer_stop() - stop the latency-detection timer
 * @hif_ctx: opaque hif context
 *
 * Mission mode only; cancels synchronously, so the timer handler is
 * guaranteed not to be running on return.
 *
 * Return: none
 */
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info_rl("stop timer");

	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
	scn->latency_detect.is_timer_started = false;
}
860 
861 void hif_latency_detect_credit_record_time(
862 	enum hif_credit_exchange_type type,
863 	struct hif_opaque_softc *hif_ctx)
864 {
865 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
866 
867 	if (!scn) {
868 		hif_err("Could not do runtime put, scn is null");
869 		return;
870 	}
871 
872 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
873 		return;
874 
875 	if (HIF_REQUEST_CREDIT == type)
876 		scn->latency_detect.credit_request_time = qdf_system_ticks();
877 	else if (HIF_PROCESS_CREDIT_REPORT == type)
878 		scn->latency_detect.credit_report_time = qdf_system_ticks();
879 
880 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
881 }
882 
883 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
884 {
885 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
886 
887 	if (!scn) {
888 		hif_err("Could not do runtime put, scn is null");
889 		return;
890 	}
891 
892 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
893 		return;
894 
895 	scn->latency_detect.enable_detection = value;
896 }
897 #else
/* HIF_DETECTION_LATENCY_ENABLE disabled: timer init is a no-op */
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{}
900 
/* HIF_DETECTION_LATENCY_ENABLE disabled: timer deinit is a no-op */
static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{}
903 #endif
/**
 * hif_open() - allocate and initialize a hif context
 * @qdf_ctx: qdf device handle
 * @mode: driver mode (con_mode)
 * @bus_type: bus the target is attached to
 * @cbk: driver state callbacks, copied into the context
 * @psoc: psoc objmgr handle used to read ini configuration
 *
 * Allocates a bus-specific sized softc, initializes its atomics and
 * state, opens the bus and registers the CPU hotplug / latency
 * detection helpers.
 *
 * Return: opaque hif handle, or NULL on allocation/bus-open failure
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	/* the softc is over-allocated to hold the bus-private area */
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		hif_err("context size 0 not allowed");
		return NULL;
	}

	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn)
		return GET_HIF_OPAQUE_HDL(scn);

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);

	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type  = bus_type;

	hif_pm_set_link_state(GET_HIF_OPAQUE_HDL(scn), HIF_PM_LINK_STATE_DOWN);
	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
	hif_get_cfg_from_psoc(scn, psoc);

	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_bus_open error = %d, bus_type = %d",
			status, bus_type);
		qdf_mem_free(scn);
		scn = NULL;
		goto out;
	}

	hif_cpuhp_register(scn);
	hif_latency_detect_timer_init(scn);

out:
	/* scn is NULL on the failure path, yielding a NULL handle */
	return GET_HIF_OPAQUE_HDL(scn);
}
955 
956 #ifdef ADRASTEA_RRI_ON_DDR
957 /**
958  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
959  * @scn: hif context
960  *
961  * Return: none
962  */
963 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
964 {
965 	if (scn->vaddr_rri_on_ddr)
966 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
967 					(CE_COUNT * sizeof(uint32_t)),
968 					scn->vaddr_rri_on_ddr,
969 					scn->paddr_rri_on_ddr, 0);
970 	scn->vaddr_rri_on_ddr = NULL;
971 }
972 #endif
973 
/**
 * hif_close(): tear down and free a hif context created by hif_open()
 * @hif_ctx: opaque hif handle, may be NULL (logged and ignored)
 *
 * Reverses hif_open(): stops the latency timer, removes procfs
 * entries, frees the cached hw name, rri memory and static buffers,
 * unregisters hotplug callbacks and closes the bus before freeing the
 * softc itself.
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_opaque_softc is NULL");
		return;
	}

	hif_latency_detect_timer_deinit(scn);

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		/* point at a static string before freeing the old buffer so
		 * a racing reader never sees freed memory
		 */
		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cleanup_static_buf_to_target(scn);
	hif_cpuhp_unregister(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}
1011 
/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 *		datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}
1023 
1024 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1025 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1026 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1027 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1028 	defined(QCA_WIFI_WCN7850) || defined(QCA_WIFI_QCN9224) || \
1029 	defined(QCA_WIFI_QCA9574))
/**
 * hif_get_num_pending_work() - get the number of entries in
 *		the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Queries the HAL delayed-register-write workqueue.
 *
 * Returns: the number of pending register-write work entries
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
1041 #else
1042 
/* no delayed-register-write support on this target: nothing pending */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
1047 #endif
1048 
/**
 * hif_try_complete_tasks() - wait for pending tasklets and work to drain
 * @scn: HIF context
 *
 * Polls every 10 ms, up to HIF_TASK_DRAIN_WAIT_CNT attempts, for all
 * CE tasklets, group tasklets and delayed-register-write work to
 * complete.
 *
 * Return: QDF_STATUS_SUCCESS once drained, QDF_STATUS_E_FAULT on
 *         timeout
 */
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	/* condition re-samples all three counters on every iteration */
	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
1069 
1070 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/**
 * hif_try_prevent_ep_vote_access() - disable ep vote access and drain work
 * @hif_ctx: opaque hif context
 *
 * Disables both dp and generic ep vote access, waits for pending
 * delayed-register-write work to drain, then waits for the firmware
 * to release the PCI endpoint. On a work-drain timeout the vote
 * access is restored before failing.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAULT on timeout
 */
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint32_t work_drain_wait_cnt = 0;
	uint32_t wait_cnt = 0;
	int work = 0;

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);

	while ((work = hif_get_num_pending_work(scn))) {
		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
			/* roll back: re-enable access before failing */
			qdf_atomic_set(&scn->dp_ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			qdf_atomic_set(&scn->ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			hif_err("timeout wait for pending work %d ", work);
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(10);
	}

	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
			hif_err("Release EP vote is not proceed by Fw");
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(5);
	}

	return QDF_STATUS_SUCCESS;
}
1105 
1106 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1107 {
1108 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1109 	uint8_t vote_access;
1110 
1111 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1112 
1113 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1114 		hif_info("EP vote changed from:%u to intermediate state",
1115 			 vote_access);
1116 
1117 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1118 		QDF_BUG(0);
1119 
1120 	qdf_atomic_set(&scn->ep_vote_access,
1121 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1122 }
1123 
1124 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1125 {
1126 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1127 
1128 	qdf_atomic_set(&scn->dp_ep_vote_access,
1129 		       HIF_EP_VOTE_ACCESS_ENABLE);
1130 	qdf_atomic_set(&scn->ep_vote_access,
1131 		       HIF_EP_VOTE_ACCESS_ENABLE);
1132 }
1133 
1134 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1135 			    uint8_t type, uint8_t access)
1136 {
1137 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1138 
1139 	if (type == HIF_EP_VOTE_DP_ACCESS)
1140 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1141 	else
1142 		qdf_atomic_set(&scn->ep_vote_access, access);
1143 }
1144 
1145 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1146 			       uint8_t type)
1147 {
1148 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1149 
1150 	if (type == HIF_EP_VOTE_DP_ACCESS)
1151 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1152 	else
1153 		return qdf_atomic_read(&scn->ep_vote_access);
1154 }
1155 #endif
1156 
1157 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1158 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1159 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1160 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1161 	defined(QCA_WIFI_WCN7850) || defined(QCA_WIFI_QCN9224) || \
1162 	defined(QCA_WIFI_QCA9574))
1163 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1164 {
1165 	if (ce_srng_based(scn)) {
1166 		scn->hal_soc = hal_attach(
1167 					hif_softc_to_hif_opaque_softc(scn),
1168 					scn->qdf_dev);
1169 		if (!scn->hal_soc)
1170 			return QDF_STATUS_E_FAILURE;
1171 	}
1172 
1173 	return QDF_STATUS_SUCCESS;
1174 }
1175 
1176 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1177 {
1178 	if (ce_srng_based(scn)) {
1179 		hal_detach(scn->hal_soc);
1180 		scn->hal_soc = NULL;
1181 	}
1182 
1183 	return QDF_STATUS_SUCCESS;
1184 }
1185 #else
/* No SRNG-capable target compiled in: HAL attach is a no-op */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
1190 
/* No SRNG-capable target compiled in: HAL detach is a no-op */
static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
1195 #endif
1196 
1197 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1198 {
1199 	int ret;
1200 
1201 	switch (bus_type) {
1202 	case QDF_BUS_TYPE_IPCI:
1203 		ret = qdf_set_dma_coherent_mask(dev,
1204 						DMA_COHERENT_MASK_DEFAULT);
1205 		if (ret) {
1206 			hif_err("Failed to set dma mask error = %d", ret);
1207 			return ret;
1208 		}
1209 
1210 		break;
1211 	default:
1212 		/* Follow the existing sequence for other targets */
1213 		break;
1214 	}
1215 
1216 	return 0;
1217 }
1218 
/**
 * hif_enable(): bring up the HIF layer for a target
 * @hif_ctx: hif context
 * @dev: device structure of the underlying bus
 * @bdev: bus-specific device handle
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Enables the bus, marks the PM link up, attaches HAL (no-op for
 * non-SRNG targets) and configures the bus/copy engines. Failures
 * unwind the completed steps via the goto labels at the bottom.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
					  void *bdev,
					  const struct hif_bus_id *bid,
					  enum qdf_bus_type bus_type,
					  enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_ctx = NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_enable_bus error = %d", status);
		return status;
	}

	hif_pm_set_link_state(GET_HIF_OPAQUE_HDL(scn), HIF_PM_LINK_STATE_UP);
	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hal attach failed");
		goto disable_bus;
	}

	if (hif_bus_configure(scn)) {
		hif_err("Target probe failed");
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);
	hif_register_recovery_notifier(scn);
	hif_latency_detect_timer_start(hif_ctx);

	/*
	 * Flag to avoid potential unallocated memory access from MSI
	 * interrupt handler which could get scheduled as soon as MSI
	 * is enabled, i.e to take care of the race due to the order
	 * in where MSI is enabled before the memory, that will be
	 * in interrupt handlers, is allocated.
	 */

	scn->hif_init_done = true;

	hif_debug("OK");

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}
1287 
/**
 * hif_disable() - tear down what hif_enable() set up
 * @hif_ctx: hif context
 * @type: disable type
 *
 * Stops latency detection, unregisters the recovery notifier, masks
 * interrupts, stops (or force-shuts-down) the device, detaches HAL,
 * marks the PM link down and disables the bus. The teardown order
 * mirrors the reverse of hif_enable().
 */
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_set_enable_detection(hif_ctx, false);
	hif_latency_detect_timer_stop(hif_ctx);

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	/* hif_init_done is only set at the very end of hif_enable() */
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_pm_set_link_state(hif_ctx, HIF_PM_LINK_STATE_DOWN);
	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	hif_debug("X");
}
1317 
1318 #ifdef CE_TASKLET_DEBUG_ENABLE
1319 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
1320 {
1321 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1322 
1323 	if (!scn)
1324 		return;
1325 
1326 	scn->ce_latency_stats = val;
1327 }
1328 #endif
1329 
/**
 * hif_display_stats() - dump the bus-level HIF statistics
 * @hif_ctx: opaque HIF handle
 */
void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}

qdf_export_symbol(hif_display_stats);
1336 
/**
 * hif_clear_stats() - reset the bus-level HIF statistics
 * @hif_ctx: opaque HIF handle
 */
void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
1341 
1342 /**
1343  * hif_crash_shutdown_dump_bus_register() - dump bus registers
1344  * @hif_ctx: hif_ctx
1345  *
1346  * Return: n/a
1347  */
1348 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
1349 
/* Dump bus registers during a crash, skipping when the SoC is not in a
 * state where it can be read.
 */
static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		hif_err("Failed to dump bus registers!");
}
1360 
/**
 * hif_crash_shutdown(): hif_crash_shutdown
 *
 * Called by the platform driver on a crash to dump bus/CE registers
 * and collect a target RAM dump. Skipped on SNOC, when the target is
 * already asserted, or while a load/unload is in progress.
 *
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_ctx)
		return;

	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
		return;
	}

	if (TARGET_STATUS_RESET == scn->target_status) {
		hif_warn("Target is already asserted, ignore!");
		return;
	}

	if (hif_is_load_or_unload_in_progress(scn)) {
		hif_err("Load/unload is in progress, ignore!");
		return;
	}

	hif_crash_shutdown_dump_bus_register(hif_ctx);
	/* mark asserted so any second crash callback is ignored above */
	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);

	if (ol_copy_ramdump(hif_ctx))
		goto out;

	hif_info("RAM dump collecting completed!");

out:
	return;
}
1403 #else
/* RAM-dump collection compiled out; log and do nothing */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	hif_debug("Collecting target RAM dump disabled");
}
1408 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
1409 
1410 #ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg(): hif_check_fw_reg
 * @scn: scn
 *
 * QCA_WIFI_3_0 targets need no firmware register check; always report
 * success.
 *
 * Return: 0
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
1422 #endif
1423 
/**
 * hif_read_phy_mem_base(): return the physical base of the BAR mapping
 * @scn: scn
 * @phy_mem_base: out parameter receiving the physical mem base
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}

qdf_export_symbol(hif_read_phy_mem_base);
1436 
/**
 * hif_get_device_type(): map a PCI device/revision ID to HIF and
 *                        target types
 * @device_id: device_id
 * @revision_id: revision_id (only consulted for AR6320)
 * @hif_type: returned hif_type
 * @target_type: returned target_type
 *
 * NOTE(review): on an unsupported @device_id the function reads
 * *target_type without having written it — callers appear expected to
 * pre-initialize it to TARGET_TYPE_UNKNOWN; confirm at call sites.
 *
 * Return: 0 on success, -ENODEV for unsupported IDs
 */
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type)
{
	int ret = 0;

	switch (device_id) {
	case ADRASTEA_DEVICE_ID_P2_E12:

		*hif_type = HIF_TYPE_ADRASTEA;
		*target_type = TARGET_TYPE_ADRASTEA;
		break;

	case AR9888_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		break;

	case AR6320_DEVICE_ID:
		/* AR6320 HW revision decides between v1 and v2 types */
		switch (revision_id) {
		case AR6320_FW_1_1:
		case AR6320_FW_1_3:
			*hif_type = HIF_TYPE_AR6320;
			*target_type = TARGET_TYPE_AR6320;
			break;

		case AR6320_FW_2_0:
		case AR6320_FW_3_0:
		case AR6320_FW_3_2:
			*hif_type = HIF_TYPE_AR6320V2;
			*target_type = TARGET_TYPE_AR6320V2;
			break;

		default:
			hif_err("dev_id = 0x%x, rev_id = 0x%x",
				device_id, revision_id);
			ret = -ENODEV;
			goto end;
		}
		break;

	case AR9887_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		hif_info(" *********** AR9887 **************");
		break;

	case QCA9984_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9984;
		*target_type = TARGET_TYPE_QCA9984;
		hif_info(" *********** QCA9984 *************");
		break;

	case QCA9888_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9888;
		*target_type = TARGET_TYPE_QCA9888;
		hif_info(" *********** QCA9888 *************");
		break;

	case AR900B_DEVICE_ID:
		*hif_type = HIF_TYPE_AR900B;
		*target_type = TARGET_TYPE_AR900B;
		hif_info(" *********** AR900B *************");
		break;

	case IPQ4019_DEVICE_ID:
		*hif_type = HIF_TYPE_IPQ4019;
		*target_type = TARGET_TYPE_IPQ4019;
		hif_info(" *********** IPQ4019  *************");
		break;

	case QCA8074_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074;
		*target_type = TARGET_TYPE_QCA8074;
		hif_info(" *********** QCA8074  *************");
		break;

	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6290;
		*target_type = TARGET_TYPE_QCA6290;
		hif_info(" *********** QCA6290EMU *************");
		break;

	case QCN9000_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9000;
		*target_type = TARGET_TYPE_QCN9000;
		hif_info(" *********** QCN9000 *************");
		break;

	case QCN9224_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9224;
		*target_type = TARGET_TYPE_QCN9224;
		hif_info(" *********** QCN9224 *************");
		break;

	case QCN6122_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN6122;
		*target_type = TARGET_TYPE_QCN6122;
		hif_info(" *********** QCN6122 *************");
		break;

	case QCN7605_DEVICE_ID:
	case QCN7605_COMPOSITE:
	case QCN7605_STANDALONE:
	case QCN7605_STANDALONE_V2:
	case QCN7605_COMPOSITE_V2:
		*hif_type = HIF_TYPE_QCN7605;
		*target_type = TARGET_TYPE_QCN7605;
		hif_info(" *********** QCN7605 *************");
		break;

	case QCA6390_DEVICE_ID:
	case QCA6390_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6390;
		*target_type = TARGET_TYPE_QCA6390;
		hif_info(" *********** QCA6390 *************");
		break;

	case QCA6490_DEVICE_ID:
	case QCA6490_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6490;
		*target_type = TARGET_TYPE_QCA6490;
		hif_info(" *********** QCA6490 *************");
		break;

	case QCA6750_DEVICE_ID:
	case QCA6750_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6750;
		*target_type = TARGET_TYPE_QCA6750;
		hif_info(" *********** QCA6750 *************");
		break;

	case WCN7850_DEVICE_ID:
		*hif_type = HIF_TYPE_WCN7850;
		*target_type = TARGET_TYPE_WCN7850;
		hif_info(" *********** WCN7850 *************");
		break;

	case QCA8074V2_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074V2;
		*target_type = TARGET_TYPE_QCA8074V2;
		hif_info(" *********** QCA8074V2 *************");
		break;

	case QCA6018_DEVICE_ID:
	case RUMIM2M_DEVICE_ID_NODE0:
	case RUMIM2M_DEVICE_ID_NODE1:
	case RUMIM2M_DEVICE_ID_NODE2:
	case RUMIM2M_DEVICE_ID_NODE3:
	case RUMIM2M_DEVICE_ID_NODE4:
	case RUMIM2M_DEVICE_ID_NODE5:
		*hif_type = HIF_TYPE_QCA6018;
		*target_type = TARGET_TYPE_QCA6018;
		hif_info(" *********** QCA6018 *************");
		break;

	case QCA5018_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5018;
		*target_type = TARGET_TYPE_QCA5018;
		hif_info(" *********** qca5018 *************");
		break;

	case QCA9574_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9574;
		*target_type = TARGET_TYPE_QCA9574;
		hif_info(" *********** QCA9574 *************");
		break;

	default:
		hif_err("Unsupported device ID = 0x%x!", device_id);
		ret = -ENODEV;
		break;
	}

	if (*target_type == TARGET_TYPE_UNKNOWN) {
		hif_err("Unsupported target_type!");
		ret = -ENODEV;
	}
end:
	return ret;
}
1628 
/**
 * hif_get_bus_type() - return the bus type
 * @hif_hdl: opaque HIF handle
 *
 * Return: enum qdf_bus_type
 */
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	return scn->bus_type;
}
1640 
/**
 * Target info and ini parameters are global to the driver
 * Hence these structures are exposed to all the modules in
 * the driver and they don't need to maintain multiple copies
 * of the same info, instead get the handle from hif and
 * modify them in hif
 */
1648 
/**
 * hif_get_ini_handle() - API to get hif_config_param handle
 * @hif_ctx: HIF Context
 *
 * Return: pointer to the hif_config_info embedded in the HIF context
 */
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->hif_config;
}
1661 
/**
 * hif_get_target_info_handle() - API to get hif_target_info handle
 * @hif_ctx: HIF context
 *
 * Return: pointer to the hif_target_info embedded in the HIF context
 */
struct hif_target_info *hif_get_target_info_handle(
					struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->target_info;

}
qdf_export_symbol(hif_get_target_info_handle);
1677 
1678 #ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - register an Rx offload flush callback
 * @scn: opaque HIF handle
 * @offld_flush_handler: callback to invoke on flush
 *
 * The flush callback is only meaningful when NAPI owns Rx processing;
 * otherwise the request is logged and dropped.
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (!hif_napi_enabled(scn, -1)) {
		hif_err("NAPI not enabled");
		return;
	}

	hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
}
qdf_export_symbol(hif_offld_flush_cb_register);
1688 
/**
 * hif_offld_flush_cb_deregister() - remove the Rx offload flush callback
 * @scn: opaque HIF handle
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (!hif_napi_enabled(scn, -1)) {
		hif_err("NAPI not enabled");
		return;
	}

	hif_napi_rx_offld_flush_cb_deregister(scn);
}
qdf_export_symbol(hif_offld_flush_cb_deregister);
1697 
/**
 * hif_get_rx_ctx_id() - translate a CE pipe id to an Rx context id
 * @ctx_id: CE pipe id
 * @hif_hdl: opaque HIF handle
 *
 * Return: NAPI id for the pipe when NAPI is enabled, otherwise the
 * pipe id unchanged
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return hif_napi_enabled(hif_hdl, -1) ? NAPI_PIPE2ID(ctx_id) : ctx_id;
}
1705 #else /* RECEIVE_OFFLOAD */
/* RECEIVE_OFFLOAD disabled: there is a single Rx context, id 0 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
1711 #endif /* RECEIVE_OFFLOAD */
1712 
1713 #if defined(FEATURE_LRO)
1714 
/**
 * hif_get_lro_info - Returns LRO instance for instance ID
 * @ctx_id: LRO instance ID
 * @hif_hdl: HIF Context
 *
 * Looks the instance up via NAPI when NAPI Rx is enabled, via the CE
 * layer otherwise.
 *
 * Return: Pointer to LRO instance.
 */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return hif_napi_get_lro_info(hif_hdl, ctx_id);

	return hif_ce_get_lro_ctx(hif_hdl, ctx_id);
}
1733 #endif
1734 
/**
 * hif_get_target_status - API to get target status
 * @hif_ctx: HIF Context
 *
 * Return: enum hif_target_status
 */
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->target_status;
}
qdf_export_symbol(hif_get_target_status);
1748 
/**
 * hif_set_target_status() - API to set target status
 * @hif_ctx: HIF Context
 * @status: Target Status
 *
 * Return: void
 */
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status status)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->target_status = status;
}
1763 
1764 /**
1765  * hif_init_ini_config() - API to initialize HIF configuration parameters
1766  * @hif_ctx: HIF Context
1767  * @cfg: HIF Configuration
1768  *
1769  * Return: void
1770  */
1771 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1772 			 struct hif_config_info *cfg)
1773 {
1774 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1775 
1776 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1777 }
1778 
1779 /**
1780  * hif_get_conparam() - API to get driver mode in HIF
1781  * @scn: HIF Context
1782  *
1783  * Return: driver mode of operation
1784  */
1785 uint32_t hif_get_conparam(struct hif_softc *scn)
1786 {
1787 	if (!scn)
1788 		return 0;
1789 
1790 	return scn->hif_con_param;
1791 }
1792 
/**
 * hif_get_callbacks_handle() - API to get callbacks Handle
 * @scn: HIF Context
 *
 * Return: pointer to the driver-state callbacks embedded in the context
 */
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn)
{
	return &scn->callbacks;
}
1804 
1805 /**
1806  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1807  * @scn: HIF Context
1808  *
1809  * Return: True/False
1810  */
1811 bool hif_is_driver_unloading(struct hif_softc *scn)
1812 {
1813 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1814 
1815 	if (cbk && cbk->is_driver_unloading)
1816 		return cbk->is_driver_unloading(cbk->context);
1817 
1818 	return false;
1819 }
1820 
1821 /**
1822  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1823  * load/unload in progress
1824  * @scn: HIF Context
1825  *
1826  * Return: True/False
1827  */
1828 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1829 {
1830 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1831 
1832 	if (cbk && cbk->is_load_unload_in_progress)
1833 		return cbk->is_load_unload_in_progress(cbk->context);
1834 
1835 	return false;
1836 }
1837 
1838 /**
1839  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1840  * progress
1841  * @scn: HIF Context
1842  *
1843  * Return: True/False
1844  */
1845 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1846 {
1847 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1848 
1849 	if (cbk && cbk->is_recovery_in_progress)
1850 		return cbk->is_recovery_in_progress(cbk->context);
1851 
1852 	return false;
1853 }
1854 
1855 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
1856     defined(HIF_IPCI)
1857 
/**
 * hif_update_pipe_callback() - API to register pipe specific callbacks
 * @osc: Opaque softc
 * @pipeid: pipe id (must be < CE_COUNT_MAX)
 * @callbacks: callbacks to register; copied into the pipe state
 *
 * Return: void
 */

void hif_update_pipe_callback(struct hif_opaque_softc *osc,
					u_int8_t pipeid,
					struct hif_msg_callbacks *callbacks)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	/* out-of-range pipe id would index past pipe_info[] */
	QDF_BUG(pipeid < CE_COUNT_MAX);

	hif_debug("pipeid: %d", pipeid);

	pipe_info = &hif_state->pipe_info[pipeid];

	qdf_mem_copy(&pipe_info->pipe_callbacks,
			callbacks, sizeof(pipe_info->pipe_callbacks));
}
qdf_export_symbol(hif_update_pipe_callback);
1885 
1886 /**
1887  * hif_is_target_ready() - API to query if target is in ready state
1888  * progress
1889  * @scn: HIF Context
1890  *
1891  * Return: True/False
1892  */
1893 bool hif_is_target_ready(struct hif_softc *scn)
1894 {
1895 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1896 
1897 	if (cbk && cbk->is_target_ready)
1898 		return cbk->is_target_ready(cbk->context);
1899 	/*
1900 	 * if callback is not registered then there is no way to determine
1901 	 * if target is ready. In-such case return true to indicate that
1902 	 * target is ready.
1903 	 */
1904 	return true;
1905 }
1906 qdf_export_symbol(hif_is_target_ready);
1907 
1908 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
1909 {
1910 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1911 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1912 
1913 	if (cbk && cbk->get_bandwidth_level)
1914 		return cbk->get_bandwidth_level(cbk->context);
1915 
1916 	return 0;
1917 }
1918 
1919 qdf_export_symbol(hif_get_bandwidth_level);
1920 
1921 #ifdef DP_MEM_PRE_ALLOC
1922 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
1923 					 qdf_size_t size,
1924 					 qdf_dma_addr_t *paddr,
1925 					 uint32_t ring_type,
1926 					 uint8_t *is_mem_prealloc)
1927 {
1928 	void *vaddr = NULL;
1929 	struct hif_driver_state_callbacks *cbk =
1930 				hif_get_callbacks_handle(scn);
1931 
1932 	*is_mem_prealloc = false;
1933 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
1934 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
1935 								   paddr,
1936 								   ring_type);
1937 		if (vaddr) {
1938 			*is_mem_prealloc = true;
1939 			goto end;
1940 		}
1941 	}
1942 
1943 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
1944 					 scn->qdf_dev->dev,
1945 					 size,
1946 					 paddr);
1947 end:
1948 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
1949 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
1950 		(void *)*paddr, (int)size, ring_type);
1951 
1952 	return vaddr;
1953 }
1954 
1955 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
1956 				       qdf_size_t size,
1957 				       void *vaddr,
1958 				       qdf_dma_addr_t paddr,
1959 				       qdf_dma_context_t memctx,
1960 				       uint8_t is_mem_prealloc)
1961 {
1962 	struct hif_driver_state_callbacks *cbk =
1963 				hif_get_callbacks_handle(scn);
1964 
1965 	if (is_mem_prealloc) {
1966 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
1967 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
1968 		} else {
1969 			dp_warn("dp_prealloc_put_consistent_unligned NULL");
1970 			QDF_BUG(0);
1971 		}
1972 	} else {
1973 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1974 					size, vaddr, paddr, memctx);
1975 	}
1976 }
1977 #endif
1978 
/**
 * hif_batch_send() - API to access hif specific function
 * ce_batch_send.
 * @osc: HIF Context
 * @msdu : list of msdus to be sent
 * @transfer_id : transfer id
 * @len : downloaded length
 * @sendhead : sendhead flag passed through to ce_batch_send
 *
 * Return: list of msdus not sent, or NULL when the HTT TX copy engine
 * handle is unavailable
 */
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return NULL;

	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			len, sendhead);
}
qdf_export_symbol(hif_batch_send);
2001 
2002 /**
2003  * hif_update_tx_ring() - API to access hif specific function
2004  * ce_update_tx_ring.
2005  * @osc: HIF Context
2006  * @num_htt_cmpls : number of htt compl received.
2007  *
2008  * Return: void
2009  */
2010 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2011 {
2012 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2013 
2014 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2015 }
2016 qdf_export_symbol(hif_update_tx_ring);
2017 
2018 
/**
 * hif_send_single() - API to access hif specific function
 * ce_send_single.
 * @osc: HIF Context
 * @msdu : msdu to be sent
 * @transfer_id: transfer id
 * @len : downloaded length
 *
 * Return: msdu sent status; QDF_STATUS_E_NULL_VALUE when the HTT TX
 * copy engine handle is unavailable
 */
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			   uint32_t transfer_id, u_int32_t len)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return QDF_STATUS_E_NULL_VALUE;

	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			len);
}
qdf_export_symbol(hif_send_single);
2041 #endif
2042 
/**
 * hif_reg_write() - API to access hif specific function
 * hif_write32_mb.
 * @hif_ctx : HIF Context
 * @offset : offset from the mapped register base at which to write
 * @value : value to be written
 *
 * Return: None
 */
void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		uint32_t value)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_write32_mb(scn, scn->mem + offset, value);

}
qdf_export_symbol(hif_reg_write);
2061 
/**
 * hif_reg_read() - API to access hif specific function
 * hif_read32_mb.
 * @hif_ctx : HIF Context
 * @offset : offset from the mapped register base at which to read
 *
 * Return: Read value
 */
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
{

	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return hif_read32_mb(scn, scn->mem + offset);
}
qdf_export_symbol(hif_reg_read);
2078 
2079 /**
2080  * hif_ramdump_handler(): generic ramdump handler
2081  * @scn: struct hif_opaque_softc
2082  *
2083  * Return: None
2084  */
2085 void hif_ramdump_handler(struct hif_opaque_softc *scn)
2086 {
2087 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2088 		hif_usb_ramdump_handler(scn);
2089 }
2090 
/**
 * hif_pm_get_wake_irq_type() - return the configured wake-irq type
 * @hif_ctx: opaque HIF handle
 *
 * Return: wake irq type stored in the HIF context
 */
hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->wake_irq_type;
}
2097 
/**
 * hif_wake_interrupt_handler() - ISR for the out-of-band wake interrupt
 * @irq: irq number that fired
 * @context: struct hif_softc pointer registered with the irq
 *
 * Requests a runtime-PM resume when wake-interrupt monitoring is armed,
 * invokes the client's initial-wakeup callback, resumes unit-test
 * suspend state and triggers a system wakeup.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
{
	struct hif_softc *scn = context;
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);

	hif_info("wake interrupt received on irq %d", irq);

	/* disarm monitoring before requesting the resume */
	if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) {
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		hif_pm_runtime_request_resume(hif_ctx,
					      RTPM_ID_WAKE_INTR_HANDLER);
	}

	if (scn->initial_wakeup_cb)
		scn->initial_wakeup_cb(scn->initial_wakeup_priv);

	if (hif_is_ut_suspended(scn))
		hif_ut_fw_resume(scn);

	qdf_pm_system_wakeup();

	return IRQ_HANDLED;
}
2121 
/**
 * hif_set_initial_wakeup_cb() - register the initial-wakeup callback
 * @hif_ctx: opaque HIF handle
 * @callback: function invoked from the wake interrupt handler
 * @priv: opaque argument passed back to @callback
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->initial_wakeup_cb = callback;
	scn->initial_wakeup_priv = priv;
}
2131 
/**
 * hif_set_ce_service_max_yield_time() - set the CE service yield budget
 * @hif: opaque HIF handle
 * @ce_service_max_yield_time: yield time budget; stored scaled by 1000
 *                             (presumably a ms -> us conversion — TODO
 *                             confirm units with callers)
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	hif_ctx->ce_service_max_yield_time =
		ce_service_max_yield_time * 1000;
}
2140 
/**
 * hif_get_ce_service_max_yield_time() - read the CE service yield budget
 * @hif: opaque HIF handle
 *
 * Return: the scaled value stored by hif_set_ce_service_max_yield_time()
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return hif_ctx->ce_service_max_yield_time;
}
2148 
2149 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2150 				       uint8_t ce_service_max_rx_ind_flush)
2151 {
2152 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2153 
2154 	if (ce_service_max_rx_ind_flush == 0 ||
2155 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
2156 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2157 	else
2158 		hif_ctx->ce_service_max_rx_ind_flush =
2159 						ce_service_max_rx_ind_flush;
2160 }
2161 
2162 #ifdef SYSTEM_PM_CHECK
/**
 * __hif_system_pm_set_state() - store the system PM state atomically
 * @hif: opaque HIF handle
 * @state: new system PM state
 */
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
}
2170 
/**
 * hif_system_pm_get_state() - read the system PM state atomically
 * @hif: opaque HIF handle
 *
 * Return: current hif_system_pm_state value
 */
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return qdf_atomic_read(&hif_ctx->sys_pm_state);
}
2177 
2178 int hif_system_pm_state_check(struct hif_opaque_softc *hif)
2179 {
2180 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2181 	int32_t sys_pm_state;
2182 
2183 	if (!hif_ctx) {
2184 		hif_err("hif context is null");
2185 		return -EFAULT;
2186 	}
2187 
2188 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
2189 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
2190 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
2191 		hif_info("Triggering system wakeup");
2192 		qdf_pm_system_wakeup();
2193 		return -EAGAIN;
2194 	}
2195 
2196 	return 0;
2197 }
2198 #endif
2199