xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "hif_hw_version.h"
33 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
34      defined(HIF_IPCI))
35 #include "ce_tasklet.h"
36 #include "ce_api.h"
37 #endif
38 #include "qdf_trace.h"
39 #include "qdf_status.h"
40 #include "hif_debug.h"
41 #include "mp_dev.h"
42 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
43 	defined(QCA_WIFI_QCA5018)
44 #include "hal_api.h"
45 #endif
46 #include "hif_napi.h"
47 #include "hif_unit_test_suspend_i.h"
48 #include "qdf_module.h"
49 #ifdef HIF_CE_LOG_INFO
50 #include <qdf_notifier.h>
51 #include <qdf_hang_event_notifier.h>
52 #endif
53 
/**
 * hif_dump(): trigger a bus-specific register/state dump
 * @hif_ctx: hif context
 * @cmd_id: dump command identifier, forwarded to the bus layer
 * @start: true to start the dump, false to stop it
 *
 * Thin wrapper that forwards the request to hif_trigger_dump().
 *
 * Return: n/a
 */
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}
58 
/**
 * hif_get_target_id(): hif_get_target_id
 * @scn: hif_softc
 *
 * Return the virtual memory base address of the target to the caller.
 *
 * Return: A_target_id_t (the mapped device memory base)
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}
72 
/**
 * hif_get_targetdef(): get the target register definition table
 * @hif_ctx: hif context
 *
 * Return: opaque pointer to the target definition (scn->targetdef)
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}
85 
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase(): inform HAL whether SRNG init is in progress
 * @hif_ctx: hif context
 * @init_phase: true while SRNG initialization is ongoing
 *
 * Only SRNG (copy-engine srng) based targets carry an init phase
 * flag inside HAL; legacy targets are left untouched.
 *
 * Return: n/a
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);

	if (!ce_srng_based(hif_sc))
		return;

	hal_set_init_phase(hif_sc->hal_soc, init_phase);
}
#endif /* FORCE_WAKE */
96 
#ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb(): platform shutdown notifier callback
 * @hif_ctx: hif context
 *
 * Marks the hif context as being in recovery so that in-flight
 * operations can bail out during a platform shutdown.
 *
 * Return: n/a
 */
void hif_shutdown_notifier_cb(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->recovery = true;
}
#endif
105 
/**
 * hif_vote_link_down(): unvote for link up
 * @hif_ctx: hif context
 *
 * Call hif_vote_link_down to release a previous request made using
 * hif_vote_link_up. A hif_vote_link_down call should only be made
 * after a corresponding hif_vote_link_up, otherwise you could be
 * negating a vote from another source. When no votes are present
 * hif will not guarantee the linkstate after hif_bus_suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote--;
	HIF_INFO("Down_linkstate_vote %d", scn->linkstate_vote);
	/* last vote released: allow the bus to take the link down again */
	if (scn->linkstate_vote == 0)
		hif_bus_prevent_linkdown(scn, false);
}
130 
/**
 * hif_vote_link_up(): vote to prevent bus from suspending
 * @hif_ctx: hif context
 *
 * Makes hif guarantee that fw can message the host normally
 * during suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote++;
	HIF_INFO("Up_linkstate_vote %d", scn->linkstate_vote);
	/* first vote: tell the bus layer to keep the link up */
	if (scn->linkstate_vote == 1)
		hif_bus_prevent_linkdown(scn, true);
}
152 
/**
 * hif_can_suspend_link(): query if hif is permitted to suspend the link
 * @hif_ctx: hif context
 *
 * Hif will ensure that the link won't be suspended if the upper layers
 * don't want it to.
 *
 * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
 * we don't need extra locking to ensure votes don't change while
 * we are in the process of suspending or resuming.
 *
 * Return: false if hif will guarantee link up during suspend.
 */
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	return scn->linkstate_vote == 0;
}
172 
/**
 * hif_hia_item_address(): get the address of a host interest area item
 * @target_type: target_type
 * @item_offset: offset of the item within the host interest area
 *
 * Return: the target address of the item, or 0 (after asserting) for
 * targets that have no host interest area or are unknown.
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_IPQ4019:
		return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;

	default:
		ASSERT(0);
		return 0;
	}
}
214 
215 /**
216  * hif_max_num_receives_reached() - check max receive is reached
217  * @scn: HIF Context
218  * @count: unsigned int.
219  *
220  * Output check status as bool
221  *
222  * Return: bool
223  */
224 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
225 {
226 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
227 		return count > 120;
228 	else
229 		return count > MAX_NUM_OF_RECEIVES;
230 }
231 
/**
 * init_buffer_count() - initial buffer count
 * @maxSize: upper bound requested by the caller
 *
 * Routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed.
 * The default implementation simply returns the requested maximum.
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}
245 
246 /**
247  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
248  * @hif_ctx: hif context
249  * @htc_htt_tx_endpoint: htt_tx_endpoint
250  *
251  * Return: void
252  */
253 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
254 							int htc_htt_tx_endpoint)
255 {
256 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
257 
258 	if (!scn) {
259 		HIF_ERROR("%s: error: scn or scn->hif_sc is NULL!",
260 		       __func__);
261 		return;
262 	}
263 
264 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
265 }
266 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
267 
/* Table mapping (target_version, target_revision) pairs to human
 * readable hardware names; consulted by hif_get_hw_name().
 */
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	}
};
355 
356 /**
357  * hif_get_hw_name(): get a human readable name for the hardware
358  * @info: Target Info
359  *
360  * Return: human readable name for the underlying wifi hardware.
361  */
362 static const char *hif_get_hw_name(struct hif_target_info *info)
363 {
364 	int i;
365 
366 	if (info->hw_name)
367 		return info->hw_name;
368 
369 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
370 		if (info->target_version == qwlan_hw_list[i].id &&
371 		    info->target_revision == qwlan_hw_list[i].subid) {
372 			return qwlan_hw_list[i].name;
373 		}
374 	}
375 
376 	info->hw_name = qdf_mem_malloc(64);
377 	if (!info->hw_name)
378 		return "Unknown Device (nomem)";
379 
380 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
381 			info->target_version);
382 	if (i < 0)
383 		return "Unknown Device (snprintf failure)";
384 	else
385 		return info->hw_name;
386 }
387 
/**
 * hif_get_hw_info(): query hardware version/revision/name
 * @scn: hif context
 * @version: out - target version
 * @revision: out - target revision
 * @target_name: out - human readable hardware name
 *
 * For USB targets the hardware info is (re)read from the bus layer
 * before being reported.
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
			const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
409 
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: hif handle
 *
 * Return: mapped device memory base address (scn->mem)
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);
425 
426 /**
427  * hif_get_dev_ba_ce(): API to get device ce base address.
428  * @scn: scn
429  *
430  * Return: dev mem base address for CE
431  */
432 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
433 {
434 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
435 
436 	return scn->mem_ce;
437 }
438 
439 qdf_export_symbol(hif_get_dev_ba_ce);
440 
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Copies the CE status ring timer/batch-count thresholds from the
 * psoc ini configuration into the hif context; a NULL psoc leaves
 * the context untouched.
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (!psoc)
		return;

	scn->ini_cfg.ce_status_ring_timer_threshold =
		cfg_get(psoc, CFG_CE_STATUS_RING_TIMER_THRESHOLD);
	scn->ini_cfg.ce_status_ring_batch_count_threshold =
		cfg_get(psoc, CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
}
#else
/* Stub: interrupt-threshold configuration not compiled in */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
469 
#ifdef HIF_CE_LOG_INFO
/**
 * hif_recovery_notifier_cb - Recovery notifier callback to log
 *  hang event data
 * @block: notifier block
 * @state: state
 * @data: notifier data (struct qdf_notifer_data with hang buffer/offset)
 *
 * Recovers the hif context stashed in the notifier block's priv_data
 * and appends CE info to the hang event buffer.
 *
 * Return: 0 on success, -EINVAL on missing arguments/context
 */
static
int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
			     void *data)
{
	struct qdf_notifer_data *notif_data = data;
	qdf_notif_block *notif_block;
	struct hif_softc *hif_handle;

	if (!data || !block)
		return -EINVAL;

	/* the generic notifier_block is embedded in a qdf_notif_block */
	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);

	hif_handle = notif_block->priv_data;
	if (!hif_handle)
		return -EINVAL;

	hif_log_ce_info(hif_handle, notif_data->hang_data,
			&notif_data->offset);

	return 0;
}
502 
/**
 * hif_register_recovery_notifier - Register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Wires hif_recovery_notifier_cb into the hang-event notifier chain,
 * stashing @hif_handle in the block's priv_data for the callback.
 *
 * Return: status
 */
static
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier;

	if (!hif_handle)
		return QDF_STATUS_E_FAILURE;

	hif_notifier = &hif_handle->hif_recovery_notifier;

	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
	hif_notifier->priv_data = hif_handle;
	return qdf_hang_event_register_notifier(hif_notifier);
}
523 
/**
 * hif_unregister_recovery_notifier - Un-register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;

	return qdf_hang_event_unregister_notifier(hif_notifier);
}
#else
/* Stubs: hang-event logging not compiled in; report success */
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
550 
#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * __hif_cpu_hotplug_notify() - CPU hotplug event handler
 * @context: hif context (struct hif_softc *)
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Re-applies IRQ perf affinity when a CPU comes online; events during
 * driver unload or recovery are ignored.
 *
 * Return: None
 */
static void __hif_cpu_hotplug_notify(void *context,
				     uint32_t cpu, bool cpu_up)
{
	struct hif_softc *scn = context;

	if (!scn)
		return;
	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
		return;

	if (cpu_up) {
		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
		hif_debug("Setting affinity for online CPU: %d", cpu);
	} else {
		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
	}
}
576 
/**
 * hif_cpu_hotplug_notify - cpu core up/down notification
 * handler
 * @context: hif context (struct hif_softc *)
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Wraps __hif_cpu_hotplug_notify() in a driver-op protection so the
 * handler cannot race with driver unload.
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}
596 
/* qdf_cpuhp callback: a CPU came online */
static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}
601 
/* qdf_cpuhp callback: a CPU is about to go offline */
static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}
606 
607 static void hif_cpuhp_register(struct hif_softc *scn)
608 {
609 	if (!scn) {
610 		hif_info_high("cannot register hotplug notifiers");
611 		return;
612 	}
613 	qdf_cpuhp_register(&scn->cpuhp_event_handle,
614 			   scn,
615 			   hif_cpu_online_cb,
616 			   hif_cpu_before_offline_cb);
617 }
618 
619 static void hif_cpuhp_unregister(struct hif_softc *scn)
620 {
621 	if (!scn) {
622 		hif_info_high("cannot unregister hotplug notifiers");
623 		return;
624 	}
625 	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
626 }
627 
#else
/* Stubs: CPU perf affinity not compiled in */
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
637 
638 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
639 				  uint32_t mode,
640 				  enum qdf_bus_type bus_type,
641 				  struct hif_driver_state_callbacks *cbk,
642 				  struct wlan_objmgr_psoc *psoc)
643 {
644 	struct hif_softc *scn;
645 	QDF_STATUS status = QDF_STATUS_SUCCESS;
646 	int bus_context_size = hif_bus_get_context_size(bus_type);
647 
648 	if (bus_context_size == 0) {
649 		HIF_ERROR("%s: context size 0 not allowed", __func__);
650 		return NULL;
651 	}
652 
653 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
654 	if (!scn) {
655 		HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d",
656 						__func__, bus_context_size);
657 		return GET_HIF_OPAQUE_HDL(scn);
658 	}
659 
660 	scn->qdf_dev = qdf_ctx;
661 	scn->hif_con_param = mode;
662 	qdf_atomic_init(&scn->active_tasklet_cnt);
663 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
664 	qdf_atomic_init(&scn->link_suspended);
665 	qdf_atomic_init(&scn->tasklet_from_intr);
666 	qdf_mem_copy(&scn->callbacks, cbk,
667 		     sizeof(struct hif_driver_state_callbacks));
668 	scn->bus_type  = bus_type;
669 
670 	hif_get_cfg_from_psoc(scn, psoc);
671 
672 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
673 	status = hif_bus_open(scn, bus_type);
674 	if (status != QDF_STATUS_SUCCESS) {
675 		HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d",
676 				  __func__, status, bus_type);
677 		qdf_mem_free(scn);
678 		scn = NULL;
679 	}
680 	hif_cpuhp_register(scn);
681 	return GET_HIF_OPAQUE_HDL(scn);
682 }
683 
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (!scn->vaddr_rri_on_ddr)
		return;

	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
				(CE_COUNT * sizeof(uint32_t)),
				scn->vaddr_rri_on_ddr,
				scn->paddr_rri_on_ddr, 0);
	scn->vaddr_rri_on_ddr = NULL;
}
#endif
701 
/**
 * hif_close(): release all resources held by a hif context
 * @hif_ctx: hif context returned by hif_open()
 *
 * Tears down the procfs entry, cached hw name string, RRI DDR memory
 * and CPU hotplug notifiers, closes the bus-specific state, then
 * frees the context itself.
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: hif_opaque_softc is NULL", __func__);
		return;
	}

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		/* point at a static marker before freeing so late readers
		 * see a valid string rather than a dangling pointer
		 */
		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cpuhp_unregister(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}
736 
/**
 * hif_get_num_active_tasklets() - get the number of active
 *		tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of tasklets which are active
 */
static inline int hif_get_num_active_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_tasklet_cnt);
}
748 
/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 *		datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}
760 
#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018))
/**
 * hif_get_num_pending_work() - get the number of entries in
 *		the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of HAL register-write work items still pending
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
#else
/* Stub for targets without delayed HAL register-write work */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
#endif
783 
/**
 * hif_try_complete_tasks() - wait for hif tasklets and work to drain
 * @scn: HIF context
 *
 * Polls the active tasklet, group tasklet and pending-work counts,
 * sleeping 10 ms between checks, until all are zero or the wait
 * budget (HIF_TASK_DRAIN_WAIT_CNT iterations) is exhausted.
 *
 * Note: due to short-circuit evaluation of the while condition, the
 * counts logged for the later sources may be stale from a previous
 * iteration (or still 0 on the first); only the drained/not-drained
 * outcome is authoritative.
 *
 * Return: QDF_STATUS_SUCCESS when everything drained,
 *	QDF_STATUS_E_FAULT on timeout
 */
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
804 
805 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
806 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
807 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
808 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018))
809 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
810 {
811 	if (ce_srng_based(scn)) {
812 		scn->hal_soc = hal_attach(
813 					hif_softc_to_hif_opaque_softc(scn),
814 					scn->qdf_dev);
815 		if (!scn->hal_soc)
816 			return QDF_STATUS_E_FAILURE;
817 	}
818 
819 	return QDF_STATUS_SUCCESS;
820 }
821 
822 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
823 {
824 	if (ce_srng_based(scn)) {
825 		hal_detach(scn->hal_soc);
826 		scn->hal_soc = NULL;
827 	}
828 
829 	return QDF_STATUS_SUCCESS;
830 }
#else
/* Stubs: no HAL on these targets; attach/detach trivially succeed */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
#endif
842 
/**
 * hif_enable(): enable the bus, attach HAL and configure the target
 * @hif_ctx: hif_ctx
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Failure unwinds in reverse order via the goto cleanup labels.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
					  void *bdev,
					  const struct hif_bus_id *bid,
					  enum qdf_bus_type bus_type,
					  enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: hif_ctx = NULL", __func__);
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: hif_enable_bus error = %d",
				  __func__, status);
		return status;
	}

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: hal attach failed", __func__);
		goto disable_bus;
	}

	if (hif_bus_configure(scn)) {
		HIF_ERROR("%s: Target probe failed.", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);
	hif_register_recovery_notifier(scn);

	/*
	 * Flag to avoid potential unallocated memory access from MSI
	 * interrupt handler which could get scheduled as soon as MSI
	 * is enabled, i.e to take care of the race due to the order
	 * in where MSI is enabled before the memory, that will be
	 * in interrupt handlers, is allocated.
	 */

	scn->hif_init_done = true;

	HIF_DBG("%s: OK", __func__);

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}
910 
/**
 * hif_disable(): undo hif_enable()
 * @hif_ctx: hif context
 * @type: disable type, forwarded to the bus layer indirectly
 *
 * Unregisters the recovery notifier, quiesces interrupts, stops or
 * force-shuts-down the device depending on whether initialization
 * completed, then detaches HAL and disables the bus.
 *
 * Return: n/a
 */
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	/* if init never completed, a full shutdown is the only safe path */
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	HIF_DBG("%s: X", __func__);
}
936 
#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_enable_ce_latency_stats(): enable/disable CE latency accounting
 * @hif_ctx: hif context
 * @val: nonzero to enable latency stats collection, 0 to disable
 *
 * Return: n/a
 */
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	scn->ce_latency_stats = val;
}
#endif
948 
/* Print bus-level statistics for this hif context */
void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}

qdf_export_symbol(hif_display_stats);
955 
/* Reset bus-level statistics for this hif context */
void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
960 
/**
 * hif_crash_shutdown_dump_bus_register() - dump bus registers
 * @hif_ctx: hif_ctx
 *
 * Skips the dump if the soc is not accessible; logs on dump failure.
 *
 * Return: n/a
 */
#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)

static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		HIF_ERROR("Failed to dump bus registers!");
}
979 
/**
 * hif_crash_shutdown(): hif_crash_shutdown
 *
 * This function is called by the platform driver to dump CE registers
 * and collect a target RAM dump on a crash. It bails out for SNOC
 * targets, already-asserted targets, and while load/unload is in
 * progress.
 *
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	/* HIF_GET_SOFTC is only a cast; safe before the NULL check below */
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_ctx)
		return;

	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
		HIF_INFO_MED("%s: RAM dump disabled for bustype %d",
				__func__, scn->bus_type);
		return;
	}

	if (TARGET_STATUS_RESET == scn->target_status) {
		HIF_INFO_MED("%s: Target is already asserted, ignore!",
			    __func__);
		return;
	}

	if (hif_is_load_or_unload_in_progress(scn)) {
		HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__);
		return;
	}

	hif_crash_shutdown_dump_bus_register(hif_ctx);
	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);

	if (ol_copy_ramdump(hif_ctx))
		goto out;

	HIF_INFO_MED("%s: RAM dump collecting completed!", __func__);

out:
	return;
}
#else
/* Stub: RAM dump collection not compiled in */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	HIF_INFO_MED("%s: Collecting target RAM dump disabled",
		__func__);
}
#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
1031 
#ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg(): hif_check_fw_reg
 * @scn: scn
 *
 * No firmware register check is required on QCA_WIFI_3_0 targets.
 *
 * Return: always 0
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
#endif
1045 
/**
 * hif_read_phy_mem_base(): hif_read_phy_mem_base
 * @scn: scn
 * @phy_mem_base: out - physical (DMA) base address of device memory
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}
qdf_export_symbol(hif_read_phy_mem_base);
1058 
1059 /**
1060  * hif_get_device_type(): hif_get_device_type
1061  * @device_id: device_id
1062  * @revision_id: revision_id
1063  * @hif_type: returned hif_type
1064  * @target_type: returned target_type
1065  *
1066  * Return: int
1067  */
1068 int hif_get_device_type(uint32_t device_id,
1069 			uint32_t revision_id,
1070 			uint32_t *hif_type, uint32_t *target_type)
1071 {
1072 	int ret = 0;
1073 
1074 	switch (device_id) {
1075 	case ADRASTEA_DEVICE_ID_P2_E12:
1076 
1077 		*hif_type = HIF_TYPE_ADRASTEA;
1078 		*target_type = TARGET_TYPE_ADRASTEA;
1079 		break;
1080 
1081 	case AR9888_DEVICE_ID:
1082 		*hif_type = HIF_TYPE_AR9888;
1083 		*target_type = TARGET_TYPE_AR9888;
1084 		break;
1085 
1086 	case AR6320_DEVICE_ID:
1087 		switch (revision_id) {
1088 		case AR6320_FW_1_1:
1089 		case AR6320_FW_1_3:
1090 			*hif_type = HIF_TYPE_AR6320;
1091 			*target_type = TARGET_TYPE_AR6320;
1092 			break;
1093 
1094 		case AR6320_FW_2_0:
1095 		case AR6320_FW_3_0:
1096 		case AR6320_FW_3_2:
1097 			*hif_type = HIF_TYPE_AR6320V2;
1098 			*target_type = TARGET_TYPE_AR6320V2;
1099 			break;
1100 
1101 		default:
1102 			HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x",
1103 				   __func__, device_id, revision_id);
1104 			ret = -ENODEV;
1105 			goto end;
1106 		}
1107 		break;
1108 
1109 	case AR9887_DEVICE_ID:
1110 		*hif_type = HIF_TYPE_AR9888;
1111 		*target_type = TARGET_TYPE_AR9888;
1112 		HIF_INFO(" *********** AR9887 **************");
1113 		break;
1114 
1115 	case QCA9984_DEVICE_ID:
1116 		*hif_type = HIF_TYPE_QCA9984;
1117 		*target_type = TARGET_TYPE_QCA9984;
1118 		HIF_INFO(" *********** QCA9984 *************");
1119 		break;
1120 
1121 	case QCA9888_DEVICE_ID:
1122 		*hif_type = HIF_TYPE_QCA9888;
1123 		*target_type = TARGET_TYPE_QCA9888;
1124 		HIF_INFO(" *********** QCA9888 *************");
1125 		break;
1126 
1127 	case AR900B_DEVICE_ID:
1128 		*hif_type = HIF_TYPE_AR900B;
1129 		*target_type = TARGET_TYPE_AR900B;
1130 		HIF_INFO(" *********** AR900B *************");
1131 		break;
1132 
1133 	case IPQ4019_DEVICE_ID:
1134 		*hif_type = HIF_TYPE_IPQ4019;
1135 		*target_type = TARGET_TYPE_IPQ4019;
1136 		HIF_INFO(" *********** IPQ4019  *************");
1137 		break;
1138 
1139 	case QCA8074_DEVICE_ID:
1140 		*hif_type = HIF_TYPE_QCA8074;
1141 		*target_type = TARGET_TYPE_QCA8074;
1142 		HIF_INFO(" *********** QCA8074  *************\n");
1143 		break;
1144 
1145 	case QCA6290_EMULATION_DEVICE_ID:
1146 	case QCA6290_DEVICE_ID:
1147 		*hif_type = HIF_TYPE_QCA6290;
1148 		*target_type = TARGET_TYPE_QCA6290;
1149 		HIF_INFO(" *********** QCA6290EMU *************\n");
1150 		break;
1151 
1152 	case QCN9000_DEVICE_ID:
1153 		*hif_type = HIF_TYPE_QCN9000;
1154 		*target_type = TARGET_TYPE_QCN9000;
1155 		HIF_INFO(" *********** QCN9000 *************\n");
1156 		break;
1157 
1158 	case QCN7605_DEVICE_ID:
1159 	case QCN7605_COMPOSITE:
1160 	case QCN7605_STANDALONE:
1161 	case QCN7605_STANDALONE_V2:
1162 	case QCN7605_COMPOSITE_V2:
1163 		*hif_type = HIF_TYPE_QCN7605;
1164 		*target_type = TARGET_TYPE_QCN7605;
1165 		HIF_INFO(" *********** QCN7605 *************\n");
1166 		break;
1167 
1168 	case QCA6390_DEVICE_ID:
1169 	case QCA6390_EMULATION_DEVICE_ID:
1170 		*hif_type = HIF_TYPE_QCA6390;
1171 		*target_type = TARGET_TYPE_QCA6390;
1172 		HIF_INFO(" *********** QCA6390 *************\n");
1173 		break;
1174 
1175 	case QCA6490_DEVICE_ID:
1176 	case QCA6490_EMULATION_DEVICE_ID:
1177 		*hif_type = HIF_TYPE_QCA6490;
1178 		*target_type = TARGET_TYPE_QCA6490;
1179 		HIF_INFO(" *********** QCA6490 *************\n");
1180 		break;
1181 
1182 	case QCA6750_DEVICE_ID:
1183 	case QCA6750_EMULATION_DEVICE_ID:
1184 		*hif_type = HIF_TYPE_QCA6750;
1185 		*target_type = TARGET_TYPE_QCA6750;
1186 		HIF_INFO(" *********** QCA6750 *************\n");
1187 		break;
1188 
1189 	case QCA8074V2_DEVICE_ID:
1190 		*hif_type = HIF_TYPE_QCA8074V2;
1191 		*target_type = TARGET_TYPE_QCA8074V2;
1192 		HIF_INFO(" *********** QCA8074V2 *************\n");
1193 		break;
1194 
1195 	case QCA6018_DEVICE_ID:
1196 	case RUMIM2M_DEVICE_ID_NODE0:
1197 	case RUMIM2M_DEVICE_ID_NODE1:
1198 	case RUMIM2M_DEVICE_ID_NODE2:
1199 	case RUMIM2M_DEVICE_ID_NODE3:
1200 	case RUMIM2M_DEVICE_ID_NODE4:
1201 	case RUMIM2M_DEVICE_ID_NODE5:
1202 		*hif_type = HIF_TYPE_QCA6018;
1203 		*target_type = TARGET_TYPE_QCA6018;
1204 		HIF_INFO(" *********** QCA6018 *************\n");
1205 		break;
1206 
1207 	case QCA5018_DEVICE_ID:
1208 		*hif_type = HIF_TYPE_QCA5018;
1209 		*target_type = TARGET_TYPE_QCA5018;
1210 		HIF_INFO(" *********** qca5018 *************\n");
1211 		break;
1212 
1213 	default:
1214 		HIF_ERROR("%s: Unsupported device ID = 0x%x!",
1215 			  __func__, device_id);
1216 		ret = -ENODEV;
1217 		break;
1218 	}
1219 
1220 	if (*target_type == TARGET_TYPE_UNKNOWN) {
1221 		HIF_ERROR("%s: Unsupported target_type!", __func__);
1222 		ret = -ENODEV;
1223 	}
1224 end:
1225 	return ret;
1226 }
1227 
1228 /**
1229  * hif_get_bus_type() - return the bus type
1230  *
1231  * Return: enum qdf_bus_type
1232  */
1233 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
1234 {
1235 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
1236 
1237 	return scn->bus_type;
1238 }
1239 
1240 /**
1241  * Target info and ini parameters are global to the driver
1242  * Hence these structures are exposed to all the modules in
1243  * the driver and they don't need to maintains multiple copies
1244  * of the same info, instead get the handle from hif and
1245  * modify them in hif
1246  */
1247 
1248 /**
1249  * hif_get_ini_handle() - API to get hif_config_param handle
1250  * @hif_ctx: HIF Context
1251  *
1252  * Return: pointer to hif_config_info
1253  */
1254 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
1255 {
1256 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1257 
1258 	return &sc->hif_config;
1259 }
1260 
1261 /**
1262  * hif_get_target_info_handle() - API to get hif_target_info handle
1263  * @hif_ctx: HIF context
1264  *
1265  * Return: Pointer to hif_target_info
1266  */
1267 struct hif_target_info *hif_get_target_info_handle(
1268 					struct hif_opaque_softc *hif_ctx)
1269 {
1270 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1271 
1272 	return &sc->target_info;
1273 
1274 }
1275 qdf_export_symbol(hif_get_target_info_handle);
1276 
1277 #ifdef RECEIVE_OFFLOAD
/* Register an rx-offload flush callback; only valid when NAPI is enabled. */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (!hif_napi_enabled(scn, -1)) {
		HIF_ERROR("NAPI not enabled\n");
		return;
	}

	hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
}
qdf_export_symbol(hif_offld_flush_cb_register);
1287 
/* Deregister the rx-offload flush callback; only valid when NAPI is enabled. */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (!hif_napi_enabled(scn, -1)) {
		HIF_ERROR("NAPI not enabled\n");
		return;
	}

	hif_napi_rx_offld_flush_cb_deregister(scn);
}
qdf_export_symbol(hif_offld_flush_cb_deregister);
1296 
/* Map a pipe id to its rx context id; identity mapping when NAPI is off. */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (!hif_napi_enabled(hif_hdl, -1))
		return ctx_id;

	return NAPI_PIPE2ID(ctx_id);
}
1304 #else /* RECEIVE_OFFLOAD */
/**
 * hif_get_rx_ctx_id() - stub when RECEIVE_OFFLOAD is not compiled in
 * @ctx_id: rx context id (intentionally ignored here)
 * @hif_hdl: HIF Context (unused)
 *
 * Without RECEIVE_OFFLOAD there is no NAPI pipe-to-id mapping, so all
 * rx contexts collapse to 0.
 *
 * Return: 0
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
1310 #endif /* RECEIVE_OFFLOAD */
1311 
1312 #if defined(FEATURE_LRO)
1313 
1314 /**
1315  * hif_get_lro_info - Returns LRO instance for instance ID
1316  * @ctx_id: LRO instance ID
1317  * @hif_hdl: HIF Context
1318  *
1319  * Return: Pointer to LRO instance.
1320  */
1321 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
1322 {
1323 	void *data;
1324 
1325 	if (hif_napi_enabled(hif_hdl, -1))
1326 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
1327 	else
1328 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
1329 
1330 	return data;
1331 }
1332 #endif
1333 
1334 /**
1335  * hif_get_target_status - API to get target status
1336  * @hif_ctx: HIF Context
1337  *
1338  * Return: enum hif_target_status
1339  */
1340 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
1341 {
1342 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1343 
1344 	return scn->target_status;
1345 }
1346 qdf_export_symbol(hif_get_target_status);
1347 
1348 /**
1349  * hif_set_target_status() - API to set target status
1350  * @hif_ctx: HIF Context
1351  * @status: Target Status
1352  *
1353  * Return: void
1354  */
1355 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1356 			   hif_target_status status)
1357 {
1358 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1359 
1360 	scn->target_status = status;
1361 }
1362 
1363 /**
1364  * hif_init_ini_config() - API to initialize HIF configuration parameters
1365  * @hif_ctx: HIF Context
1366  * @cfg: HIF Configuration
1367  *
1368  * Return: void
1369  */
1370 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1371 			 struct hif_config_info *cfg)
1372 {
1373 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1374 
1375 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1376 }
1377 
1378 /**
1379  * hif_get_conparam() - API to get driver mode in HIF
1380  * @scn: HIF Context
1381  *
1382  * Return: driver mode of operation
1383  */
1384 uint32_t hif_get_conparam(struct hif_softc *scn)
1385 {
1386 	if (!scn)
1387 		return 0;
1388 
1389 	return scn->hif_con_param;
1390 }
1391 
1392 /**
1393  * hif_get_callbacks_handle() - API to get callbacks Handle
1394  * @scn: HIF Context
1395  *
1396  * Return: pointer to HIF Callbacks
1397  */
1398 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
1399 							struct hif_softc *scn)
1400 {
1401 	return &scn->callbacks;
1402 }
1403 
1404 /**
1405  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1406  * @scn: HIF Context
1407  *
1408  * Return: True/False
1409  */
1410 bool hif_is_driver_unloading(struct hif_softc *scn)
1411 {
1412 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1413 
1414 	if (cbk && cbk->is_driver_unloading)
1415 		return cbk->is_driver_unloading(cbk->context);
1416 
1417 	return false;
1418 }
1419 
1420 /**
1421  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1422  * load/unload in progress
1423  * @scn: HIF Context
1424  *
1425  * Return: True/False
1426  */
1427 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1428 {
1429 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1430 
1431 	if (cbk && cbk->is_load_unload_in_progress)
1432 		return cbk->is_load_unload_in_progress(cbk->context);
1433 
1434 	return false;
1435 }
1436 
1437 /**
1438  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1439  * progress
1440  * @scn: HIF Context
1441  *
1442  * Return: True/False
1443  */
1444 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1445 {
1446 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1447 
1448 	if (cbk && cbk->is_recovery_in_progress)
1449 		return cbk->is_recovery_in_progress(cbk->context);
1450 
1451 	return false;
1452 }
1453 
1454 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
1455     defined(HIF_IPCI)
1456 
1457 /**
1458  * hif_update_pipe_callback() - API to register pipe specific callbacks
1459  * @osc: Opaque softc
1460  * @pipeid: pipe id
1461  * @callbacks: callbacks to register
1462  *
1463  * Return: void
1464  */
1465 
1466 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1467 					u_int8_t pipeid,
1468 					struct hif_msg_callbacks *callbacks)
1469 {
1470 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
1471 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1472 	struct HIF_CE_pipe_info *pipe_info;
1473 
1474 	QDF_BUG(pipeid < CE_COUNT_MAX);
1475 
1476 	HIF_INFO_LO("+%s pipeid %d\n", __func__, pipeid);
1477 
1478 	pipe_info = &hif_state->pipe_info[pipeid];
1479 
1480 	qdf_mem_copy(&pipe_info->pipe_callbacks,
1481 			callbacks, sizeof(pipe_info->pipe_callbacks));
1482 
1483 	HIF_INFO_LO("-%s\n", __func__);
1484 }
1485 qdf_export_symbol(hif_update_pipe_callback);
1486 
1487 /**
1488  * hif_is_target_ready() - API to query if target is in ready state
1489  * progress
1490  * @scn: HIF Context
1491  *
1492  * Return: True/False
1493  */
1494 bool hif_is_target_ready(struct hif_softc *scn)
1495 {
1496 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1497 
1498 	if (cbk && cbk->is_target_ready)
1499 		return cbk->is_target_ready(cbk->context);
1500 	/*
1501 	 * if callback is not registered then there is no way to determine
1502 	 * if target is ready. In-such case return true to indicate that
1503 	 * target is ready.
1504 	 */
1505 	return true;
1506 }
1507 qdf_export_symbol(hif_is_target_ready);
1508 
1509 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
1510 {
1511 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1512 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1513 
1514 	if (cbk && cbk->get_bandwidth_level)
1515 		return cbk->get_bandwidth_level(cbk->context);
1516 
1517 	return 0;
1518 }
1519 
1520 qdf_export_symbol(hif_get_bandwidth_level);
1521 
1522 /**
1523  * hif_batch_send() - API to access hif specific function
1524  * ce_batch_send.
1525  * @osc: HIF Context
1526  * @msdu : list of msdus to be sent
1527  * @transfer_id : transfer id
1528  * @len : donwloaded length
1529  *
1530  * Return: list of msds not sent
1531  */
1532 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1533 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
1534 {
1535 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1536 
1537 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1538 			len, sendhead);
1539 }
1540 qdf_export_symbol(hif_batch_send);
1541 
1542 /**
1543  * hif_update_tx_ring() - API to access hif specific function
1544  * ce_update_tx_ring.
1545  * @osc: HIF Context
1546  * @num_htt_cmpls : number of htt compl received.
1547  *
1548  * Return: void
1549  */
1550 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
1551 {
1552 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1553 
1554 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
1555 }
1556 qdf_export_symbol(hif_update_tx_ring);
1557 
1558 
1559 /**
1560  * hif_send_single() - API to access hif specific function
1561  * ce_send_single.
1562  * @osc: HIF Context
1563  * @msdu : msdu to be sent
1564  * @transfer_id: transfer id
1565  * @len : downloaded length
1566  *
1567  * Return: msdu sent status
1568  */
1569 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1570 			   uint32_t transfer_id, u_int32_t len)
1571 {
1572 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1573 
1574 	if (!ce_tx_hdl)
1575 		return QDF_STATUS_E_NULL_VALUE;
1576 
1577 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1578 			len);
1579 }
1580 qdf_export_symbol(hif_send_single);
1581 #endif
1582 
1583 /**
1584  * hif_reg_write() - API to access hif specific function
1585  * hif_write32_mb.
1586  * @hif_ctx : HIF Context
1587  * @offset : offset on which value has to be written
1588  * @value : value to be written
1589  *
1590  * Return: None
1591  */
1592 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
1593 		uint32_t value)
1594 {
1595 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1596 
1597 	hif_write32_mb(scn, scn->mem + offset, value);
1598 
1599 }
1600 qdf_export_symbol(hif_reg_write);
1601 
1602 /**
1603  * hif_reg_read() - API to access hif specific function
1604  * hif_read32_mb.
1605  * @hif_ctx : HIF Context
1606  * @offset : offset from which value has to be read
1607  *
1608  * Return: Read value
1609  */
1610 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
1611 {
1612 
1613 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1614 
1615 	return hif_read32_mb(scn, scn->mem + offset);
1616 }
1617 qdf_export_symbol(hif_reg_read);
1618 
1619 /**
1620  * hif_ramdump_handler(): generic ramdump handler
1621  * @scn: struct hif_opaque_softc
1622  *
1623  * Return: None
1624  */
1625 void hif_ramdump_handler(struct hif_opaque_softc *scn)
1626 {
1627 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
1628 		hif_usb_ramdump_handler(scn);
1629 }
1630 
/**
 * hif_wake_interrupt_handler() - IRQ handler for the target wake interrupt
 * @irq: irq number that fired
 * @context: struct hif_softc pointer registered with the irq
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
{
	struct hif_softc *scn = context;
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);

	HIF_INFO("wake interrupt received on irq %d", irq);

	/*
	 * If runtime PM is monitoring for this wake interrupt, clear the
	 * monitor flag first and then request a runtime resume.
	 */
	if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) {
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		hif_pm_runtime_request_resume(hif_ctx);
	}

	/* Notify the registered client of the initial wakeup, if any */
	if (scn->initial_wakeup_cb)
		scn->initial_wakeup_cb(scn->initial_wakeup_priv);

	/* Resume firmware if a unit-test suspend is currently in effect */
	if (hif_is_ut_suspended(scn))
		hif_ut_fw_resume(scn);

	qdf_pm_system_wakeup();

	return IRQ_HANDLED;
}
1653 
1654 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1655 			       void (*callback)(void *),
1656 			       void *priv)
1657 {
1658 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1659 
1660 	scn->initial_wakeup_cb = callback;
1661 	scn->initial_wakeup_priv = priv;
1662 }
1663 
1664 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1665 				       uint32_t ce_service_max_yield_time)
1666 {
1667 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1668 
1669 	hif_ctx->ce_service_max_yield_time =
1670 		ce_service_max_yield_time * 1000;
1671 }
1672 
1673 unsigned long long
1674 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
1675 {
1676 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1677 
1678 	return hif_ctx->ce_service_max_yield_time;
1679 }
1680 
1681 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1682 				       uint8_t ce_service_max_rx_ind_flush)
1683 {
1684 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1685 
1686 	if (ce_service_max_rx_ind_flush == 0 ||
1687 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
1688 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
1689 	else
1690 		hif_ctx->ce_service_max_rx_ind_flush =
1691 						ce_service_max_rx_ind_flush;
1692 }
1693