xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
1 /*
2  * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "hif_hw_version.h"
33 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
34      defined(HIF_IPCI))
35 #include "ce_tasklet.h"
36 #include "ce_api.h"
37 #endif
38 #include "qdf_trace.h"
39 #include "qdf_status.h"
40 #include "hif_debug.h"
41 #include "mp_dev.h"
42 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
43 	defined(QCA_WIFI_QCA5018)
44 #include "hal_api.h"
45 #endif
46 #include "hif_napi.h"
47 #include "hif_unit_test_suspend_i.h"
48 #include "qdf_module.h"
49 #ifdef HIF_CE_LOG_INFO
50 #include <qdf_notifier.h>
51 #include <qdf_hang_event_notifier.h>
52 #endif
53 
/**
 * hif_dump() - trigger a bus-specific target dump
 * @hif_ctx: hif context
 * @cmd_id: dump command identifier, passed through to the bus layer
 * @start: true to start the dump, false to stop it
 *
 * Thin wrapper around hif_trigger_dump().
 *
 * Return: n/a
 */
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}
58 
/**
 * hif_get_target_id(): hif_get_target_id
 * @scn: hif_softc
 *
 * Return the virtual memory base address (scn->mem) to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}
72 
/**
 * hif_get_targetdef(): get the target register definition table
 * @hif_ctx: opaque hif context
 *
 * Return: pointer to the target definition (scn->targetdef)
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}
85 
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase() - mark/unmark SRNG init phase in HAL
 * @hif_ctx: opaque hif context
 * @init_phase: true while SRNG initialization is in progress
 *
 * No-op for targets that are not CE-SRNG based.
 *
 * Return: n/a
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn))
		hal_set_init_phase(scn->hal_soc, init_phase);
}
#endif /* FORCE_WAKE */
96 
/**
 * hif_vote_link_down(): unvote for link up
 * @hif_ctx: opaque hif context
 *
 * Call hif_vote_link_down to release a previous request made using
 * hif_vote_link_up. A hif_vote_link_down call should only be made
 * after a corresponding hif_vote_link_up, otherwise you could be
 * negating a vote from another source. When no votes are present
 * hif will not guarantee the linkstate after hif_bus_suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote--;
	HIF_INFO("Down_linkstate_vote %d", scn->linkstate_vote);
	/* last vote released: allow the bus to suspend the link again */
	if (scn->linkstate_vote == 0)
		hif_bus_prevent_linkdown(scn, false);
}
121 
/**
 * hif_vote_link_up(): vote to prevent bus from suspending
 * @hif_ctx: opaque hif context
 *
 * Makes hif guarantee that fw can message the host normally
 * during suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote++;
	HIF_INFO("Up_linkstate_vote %d", scn->linkstate_vote);
	/* first vote taken: keep the link up from now on */
	if (scn->linkstate_vote == 1)
		hif_bus_prevent_linkdown(scn, true);
}
143 
/**
 * hif_can_suspend_link(): query if hif is permitted to suspend the link
 * @hif_ctx: opaque hif context
 *
 * Hif will ensure that the link won't be suspended if the upperlayers
 * don't want it to.
 *
 * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
 * we don't need extra locking to ensure votes don't change while
 * we are in the process of suspending or resuming.
 *
 * Return: false if hif will guarantee link up during suspend.
 */
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	return scn->linkstate_vote == 0;
}
163 
/**
 * hif_hia_item_address(): get the HOST_INTEREST address of an item
 * @target_type: target_type
 * @item_offset: offset of the item within the host interest area
 *
 * Return: absolute address of the item for the given target, or 0
 * (after asserting) for targets without a host interest area or for
 * unknown target types.
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_IPQ4019:
		return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;

	default:
		ASSERT(0);
		return 0;
	}
}
205 
206 /**
207  * hif_max_num_receives_reached() - check max receive is reached
208  * @scn: HIF Context
209  * @count: unsigned int.
210  *
211  * Output check status as bool
212  *
213  * Return: bool
214  */
215 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
216 {
217 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
218 		return count > 120;
219 	else
220 		return count > MAX_NUM_OF_RECEIVES;
221 }
222 
/**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum number of buffers the caller could allocate
 *
 * routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed;
 * the default implementation allocates the maximum.
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}
236 
237 /**
238  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
239  * @hif_ctx: hif context
240  * @htc_htt_tx_endpoint: htt_tx_endpoint
241  *
242  * Return: void
243  */
244 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
245 							int htc_htt_tx_endpoint)
246 {
247 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
248 
249 	if (!scn) {
250 		HIF_ERROR("%s: error: scn or scn->hif_sc is NULL!",
251 		       __func__);
252 		return;
253 	}
254 
255 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
256 }
257 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
258 
/* Table mapping (target_version, target_revision) pairs to human
 * readable hardware names; consumed by hif_get_hw_name().
 */
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	}
};
346 
347 /**
348  * hif_get_hw_name(): get a human readable name for the hardware
349  * @info: Target Info
350  *
351  * Return: human readable name for the underlying wifi hardware.
352  */
353 static const char *hif_get_hw_name(struct hif_target_info *info)
354 {
355 	int i;
356 
357 	if (info->hw_name)
358 		return info->hw_name;
359 
360 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
361 		if (info->target_version == qwlan_hw_list[i].id &&
362 		    info->target_revision == qwlan_hw_list[i].subid) {
363 			return qwlan_hw_list[i].name;
364 		}
365 	}
366 
367 	info->hw_name = qdf_mem_malloc(64);
368 	if (!info->hw_name)
369 		return "Unknown Device (nomem)";
370 
371 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
372 			info->target_version);
373 	if (i < 0)
374 		return "Unknown Device (snprintf failure)";
375 	else
376 		return info->hw_name;
377 }
378 
/**
 * hif_get_hw_info(): hif_get_hw_info
 * @scn: scn
 * @version: set to the target version
 * @revision: set to the target revision
 * @target_name: set to a human readable name for the hardware
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
			const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	/* USB targets discover their hw info lazily; refresh before use */
	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
400 
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: opaque hif context
 *
 * Return: device virtual memory base address (scn->mem)
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);
416 
/**
 * hif_get_dev_ba_ce(): API to get device ce base address.
 * @hif_handle: opaque hif context
 *
 * Return: dev mem base address for CE
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_ce;
}

qdf_export_symbol(hif_get_dev_ba_ce);
431 
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle; ignored when NULL
 *
 * Copies the CE status ring timer/batch-count thresholds from the
 * psoc ini configuration into the hif context.
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.ce_status_ring_timer_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
		scn->ini_cfg.ce_status_ring_batch_count_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
	}
}
#else
/* thresholds not configurable: leave the hif context defaults in place */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
460 
#ifdef HIF_CE_LOG_INFO
/**
 * hif_recovery_notifier_cb - Recovery notifier callback to log
 *  hang event data
 * @block: notifier block
 * @state: state (unused here; required by the notifier signature)
 * @data: notifier data, expected to point at a struct qdf_notifer_data
 *
 * Return: 0 on success, -EINVAL on bad arguments
 */
static
int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
			     void *data)
{
	struct qdf_notifer_data *notif_data = data;
	qdf_notif_block *notif_block;
	struct hif_softc *hif_handle;

	if (!data || !block)
		return -EINVAL;

	/* recover the enclosing qdf_notif_block to reach its priv_data */
	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);

	hif_handle = notif_block->priv_data;
	if (!hif_handle)
		return -EINVAL;

	hif_log_ce_info(hif_handle, notif_data->hang_data,
			&notif_data->offset);

	return 0;
}
493 
/**
 * hif_register_recovery_notifier - Register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Wires hif_recovery_notifier_cb into the hang-event notifier chain
 * with @hif_handle as its private data.
 *
 * Return: QDF_STATUS_E_FAILURE for a NULL handle, otherwise the
 * registration status
 */
static
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier;

	if (!hif_handle)
		return QDF_STATUS_E_FAILURE;

	hif_notifier = &hif_handle->hif_recovery_notifier;

	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
	hif_notifier->priv_data = hif_handle;
	return qdf_hang_event_register_notifier(hif_notifier);
}
514 
/**
 * hif_unregister_recovery_notifier - Un-register hif recovery notifier
 * @hif_handle: hif handle (must be non-NULL; not validated here)
 *
 * Return: unregistration status
 */
static
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;

	return qdf_hang_event_unregister_notifier(hif_notifier);
}
#else
/* HIF_CE_LOG_INFO disabled: recovery notifier registration is a no-op */
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
541 
542 #ifdef HIF_CPU_PERF_AFFINE_MASK
543 /**
544  * __hif_cpu_hotplug_notify() - CPU hotplug event handler
545  * @cpu: CPU Id of the CPU generating the event
546  * @cpu_up: true if the CPU is online
547  *
548  * Return: None
549  */
550 static void __hif_cpu_hotplug_notify(void *context,
551 				     uint32_t cpu, bool cpu_up)
552 {
553 	struct hif_softc *scn = context;
554 
555 	if (!scn)
556 		return;
557 	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
558 		return;
559 
560 	if (cpu_up) {
561 		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
562 		hif_debug("Setting affinity for online CPU: %d", cpu);
563 	} else {
564 		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
565 	}
566 }
567 
/**
 * hif_cpu_hotplug_notify - cpu core up/down notification
 * handler
 * @context: hif context passed through from registration
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Wraps __hif_cpu_hotplug_notify in a driver-op protection so the
 * handler cannot race driver unload.
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}
587 
/* qdf_cpuhp callback: a CPU has come online */
static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}
592 
/* qdf_cpuhp callback: a CPU is about to go offline */
static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}
597 
/* Register CPU hotplug callbacks so IRQ affinity hints track online CPUs */
static void hif_cpuhp_register(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot register hotplug notifiers");
		return;
	}
	qdf_cpuhp_register(&scn->cpuhp_event_handle,
			   scn,
			   hif_cpu_online_cb,
			   hif_cpu_before_offline_cb);
}
609 
/* Unregister the CPU hotplug callbacks installed by hif_cpuhp_register() */
static void hif_cpuhp_unregister(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot unregister hotplug notifiers");
		return;
	}
	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
}
618 
#else
/* HIF_CPU_PERF_AFFINE_MASK disabled: CPU hotplug tracking is a no-op */
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
628 
629 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
630 				  uint32_t mode,
631 				  enum qdf_bus_type bus_type,
632 				  struct hif_driver_state_callbacks *cbk,
633 				  struct wlan_objmgr_psoc *psoc)
634 {
635 	struct hif_softc *scn;
636 	QDF_STATUS status = QDF_STATUS_SUCCESS;
637 	int bus_context_size = hif_bus_get_context_size(bus_type);
638 
639 	if (bus_context_size == 0) {
640 		HIF_ERROR("%s: context size 0 not allowed", __func__);
641 		return NULL;
642 	}
643 
644 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
645 	if (!scn) {
646 		HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d",
647 						__func__, bus_context_size);
648 		return GET_HIF_OPAQUE_HDL(scn);
649 	}
650 
651 	scn->qdf_dev = qdf_ctx;
652 	scn->hif_con_param = mode;
653 	qdf_atomic_init(&scn->active_tasklet_cnt);
654 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
655 	qdf_atomic_init(&scn->link_suspended);
656 	qdf_atomic_init(&scn->tasklet_from_intr);
657 	qdf_mem_copy(&scn->callbacks, cbk,
658 		     sizeof(struct hif_driver_state_callbacks));
659 	scn->bus_type  = bus_type;
660 
661 	hif_get_cfg_from_psoc(scn, psoc);
662 
663 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
664 	status = hif_bus_open(scn, bus_type);
665 	if (status != QDF_STATUS_SUCCESS) {
666 		HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d",
667 				  __func__, status, bus_type);
668 		qdf_mem_free(scn);
669 		scn = NULL;
670 	}
671 	hif_cpuhp_register(scn);
672 	return GET_HIF_OPAQUE_HDL(scn);
673 }
674 
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Safe to call when nothing was allocated; the pointer is cleared
 * afterwards so repeated calls are harmless.
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					(CE_COUNT * sizeof(uint32_t)),
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);
	scn->vaddr_rri_on_ddr = NULL;
}
#endif
692 
/**
 * hif_close(): tear down and free a hif context created by hif_open()
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: hif_opaque_softc is NULL", __func__);
		return;
	}

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		/* park a static marker so late readers never see the freed
		 * buffer allocated by hif_get_hw_name()
		 */
		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cpuhp_unregister(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}
727 
/**
 * hif_get_num_active_tasklets() - get the number of active
 *		tasklets pending to be completed.
 * @scn: HIF context
 *
 * Return: the number of tasklets which are active
 */
static inline int hif_get_num_active_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_tasklet_cnt);
}
739 
/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 *		datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Return: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}
751 
#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018))
/**
 * hif_get_num_pending_work() - get the number of entries in
 *		the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Return: the number of pending register-write work entries in HAL
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
#else
/* targets without the HAL delayed register-write workqueue */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
#endif
774 
/**
 * hif_try_complete_tasks() - wait for pending tasklets and work to drain
 * @scn: HIF context
 *
 * Polls every 10 ms until no CE tasklets, group tasklets or pending
 * register-write work remain, giving up after HIF_TASK_DRAIN_WAIT_CNT
 * iterations.
 *
 * Return: QDF_STATUS_SUCCESS when drained, QDF_STATUS_E_FAULT on timeout
 */
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
795 
#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018))
/**
 * hif_hal_attach() - attach a HAL SoC handle for SRNG based targets
 * @scn: hif context
 *
 * No-op (success) for targets that are not CE-SRNG based.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if hal_attach fails
 */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		scn->hal_soc = hal_attach(
					hif_softc_to_hif_opaque_softc(scn),
					scn->qdf_dev);
		if (!scn->hal_soc)
			return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
812 
/**
 * hif_hal_detach() - detach the HAL SoC handle attached by hif_hal_attach()
 * @scn: hif context
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		hal_detach(scn->hal_soc);
		scn->hal_soc = NULL;
	}

	return QDF_STATUS_SUCCESS;
}
#else
/* non-SRNG targets: no HAL SoC handle to attach or detach */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
#endif
833 
/**
 * hif_enable(): enable the bus and bring the target up
 * @hif_ctx: hif_ctx
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Enables the bus, attaches HAL (for SRNG targets) and configures the
 * bus/target; on failure the steps already taken are unwound in
 * reverse order via the goto labels.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
					  void *bdev,
					  const struct hif_bus_id *bid,
					  enum qdf_bus_type bus_type,
					  enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: hif_ctx = NULL", __func__);
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: hif_enable_bus error = %d",
				  __func__, status);
		return status;
	}

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: hal attach failed", __func__);
		goto disable_bus;
	}

	if (hif_bus_configure(scn)) {
		HIF_ERROR("%s: Target probe failed.", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);
	hif_register_recovery_notifier(scn);

	/*
	 * Flag to avoid potential unallocated memory access from MSI
	 * interrupt handler which could get scheduled as soon as MSI
	 * is enabled, i.e to take care of the race due to the order
	 * in where MSI is enabled before the memory, that will be
	 * in interrupt handlers, is allocated.
	 */

	scn->hif_init_done = true;

	HIF_DBG("%s: OK", __func__);

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}
901 
/**
 * hif_disable(): undo hif_enable() and quiesce the target
 * @hif_ctx: hif context
 * @type: disable type, passed through from the caller's policy
 *
 * Return: n/a
 */
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	/* if init never completed, force a shutdown instead of a clean stop */
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	HIF_DBG("%s: X", __func__);
}
927 
#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_enable_ce_latency_stats() - enable/disable CE latency statistics
 * @hif_ctx: hif context
 * @val: non-zero to enable CE latency stats collection
 *
 * Return: n/a
 */
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	scn->ce_latency_stats = val;
}
#endif
939 
/* Display accumulated bus-level statistics for this hif context */
void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}

qdf_export_symbol(hif_display_stats);
946 
/* Reset accumulated bus-level statistics for this hif context */
void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
951 
/**
 * hif_crash_shutdown_dump_bus_register() - dump bus registers
 * @hif_ctx: hif_ctx
 *
 * Skips the dump entirely if the SoC is not in a readable state.
 *
 * Return: n/a
 */
#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)

static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		HIF_ERROR("Failed to dump bus registers!");
}
970 
971 /**
972  * hif_crash_shutdown(): hif_crash_shutdown
973  *
974  * This function is called by the platform driver to dump CE registers
975  *
976  * @hif_ctx: hif_ctx
977  *
978  * Return: n/a
979  */
980 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
981 {
982 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
983 
984 	if (!hif_ctx)
985 		return;
986 
987 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
988 		HIF_INFO_MED("%s: RAM dump disabled for bustype %d",
989 				__func__, scn->bus_type);
990 		return;
991 	}
992 
993 	if (TARGET_STATUS_RESET == scn->target_status) {
994 		HIF_INFO_MED("%s: Target is already asserted, ignore!",
995 			    __func__);
996 		return;
997 	}
998 
999 	if (hif_is_load_or_unload_in_progress(scn)) {
1000 		HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__);
1001 		return;
1002 	}
1003 
1004 	hif_crash_shutdown_dump_bus_register(hif_ctx);
1005 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
1006 
1007 	if (ol_copy_ramdump(hif_ctx))
1008 		goto out;
1009 
1010 	HIF_INFO_MED("%s: RAM dump collecting completed!", __func__);
1011 
1012 out:
1013 	return;
1014 }
#else
/* RAM dump collection compiled out: log and do nothing */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	HIF_INFO_MED("%s: Collecting target RAM dump disabled",
		__func__);
}
#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
1022 
#ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg(): hif_check_fw_reg
 * @scn: opaque hif context
 *
 * Firmware register check is a no-op on QCA_WIFI_3_0 targets.
 *
 * Return: 0 (always)
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
#endif
1036 
/**
 * hif_read_phy_mem_base(): hif_read_phy_mem_base
 * @scn: scn
 * @phy_mem_base: set to the device physical memory base (scn->mem_pa)
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}
qdf_export_symbol(hif_read_phy_mem_base);
1049 
/**
 * hif_get_device_type(): map a PCI-style device/revision id pair to
 * hif and target types
 * @device_id: device_id
 * @revision_id: revision_id, consulted only for AR6320 parts
 * @hif_type: returned hif_type
 * @target_type: returned target_type
 *
 * NOTE(review): on the unsupported-device path the outputs are not
 * written before *target_type is compared against TARGET_TYPE_UNKNOWN
 * below — this appears to rely on callers pre-initializing
 * *target_type; confirm at call sites.
 *
 * Return: 0 on success, -ENODEV for unknown device or revision ids
 */
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type)
{
	int ret = 0;

	switch (device_id) {
	case ADRASTEA_DEVICE_ID_P2_E12:

		*hif_type = HIF_TYPE_ADRASTEA;
		*target_type = TARGET_TYPE_ADRASTEA;
		break;

	case AR9888_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		break;

	case AR6320_DEVICE_ID:
		/* AR6320 generations share a device id; disambiguate by
		 * firmware revision
		 */
		switch (revision_id) {
		case AR6320_FW_1_1:
		case AR6320_FW_1_3:
			*hif_type = HIF_TYPE_AR6320;
			*target_type = TARGET_TYPE_AR6320;
			break;

		case AR6320_FW_2_0:
		case AR6320_FW_3_0:
		case AR6320_FW_3_2:
			*hif_type = HIF_TYPE_AR6320V2;
			*target_type = TARGET_TYPE_AR6320V2;
			break;

		default:
			HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x",
				   __func__, device_id, revision_id);
			ret = -ENODEV;
			goto end;
		}
		break;

	case AR9887_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		HIF_INFO(" *********** AR9887 **************");
		break;

	case QCA9984_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9984;
		*target_type = TARGET_TYPE_QCA9984;
		HIF_INFO(" *********** QCA9984 *************");
		break;

	case QCA9888_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9888;
		*target_type = TARGET_TYPE_QCA9888;
		HIF_INFO(" *********** QCA9888 *************");
		break;

	case AR900B_DEVICE_ID:
		*hif_type = HIF_TYPE_AR900B;
		*target_type = TARGET_TYPE_AR900B;
		HIF_INFO(" *********** AR900B *************");
		break;

	case IPQ4019_DEVICE_ID:
		*hif_type = HIF_TYPE_IPQ4019;
		*target_type = TARGET_TYPE_IPQ4019;
		HIF_INFO(" *********** IPQ4019  *************");
		break;

	case QCA8074_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074;
		*target_type = TARGET_TYPE_QCA8074;
		HIF_INFO(" *********** QCA8074  *************\n");
		break;

	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6290;
		*target_type = TARGET_TYPE_QCA6290;
		HIF_INFO(" *********** QCA6290EMU *************\n");
		break;

	case QCN9000_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9000;
		*target_type = TARGET_TYPE_QCN9000;
		HIF_INFO(" *********** QCN9000 *************\n");
		break;

	case QCN7605_DEVICE_ID:
	case QCN7605_COMPOSITE:
	case QCN7605_STANDALONE:
	case QCN7605_STANDALONE_V2:
	case QCN7605_COMPOSITE_V2:
		*hif_type = HIF_TYPE_QCN7605;
		*target_type = TARGET_TYPE_QCN7605;
		HIF_INFO(" *********** QCN7605 *************\n");
		break;

	case QCA6390_DEVICE_ID:
	case QCA6390_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6390;
		*target_type = TARGET_TYPE_QCA6390;
		HIF_INFO(" *********** QCA6390 *************\n");
		break;

	case QCA6490_DEVICE_ID:
	case QCA6490_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6490;
		*target_type = TARGET_TYPE_QCA6490;
		HIF_INFO(" *********** QCA6490 *************\n");
		break;

	case QCA6750_DEVICE_ID:
	case QCA6750_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6750;
		*target_type = TARGET_TYPE_QCA6750;
		HIF_INFO(" *********** QCA6750 *************\n");
		break;

	case QCA8074V2_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074V2;
		*target_type = TARGET_TYPE_QCA8074V2;
		HIF_INFO(" *********** QCA8074V2 *************\n");
		break;

	case QCA6018_DEVICE_ID:
	case RUMIM2M_DEVICE_ID_NODE0:
	case RUMIM2M_DEVICE_ID_NODE1:
	case RUMIM2M_DEVICE_ID_NODE2:
	case RUMIM2M_DEVICE_ID_NODE3:
	case RUMIM2M_DEVICE_ID_NODE4:
	case RUMIM2M_DEVICE_ID_NODE5:
		*hif_type = HIF_TYPE_QCA6018;
		*target_type = TARGET_TYPE_QCA6018;
		HIF_INFO(" *********** QCA6018 *************\n");
		break;

	case QCA5018_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5018;
		*target_type = TARGET_TYPE_QCA5018;
		HIF_INFO(" *********** qca5018 *************\n");
		break;

	default:
		HIF_ERROR("%s: Unsupported device ID = 0x%x!",
			  __func__, device_id);
		ret = -ENODEV;
		break;
	}

	if (*target_type == TARGET_TYPE_UNKNOWN) {
		HIF_ERROR("%s: Unsupported target_type!", __func__);
		ret = -ENODEV;
	}
end:
	return ret;
}
1218 
1219 /**
1220  * hif_get_bus_type() - return the bus type
1221  *
1222  * Return: enum qdf_bus_type
1223  */
1224 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
1225 {
1226 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
1227 
1228 	return scn->bus_type;
1229 }
1230 
1231 /**
1232  * Target info and ini parameters are global to the driver
1233  * Hence these structures are exposed to all the modules in
1234  * the driver and they don't need to maintains multiple copies
1235  * of the same info, instead get the handle from hif and
1236  * modify them in hif
1237  */
1238 
1239 /**
1240  * hif_get_ini_handle() - API to get hif_config_param handle
1241  * @hif_ctx: HIF Context
1242  *
1243  * Return: pointer to hif_config_info
1244  */
1245 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
1246 {
1247 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1248 
1249 	return &sc->hif_config;
1250 }
1251 
1252 /**
1253  * hif_get_target_info_handle() - API to get hif_target_info handle
1254  * @hif_ctx: HIF context
1255  *
1256  * Return: Pointer to hif_target_info
1257  */
1258 struct hif_target_info *hif_get_target_info_handle(
1259 					struct hif_opaque_softc *hif_ctx)
1260 {
1261 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1262 
1263 	return &sc->target_info;
1264 
1265 }
1266 qdf_export_symbol(hif_get_target_info_handle);
1267 
1268 #ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - register the rx offload flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: flush callback to install on the NAPI instances
 *
 * Logs an error and does nothing when NAPI is not enabled.
 *
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (!hif_napi_enabled(scn, -1)) {
		HIF_ERROR("NAPI not enabled\n");
		return;
	}

	hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
}
qdf_export_symbol(hif_offld_flush_cb_register);
1278 
/**
 * hif_offld_flush_cb_deregister() - remove the rx offload flush callback
 * @scn: HIF opaque context
 *
 * Logs an error and does nothing when NAPI is not enabled.
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (!hif_napi_enabled(scn, -1)) {
		HIF_ERROR("NAPI not enabled\n");
		return;
	}

	hif_napi_rx_offld_flush_cb_deregister(scn);
}
qdf_export_symbol(hif_offld_flush_cb_deregister);
1287 
/**
 * hif_get_rx_ctx_id() - map a CE pipe id to its rx (NAPI) context id
 * @ctx_id: CE pipe id
 * @hif_hdl: HIF Context
 *
 * Return: the NAPI instance id when NAPI is enabled, else the pipe id
 * unchanged
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return NAPI_PIPE2ID(ctx_id);
	else
		return ctx_id;
}
/*
 * Export for consistency with the !RECEIVE_OFFLOAD variant below, which
 * is exported; without this, module callers link only in one config.
 */
qdf_export_symbol(hif_get_rx_ctx_id);
1295 #else /* RECEIVE_OFFLOAD */
/**
 * hif_get_rx_ctx_id() - rx context id stub when RECEIVE_OFFLOAD is disabled
 * @ctx_id: CE pipe id (unused here)
 * @hif_hdl: HIF Context (unused here)
 *
 * Return: always 0
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
1301 #endif /* RECEIVE_OFFLOAD */
1302 
1303 #if defined(FEATURE_LRO)
1304 
1305 /**
1306  * hif_get_lro_info - Returns LRO instance for instance ID
1307  * @ctx_id: LRO instance ID
1308  * @hif_hdl: HIF Context
1309  *
1310  * Return: Pointer to LRO instance.
1311  */
1312 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
1313 {
1314 	void *data;
1315 
1316 	if (hif_napi_enabled(hif_hdl, -1))
1317 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
1318 	else
1319 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
1320 
1321 	return data;
1322 }
1323 #endif
1324 
1325 /**
1326  * hif_get_target_status - API to get target status
1327  * @hif_ctx: HIF Context
1328  *
1329  * Return: enum hif_target_status
1330  */
1331 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
1332 {
1333 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1334 
1335 	return scn->target_status;
1336 }
1337 qdf_export_symbol(hif_get_target_status);
1338 
1339 /**
1340  * hif_set_target_status() - API to set target status
1341  * @hif_ctx: HIF Context
1342  * @status: Target Status
1343  *
1344  * Return: void
1345  */
1346 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1347 			   hif_target_status status)
1348 {
1349 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1350 
1351 	scn->target_status = status;
1352 }
1353 
1354 /**
1355  * hif_init_ini_config() - API to initialize HIF configuration parameters
1356  * @hif_ctx: HIF Context
1357  * @cfg: HIF Configuration
1358  *
1359  * Return: void
1360  */
1361 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1362 			 struct hif_config_info *cfg)
1363 {
1364 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1365 
1366 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1367 }
1368 
1369 /**
1370  * hif_get_conparam() - API to get driver mode in HIF
1371  * @scn: HIF Context
1372  *
1373  * Return: driver mode of operation
1374  */
1375 uint32_t hif_get_conparam(struct hif_softc *scn)
1376 {
1377 	if (!scn)
1378 		return 0;
1379 
1380 	return scn->hif_con_param;
1381 }
1382 
1383 /**
1384  * hif_get_callbacks_handle() - API to get callbacks Handle
1385  * @scn: HIF Context
1386  *
1387  * Return: pointer to HIF Callbacks
1388  */
1389 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
1390 							struct hif_softc *scn)
1391 {
1392 	return &scn->callbacks;
1393 }
1394 
1395 /**
1396  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1397  * @scn: HIF Context
1398  *
1399  * Return: True/False
1400  */
1401 bool hif_is_driver_unloading(struct hif_softc *scn)
1402 {
1403 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1404 
1405 	if (cbk && cbk->is_driver_unloading)
1406 		return cbk->is_driver_unloading(cbk->context);
1407 
1408 	return false;
1409 }
1410 
1411 /**
1412  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1413  * load/unload in progress
1414  * @scn: HIF Context
1415  *
1416  * Return: True/False
1417  */
1418 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1419 {
1420 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1421 
1422 	if (cbk && cbk->is_load_unload_in_progress)
1423 		return cbk->is_load_unload_in_progress(cbk->context);
1424 
1425 	return false;
1426 }
1427 
1428 /**
1429  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1430  * progress
1431  * @scn: HIF Context
1432  *
1433  * Return: True/False
1434  */
1435 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1436 {
1437 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1438 
1439 	if (cbk && cbk->is_recovery_in_progress)
1440 		return cbk->is_recovery_in_progress(cbk->context);
1441 
1442 	return false;
1443 }
1444 
1445 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
1446     defined(HIF_IPCI)
1447 
1448 /**
1449  * hif_update_pipe_callback() - API to register pipe specific callbacks
1450  * @osc: Opaque softc
1451  * @pipeid: pipe id
1452  * @callbacks: callbacks to register
1453  *
1454  * Return: void
1455  */
1456 
1457 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1458 					u_int8_t pipeid,
1459 					struct hif_msg_callbacks *callbacks)
1460 {
1461 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
1462 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1463 	struct HIF_CE_pipe_info *pipe_info;
1464 
1465 	QDF_BUG(pipeid < CE_COUNT_MAX);
1466 
1467 	HIF_INFO_LO("+%s pipeid %d\n", __func__, pipeid);
1468 
1469 	pipe_info = &hif_state->pipe_info[pipeid];
1470 
1471 	qdf_mem_copy(&pipe_info->pipe_callbacks,
1472 			callbacks, sizeof(pipe_info->pipe_callbacks));
1473 
1474 	HIF_INFO_LO("-%s\n", __func__);
1475 }
1476 qdf_export_symbol(hif_update_pipe_callback);
1477 
1478 /**
1479  * hif_is_target_ready() - API to query if target is in ready state
1480  * progress
1481  * @scn: HIF Context
1482  *
1483  * Return: True/False
1484  */
1485 bool hif_is_target_ready(struct hif_softc *scn)
1486 {
1487 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1488 
1489 	if (cbk && cbk->is_target_ready)
1490 		return cbk->is_target_ready(cbk->context);
1491 	/*
1492 	 * if callback is not registered then there is no way to determine
1493 	 * if target is ready. In-such case return true to indicate that
1494 	 * target is ready.
1495 	 */
1496 	return true;
1497 }
1498 qdf_export_symbol(hif_is_target_ready);
1499 
1500 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
1501 {
1502 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1503 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1504 
1505 	if (cbk && cbk->get_bandwidth_level)
1506 		return cbk->get_bandwidth_level(cbk->context);
1507 
1508 	return 0;
1509 }
1510 
1511 qdf_export_symbol(hif_get_bandwidth_level);
1512 
1513 /**
1514  * hif_batch_send() - API to access hif specific function
1515  * ce_batch_send.
1516  * @osc: HIF Context
1517  * @msdu : list of msdus to be sent
1518  * @transfer_id : transfer id
1519  * @len : donwloaded length
1520  *
1521  * Return: list of msds not sent
1522  */
1523 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1524 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
1525 {
1526 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1527 
1528 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1529 			len, sendhead);
1530 }
1531 qdf_export_symbol(hif_batch_send);
1532 
1533 /**
1534  * hif_update_tx_ring() - API to access hif specific function
1535  * ce_update_tx_ring.
1536  * @osc: HIF Context
1537  * @num_htt_cmpls : number of htt compl received.
1538  *
1539  * Return: void
1540  */
1541 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
1542 {
1543 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1544 
1545 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
1546 }
1547 qdf_export_symbol(hif_update_tx_ring);
1548 
1549 
1550 /**
1551  * hif_send_single() - API to access hif specific function
1552  * ce_send_single.
1553  * @osc: HIF Context
1554  * @msdu : msdu to be sent
1555  * @transfer_id: transfer id
1556  * @len : downloaded length
1557  *
1558  * Return: msdu sent status
1559  */
1560 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1561 			   uint32_t transfer_id, u_int32_t len)
1562 {
1563 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1564 
1565 	if (!ce_tx_hdl)
1566 		return QDF_STATUS_E_NULL_VALUE;
1567 
1568 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1569 			len);
1570 }
1571 qdf_export_symbol(hif_send_single);
1572 #endif
1573 
1574 /**
1575  * hif_reg_write() - API to access hif specific function
1576  * hif_write32_mb.
1577  * @hif_ctx : HIF Context
1578  * @offset : offset on which value has to be written
1579  * @value : value to be written
1580  *
1581  * Return: None
1582  */
1583 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
1584 		uint32_t value)
1585 {
1586 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1587 
1588 	hif_write32_mb(scn, scn->mem + offset, value);
1589 
1590 }
1591 qdf_export_symbol(hif_reg_write);
1592 
1593 /**
1594  * hif_reg_read() - API to access hif specific function
1595  * hif_read32_mb.
1596  * @hif_ctx : HIF Context
1597  * @offset : offset from which value has to be read
1598  *
1599  * Return: Read value
1600  */
1601 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
1602 {
1603 
1604 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1605 
1606 	return hif_read32_mb(scn, scn->mem + offset);
1607 }
1608 qdf_export_symbol(hif_reg_read);
1609 
1610 /**
1611  * hif_ramdump_handler(): generic ramdump handler
1612  * @scn: struct hif_opaque_softc
1613  *
1614  * Return: None
1615  */
1616 void hif_ramdump_handler(struct hif_opaque_softc *scn)
1617 {
1618 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
1619 		hif_usb_ramdump_handler(scn);
1620 }
1621 
/**
 * hif_wake_interrupt_handler() - ISR for the target wake interrupt
 * @irq: irq number that fired
 * @context: the struct hif_softc * registered with this irq
 *
 * Disarms the runtime-PM wake-interrupt monitor and requests a resume
 * when it was armed, invokes the registered initial wakeup callback,
 * resumes a unit-test suspend if one is active, and signals a system
 * wakeup. Order matters: the runtime-PM resume request is issued first.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
{
	struct hif_softc *scn = context;
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);

	HIF_INFO("wake interrupt received on irq %d", irq);

	/* runtime PM was waiting on this wake: disarm and resume the bus */
	if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) {
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		hif_pm_runtime_request_resume(hif_ctx);
	}

	if (scn->initial_wakeup_cb)
		scn->initial_wakeup_cb(scn->initial_wakeup_priv);

	/* unit-test suspend framework: let its FW resume path run */
	if (hif_is_ut_suspended(scn))
		hif_ut_fw_resume(scn);

	/* notify the PM core of a wakeup event */
	qdf_pm_system_wakeup();

	return IRQ_HANDLED;
}
1644 
1645 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1646 			       void (*callback)(void *),
1647 			       void *priv)
1648 {
1649 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1650 
1651 	scn->initial_wakeup_cb = callback;
1652 	scn->initial_wakeup_priv = priv;
1653 }
1654 
1655 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1656 				       uint32_t ce_service_max_yield_time)
1657 {
1658 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1659 
1660 	hif_ctx->ce_service_max_yield_time =
1661 		ce_service_max_yield_time * 1000;
1662 }
1663 
1664 unsigned long long
1665 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
1666 {
1667 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1668 
1669 	return hif_ctx->ce_service_max_yield_time;
1670 }
1671 
1672 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1673 				       uint8_t ce_service_max_rx_ind_flush)
1674 {
1675 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1676 
1677 	if (ce_service_max_rx_ind_flush == 0 ||
1678 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
1679 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
1680 	else
1681 		hif_ctx->ce_service_max_rx_ind_flush =
1682 						ce_service_max_rx_ind_flush;
1683 }
1684