xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "hif_hw_version.h"
33 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
34      defined(HIF_IPCI))
35 #include "ce_tasklet.h"
36 #include "ce_api.h"
37 #endif
38 #include "qdf_trace.h"
39 #include "qdf_status.h"
40 #include "hif_debug.h"
41 #include "mp_dev.h"
42 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
43 	defined(QCA_WIFI_QCA5018)
44 #include "hal_api.h"
45 #endif
46 #include "hif_napi.h"
47 #include "hif_unit_test_suspend_i.h"
48 #include "qdf_module.h"
49 #ifdef HIF_CE_LOG_INFO
50 #include <qdf_notifier.h>
51 #include <qdf_hang_event_notifier.h>
52 #endif
53 
/**
 * hif_dump() - trigger a debug dump through the bus layer
 * @hif_ctx: hif opaque context
 * @cmd_id: dump command identifier, passed through unmodified
 * @start: true to start the dump, false to stop it
 *
 * Thin wrapper around hif_trigger_dump().
 *
 * Return: none
 */
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}
58 
/**
 * hif_get_target_id(): hif_get_target_id
 * @scn: hif_softc context
 *
 * Return the virtual memory base address (scn->mem) to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}
72 
/**
 * hif_get_targetdef(): get the target register definition table
 * @hif_ctx: hif opaque context
 *
 * Return: pointer to the bus/target specific register definitions
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}
85 
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase() - tell HAL whether SRNG init is in progress
 * @hif_ctx: hif opaque context
 * @init_phase: true while SRNG initialization is ongoing
 *
 * Only forwarded to HAL for SRNG (CE service ring) based targets;
 * a no-op for legacy CE targets.
 *
 * Return: none
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn))
		hal_set_init_phase(scn->hal_soc, init_phase);
}
#endif /* FORCE_WAKE */
96 
#ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb() - shutdown notifier callback
 * @hif_ctx: hif context (opaque; converted via HIF_GET_SOFTC)
 *
 * Marks the context as in recovery so in-flight paths can bail out.
 *
 * Return: none
 */
void hif_shutdown_notifier_cb(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->recovery = true;
}
#endif
105 
106 /**
107  * hif_vote_link_down(): unvote for link up
108  *
109  * Call hif_vote_link_down to release a previous request made using
110  * hif_vote_link_up. A hif_vote_link_down call should only be made
111  * after a corresponding hif_vote_link_up, otherwise you could be
112  * negating a vote from another source. When no votes are present
113  * hif will not guarantee the linkstate after hif_bus_suspend.
114  *
115  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
116  * and initialization deinitialization sequencences.
117  *
118  * Return: n/a
119  */
120 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
121 {
122 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
123 
124 	QDF_BUG(scn);
125 	scn->linkstate_vote--;
126 	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
127 	if (scn->linkstate_vote == 0)
128 		hif_bus_prevent_linkdown(scn, false);
129 }
130 
131 /**
132  * hif_vote_link_up(): vote to prevent bus from suspending
133  *
134  * Makes hif guarantee that fw can message the host normally
135  * durring suspend.
136  *
137  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
138  * and initialization deinitialization sequencences.
139  *
140  * Return: n/a
141  */
142 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
143 {
144 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
145 
146 	QDF_BUG(scn);
147 	scn->linkstate_vote++;
148 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
149 	if (scn->linkstate_vote == 1)
150 		hif_bus_prevent_linkdown(scn, true);
151 }
152 
153 /**
154  * hif_can_suspend_link(): query if hif is permitted to suspend the link
155  *
156  * Hif will ensure that the link won't be suspended if the upperlayers
157  * don't want it to.
158  *
159  * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
160  * we don't need extra locking to ensure votes dont change while
161  * we are in the process of suspending or resuming.
162  *
163  * Return: false if hif will guarantee link up durring suspend.
164  */
165 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
166 {
167 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
168 
169 	QDF_BUG(scn);
170 	return scn->linkstate_vote == 0;
171 }
172 
/**
 * hif_hia_item_address(): map a Host Interest Area item to an address
 * @target_type: one of the TARGET_TYPE_* identifiers
 * @item_offset: offset of the item within the host interest area
 *
 * Adds @item_offset to the per-target HOST_INTEREST base address.
 *
 * Return: absolute target address of the item; 0 (after ASSERT) for
 * targets without a host interest area or unknown target types.
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_IPQ4019:
		return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;

	default:
		ASSERT(0);
		return 0;
	}
}
214 
215 /**
216  * hif_max_num_receives_reached() - check max receive is reached
217  * @scn: HIF Context
218  * @count: unsigned int.
219  *
220  * Output check status as bool
221  *
222  * Return: bool
223  */
224 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
225 {
226 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
227 		return count > 120;
228 	else
229 		return count > MAX_NUM_OF_RECEIVES;
230 }
231 
/**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum number of buffers the caller can support
 *
 * routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	/* Default platform policy: allocate the full requested count. */
	return maxSize;
}
245 
246 /**
247  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
248  * @hif_ctx: hif context
249  * @htc_htt_tx_endpoint: htt_tx_endpoint
250  *
251  * Return: void
252  */
253 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
254 							int htc_htt_tx_endpoint)
255 {
256 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
257 
258 	if (!scn) {
259 		hif_err("scn or scn->hif_sc is NULL!");
260 		return;
261 	}
262 
263 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
264 }
265 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
266 
/* Table mapping (target version id, sub id) pairs to human readable
 * hardware names; searched linearly by hif_get_hw_name().
 */
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	}
};
354 
355 /**
356  * hif_get_hw_name(): get a human readable name for the hardware
357  * @info: Target Info
358  *
359  * Return: human readable name for the underlying wifi hardware.
360  */
361 static const char *hif_get_hw_name(struct hif_target_info *info)
362 {
363 	int i;
364 
365 	if (info->hw_name)
366 		return info->hw_name;
367 
368 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
369 		if (info->target_version == qwlan_hw_list[i].id &&
370 		    info->target_revision == qwlan_hw_list[i].subid) {
371 			return qwlan_hw_list[i].name;
372 		}
373 	}
374 
375 	info->hw_name = qdf_mem_malloc(64);
376 	if (!info->hw_name)
377 		return "Unknown Device (nomem)";
378 
379 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
380 			info->target_version);
381 	if (i < 0)
382 		return "Unknown Device (snprintf failure)";
383 	else
384 		return info->hw_name;
385 }
386 
/**
 * hif_get_hw_info(): hif_get_hw_info
 * @scn: scn
 * @version: out parameter: target version
 * @revision: out parameter: target revision
 * @target_name: out parameter: human readable hardware name
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
			const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	/* USB targets query hw info from the usb layer before reporting */
	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
408 
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: hif handle
 *
 * Return: device virtual memory base address (scn->mem)
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);
424 
/**
 * hif_get_dev_ba_ce(): API to get device ce base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CE
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_ce;
}

qdf_export_symbol(hif_get_dev_ba_ce);
439 
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle; if NULL the existing defaults are kept
 *
 * Populates the CE status ring interrupt thresholds from ini config.
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.ce_status_ring_timer_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
		scn->ini_cfg.ce_status_ring_batch_count_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
	}
}
#else
/* Stub used when CE interrupt threshold config is compiled out. */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
468 
#if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
/**
 * hif_recovery_notifier_cb - Recovery notifier callback to log
 *  hang event data
 * @block: notifier block
 * @state: state
 * @data: notifier data (a struct qdf_notifer_data hang-event buffer)
 *
 * Appends bus info and CE info to the hang event buffer supplied via
 * the notifier chain, advancing notif_data->offset.
 *
 * Return: status
 */
static
int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
			     void *data)
{
	struct qdf_notifer_data *notif_data = data;
	qdf_notif_block *notif_block;
	struct hif_softc *hif_handle;
	bool bus_id_invalid;

	if (!data || !block)
		return -EINVAL;

	/* Recover the qdf_notif_block wrapper from the embedded
	 * notifier_block to reach the private hif context.
	 */
	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);

	hif_handle = notif_block->priv_data;
	if (!hif_handle)
		return -EINVAL;

	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
					  &notif_data->offset);
	/* Stop the notifier chain if the bus id is unusable */
	if (bus_id_invalid)
		return NOTIFY_STOP_MASK;

	hif_log_ce_info(hif_handle, notif_data->hang_data,
			&notif_data->offset);

	return 0;
}

/**
 * hif_register_recovery_notifier - Register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Hooks hif_recovery_notifier_cb() into the qdf hang-event notifier
 * chain with @hif_handle as private data.
 *
 * Return: status
 */
static
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier;

	if (!hif_handle)
		return QDF_STATUS_E_FAILURE;

	hif_notifier = &hif_handle->hif_recovery_notifier;

	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
	hif_notifier->priv_data = hif_handle;
	return qdf_hang_event_register_notifier(hif_notifier);
}

/**
 * hif_unregister_recovery_notifier - Un-register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;

	return qdf_hang_event_unregister_notifier(hif_notifier);
}
#else
/* Stubs when hang-event logging is compiled out: report success. */
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
555 
#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * __hif_cpu_hotplug_notify() - CPU hotplug event handler
 * @context: hif context (struct hif_softc *)
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Re-applies the perf irq affinity hint when a CPU comes online.
 * Ignored while the driver is unloading or recovering.
 *
 * Return: None
 */
static void __hif_cpu_hotplug_notify(void *context,
				     uint32_t cpu, bool cpu_up)
{
	struct hif_softc *scn = context;

	if (!scn)
		return;
	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
		return;

	if (cpu_up) {
		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
		hif_debug("Setting affinity for online CPU: %d", cpu);
	} else {
		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
	}
}

/**
 * hif_cpu_hotplug_notify - cpu core up/down notification
 * handler
 * @context: hif context
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Wraps __hif_cpu_hotplug_notify() in a driver-op protection so the
 * handler cannot race driver unload.
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}

/* qdf_cpuhp callback: a CPU has come online */
static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}

/* qdf_cpuhp callback: a CPU is about to go offline */
static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}

/* Register CPU online/offline callbacks for this hif context */
static void hif_cpuhp_register(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot register hotplug notifiers");
		return;
	}
	qdf_cpuhp_register(&scn->cpuhp_event_handle,
			   scn,
			   hif_cpu_online_cb,
			   hif_cpu_before_offline_cb);
}

/* Unregister the CPU hotplug callbacks registered above */
static void hif_cpuhp_unregister(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot unregister hotplug notifiers");
		return;
	}
	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
}

#else
/* Stubs when perf CPU affinity management is compiled out */
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
642 
643 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
644 				  uint32_t mode,
645 				  enum qdf_bus_type bus_type,
646 				  struct hif_driver_state_callbacks *cbk,
647 				  struct wlan_objmgr_psoc *psoc)
648 {
649 	struct hif_softc *scn;
650 	QDF_STATUS status = QDF_STATUS_SUCCESS;
651 	int bus_context_size = hif_bus_get_context_size(bus_type);
652 
653 	if (bus_context_size == 0) {
654 		hif_err("context size 0 not allowed");
655 		return NULL;
656 	}
657 
658 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
659 	if (!scn)
660 		return GET_HIF_OPAQUE_HDL(scn);
661 
662 	scn->qdf_dev = qdf_ctx;
663 	scn->hif_con_param = mode;
664 	qdf_atomic_init(&scn->active_tasklet_cnt);
665 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
666 	qdf_atomic_init(&scn->link_suspended);
667 	qdf_atomic_init(&scn->tasklet_from_intr);
668 	qdf_mem_copy(&scn->callbacks, cbk,
669 		     sizeof(struct hif_driver_state_callbacks));
670 	scn->bus_type  = bus_type;
671 
672 	hif_get_cfg_from_psoc(scn, psoc);
673 
674 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
675 	status = hif_bus_open(scn, bus_type);
676 	if (status != QDF_STATUS_SUCCESS) {
677 		hif_err("hif_bus_open error = %d, bus_type = %d",
678 			status, bus_type);
679 		qdf_mem_free(scn);
680 		scn = NULL;
681 	}
682 	hif_cpuhp_register(scn);
683 	return GET_HIF_OPAQUE_HDL(scn);
684 }
685 
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Frees the CE_COUNT * sizeof(uint32_t) consistent buffer used for the
 * read-index shadow on DDR and clears the cached virtual address.
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					(CE_COUNT * sizeof(uint32_t)),
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);
	/* Cleared unconditionally so repeated calls are safe no-ops */
	scn->vaddr_rri_on_ddr = NULL;
}
#endif
703 
/**
 * hif_close(): tear down and free a hif context created by hif_open()
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_opaque_softc is NULL");
		return;
	}

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		/* Park a static marker string before freeing so any late
		 * reader sees a valid string rather than a dangling
		 * pointer. hw_name is only ever set to a heap buffer by
		 * hif_get_hw_name(), so freeing it here is safe.
		 */
		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cleanup_static_buf_to_target(scn);
	hif_cpuhp_unregister(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}
739 
/**
 * hif_get_num_active_tasklets() - get the number of active
 *		tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of tasklets which are active
 */
static inline int hif_get_num_active_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_tasklet_cnt);
}
751 
/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 *		datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}
763 
#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018))
/**
 * hif_get_num_pending_work() - get the number of entries in
 *		the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Queries HAL for outstanding deferred register-write work.
 *
 * Returns: the number of pending register write work items
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
#else

/* No HAL deferred register writes on other targets: nothing pending. */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
#endif
786 
/**
 * hif_try_complete_tasks() - wait for hif deferred work to drain
 * @scn: HIF context
 *
 * Polls (10 ms sleeps) until all tasklets, group tasklets and pending
 * register-write work have completed, or HIF_TASK_DRAIN_WAIT_CNT polls
 * have elapsed.
 *
 * Return: QDF_STATUS_SUCCESS when drained, QDF_STATUS_E_FAULT on timeout
 */
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	/* The condition short-circuits: later counters are only refreshed
	 * once the earlier ones read zero, so the values logged for them
	 * may be stale.
	 */
	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
807 
#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018))
/**
 * hif_hal_attach() - attach HAL for SRNG based targets
 * @scn: hif context
 *
 * Legacy CE targets skip the attach and still return success.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAILURE if hal_attach fails
 */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		scn->hal_soc = hal_attach(
					hif_softc_to_hif_opaque_softc(scn),
					scn->qdf_dev);
		if (!scn->hal_soc)
			return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * hif_hal_detach() - detach HAL attached by hif_hal_attach()
 * @scn: hif context
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		hal_detach(scn->hal_soc);
		scn->hal_soc = NULL;
	}

	return QDF_STATUS_SUCCESS;
}
#else
/* Non-SRNG builds have no HAL to attach/detach; always succeed. */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
#endif
845 
/**
 * hif_enable(): hif_enable
 * @hif_ctx: hif_ctx
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Enables the bus, attaches HAL (SRNG targets) and configures the
 * target. On failure the partially-enabled state is unwound via the
 * goto-cleanup labels at the bottom.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
					  void *bdev,
					  const struct hif_bus_id *bid,
					  enum qdf_bus_type bus_type,
					  enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_ctx = NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_enable_bus error = %d", status);
		return status;
	}

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hal attach failed");
		goto disable_bus;
	}

	if (hif_bus_configure(scn)) {
		hif_err("Target probe failed");
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);
	/* NOTE(review): registration status is ignored here — confirm
	 * this best-effort behavior is intentional.
	 */
	hif_register_recovery_notifier(scn);

	/*
	 * Flag to avoid potential unallocated memory access from MSI
	 * interrupt handler which could get scheduled as soon as MSI
	 * is enabled, i.e to take care of the race due to the order
	 * in where MSI is enabled before the memory, that will be
	 * in interrupt handlers, is allocated.
	 */

	scn->hif_init_done = true;

	hif_debug("OK");

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}
912 
/**
 * hif_disable() - reverse of hif_enable()
 * @hif_ctx: hif context
 * @type: disable type (not referenced in this path)
 *
 * Return: none
 */
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	/* Before init completed a full shutdown is required; afterwards
	 * a regular stop suffices.
	 */
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	hif_debug("X");
}
938 
#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_enable_ce_latency_stats() - enable/disable CE latency stats
 * @hif_ctx: hif opaque context
 * @val: nonzero to enable CE latency statistics collection
 *
 * Return: none
 */
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	scn->ce_latency_stats = val;
}
#endif
950 
/* Dump bus-level statistics; wrapper over hif_display_bus_stats(). */
void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}

qdf_export_symbol(hif_display_stats);
957 
/* Reset bus-level statistics; wrapper over hif_clear_bus_stats(). */
void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
962 
/**
 * hif_crash_shutdown_dump_bus_register() - dump bus registers
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)

static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	/* Skip the dump entirely if the SoC is not accessible */
	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		hif_err("Failed to dump bus registers!");
}

/**
 * hif_crash_shutdown(): hif_crash_shutdown
 *
 * This function is called by the platform driver to dump CE registers
 *
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_ctx)
		return;

	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
		return;
	}

	if (TARGET_STATUS_RESET == scn->target_status) {
		hif_warn("Target is already asserted, ignore!");
		return;
	}

	if (hif_is_load_or_unload_in_progress(scn)) {
		hif_err("Load/unload is in progress, ignore!");
		return;
	}

	hif_crash_shutdown_dump_bus_register(hif_ctx);
	/* Mark asserted so a re-entry takes the early return above */
	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);

	if (ol_copy_ramdump(hif_ctx))
		goto out;

	hif_info("RAM dump collecting completed!");

out:
	return;
}
#else
/* Stub when target ramdump-on-panic is compiled out */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	hif_debug("Collecting target RAM dump disabled");
}
#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
1030 
#ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg(): hif_check_fw_reg
 * @scn: scn
 *
 * QCA_WIFI_3_0 targets need no fw register check; always reports OK.
 *
 * Return: int (always 0)
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
#endif
1044 
/**
 * hif_read_phy_mem_base(): hif_read_phy_mem_base
 * @scn: scn
 * @phy_mem_base: out parameter: physical memory base address
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}
qdf_export_symbol(hif_read_phy_mem_base);
1057 
/**
 * hif_get_device_type(): hif_get_device_type
 * @device_id: device_id
 * @revision_id: revision_id (used to disambiguate AR6320 variants)
 * @hif_type: returned hif_type
 * @target_type: returned target_type
 *
 * Maps a PCI/platform device id (and revision) to the hif and target
 * type enums. On an unsupported id the out parameters are left
 * untouched and -ENODEV is returned.
 *
 * Return: 0 on success, -ENODEV on unsupported device/revision
 */
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type)
{
	int ret = 0;

	switch (device_id) {
	case ADRASTEA_DEVICE_ID_P2_E12:

		*hif_type = HIF_TYPE_ADRASTEA;
		*target_type = TARGET_TYPE_ADRASTEA;
		break;

	case AR9888_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		break;

	case AR6320_DEVICE_ID:
		/* AR6320 needs the revision id to pick v1 vs v2 */
		switch (revision_id) {
		case AR6320_FW_1_1:
		case AR6320_FW_1_3:
			*hif_type = HIF_TYPE_AR6320;
			*target_type = TARGET_TYPE_AR6320;
			break;

		case AR6320_FW_2_0:
		case AR6320_FW_3_0:
		case AR6320_FW_3_2:
			*hif_type = HIF_TYPE_AR6320V2;
			*target_type = TARGET_TYPE_AR6320V2;
			break;

		default:
			hif_err("dev_id = 0x%x, rev_id = 0x%x",
				device_id, revision_id);
			ret = -ENODEV;
			/* only jump that skips the final target check */
			goto end;
		}
		break;

	case AR9887_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		hif_info(" *********** AR9887 **************");
		break;

	case QCA9984_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9984;
		*target_type = TARGET_TYPE_QCA9984;
		hif_info(" *********** QCA9984 *************");
		break;

	case QCA9888_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9888;
		*target_type = TARGET_TYPE_QCA9888;
		hif_info(" *********** QCA9888 *************");
		break;

	case AR900B_DEVICE_ID:
		*hif_type = HIF_TYPE_AR900B;
		*target_type = TARGET_TYPE_AR900B;
		hif_info(" *********** AR900B *************");
		break;

	case IPQ4019_DEVICE_ID:
		*hif_type = HIF_TYPE_IPQ4019;
		*target_type = TARGET_TYPE_IPQ4019;
		hif_info(" *********** IPQ4019  *************");
		break;

	case QCA8074_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074;
		*target_type = TARGET_TYPE_QCA8074;
		hif_info(" *********** QCA8074  *************");
		break;

	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6290;
		*target_type = TARGET_TYPE_QCA6290;
		hif_info(" *********** QCA6290EMU *************");
		break;

	case QCN9000_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9000;
		*target_type = TARGET_TYPE_QCN9000;
		hif_info(" *********** QCN9000 *************");
		break;

	case QCN9100_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9100;
		*target_type = TARGET_TYPE_QCN9100;
		hif_info(" *********** QCN9100 *************");
		break;

	case QCN7605_DEVICE_ID:
	case QCN7605_COMPOSITE:
	case QCN7605_STANDALONE:
	case QCN7605_STANDALONE_V2:
	case QCN7605_COMPOSITE_V2:
		*hif_type = HIF_TYPE_QCN7605;
		*target_type = TARGET_TYPE_QCN7605;
		hif_info(" *********** QCN7605 *************");
		break;

	case QCA6390_DEVICE_ID:
	case QCA6390_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6390;
		*target_type = TARGET_TYPE_QCA6390;
		hif_info(" *********** QCA6390 *************");
		break;

	case QCA6490_DEVICE_ID:
	case QCA6490_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6490;
		*target_type = TARGET_TYPE_QCA6490;
		hif_info(" *********** QCA6490 *************");
		break;

	case QCA6750_DEVICE_ID:
	case QCA6750_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6750;
		*target_type = TARGET_TYPE_QCA6750;
		hif_info(" *********** QCA6750 *************");
		break;

	case QCA8074V2_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074V2;
		*target_type = TARGET_TYPE_QCA8074V2;
		hif_info(" *********** QCA8074V2 *************");
		break;

	case QCA6018_DEVICE_ID:
	case RUMIM2M_DEVICE_ID_NODE0:
	case RUMIM2M_DEVICE_ID_NODE1:
	case RUMIM2M_DEVICE_ID_NODE2:
	case RUMIM2M_DEVICE_ID_NODE3:
	case RUMIM2M_DEVICE_ID_NODE4:
	case RUMIM2M_DEVICE_ID_NODE5:
		*hif_type = HIF_TYPE_QCA6018;
		*target_type = TARGET_TYPE_QCA6018;
		hif_info(" *********** QCA6018 *************");
		break;

	case QCA5018_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5018;
		*target_type = TARGET_TYPE_QCA5018;
		hif_info(" *********** qca5018 *************");
		break;

	default:
		hif_err("Unsupported device ID = 0x%x!", device_id);
		ret = -ENODEV;
		break;
	}

	if (*target_type == TARGET_TYPE_UNKNOWN) {
		/* NOTE(review): on the default (unsupported device) path
		 * *target_type was never written by this function, so this
		 * check relies on the caller having pre-initialized it —
		 * confirm all callers do.
		 */
		hif_err("Unsupported target_type!");
		ret = -ENODEV;
	}
end:
	return ret;
}
1231 
1232 /**
1233  * hif_get_bus_type() - return the bus type
1234  *
1235  * Return: enum qdf_bus_type
1236  */
1237 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
1238 {
1239 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
1240 
1241 	return scn->bus_type;
1242 }
1243 
1244 /**
1245  * Target info and ini parameters are global to the driver
1246  * Hence these structures are exposed to all the modules in
1247  * the driver and they don't need to maintains multiple copies
1248  * of the same info, instead get the handle from hif and
1249  * modify them in hif
1250  */
1251 
1252 /**
1253  * hif_get_ini_handle() - API to get hif_config_param handle
1254  * @hif_ctx: HIF Context
1255  *
1256  * Return: pointer to hif_config_info
1257  */
1258 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
1259 {
1260 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1261 
1262 	return &sc->hif_config;
1263 }
1264 
1265 /**
1266  * hif_get_target_info_handle() - API to get hif_target_info handle
1267  * @hif_ctx: HIF context
1268  *
1269  * Return: Pointer to hif_target_info
1270  */
1271 struct hif_target_info *hif_get_target_info_handle(
1272 					struct hif_opaque_softc *hif_ctx)
1273 {
1274 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1275 
1276 	return &sc->target_info;
1277 
1278 }
1279 qdf_export_symbol(hif_get_target_info_handle);
1280 
#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - register the Rx offload flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: flush callback invoked from the NAPI Rx path
 *
 * The callback is only wired up when NAPI is enabled; otherwise the
 * registration is dropped with an error log.
 *
 * Return: void
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
	else
		hif_err("NAPI not enabled");
}
qdf_export_symbol(hif_offld_flush_cb_register);

/**
 * hif_offld_flush_cb_deregister() - remove the Rx offload flush callback
 * @scn: HIF opaque context
 *
 * Return: void
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_deregister(scn);
	else
		hif_err("NAPI not enabled");
}
qdf_export_symbol(hif_offld_flush_cb_deregister);

/**
 * hif_get_rx_ctx_id() - map a CE pipe id to its Rx context id
 * @ctx_id: CE pipe id
 * @hif_hdl: HIF opaque context
 *
 * Return: NAPI id for the pipe when NAPI is enabled, else the pipe id
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return NAPI_PIPE2ID(ctx_id);
	else
		return ctx_id;
}
/*
 * Export in this branch too: previously only the !RECEIVE_OFFLOAD stub
 * exported the symbol, which breaks external-module linkage when
 * RECEIVE_OFFLOAD is enabled.
 */
qdf_export_symbol(hif_get_rx_ctx_id);
#else /* RECEIVE_OFFLOAD */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
#endif /* RECEIVE_OFFLOAD */
1315 
#if defined(FEATURE_LRO)

/**
 * hif_get_lro_info - Returns LRO instance for instance ID
 * @ctx_id: LRO instance ID
 * @hif_hdl: HIF Context
 *
 * Return: Pointer to LRO instance.
 */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	/* NAPI and CE maintain separate per-context LRO state */
	if (hif_napi_enabled(hif_hdl, -1))
		return hif_napi_get_lro_info(hif_hdl, ctx_id);

	return hif_ce_get_lro_ctx(hif_hdl, ctx_id);
}
#endif
1337 
1338 /**
1339  * hif_get_target_status - API to get target status
1340  * @hif_ctx: HIF Context
1341  *
1342  * Return: enum hif_target_status
1343  */
1344 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
1345 {
1346 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1347 
1348 	return scn->target_status;
1349 }
1350 qdf_export_symbol(hif_get_target_status);
1351 
1352 /**
1353  * hif_set_target_status() - API to set target status
1354  * @hif_ctx: HIF Context
1355  * @status: Target Status
1356  *
1357  * Return: void
1358  */
1359 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1360 			   hif_target_status status)
1361 {
1362 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1363 
1364 	scn->target_status = status;
1365 }
1366 
1367 /**
1368  * hif_init_ini_config() - API to initialize HIF configuration parameters
1369  * @hif_ctx: HIF Context
1370  * @cfg: HIF Configuration
1371  *
1372  * Return: void
1373  */
1374 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1375 			 struct hif_config_info *cfg)
1376 {
1377 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1378 
1379 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1380 }
1381 
1382 /**
1383  * hif_get_conparam() - API to get driver mode in HIF
1384  * @scn: HIF Context
1385  *
1386  * Return: driver mode of operation
1387  */
1388 uint32_t hif_get_conparam(struct hif_softc *scn)
1389 {
1390 	if (!scn)
1391 		return 0;
1392 
1393 	return scn->hif_con_param;
1394 }
1395 
1396 /**
1397  * hif_get_callbacks_handle() - API to get callbacks Handle
1398  * @scn: HIF Context
1399  *
1400  * Return: pointer to HIF Callbacks
1401  */
1402 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
1403 							struct hif_softc *scn)
1404 {
1405 	return &scn->callbacks;
1406 }
1407 
1408 /**
1409  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1410  * @scn: HIF Context
1411  *
1412  * Return: True/False
1413  */
1414 bool hif_is_driver_unloading(struct hif_softc *scn)
1415 {
1416 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1417 
1418 	if (cbk && cbk->is_driver_unloading)
1419 		return cbk->is_driver_unloading(cbk->context);
1420 
1421 	return false;
1422 }
1423 
1424 /**
1425  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1426  * load/unload in progress
1427  * @scn: HIF Context
1428  *
1429  * Return: True/False
1430  */
1431 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1432 {
1433 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1434 
1435 	if (cbk && cbk->is_load_unload_in_progress)
1436 		return cbk->is_load_unload_in_progress(cbk->context);
1437 
1438 	return false;
1439 }
1440 
1441 /**
1442  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1443  * progress
1444  * @scn: HIF Context
1445  *
1446  * Return: True/False
1447  */
1448 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1449 {
1450 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1451 
1452 	if (cbk && cbk->is_recovery_in_progress)
1453 		return cbk->is_recovery_in_progress(cbk->context);
1454 
1455 	return false;
1456 }
1457 
1458 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
1459     defined(HIF_IPCI)
1460 
1461 /**
1462  * hif_update_pipe_callback() - API to register pipe specific callbacks
1463  * @osc: Opaque softc
1464  * @pipeid: pipe id
1465  * @callbacks: callbacks to register
1466  *
1467  * Return: void
1468  */
1469 
1470 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1471 					u_int8_t pipeid,
1472 					struct hif_msg_callbacks *callbacks)
1473 {
1474 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
1475 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1476 	struct HIF_CE_pipe_info *pipe_info;
1477 
1478 	QDF_BUG(pipeid < CE_COUNT_MAX);
1479 
1480 	hif_debug("pipeid: %d", pipeid);
1481 
1482 	pipe_info = &hif_state->pipe_info[pipeid];
1483 
1484 	qdf_mem_copy(&pipe_info->pipe_callbacks,
1485 			callbacks, sizeof(pipe_info->pipe_callbacks));
1486 }
1487 qdf_export_symbol(hif_update_pipe_callback);
1488 
1489 /**
1490  * hif_is_target_ready() - API to query if target is in ready state
1491  * progress
1492  * @scn: HIF Context
1493  *
1494  * Return: True/False
1495  */
1496 bool hif_is_target_ready(struct hif_softc *scn)
1497 {
1498 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1499 
1500 	if (cbk && cbk->is_target_ready)
1501 		return cbk->is_target_ready(cbk->context);
1502 	/*
1503 	 * if callback is not registered then there is no way to determine
1504 	 * if target is ready. In-such case return true to indicate that
1505 	 * target is ready.
1506 	 */
1507 	return true;
1508 }
1509 qdf_export_symbol(hif_is_target_ready);
1510 
1511 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
1512 {
1513 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1514 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1515 
1516 	if (cbk && cbk->get_bandwidth_level)
1517 		return cbk->get_bandwidth_level(cbk->context);
1518 
1519 	return 0;
1520 }
1521 
1522 qdf_export_symbol(hif_get_bandwidth_level);
1523 
1524 #ifdef DP_MEM_PRE_ALLOC
1525 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
1526 					 qdf_size_t size,
1527 					 qdf_dma_addr_t *paddr,
1528 					 uint32_t ring_type,
1529 					 uint8_t *is_mem_prealloc)
1530 {
1531 	void *vaddr = NULL;
1532 	struct hif_driver_state_callbacks *cbk =
1533 				hif_get_callbacks_handle(scn);
1534 
1535 	*is_mem_prealloc = false;
1536 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
1537 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
1538 								   paddr,
1539 								   ring_type);
1540 		if (vaddr) {
1541 			*is_mem_prealloc = true;
1542 			goto end;
1543 		}
1544 	}
1545 
1546 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
1547 					 scn->qdf_dev->dev,
1548 					 size,
1549 					 paddr);
1550 end:
1551 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
1552 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
1553 		(void *)*paddr, (int)size, ring_type);
1554 
1555 	return vaddr;
1556 }
1557 
1558 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
1559 				       qdf_size_t size,
1560 				       void *vaddr,
1561 				       qdf_dma_addr_t paddr,
1562 				       qdf_dma_context_t memctx,
1563 				       uint8_t is_mem_prealloc)
1564 {
1565 	struct hif_driver_state_callbacks *cbk =
1566 				hif_get_callbacks_handle(scn);
1567 
1568 	if (is_mem_prealloc) {
1569 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
1570 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
1571 		} else {
1572 			dp_warn("dp_prealloc_put_consistent_unligned NULL");
1573 			QDF_BUG(0);
1574 		}
1575 	} else {
1576 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1577 					size, vaddr, paddr, memctx);
1578 	}
1579 }
1580 #endif
1581 
1582 /**
1583  * hif_batch_send() - API to access hif specific function
1584  * ce_batch_send.
1585  * @osc: HIF Context
1586  * @msdu : list of msdus to be sent
1587  * @transfer_id : transfer id
1588  * @len : donwloaded length
1589  *
1590  * Return: list of msds not sent
1591  */
1592 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1593 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
1594 {
1595 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1596 
1597 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1598 			len, sendhead);
1599 }
1600 qdf_export_symbol(hif_batch_send);
1601 
1602 /**
1603  * hif_update_tx_ring() - API to access hif specific function
1604  * ce_update_tx_ring.
1605  * @osc: HIF Context
1606  * @num_htt_cmpls : number of htt compl received.
1607  *
1608  * Return: void
1609  */
1610 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
1611 {
1612 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1613 
1614 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
1615 }
1616 qdf_export_symbol(hif_update_tx_ring);
1617 
1618 
1619 /**
1620  * hif_send_single() - API to access hif specific function
1621  * ce_send_single.
1622  * @osc: HIF Context
1623  * @msdu : msdu to be sent
1624  * @transfer_id: transfer id
1625  * @len : downloaded length
1626  *
1627  * Return: msdu sent status
1628  */
1629 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1630 			   uint32_t transfer_id, u_int32_t len)
1631 {
1632 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1633 
1634 	if (!ce_tx_hdl)
1635 		return QDF_STATUS_E_NULL_VALUE;
1636 
1637 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1638 			len);
1639 }
1640 qdf_export_symbol(hif_send_single);
1641 #endif
1642 
1643 /**
1644  * hif_reg_write() - API to access hif specific function
1645  * hif_write32_mb.
1646  * @hif_ctx : HIF Context
1647  * @offset : offset on which value has to be written
1648  * @value : value to be written
1649  *
1650  * Return: None
1651  */
1652 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
1653 		uint32_t value)
1654 {
1655 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1656 
1657 	hif_write32_mb(scn, scn->mem + offset, value);
1658 
1659 }
1660 qdf_export_symbol(hif_reg_write);
1661 
1662 /**
1663  * hif_reg_read() - API to access hif specific function
1664  * hif_read32_mb.
1665  * @hif_ctx : HIF Context
1666  * @offset : offset from which value has to be read
1667  *
1668  * Return: Read value
1669  */
1670 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
1671 {
1672 
1673 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1674 
1675 	return hif_read32_mb(scn, scn->mem + offset);
1676 }
1677 qdf_export_symbol(hif_reg_read);
1678 
1679 /**
1680  * hif_ramdump_handler(): generic ramdump handler
1681  * @scn: struct hif_opaque_softc
1682  *
1683  * Return: None
1684  */
1685 void hif_ramdump_handler(struct hif_opaque_softc *scn)
1686 {
1687 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
1688 		hif_usb_ramdump_handler(scn);
1689 }
1690 
/**
 * hif_wake_interrupt_handler() - IRQ handler for the firmware wake interrupt
 * @irq: irq number that fired
 * @context: the hif_softc registered with the irq
 *
 * Clears the runtime-PM wake-interrupt monitor and requests a resume,
 * invokes the registered initial-wakeup callback, resumes the firmware
 * if a unit-test suspend is active, and signals a system wakeup.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
{
	struct hif_softc *scn = context;
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);

	hif_info("wake interrupt received on irq %d", irq);

	/* stop monitoring before requesting resume so the wake is
	 * handled only once
	 */
	if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) {
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		hif_pm_runtime_request_resume(hif_ctx);
	}

	/* callback registered via hif_set_initial_wakeup_cb() */
	if (scn->initial_wakeup_cb)
		scn->initial_wakeup_cb(scn->initial_wakeup_priv);

	if (hif_is_ut_suspended(scn))
		hif_ut_fw_resume(scn);

	qdf_pm_system_wakeup();

	return IRQ_HANDLED;
}
1713 
1714 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1715 			       void (*callback)(void *),
1716 			       void *priv)
1717 {
1718 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1719 
1720 	scn->initial_wakeup_cb = callback;
1721 	scn->initial_wakeup_priv = priv;
1722 }
1723 
1724 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1725 				       uint32_t ce_service_max_yield_time)
1726 {
1727 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1728 
1729 	hif_ctx->ce_service_max_yield_time =
1730 		ce_service_max_yield_time * 1000;
1731 }
1732 
1733 unsigned long long
1734 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
1735 {
1736 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1737 
1738 	return hif_ctx->ce_service_max_yield_time;
1739 }
1740 
1741 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1742 				       uint8_t ce_service_max_rx_ind_flush)
1743 {
1744 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1745 
1746 	if (ce_service_max_rx_ind_flush == 0 ||
1747 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
1748 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
1749 	else
1750 		hif_ctx->ce_service_max_rx_ind_flush =
1751 						ce_service_max_rx_ind_flush;
1752 }
1753