/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "hif_hw_version.h"
#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
     defined(HIF_IPCI))
#include "ce_tasklet.h"
#include "ce_api.h"
#endif
#include "qdf_trace.h"
#include "hif_debug.h"
#include "mp_dev.h"
#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
	defined(QCA_WIFI_QCA5332)
#include "hal_api.h"
#endif
#include "hif_napi.h"
#include "hif_unit_test_suspend_i.h"
#include "qdf_module.h"
#ifdef HIF_CE_LOG_INFO
#include <qdf_notifier.h>
#include <qdf_hang_event_notifier.h>
#endif
#include <linux/cpumask.h>

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
#include <pld_common.h>
#endif

void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}

/**
 * hif_get_target_id(): get the target id
 * @scn: hif context
 *
 * Return the virtual memory base address to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}

/**
 * hif_get_targetdef(): get the target definition handle
 * @hif_ctx: hif context
 *
 * Return: void *
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}

#ifdef FORCE_WAKE
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn))
		hal_set_init_phase(scn->hal_soc, init_phase);
}
#endif /* FORCE_WAKE */

#ifdef HIF_IPCI
void hif_shutdown_notifier_cb(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->recovery = true;
}
#endif

/**
 * hif_vote_link_down(): unvote for link up
 * @hif_ctx: hif context
 *
 * Call hif_vote_link_down to release a previous request made using
 * hif_vote_link_up. A hif_vote_link_down call should only be made
 * after a corresponding hif_vote_link_up, otherwise you could be
 * negating a vote from another source. When no votes are present
 * hif will not guarantee the linkstate after hif_bus_suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	if (scn->linkstate_vote == 0)
		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
				scn->linkstate_vote);

	scn->linkstate_vote--;
	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 0)
		hif_bus_prevent_linkdown(scn, false);
}

/**
 * hif_vote_link_up(): vote to prevent bus from suspending
 * @hif_ctx: hif context
 *
 * Makes hif guarantee that fw can message the host normally
 * during suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote++;
	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 1)
		hif_bus_prevent_linkdown(scn, true);
}
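
/*
 * Illustrative sketch (not part of the driver): callers pair the two
 * vote APIs from the MC thread. wmi_send_suspend_ind() below is a
 * hypothetical name standing in for any operation that needs FW-to-host
 * messaging to keep working across a bus suspend.
 *
 *	hif_vote_link_up(hif_ctx);
 *	wmi_send_suspend_ind();	// FW response may arrive during suspend
 *	...
 *	hif_vote_link_down(hif_ctx);	// release once messaging is done
 */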

/**
 * hif_can_suspend_link(): query if hif is permitted to suspend the link
 * @hif_ctx: hif context
 *
 * Hif will ensure that the link won't be suspended if the upper layers
 * don't want it to.
 *
 * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
 * we don't need extra locking to ensure votes don't change while
 * we are in the process of suspending or resuming.
 *
 * Return: false if hif will guarantee link up during suspend.
 */
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	return scn->linkstate_vote == 0;
}

/**
 * hif_hia_item_address(): get host interest item address
 * @target_type: target type
 * @item_offset: item offset within the host interest area
 *
 * Return: host interest item address, or 0 for unsupported targets
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;

	default:
		ASSERT(0);
		return 0;
	}
}
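
/*
 * Illustrative sketch: a caller that wants the address of one host
 * interest item passes the offset of that item into the lookup above.
 * The host_interest_s layout comes from targaddrs.h; treat the member
 * name as an example.
 *
 *	uint32_t addr;
 *
 *	addr = hif_hia_item_address(TARGET_TYPE_AR9888,
 *				    offsetof(struct host_interest_s,
 *					     hi_board_data));
 *	// addr == AR9888_HOST_INTEREST_ADDRESS + offsetof(...)
 */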

/**
 * hif_max_num_receives_reached() - check if max receive count is reached
 * @scn: HIF Context
 * @count: number of packets received so far in this processing pass
 *
 * Output check status as bool
 *
 * Return: bool
 */
bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
{
	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
		return count > 120;
	else
		return count > MAX_NUM_OF_RECEIVES;
}

/**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum number of buffers supported
 *
 * Routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed.
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}

/**
 * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
 * @hif_ctx: hif context
 * @htc_htt_tx_endpoint: htt_tx_endpoint
 *
 * Return: void
 */
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
							int htc_htt_tx_endpoint)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is NULL");
		return;
	}

	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
}
qdf_export_symbol(hif_save_htc_htt_config_endpoint);

static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	},
	{
		.id = MANGO_V1,
		.subid = 0xF,
		.name = "MANGO_V1",
	},
	{
		.id = KIWI_V1,
		.subid = 0,
		.name = "KIWI_V1",
	},
	{
		.id = KIWI_V2,
		.subid = 0,
		.name = "KIWI_V2",
	},
	{
		.id = WCN6750_V1,
		.subid = 0,
		.name = "WCN6750_V1",
	},
	{
		.id = QCA6490_v2_1,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = QCA6490_v2,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = WCN3990_v2_2,
		.subid = 0,
		.name = "WCN3990_v2_2",
	}
};

/**
 * hif_get_hw_name(): get a human readable name for the hardware
 * @info: Target Info
 *
 * Return: human readable name for the underlying wifi hardware.
 */
static const char *hif_get_hw_name(struct hif_target_info *info)
{
	int i;

	hif_debug("target version = %d, target revision = %d",
		  info->target_version,
		  info->target_revision);

	if (info->hw_name)
		return info->hw_name;

	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
		if (info->target_version == qwlan_hw_list[i].id &&
		    info->target_revision == qwlan_hw_list[i].subid) {
			return qwlan_hw_list[i].name;
		}
	}

	info->hw_name = qdf_mem_malloc(64);
	if (!info->hw_name)
		return "Unknown Device (nomem)";

	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
			info->target_version);
	if (i < 0)
		return "Unknown Device (snprintf failure)";
	else
		return info->hw_name;
}

/**
 * hif_get_hw_info(): query the hardware version/revision/name
 * @scn: scn
 * @version: version
 * @revision: revision
 * @target_name: returned human readable target name
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
			const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
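
/*
 * Illustrative sketch: a typical caller that logs the detected hardware.
 * Only APIs defined in this file are used; the variable names are
 * examples.
 *
 *	u32 version, revision;
 *	const char *name;
 *
 *	hif_get_hw_info(hif_ctx, &version, &revision, &name);
 *	hif_info("target %s (version 0x%x, revision 0x%x)",
 *		 name, version, revision);
 */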

/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: hif handle
 *
 * Return: device base address
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);

/**
 * hif_get_dev_ba_ce(): API to get device CE base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CE
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_ce;
}

qdf_export_symbol(hif_get_dev_ba_ce);

uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->target_info.soc_version;
}

qdf_export_symbol(hif_get_soc_version);

/**
 * hif_get_dev_ba_cmem(): API to get device CMEM base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CMEM
 */
void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_cmem;
}

qdf_export_symbol(hif_get_dev_ba_cmem);

#ifdef FEATURE_RUNTIME_PM
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
	if (is_get)
		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
}
#else
static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
}
#endif

#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.ce_status_ring_timer_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
		scn->ini_cfg.ce_status_ring_batch_count_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
	}
}
#else
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */

#if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
/**
 * hif_recovery_notifier_cb - Recovery notifier callback to log
 *  hang event data
 * @block: notifier block
 * @state: state
 * @data: notifier data
 *
 * Return: status
 */
static
int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
			     void *data)
{
	struct qdf_notifer_data *notif_data = data;
	qdf_notif_block *notif_block;
	struct hif_softc *hif_handle;
	bool bus_id_invalid;

	if (!data || !block)
		return -EINVAL;

	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);

	hif_handle = notif_block->priv_data;
	if (!hif_handle)
		return -EINVAL;

	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
					  &notif_data->offset);
	if (bus_id_invalid)
		return NOTIFY_STOP_MASK;

	hif_log_ce_info(hif_handle, notif_data->hang_data,
			&notif_data->offset);

	return 0;
}

/**
 * hif_register_recovery_notifier - Register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier;

	if (!hif_handle)
		return QDF_STATUS_E_FAILURE;

	hif_notifier = &hif_handle->hif_recovery_notifier;

	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
	hif_notifier->priv_data = hif_handle;
	return qdf_hang_event_register_notifier(hif_notifier);
}

/**
 * hif_unregister_recovery_notifier - Un-register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;

	return qdf_hang_event_unregister_notifier(hif_notifier);
}
#else
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
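
/*
 * Illustrative note: hif_register_recovery_notifier() is invoked from
 * hif_enable() below. When the QDF hang-event machinery later walks its
 * notifier chain, hif_recovery_notifier_cb() runs with a
 * struct qdf_notifer_data whose hang_data buffer and offset are
 * advanced by hif_log_bus_info() and hif_log_ce_info() above.
 */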

#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * __hif_cpu_hotplug_notify() - CPU hotplug event handler
 * @context: HIF context
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void __hif_cpu_hotplug_notify(void *context,
				     uint32_t cpu, bool cpu_up)
{
	struct hif_softc *scn = context;

	if (!scn)
		return;
	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
		return;

	if (cpu_up) {
		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
		hif_debug("Setting affinity for online CPU: %d", cpu);
	} else {
		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
	}
}

/**
 * hif_cpu_hotplug_notify() - CPU core up/down notification handler
 * @context: HIF context
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}

static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}

static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}

static void hif_cpuhp_register(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot register hotplug notifiers");
		return;
	}
	qdf_cpuhp_register(&scn->cpuhp_event_handle,
			   scn,
			   hif_cpu_online_cb,
			   hif_cpu_before_offline_cb);
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot unregister hotplug notifiers");
		return;
	}
	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
}

#else
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */

#ifdef HIF_DETECTION_LATENCY_ENABLE

void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t ce2_tasklet_sched_time =
		scn->latency_detect.ce2_tasklet_sched_time;
	qdf_time_t ce2_tasklet_exec_time =
		scn->latency_detect.ce2_tasklet_exec_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/*
	 * Two kinds of check here.
	 * from_timer==true:  check whether the tasklet has stalled
	 * from_timer==false: check whether tasklet execution came late
	 */
	if ((from_timer ?
	    qdf_system_time_after(ce2_tasklet_sched_time,
				  ce2_tasklet_exec_time) :
	    qdf_system_time_after(ce2_tasklet_exec_time,
				  ce2_tasklet_sched_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		ce2_tasklet_sched_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu, ce2_tasklet_exec_time %lu, detect_latency_threshold %ums detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, ce2_tasklet_sched_time,
			ce2_tasklet_exec_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}

void hif_credit_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t credit_request_time =
		scn->latency_detect.credit_request_time;
	qdf_time_t credit_report_time =
		scn->latency_detect.credit_report_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/*
	 * Two kinds of check here.
	 * from_timer==true:  check whether the credit report has stalled
	 * from_timer==false: check whether the credit report came late
	 */
	if ((from_timer ?
	    qdf_system_time_after(credit_request_time,
				  credit_report_time) :
	    qdf_system_time_after(credit_report_time,
				  credit_request_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		credit_request_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("credit report latency: from_timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, credit_request_time,
			credit_report_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}

/**
 * hif_check_detection_latency(): check for tasklet/credit latency
 * @scn: hif context
 * @from_timer: true if called from the timer handler
 * @bitmap_type: indicates whether to check tasklet, credit, or both
 *
 * Return: none
 */
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (!scn->latency_detect.enable_detection)
		return;

	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
		hif_tasklet_latency(scn, from_timer);

	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
		hif_credit_latency(scn, from_timer);
}
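
/*
 * Illustrative sketch: non-timer call sites pass from_timer=false right
 * after the event they time-stamp, e.g. the credit path in
 * hif_latency_detect_credit_record_time() below. A tasklet-side caller
 * would take this shape (shape only):
 *
 *	scn->latency_detect.ce2_tasklet_exec_time = qdf_system_ticks();
 *	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_TASKLET));
 */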

static void hif_latency_detect_timeout_handler(void *arg)
{
	struct hif_softc *scn = (struct hif_softc *)arg;
	int next_cpu;

	hif_check_detection_latency(scn, true,
				    BIT(HIF_DETECT_TASKLET) |
				    BIT(HIF_DETECT_CREDIT));

	/* The timer needs to restart on a different cpu so that it can
	 * detect a tasklet schedule stall. There is still a chance that,
	 * after the timer has been started, the irq/tasklet lands on the
	 * same cpu; the tasklet then executes before the softirq timer,
	 * and if that tasklet stalls this timer cannot detect it. We
	 * accept this as a limitation: a stalled tasklet will still be
	 * detected elsewhere, just a little later.
	 */
	next_cpu = cpumask_any_but(
			cpu_active_mask,
			scn->latency_detect.ce2_tasklet_sched_cpuid);

	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
		hif_debug("start timer on local");
		/* no other available cpu was found, start on the local cpu */
		qdf_timer_mod(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	} else {
		qdf_timer_start_on(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout,
			next_cpu);
	}
}

static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.detect_latency_timer_timeout =
		DETECTION_TIMER_TIMEOUT;
	scn->latency_detect.detect_latency_threshold =
		DETECTION_LATENCY_THRESHOLD;

	hif_info("timer timeout %u, latency threshold %u",
		 scn->latency_detect.detect_latency_timer_timeout,
		 scn->latency_detect.detect_latency_threshold);

	scn->latency_detect.is_timer_started = false;

	qdf_timer_init(NULL,
		       &scn->latency_detect.detect_latency_timer,
		       &hif_latency_detect_timeout_handler,
		       scn,
		       QDF_TIMER_TYPE_SW_SPIN);
}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info("deinit timer");
	qdf_timer_free(&scn->latency_detect.detect_latency_timer);
}

void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("start timer");
	if (scn->latency_detect.is_timer_started) {
		hif_info("timer already started");
		return;
	}

	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	scn->latency_detect.is_timer_started = true;
}

void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("stop timer");

	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
	scn->latency_detect.is_timer_started = false;
}

void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (HIF_REQUEST_CREDIT == type)
		scn->latency_detect.credit_request_time = qdf_system_ticks();
	else if (HIF_PROCESS_CREDIT_REPORT == type)
		scn->latency_detect.credit_report_time = qdf_system_ticks();

	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
}

void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.enable_detection = value;
}
#else
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{}
#endif

struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		hif_err("context size 0 not allowed");
		return NULL;
	}

	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn)
		return GET_HIF_OPAQUE_HDL(scn);

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);

	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type = bus_type;

	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
	hif_get_cfg_from_psoc(scn, psoc);

	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_bus_open error = %d, bus_type = %d",
			status, bus_type);
		qdf_mem_free(scn);
		scn = NULL;
		goto out;
	}

	hif_rtpm_lock_init(scn);

	hif_cpuhp_register(scn);
	hif_latency_detect_timer_init(scn);

out:
	return GET_HIF_OPAQUE_HDL(scn);
}
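
/*
 * Illustrative sketch of the open/enable lifecycle this file implements
 * (shape only; the callback struct contents are elided with ...):
 *
 *	struct hif_driver_state_callbacks cbk = { ... };
 *	struct hif_opaque_softc *hif_ctx;
 *
 *	hif_ctx = hif_open(qdf_ctx, QDF_GLOBAL_MISSION_MODE,
 *			   QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (!hif_ctx)
 *		return;	// hif_bus_open or the allocation failed
 *	// ...later: hif_enable(), normal operation, hif_disable(),
 *	// and finally hif_close(hif_ctx)
 */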

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					(CE_COUNT * sizeof(uint32_t)),
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);
	scn->vaddr_rri_on_ddr = NULL;
}
#endif

/**
 * hif_close(): close and free the hif context
 * @hif_ctx: hif context
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_opaque_softc is NULL");
		return;
	}

	hif_latency_detect_timer_deinit(scn);

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cleanup_static_buf_to_target(scn);
	hif_cpuhp_unregister(scn);
	hif_rtpm_lock_deinit(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}

/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 *		datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
/**
 * hif_get_num_pending_work() - get the number of entries in
 *		the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of pending register write work entries
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
#else

static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
#endif

QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
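
/*
 * Design note: hif_try_complete_tasks() polls rather than blocking on a
 * completion because tasklets, group tasklets and the delayed register
 * write work share no common completion object; bounding the drain to
 * HIF_TASK_DRAIN_WAIT_CNT iterations of 10 ms sleeps keeps a stuck
 * context from hanging the caller indefinitely.
 */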

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint32_t work_drain_wait_cnt = 0;
	uint32_t wait_cnt = 0;
	int work = 0;

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);

	while ((work = hif_get_num_pending_work(scn))) {
		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
			qdf_atomic_set(&scn->dp_ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			qdf_atomic_set(&scn->ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			hif_err("timed out waiting for pending work %d", work);
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(10);
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return QDF_STATUS_SUCCESS;

	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
			hif_err("EP vote release not processed by FW");
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(5);
	}

	return QDF_STATUS_SUCCESS;
}

void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint8_t vote_access;

	vote_access = qdf_atomic_read(&scn->ep_vote_access);

	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
		hif_info("EP vote changed from:%u to intermediate state",
			 vote_access);

	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
		QDF_BUG(0);

	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
}

void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
}

void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (type == HIF_EP_VOTE_DP_ACCESS)
		qdf_atomic_set(&scn->dp_ep_vote_access, access);
	else
		qdf_atomic_set(&scn->ep_vote_access, access);
}

uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (type == HIF_EP_VOTE_DP_ACCESS)
		return qdf_atomic_read(&scn->dp_ep_vote_access);
	else
		return qdf_atomic_read(&scn->ep_vote_access);
}
#endif
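
/*
 * Illustrative sketch: a datapath writer checks its vote state before
 * touching the bus (shape only; HIF_EP_VOTE_DP_ACCESS is the type used
 * for datapath callers):
 *
 *	if (hif_get_ep_vote_access(hif_ctx, HIF_EP_VOTE_DP_ACCESS) ==
 *	    HIF_EP_VOTE_ACCESS_DISABLE)
 *		return;	// bus access currently not permitted
 *	// ...perform register write...
 */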

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		scn->hal_soc = hal_attach(
					hif_softc_to_hif_opaque_softc(scn),
					scn->qdf_dev);
		if (!scn->hal_soc)
			return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		hal_detach(scn->hal_soc);
		scn->hal_soc = NULL;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
#endif

int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
{
	int ret;

	switch (bus_type) {
	case QDF_BUS_TYPE_IPCI:
		ret = qdf_set_dma_coherent_mask(dev,
						DMA_COHERENT_MASK_DEFAULT);
		if (ret) {
			hif_err("Failed to set dma mask error = %d", ret);
			return ret;
		}

		break;
	default:
		/* Follow the existing sequence for other targets */
		break;
	}

	return 0;
}

/**
 * hif_enable(): hif_enable
 * @hif_ctx: hif_ctx
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
					  void *bdev,
					  const struct hif_bus_id *bid,
					  enum qdf_bus_type bus_type,
					  enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_ctx = NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_enable_bus error = %d", status);
		return status;
	}

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hal attach failed");
		goto disable_bus;
	}

	if (hif_bus_configure(scn)) {
		hif_err("Target probe failed");
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);
	hif_register_recovery_notifier(scn);
	hif_latency_detect_timer_start(hif_ctx);

	/*
	 * Set this flag to avoid a potential unallocated-memory access
	 * from the MSI interrupt handler, which can be scheduled as soon
	 * as MSI is enabled. It guards against the race that arises when
	 * MSI is enabled before the memory used by the interrupt handlers
	 * has been allocated.
	 */
	scn->hif_init_done = true;

	hif_debug("OK");

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}
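
/*
 * Illustrative sketch: bus-layer probe code typically drives the pair
 * below (shape only; dev/bdev/bid come from the bus probe callback, and
 * the enable/disable enum values from hif.h are given as examples):
 *
 *	if (hif_enable(hif_ctx, dev, bdev, bid, QDF_BUS_TYPE_PCI,
 *		       HIF_ENABLE_TYPE_PROBE) != QDF_STATUS_SUCCESS)
 *		goto err;
 *	// ...
 *	hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
 */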

void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_set_enable_detection(hif_ctx, false);
	hif_latency_detect_timer_stop(hif_ctx);

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	hif_debug("X");
}

#ifdef CE_TASKLET_DEBUG_ENABLE
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	scn->ce_latency_stats = val;
}
#endif

void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}

qdf_export_symbol(hif_display_stats);

void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}

/**
 * hif_crash_shutdown_dump_bus_register() - dump bus registers
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)

static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		hif_err("Failed to dump bus registers!");
}

/**
 * hif_crash_shutdown(): hif_crash_shutdown
 *
 * This function is called by the platform driver to dump CE registers
 *
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_ctx)
		return;

	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
		return;
	}

	if (TARGET_STATUS_RESET == scn->target_status) {
		hif_warn("Target is already asserted, ignore!");
		return;
	}

	if (hif_is_load_or_unload_in_progress(scn)) {
		hif_err("Load/unload is in progress, ignore!");
		return;
	}

	hif_crash_shutdown_dump_bus_register(hif_ctx);
	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);

	if (ol_copy_ramdump(hif_ctx))
		goto out;

	hif_info("RAM dump collection completed!");

out:
	return;
}
#else
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	hif_debug("Collecting target RAM dump disabled");
}
#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */

#ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg(): check the firmware registers
 * @scn: scn
 *
 * Return: int
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
#endif

/**
 * hif_read_phy_mem_base(): read the physical memory base address
 * @scn: scn
 * @phy_mem_base: physical mem base
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}
qdf_export_symbol(hif_read_phy_mem_base);

/**
 * hif_get_device_type(): map a device/revision id to hif and target types
 * @device_id: device_id
 * @revision_id: revision_id
 * @hif_type: returned hif_type
 * @target_type: returned target_type
 *
 * Return: 0 on success, -ENODEV for an unsupported device
 */
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type)
{
	int ret = 0;

	switch (device_id) {
	case ADRASTEA_DEVICE_ID_P2_E12:
		*hif_type = HIF_TYPE_ADRASTEA;
		*target_type = TARGET_TYPE_ADRASTEA;
		break;

	case AR9888_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		break;

	case AR6320_DEVICE_ID:
		switch (revision_id) {
		case AR6320_FW_1_1:
		case AR6320_FW_1_3:
			*hif_type = HIF_TYPE_AR6320;
			*target_type = TARGET_TYPE_AR6320;
			break;

		case AR6320_FW_2_0:
		case AR6320_FW_3_0:
		case AR6320_FW_3_2:
			*hif_type = HIF_TYPE_AR6320V2;
			*target_type = TARGET_TYPE_AR6320V2;
			break;

		default:
			hif_err("dev_id = 0x%x, rev_id = 0x%x",
				device_id, revision_id);
			ret = -ENODEV;
			goto end;
		}
		break;

	case AR9887_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		hif_info(" *********** AR9887 **************");
		break;

	case QCA9984_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9984;
		*target_type = TARGET_TYPE_QCA9984;
		hif_info(" *********** QCA9984 *************");
		break;

	case QCA9888_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9888;
		*target_type = TARGET_TYPE_QCA9888;
		hif_info(" *********** QCA9888 *************");
		break;

	case AR900B_DEVICE_ID:
		*hif_type = HIF_TYPE_AR900B;
		*target_type = TARGET_TYPE_AR900B;
		hif_info(" *********** AR900B *************");
		break;

	case QCA8074_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074;
		*target_type = TARGET_TYPE_QCA8074;
		hif_info(" *********** QCA8074  *************");
		break;

	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6290;
		*target_type = TARGET_TYPE_QCA6290;
		hif_info(" *********** QCA6290EMU *************");
		break;

	case QCN9000_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9000;
		*target_type = TARGET_TYPE_QCN9000;
		hif_info(" *********** QCN9000 *************");
		break;

	case QCN9224_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9224;
		*target_type = TARGET_TYPE_QCN9224;
		hif_info(" *********** QCN9224 *************");
		break;

	case QCN6122_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN6122;
		*target_type = TARGET_TYPE_QCN6122;
		hif_info(" *********** QCN6122 *************");
		break;

	case QCN9160_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9160;
		*target_type = TARGET_TYPE_QCN9160;
		hif_info(" *********** QCN9160 *************");
		break;

	case QCN7605_DEVICE_ID:
	case QCN7605_COMPOSITE:
	case QCN7605_STANDALONE:
	case QCN7605_STANDALONE_V2:
	case QCN7605_COMPOSITE_V2:
		*hif_type = HIF_TYPE_QCN7605;
		*target_type = TARGET_TYPE_QCN7605;
		hif_info(" *********** QCN7605 *************");
		break;

	case QCA6390_DEVICE_ID:
	case QCA6390_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6390;
		*target_type = TARGET_TYPE_QCA6390;
		hif_info(" *********** QCA6390 *************");
		break;

	case QCA6490_DEVICE_ID:
	case QCA6490_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6490;
		*target_type = TARGET_TYPE_QCA6490;
		hif_info(" *********** QCA6490 *************");
		break;

	case QCA6750_DEVICE_ID:
	case QCA6750_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6750;
		*target_type = TARGET_TYPE_QCA6750;
		hif_info(" *********** QCA6750 *************");
		break;

	case KIWI_DEVICE_ID:
		*hif_type = HIF_TYPE_KIWI;
		*target_type = TARGET_TYPE_KIWI;
		hif_info(" *********** KIWI *************");
		break;

	case MANGO_DEVICE_ID:
		*hif_type = HIF_TYPE_MANGO;
		*target_type = TARGET_TYPE_MANGO;
		hif_info(" *********** MANGO *************");
		break;

	case QCA8074V2_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074V2;
		*target_type = TARGET_TYPE_QCA8074V2;
		hif_info(" *********** QCA8074V2 *************");
		break;

	case QCA6018_DEVICE_ID:
	case RUMIM2M_DEVICE_ID_NODE0:
	case RUMIM2M_DEVICE_ID_NODE1:
	case RUMIM2M_DEVICE_ID_NODE2:
	case RUMIM2M_DEVICE_ID_NODE3:
	case RUMIM2M_DEVICE_ID_NODE4:
	case RUMIM2M_DEVICE_ID_NODE5:
		*hif_type = HIF_TYPE_QCA6018;
		*target_type = TARGET_TYPE_QCA6018;
		hif_info(" *********** QCA6018 *************");
		break;

	case QCA5018_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5018;
		*target_type = TARGET_TYPE_QCA5018;
		hif_info(" *********** qca5018 *************");
		break;

	case QCA5332_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5332;
		*target_type = TARGET_TYPE_QCA5332;
		hif_info(" *********** QCA5332 *************");
		break;

	case QCA9574_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9574;
		*target_type = TARGET_TYPE_QCA9574;
		hif_info(" *********** QCA9574 *************");
		break;

	default:
		hif_err("Unsupported device ID = 0x%x!", device_id);
		ret = -ENODEV;
		break;
	}

	if (*target_type == TARGET_TYPE_UNKNOWN) {
		hif_err("Unsupported target_type!");
		ret = -ENODEV;
	}
end:
	return ret;
}
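
/*
 * Illustrative sketch: PCI probe code maps a PCI ID to hif/target types
 * (shape only; error handling elided):
 *
 *	uint32_t hif_type, target_type;
 *
 *	if (hif_get_device_type(pdev->device, pdev->revision,
 *				&hif_type, &target_type))
 *		return -ENODEV;	// unsupported device/revision
 */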

/**
 * hif_get_bus_type() - return the bus type
 * @hif_hdl: hif handle
 *
 * Return: enum qdf_bus_type
 */
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	return scn->bus_type;
}

/*
 * Target info and ini parameters are global to the driver.
 * These structures are therefore exposed to all the modules in the
 * driver, so they don't need to maintain multiple copies of the same
 * info; instead, get the handle from hif and modify them through hif.
 */

/**
 * hif_get_ini_handle() - API to get hif_config_param handle
 * @hif_ctx: HIF Context
 *
 * Return: pointer to hif_config_info
 */
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->hif_config;
}

/**
 * hif_get_target_info_handle() - API to get hif_target_info handle
 * @hif_ctx: HIF context
 *
 * Return: Pointer to hif_target_info
 */
struct hif_target_info *hif_get_target_info_handle(
					struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->target_info;
}
qdf_export_symbol(hif_get_target_info_handle);

#ifdef RECEIVE_OFFLOAD
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
	else
		hif_err("NAPI not enabled");
}
qdf_export_symbol(hif_offld_flush_cb_register);

void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_deregister(scn);
	else
		hif_err("NAPI not enabled");
}
qdf_export_symbol(hif_offld_flush_cb_deregister);

int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return NAPI_PIPE2ID(ctx_id);
	else
		return ctx_id;
}
#else /* RECEIVE_OFFLOAD */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
#endif /* RECEIVE_OFFLOAD */

#if defined(FEATURE_LRO)

/**
 * hif_get_lro_info - Returns LRO instance for instance ID
 * @ctx_id: LRO instance ID
 * @hif_hdl: HIF Context
 *
 * Return: Pointer to LRO instance.
 */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	void *data;

	if (hif_napi_enabled(hif_hdl, -1))
		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
	else
		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);

	return data;
}
#endif

/**
 * hif_get_target_status - API to get target status
 * @hif_ctx: HIF Context
 *
 * Return: enum hif_target_status
 */
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->target_status;
}
qdf_export_symbol(hif_get_target_status);

/**
 * hif_set_target_status() - API to set target status
 * @hif_ctx: HIF Context
 * @status: Target Status
 *
 * Return: void
 */
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status status)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->target_status = status;
}

/**
 * hif_init_ini_config() - API to initialize HIF configuration parameters
 * @hif_ctx: HIF Context
 * @cfg: HIF Configuration
 *
 * Return: void
 */
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
}

/**
 * hif_get_conparam() - API to get driver mode in HIF
 * @scn: HIF Context
 *
 * Return: driver mode of operation
 */
uint32_t hif_get_conparam(struct hif_softc *scn)
{
	if (!scn)
		return 0;

	return scn->hif_con_param;
}

/**
 * hif_get_callbacks_handle() - API to get callbacks Handle
 * @scn: HIF Context
 *
 * Return: pointer to HIF Callbacks
 */
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn)
{
	return &scn->callbacks;
}

/**
 * hif_is_driver_unloading() - API to query upper layers if driver is unloading
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_driver_unloading(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_driver_unloading)
		return cbk->is_driver_unloading(cbk->context);

	return false;
}

/**
 * hif_is_load_or_unload_in_progress() - API to query upper layers if
 * load/unload in progress
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_load_unload_in_progress)
		return cbk->is_load_unload_in_progress(cbk->context);

	return false;
}

/**
 * hif_is_recovery_in_progress() - API to query upper layers if recovery in
 * progress
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_recovery_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_recovery_in_progress)
		return cbk->is_recovery_in_progress(cbk->context);

	return false;
}

#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
    defined(HIF_IPCI)

/**
 * hif_update_pipe_callback() - API to register pipe specific callbacks
 * @osc: Opaque softc
 * @pipeid: pipe id
 * @callbacks: callbacks to register
 *
 * Return: void
 */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
					u_int8_t pipeid,
					struct hif_msg_callbacks *callbacks)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	QDF_BUG(pipeid < CE_COUNT_MAX);

	hif_debug("pipeid: %d", pipeid);

	pipe_info = &hif_state->pipe_info[pipeid];

	qdf_mem_copy(&pipe_info->pipe_callbacks,
			callbacks, sizeof(pipe_info->pipe_callbacks));
}
qdf_export_symbol(hif_update_pipe_callback);

/**
 * hif_is_target_ready() - API to query if target is in ready state
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_target_ready(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_target_ready)
		return cbk->is_target_ready(cbk->context);
	/*
	 * If the callback is not registered then there is no way to
	 * determine whether the target is ready. In such a case return
	 * true to indicate that the target is ready.
	 */
	return true;
}
qdf_export_symbol(hif_is_target_ready);
2016 
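/**
 * hif_get_bandwidth_level() - API to get the current bandwidth level
 * @hif_handle: HIF Context
 *
 * Queries the upper layer through the get_bandwidth_level callback, if one
 * is registered.
 *
 * Return: bandwidth level reported by the upper layer, or 0 when no
 * callback is registered
 */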
2017 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
2018 {
2019 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
2020 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2021 
2022 	if (cbk && cbk->get_bandwidth_level)
2023 		return cbk->get_bandwidth_level(cbk->context);
2024 
2025 	return 0;
2026 }
2027 
2028 qdf_export_symbol(hif_get_bandwidth_level);
2029 
2030 #ifdef DP_MEM_PRE_ALLOC
2031 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
2032 					 qdf_size_t size,
2033 					 qdf_dma_addr_t *paddr,
2034 					 uint32_t ring_type,
2035 					 uint8_t *is_mem_prealloc)
2036 {
2037 	void *vaddr = NULL;
2038 	struct hif_driver_state_callbacks *cbk =
2039 				hif_get_callbacks_handle(scn);
2040 
2041 	*is_mem_prealloc = false;
2042 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
2043 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
2044 								   paddr,
2045 								   ring_type);
2046 		if (vaddr) {
2047 			*is_mem_prealloc = true;
2048 			goto end;
2049 		}
2050 	}
2051 
2052 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
2053 					 scn->qdf_dev->dev,
2054 					 size,
2055 					 paddr);
2056 end:
2057 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
2058 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
2059 		(void *)*paddr, (int)size, ring_type);
2060 
2061 	return vaddr;
2062 }
2063 
2064 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
2065 				       qdf_size_t size,
2066 				       void *vaddr,
2067 				       qdf_dma_addr_t paddr,
2068 				       qdf_dma_context_t memctx,
2069 				       uint8_t is_mem_prealloc)
2070 {
2071 	struct hif_driver_state_callbacks *cbk =
2072 				hif_get_callbacks_handle(scn);
2073 
2074 	if (is_mem_prealloc) {
2075 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
2076 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
2077 		} else {
			dp_warn("prealloc_put_consistent_mem_unaligned is NULL");
2079 			QDF_BUG(0);
2080 		}
2081 	} else {
2082 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2083 					size, vaddr, paddr, memctx);
2084 	}
2085 }
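
/*
 * Pairing sketch (hypothetical ring setup, for illustration only): the
 * is_mem_prealloc flag returned by the alloc call must be handed back
 * unchanged on free, so the buffer is returned to the pre-allocation pool
 * it came from rather than to the DMA allocator:
 *
 *	uint8_t prealloc;
 *	void *va = hif_mem_alloc_consistent_unaligned(scn, size, &pa,
 *						      ring_type, &prealloc);
 *	...
 *	hif_mem_free_consistent_unaligned(scn, size, va, pa, memctx,
 *					  prealloc);
 */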
2086 #endif
2087 
2088 /**
2089  * hif_batch_send() - API to access hif specific function
2090  * ce_batch_send.
2091  * @osc: HIF Context
 * @msdu : list of msdus to be sent
 * @transfer_id : transfer id
 * @len : downloaded length
 * @sendhead : sendhead flag, passed through unchanged to ce_batch_send
 *
 * Return: list of msdus not sent
2097  */
2098 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2099 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
2100 {
2101 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2102 
2103 	if (!ce_tx_hdl)
2104 		return NULL;
2105 
2106 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2107 			len, sendhead);
2108 }
2109 qdf_export_symbol(hif_batch_send);
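
/*
 * Usage sketch (hypothetical, for illustration only): the return value is
 * the sub-list of msdus that could not be sent, so callers are expected to
 * requeue or free the remainder:
 *
 *	qdf_nbuf_t unsent;
 *
 *	unsent = hif_batch_send(osc, msdu_list, transfer_id, len, sendhead);
 *	if (unsent)
 *		... requeue or free the unsent msdus ...
 */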
2110 
2111 /**
2112  * hif_update_tx_ring() - API to access hif specific function
2113  * ce_update_tx_ring.
2114  * @osc: HIF Context
 * @num_htt_cmpls : number of HTT completions received
2116  *
2117  * Return: void
2118  */
2119 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2120 {
2121 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2122 
2123 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2124 }
2125 qdf_export_symbol(hif_update_tx_ring);
2126 
2128 /**
2129  * hif_send_single() - API to access hif specific function
2130  * ce_send_single.
2131  * @osc: HIF Context
2132  * @msdu : msdu to be sent
2133  * @transfer_id: transfer id
2134  * @len : downloaded length
2135  *
2136  * Return: msdu sent status
2137  */
2138 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2139 			   uint32_t transfer_id, u_int32_t len)
2140 {
2141 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2142 
2143 	if (!ce_tx_hdl)
2144 		return QDF_STATUS_E_NULL_VALUE;
2145 
2146 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2147 			len);
2148 }
2149 qdf_export_symbol(hif_send_single);
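
/*
 * Usage sketch (hypothetical, for illustration only): unlike
 * hif_batch_send(), the single-send path reports a status code instead of
 * an unsent list:
 *
 *	QDF_STATUS status = hif_send_single(osc, msdu, transfer_id, len);
 *
 *	if (QDF_IS_STATUS_ERROR(status))
 *		... recover, e.g. requeue or drop the msdu ...
 */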
2150 #endif
2151 
2152 /**
2153  * hif_reg_write() - API to access hif specific function
2154  * hif_write32_mb.
2155  * @hif_ctx : HIF Context
 * @offset : offset at which the value is written
2157  * @value : value to be written
2158  *
2159  * Return: None
2160  */
2161 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2162 		uint32_t value)
2163 {
2164 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2165 
2166 	hif_write32_mb(scn, scn->mem + offset, value);
2168 }
2169 qdf_export_symbol(hif_reg_write);
2170 
2171 /**
2172  * hif_reg_read() - API to access hif specific function
2173  * hif_read32_mb.
2174  * @hif_ctx : HIF Context
 * @offset : offset from which the value is read
2176  *
2177  * Return: Read value
2178  */
2179 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
2180 {
2182 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2183 
2184 	return hif_read32_mb(scn, scn->mem + offset);
2185 }
2186 qdf_export_symbol(hif_reg_read);
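
/*
 * Example (hypothetical offset and bit, for illustration only): the two
 * accessors above combine into a read-modify-write sequence; note that
 * offsets are relative to scn->mem, the mapped register base:
 *
 *	uint32_t val;
 *
 *	val = hif_reg_read(hif_ctx, reg_offset);
 *	hif_reg_write(hif_ctx, reg_offset, val | BIT(0));
 */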
2187 
2188 /**
2189  * hif_ramdump_handler(): generic ramdump handler
 * @scn: HIF opaque context
2191  *
2192  * Return: None
2193  */
2194 void hif_ramdump_handler(struct hif_opaque_softc *scn)
2195 {
2196 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2197 		hif_usb_ramdump_handler(scn);
2198 }
2199 
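/**
 * hif_pm_get_wake_irq_type() - API to get the wake irq type
 * @hif_ctx: HIF Context
 *
 * Return: wake irq type configured for this hif instance
 */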
2200 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
2201 {
2202 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2203 
2204 	return scn->wake_irq_type;
2205 }
2206 
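/**
 * hif_wake_interrupt_handler() - interrupt handler for the wake irq
 * @irq: irq number that fired
 * @context: hif context registered with the irq
 *
 * Stops runtime-PM wake-interrupt monitoring, requests a runtime resume,
 * invokes the initial wakeup callback if one is registered, resumes a
 * unit-test suspend if one is active, and wakes the system PM core.
 *
 * Return: IRQ_HANDLED
 */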
2207 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
2208 {
2209 	struct hif_softc *scn = context;
2210 
2211 	hif_info("wake interrupt received on irq %d", irq);
2212 
2213 	hif_rtpm_set_monitor_wake_intr(0);
2214 	hif_rtpm_request_resume();
2215 
2216 	if (scn->initial_wakeup_cb)
2217 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
2218 
2219 	if (hif_is_ut_suspended(scn))
2220 		hif_ut_fw_resume(scn);
2221 
2222 	qdf_pm_system_wakeup();
2223 
2224 	return IRQ_HANDLED;
2225 }
2226 
2227 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2228 			       void (*callback)(void *),
2229 			       void *priv)
2230 {
2231 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2232 
2233 	scn->initial_wakeup_cb = callback;
2234 	scn->initial_wakeup_priv = priv;
2235 }
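
/*
 * Usage sketch (hypothetical callback, for illustration only): register a
 * notification that hif_wake_interrupt_handler() will invoke on wakeup:
 *
 *	static void my_wakeup_notify(void *priv)
 *	{
 *		...
 *	}
 *
 *	hif_set_initial_wakeup_cb(hif_ctx, my_wakeup_notify, my_priv);
 */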
2236 
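/*
 * Note: the value passed in is stored pre-multiplied by 1000, so readers
 * using hif_get_ce_service_max_yield_time() see the scaled value.
 */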
2237 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2238 				       uint32_t ce_service_max_yield_time)
2239 {
2240 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2241 
2242 	hif_ctx->ce_service_max_yield_time =
2243 		ce_service_max_yield_time * 1000;
2244 }
2245 
2246 unsigned long long
2247 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
2248 {
2249 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2250 
2251 	return hif_ctx->ce_service_max_yield_time;
2252 }
2253 
2254 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2255 				       uint8_t ce_service_max_rx_ind_flush)
2256 {
2257 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2258 
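	/*
	 * Out-of-range values (0, or greater than MSG_FLUSH_NUM) fall back
	 * to the MSG_FLUSH_NUM default
	 */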
2259 	if (ce_service_max_rx_ind_flush == 0 ||
2260 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
2261 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2262 	else
2263 		hif_ctx->ce_service_max_rx_ind_flush =
2264 						ce_service_max_rx_ind_flush;
2265 }
2266 
2267 #ifdef SYSTEM_PM_CHECK
2268 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2269 			       enum hif_system_pm_state state)
2270 {
2271 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2272 
2273 	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
2274 }
2275 
2276 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
2277 {
2278 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2279 
2280 	return qdf_atomic_read(&hif_ctx->sys_pm_state);
2281 }
2282 
2283 int hif_system_pm_state_check(struct hif_opaque_softc *hif)
2284 {
2285 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2286 	int32_t sys_pm_state;
2287 
2288 	if (!hif_ctx) {
2289 		hif_err("hif context is null");
2290 		return -EFAULT;
2291 	}
2292 
2293 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
2294 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
2295 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
2296 		hif_info("Triggering system wakeup");
2297 		qdf_pm_system_wakeup();
2298 		return -EAGAIN;
2299 	}
2300 
2301 	return 0;
2302 }
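
/*
 * Usage sketch (hypothetical caller, for illustration only): bus access
 * paths can gate on the system PM state and back off while a suspend is
 * in flight:
 *
 *	int ret = hif_system_pm_state_check(hif);
 *
 *	if (ret == -EAGAIN)
 *		... defer the access; a system wakeup has been triggered ...
 *	else if (ret)
 *		... invalid hif context ...
 */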
2303 #endif
2304