xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "targcfg.h"
21 #include "qdf_lock.h"
22 #include "qdf_status.h"
24 #include <qdf_atomic.h>         /* qdf_atomic_read */
25 #include <targaddrs.h>
26 #include "hif_io32.h"
27 #include <hif.h>
28 #include <target_type.h>
29 #include "regtable.h"
30 #define ATH_MODULE_NAME hif
31 #include <a_debug.h>
32 #include "hif_main.h"
33 #include "hif_hw_version.h"
34 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
35      defined(HIF_IPCI))
36 #include "ce_tasklet.h"
37 #include "ce_api.h"
38 #endif
39 #include "qdf_trace.h"
41 #include "hif_debug.h"
42 #include "mp_dev.h"
43 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
44 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
45 	defined(QCA_WIFI_QCA5332)
46 #include "hal_api.h"
47 #endif
48 #include "hif_napi.h"
49 #include "hif_unit_test_suspend_i.h"
50 #include "qdf_module.h"
51 #ifdef HIF_CE_LOG_INFO
52 #include <qdf_notifier.h>
53 #include <qdf_hang_event_notifier.h>
54 #endif
55 #include <linux/cpumask.h>
56 
57 #include <pld_common.h>
58 
59 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
60 {
61 	hif_trigger_dump(hif_ctx, cmd_id, start);
62 }
63 
64 /**
65  * hif_get_target_id(): get the target id
66  * @scn: hif context
67  *
68  * Return the virtual memory base address of the target
69  * to the caller.
70  *
71  * Return: A_target_id_t
73  */
74 A_target_id_t hif_get_target_id(struct hif_softc *scn)
75 {
76 	return scn->mem;
77 }
78 
79 /**
80  * hif_get_targetdef(): hif_get_targetdef
81  * @hif_ctx: hif context
82  *
83  * Return: void *
84  */
85 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
86 {
87 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
88 
89 	return scn->targetdef;
90 }
91 
92 #ifdef FORCE_WAKE
93 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
94 			 bool init_phase)
95 {
96 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
97 
98 	if (ce_srng_based(scn))
99 		hal_set_init_phase(scn->hal_soc, init_phase);
100 }
101 #endif /* FORCE_WAKE */
102 
103 #ifdef HIF_IPCI
104 void hif_shutdown_notifier_cb(void *hif_ctx)
105 {
106 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
107 
108 	scn->recovery = true;
109 }
110 #endif
111 
112 /**
113  * hif_vote_link_down(): unvote for link up
114  * @hif_ctx: hif context
115  *
116  * Call hif_vote_link_down to release a previous request made using
117  * hif_vote_link_up. A hif_vote_link_down call should only be made
118  * after a corresponding hif_vote_link_up, otherwise you could be
119  * negating a vote from another source. When no votes are present
120  * hif will not guarantee the linkstate after hif_bus_suspend.
121  *
122  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
123  * and initialization/deinitialization sequences.
124  *
125  * Return: n/a
126  */
127 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
128 {
129 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
130 
131 	QDF_BUG(scn);
132 	if (scn->linkstate_vote == 0)
133 		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
134 				scn->linkstate_vote);
135 
136 	scn->linkstate_vote--;
137 	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
138 	if (scn->linkstate_vote == 0)
139 		hif_bus_prevent_linkdown(scn, false);
140 }
141 
142 /**
143  * hif_vote_link_up(): vote to prevent bus from suspending
144  * @hif_ctx: hif context
145  *
146  * Makes hif guarantee that fw can message the host normally
147  * during suspend.
148  *
149  * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
150  * and initialization/deinitialization sequences.
151  *
152  * Return: n/a
153  */
154 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
155 {
156 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
157 
158 	QDF_BUG(scn);
159 	scn->linkstate_vote++;
160 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
161 	if (scn->linkstate_vote == 1)
162 		hif_bus_prevent_linkdown(scn, true);
163 }
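/*
 * Illustrative usage sketch (not part of this file; wma_send_msg() is
 * a hypothetical placeholder): a caller that needs the bus link held
 * up across a firmware exchange pairs the votes:
 *
 *	hif_vote_link_up(hif_ctx);
 *	wma_send_msg(...);         // link guaranteed up, even in suspend
 *	hif_vote_link_down(hif_ctx);
 *
 * Votes nest; the link may only go down again once linkstate_vote
 * drops back to zero in hif_vote_link_down().
 */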
164 
165 /**
166  * hif_can_suspend_link(): query if hif is permitted to suspend the link
167  * @hif_ctx: hif context
168  *
169  * Hif will ensure that the link won't be suspended if the upper
170  * layers don't want it to.
171  *
172  * SYNCHRONIZATION: MC thread is stopped before bus suspend, thus
173  * we don't need extra locking to ensure votes don't change while
174  * we are in the process of suspending or resuming.
175  *
176  * Return: false if hif will guarantee link up during suspend.
177  */
178 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
179 {
180 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
181 
182 	QDF_BUG(scn);
183 	return scn->linkstate_vote == 0;
184 }
185 
186 /**
187  * hif_hia_item_address(): get the host interest item address
188  * @target_type: target type
189  * @item_offset: offset of the item within the host interest area
190  *
191  * Return: address of the host interest item, or 0 if unsupported
192  */
193 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
194 {
195 	switch (target_type) {
196 	case TARGET_TYPE_AR6002:
197 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
198 	case TARGET_TYPE_AR6003:
199 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
200 	case TARGET_TYPE_AR6004:
201 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
202 	case TARGET_TYPE_AR6006:
203 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
204 	case TARGET_TYPE_AR9888:
205 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
206 	case TARGET_TYPE_AR6320:
207 	case TARGET_TYPE_AR6320V2:
208 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
209 	case TARGET_TYPE_ADRASTEA:
210 		/* ADRASTEA doesn't have a host interest address */
211 		ASSERT(0);
212 		return 0;
213 	case TARGET_TYPE_AR900B:
214 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
215 	case TARGET_TYPE_QCA9984:
216 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
217 	case TARGET_TYPE_QCA9888:
218 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
219 
220 	default:
221 		ASSERT(0);
222 		return 0;
223 	}
224 }
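/*
 * Minimal sketch of how callers typically form a host interest
 * address, assuming a target with a HOST_INTEREST layout; the
 * struct/field names below are illustrative:
 *
 *	uint32_t addr;
 *
 *	addr = hif_hia_item_address(TARGET_TYPE_AR9888,
 *				    offsetof(struct host_interest_s,
 *					     hi_board_data));
 *	// addr can then be used for a diag read of target memory
 */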
225 
226 /**
227  * hif_max_num_receives_reached() - check whether max receives reached
228  * @scn: HIF Context
229  * @count: number of receives processed so far
230  *
231  * Checks @count against the configured maximum
232  *
233  * Return: true if the maximum number of receives is reached
234  */
235 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
236 {
237 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
238 		return count > 120;
239 	else
240 		return count > MAX_NUM_OF_RECEIVES;
241 }
242 
243 /**
244  * init_buffer_count() - get the initial buffer count
245  * @maxSize: maximum buffer count
246  *
247  * Routine to modify the initial buffer count to be allocated on an OS
248  * platform basis. Platform owners will need to modify this as needed.
249  *
250  * Return: qdf_size_t
251  */
252 qdf_size_t init_buffer_count(qdf_size_t maxSize)
253 {
254 	return maxSize;
255 }
256 
257 /**
258  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
259  * @hif_ctx: hif context
260  * @htc_htt_tx_endpoint: htt_tx_endpoint
261  *
262  * Return: void
263  */
264 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
265 							int htc_htt_tx_endpoint)
266 {
267 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
268 
269 	if (!scn) {
270 		hif_err("scn is NULL!");
271 		return;
272 	}
273 
274 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
275 }
276 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
277 
278 static const struct qwlan_hw qwlan_hw_list[] = {
279 	{
280 		.id = AR6320_REV1_VERSION,
281 		.subid = 0,
282 		.name = "QCA6174_REV1",
283 	},
284 	{
285 		.id = AR6320_REV1_1_VERSION,
286 		.subid = 0x1,
287 		.name = "QCA6174_REV1_1",
288 	},
289 	{
290 		.id = AR6320_REV1_3_VERSION,
291 		.subid = 0x2,
292 		.name = "QCA6174_REV1_3",
293 	},
294 	{
295 		.id = AR6320_REV2_1_VERSION,
296 		.subid = 0x4,
297 		.name = "QCA6174_REV2_1",
298 	},
299 	{
300 		.id = AR6320_REV2_1_VERSION,
301 		.subid = 0x5,
302 		.name = "QCA6174_REV2_2",
303 	},
304 	{
305 		.id = AR6320_REV3_VERSION,
306 		.subid = 0x6,
307 		.name = "QCA6174_REV2.3",
308 	},
309 	{
310 		.id = AR6320_REV3_VERSION,
311 		.subid = 0x8,
312 		.name = "QCA6174_REV3",
313 	},
314 	{
315 		.id = AR6320_REV3_VERSION,
316 		.subid = 0x9,
317 		.name = "QCA6174_REV3_1",
318 	},
319 	{
320 		.id = AR6320_REV3_2_VERSION,
321 		.subid = 0xA,
322 		.name = "AR6320_REV3_2_VERSION",
323 	},
324 	{
325 		.id = QCA6390_V1,
326 		.subid = 0x0,
327 		.name = "QCA6390_V1",
328 	},
329 	{
330 		.id = QCA6490_V1,
331 		.subid = 0x0,
332 		.name = "QCA6490_V1",
333 	},
334 	{
335 		.id = WCN3990_v1,
336 		.subid = 0x0,
337 		.name = "WCN3990_V1",
338 	},
339 	{
340 		.id = WCN3990_v2,
341 		.subid = 0x0,
342 		.name = "WCN3990_V2",
343 	},
344 	{
345 		.id = WCN3990_v2_1,
346 		.subid = 0x0,
347 		.name = "WCN3990_V2.1",
348 	},
349 	{
350 		.id = WCN3998,
351 		.subid = 0x0,
352 		.name = "WCN3998",
353 	},
354 	{
355 		.id = QCA9379_REV1_VERSION,
356 		.subid = 0xC,
357 		.name = "QCA9379_REV1",
358 	},
359 	{
360 		.id = QCA9379_REV1_VERSION,
361 		.subid = 0xD,
362 		.name = "QCA9379_REV1_1",
363 	},
364 	{
365 		.id = MANGO_V1,
366 		.subid = 0xF,
367 		.name = "MANGO_V1",
368 	},
369 	{
370 		.id = PEACH_V1,
371 		.subid = 0,
372 		.name = "PEACH_V1",
373 	},
374 
375 	{
376 		.id = KIWI_V1,
377 		.subid = 0,
378 		.name = "KIWI_V1",
379 	},
380 	{
381 		.id = KIWI_V2,
382 		.subid = 0,
383 		.name = "KIWI_V2",
384 	},
385 	{
386 		.id = WCN6750_V1,
387 		.subid = 0,
388 		.name = "WCN6750_V1",
389 	},
390 	{
391 		.id = QCA6490_v2_1,
392 		.subid = 0,
393 		.name = "QCA6490",
394 	},
395 	{
396 		.id = QCA6490_v2,
397 		.subid = 0,
398 		.name = "QCA6490",
399 	},
400 	{
401 		.id = WCN3990_v2_2,
402 		.subid = 0,
403 		.name = "WCN3990_v2_2",
404 	},
405 	{
406 		.id = WCN3990_TALOS,
407 		.subid = 0,
408 		.name = "WCN3990",
409 	},
410 	{
411 		.id = WCN3990_MOOREA,
412 		.subid = 0,
413 		.name = "WCN3990",
414 	},
415 	{
416 		.id = WCN3990_SAIPAN,
417 		.subid = 0,
418 		.name = "WCN3990",
419 	},
420 	{
421 		.id = WCN3990_RENNELL,
422 		.subid = 0,
423 		.name = "WCN3990",
424 	},
425 	{
426 		.id = WCN3990_BITRA,
427 		.subid = 0,
428 		.name = "WCN3990",
429 	},
430 	{
431 		.id = WCN3990_DIVAR,
432 		.subid = 0,
433 		.name = "WCN3990",
434 	},
435 	{
436 		.id = WCN3990_ATHERTON,
437 		.subid = 0,
438 		.name = "WCN3990",
439 	},
440 	{
441 		.id = WCN3990_STRAIT,
442 		.subid = 0,
443 		.name = "WCN3990",
444 	},
445 	{
446 		.id = WCN3990_NETRANI,
447 		.subid = 0,
448 		.name = "WCN3990",
449 	},
450 	{
451 		.id = WCN3990_CLARENCE,
452 		.subid = 0,
453 		.name = "WCN3990",
454 	}
455 };
456 
457 /**
458  * hif_get_hw_name(): get a human readable name for the hardware
459  * @info: Target Info
460  *
461  * Return: human readable name for the underlying wifi hardware.
462  */
463 static const char *hif_get_hw_name(struct hif_target_info *info)
464 {
465 	int i;
466 
467 	hif_debug("target version = %d, target revision = %d",
468 		  info->target_version,
469 		  info->target_revision);
470 
471 	if (info->hw_name)
472 		return info->hw_name;
473 
474 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
475 		if (info->target_version == qwlan_hw_list[i].id &&
476 		    info->target_revision == qwlan_hw_list[i].subid) {
477 			return qwlan_hw_list[i].name;
478 		}
479 	}
480 
481 	info->hw_name = qdf_mem_malloc(64);
482 	if (!info->hw_name)
483 		return "Unknown Device (nomem)";
484 
485 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
486 			info->target_version);
487 	if (i < 0)
488 		return "Unknown Device (snprintf failure)";
489 	else
490 		return info->hw_name;
491 }
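/*
 * Note on the fallback above: when no qwlan_hw_list entry matches, a
 * 64-byte name of the form "HW_VERSION=<version>." is allocated and
 * cached in info->hw_name; hif_close() frees it later (see the
 * hw_name handling there).
 */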
492 
493 /**
494  * hif_get_hw_info(): hif_get_hw_info
495  * @scn: scn
496  * @version: version
497  * @revision: revision
498  * @target_name: target name
499  *
500  * Return: n/a
501  */
502 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
503 			const char **target_name)
504 {
505 	struct hif_target_info *info = hif_get_target_info_handle(scn);
506 	struct hif_softc *sc = HIF_GET_SOFTC(scn);
507 
508 	if (sc->bus_type == QDF_BUS_TYPE_USB)
509 		hif_usb_get_hw_info(sc);
510 
511 	*version = info->target_version;
512 	*revision = info->target_revision;
513 	*target_name = hif_get_hw_name(info);
514 }
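/*
 * Illustrative caller sketch (variable names are placeholders):
 *
 *	u32 version, revision;
 *	const char *name;
 *
 *	hif_get_hw_info(hif_ctx, &version, &revision, &name);
 *	hif_info("wifi hw: %s (version 0x%x, rev 0x%x)",
 *		 name, version, revision);
 */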
515 
516 /**
517  * hif_get_dev_ba(): API to get device base address.
518  * @hif_handle: hif handle
519  *
520  * Return: device base address
521  */
522 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
523 {
524 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
525 
526 	return scn->mem;
527 }
528 qdf_export_symbol(hif_get_dev_ba);
529 
530 /**
531  * hif_get_dev_ba_ce(): API to get device ce base address.
532  * @hif_handle: hif handle
533  *
534  * Return: dev mem base address for CE
535  */
536 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
537 {
538 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
539 
540 	return scn->mem_ce;
541 }
542 
543 qdf_export_symbol(hif_get_dev_ba_ce);
544 
545 /**
546  * hif_get_dev_ba_pmm(): API to get device pmm base address.
547  * @hif_handle: hif handle
548  *
549  * Return: dev mem base address for PMM
550  */
551 
552 void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle)
553 {
554 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
555 
556 	return scn->mem_pmm_base;
557 }
558 
559 qdf_export_symbol(hif_get_dev_ba_pmm);
560 
561 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
562 {
563 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
564 
565 	return scn->target_info.soc_version;
566 }
567 
568 qdf_export_symbol(hif_get_soc_version);
569 
570 /**
571  * hif_get_dev_ba_cmem(): API to get device cmem base address.
572  * @hif_handle: hif handle
573  *
574  * Return: dev mem base address for CMEM
575  */
576 void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
577 {
578 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
579 
580 	return scn->mem_cmem;
581 }
582 
583 qdf_export_symbol(hif_get_dev_ba_cmem);
584 
585 #ifdef FEATURE_RUNTIME_PM
586 void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
587 {
588 	if (is_get)
589 		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
590 	else
591 		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
592 }
593 
594 static inline
595 void hif_rtpm_lock_init(struct hif_softc *scn)
596 {
597 	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
598 }
599 
600 static inline
601 void hif_rtpm_lock_deinit(struct hif_softc *scn)
602 {
603 	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
604 }
605 #else
606 static inline
607 void hif_rtpm_lock_init(struct hif_softc *scn)
608 {
609 }
610 
611 static inline
612 void hif_rtpm_lock_deinit(struct hif_softc *scn)
613 {
614 }
615 #endif
616 
617 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
618 /**
619  * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
620  * @scn: hif context
621  * @psoc: psoc objmgr handle
622  *
623  * Return: None
624  */
625 static inline
626 void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
627 					       struct wlan_objmgr_psoc *psoc)
628 {
629 	if (psoc) {
630 		scn->ini_cfg.ce_status_ring_timer_threshold =
631 			cfg_get(psoc,
632 				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
633 		scn->ini_cfg.ce_status_ring_batch_count_threshold =
634 			cfg_get(psoc,
635 				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
636 	}
637 }
638 #else
639 static inline
640 void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
641 					       struct wlan_objmgr_psoc *psoc)
642 {
643 }
644 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
645 
646 /**
647  * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
648  * @scn: hif context
649  * @psoc: psoc objmgr handle
650  *
651  * Return: None
652  */
653 static inline
654 void hif_get_cfg_from_psoc(struct hif_softc *scn,
655 			   struct wlan_objmgr_psoc *psoc)
656 {
657 	if (psoc) {
658 		scn->ini_cfg.disable_wake_irq =
659 			cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
660 		/*
661 		 * Wake IRQ can't share the same IRQ with the copy engines.
662 		 * In one MSI mode, we don't know whether the wake IRQ was
663 		 * triggered or not in the wake IRQ handler (known issue,
664 		 * CR 2055359). To support wake IRQ, allocate at least two
665 		 * MSI vectors: the first is for the wake IRQ while the
666 		 * others share the second vector.
667 		 */
668 		if (pld_is_one_msi(scn->qdf_dev->dev)) {
669 			hif_debug("Disable wake IRQ in one MSI mode");
670 			scn->ini_cfg.disable_wake_irq = true;
671 		}
672 		hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
673 	}
674 }
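/*
 * Example MSI layout implied by the comment above (illustrative): with
 * N >= 2 vectors, vector 0 carries the wake IRQ and vectors 1..N-1 are
 * shared by the copy engines. With exactly one vector, a wake
 * interrupt and a CE interrupt would be indistinguishable in the
 * handler, so the wake IRQ is disabled here.
 */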
675 
676 #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
677 /**
678  * hif_recovery_notifier_cb - Recovery notifier callback to log
679  *  hang event data
680  * @block: notifier block
681  * @state: state
682  * @data: notifier data
683  *
684  * Return: status
685  */
686 static
687 int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
688 			     void *data)
689 {
690 	struct qdf_notifer_data *notif_data = data;
691 	qdf_notif_block *notif_block;
692 	struct hif_softc *hif_handle;
693 	bool bus_id_invalid;
694 
695 	if (!data || !block)
696 		return -EINVAL;
697 
698 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
699 
700 	hif_handle = notif_block->priv_data;
701 	if (!hif_handle)
702 		return -EINVAL;
703 
704 	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
705 					  &notif_data->offset);
706 	if (bus_id_invalid)
707 		return NOTIFY_STOP_MASK;
708 
709 	hif_log_ce_info(hif_handle, notif_data->hang_data,
710 			&notif_data->offset);
711 
712 	return 0;
713 }
714 
715 /**
716  * hif_register_recovery_notifier - Register hif recovery notifier
717  * @hif_handle: hif handle
718  *
719  * Return: status
720  */
721 static
722 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
723 {
724 	qdf_notif_block *hif_notifier;
725 
726 	if (!hif_handle)
727 		return QDF_STATUS_E_FAILURE;
728 
729 	hif_notifier = &hif_handle->hif_recovery_notifier;
730 
731 	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
732 	hif_notifier->priv_data = hif_handle;
733 	return qdf_hang_event_register_notifier(hif_notifier);
734 }
735 
736 /**
737  * hif_unregister_recovery_notifier - Un-register hif recovery notifier
738  * @hif_handle: hif handle
739  *
740  * Return: status
741  */
742 static
743 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
744 {
745 	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
746 
747 	return qdf_hang_event_unregister_notifier(hif_notifier);
748 }
749 #else
750 static inline
751 QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
752 {
753 	return QDF_STATUS_SUCCESS;
754 }
755 
756 static inline
757 QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
758 {
759 	return QDF_STATUS_SUCCESS;
760 }
761 #endif
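/*
 * Sketch of the recovery-notifier flow using the functions above: on a
 * hang event, the qdf hang-event chain invokes hif_recovery_notifier_cb,
 * which appends bus info (hif_log_bus_info) and CE info
 * (hif_log_ce_info) into the hang data blob at notif_data->offset.
 * Registration happens in hif_enable() and unregistration in
 * hif_disable().
 */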
762 
763 #ifdef HIF_CPU_PERF_AFFINE_MASK
764 /**
765  * __hif_cpu_hotplug_notify() - CPU hotplug event handler
766  * @context: HIF context
767  * @cpu: CPU Id of the CPU generating the event
768  * @cpu_up: true if the CPU is online
769  *
770  * Return: None
771  */
772 static void __hif_cpu_hotplug_notify(void *context,
773 				     uint32_t cpu, bool cpu_up)
774 {
775 	struct hif_softc *scn = context;
776 
777 	if (!scn)
778 		return;
779 	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
780 		return;
781 
782 	if (cpu_up) {
783 		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
784 		hif_debug("Setting affinity for online CPU: %d", cpu);
785 	} else {
786 		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
787 	}
788 }
789 
790 /**
791  * hif_cpu_hotplug_notify - cpu core up/down notification
792  * handler
793  * @context: HIF context
794  * @cpu: CPU generating the event
795  * @cpu_up: true if the CPU is online
796  *
797  * Return: None
798  */
799 static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
800 {
801 	struct qdf_op_sync *op_sync;
802 
803 	if (qdf_op_protect(&op_sync))
804 		return;
805 
806 	__hif_cpu_hotplug_notify(context, cpu, cpu_up);
807 
808 	qdf_op_unprotect(op_sync);
809 }
810 
811 static void hif_cpu_online_cb(void *context, uint32_t cpu)
812 {
813 	hif_cpu_hotplug_notify(context, cpu, true);
814 }
815 
816 static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
817 {
818 	hif_cpu_hotplug_notify(context, cpu, false);
819 }
820 
821 static void hif_cpuhp_register(struct hif_softc *scn)
822 {
823 	if (!scn) {
824 		hif_info_high("cannot register hotplug notifiers");
825 		return;
826 	}
827 	qdf_cpuhp_register(&scn->cpuhp_event_handle,
828 			   scn,
829 			   hif_cpu_online_cb,
830 			   hif_cpu_before_offline_cb);
831 }
832 
833 static void hif_cpuhp_unregister(struct hif_softc *scn)
834 {
835 	if (!scn) {
836 		hif_info_high("cannot unregister hotplug notifiers");
837 		return;
838 	}
839 	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
840 }
841 
842 #else
843 static void hif_cpuhp_register(struct hif_softc *scn)
844 {
845 }
846 
847 static void hif_cpuhp_unregister(struct hif_softc *scn)
848 {
849 }
850 #endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
851 
852 #ifdef HIF_DETECTION_LATENCY_ENABLE
853 
854 void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
855 {
856 	qdf_time_t ce2_tasklet_sched_time =
857 		scn->latency_detect.ce2_tasklet_sched_time;
858 	qdf_time_t ce2_tasklet_exec_time =
859 		scn->latency_detect.ce2_tasklet_exec_time;
860 	qdf_time_t curr_jiffies = qdf_system_ticks();
861 	uint32_t detect_latency_threshold =
862 		scn->latency_detect.detect_latency_threshold;
863 	int cpu_id = qdf_get_cpu();
864 
865 	/* Two kinds of checks here:
866 	 * from_timer==true:  check if the tasklet has stalled
867 	 * from_timer==false: check if the tasklet executed late
868 	 */
869 
870 	if ((from_timer ?
871 	    qdf_system_time_after(ce2_tasklet_sched_time,
872 				  ce2_tasklet_exec_time) :
873 	    qdf_system_time_after(ce2_tasklet_exec_time,
874 				  ce2_tasklet_sched_time)) &&
875 	    qdf_system_time_after(
876 		curr_jiffies,
877 		ce2_tasklet_sched_time +
878 		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
879 		hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu, ce2_tasklet_exec_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
880 			from_timer, curr_jiffies, ce2_tasklet_sched_time,
881 			ce2_tasklet_exec_time, detect_latency_threshold,
882 			scn->latency_detect.detect_latency_timer_timeout,
883 			cpu_id, (void *)_RET_IP_);
884 		goto latency;
885 	}
886 	return;
887 
888 latency:
889 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
890 }
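/*
 * Worked example of the stall check above, assuming a 100 ms threshold
 * and illustrative jiffies values: with from_timer == true,
 * sched_time == 1000 and exec_time == 990 mean the tasklet was
 * scheduled after its last execution and is still pending; if
 * curr_jiffies also exceeds 1000 + msecs_to_ticks(100), the tasklet is
 * treated as stalled and self-recovery is triggered.
 */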
891 
892 void hif_credit_latency(struct hif_softc *scn, bool from_timer)
893 {
894 	qdf_time_t credit_request_time =
895 		scn->latency_detect.credit_request_time;
896 	qdf_time_t credit_report_time =
897 		scn->latency_detect.credit_report_time;
898 	qdf_time_t curr_jiffies = qdf_system_ticks();
899 	uint32_t detect_latency_threshold =
900 		scn->latency_detect.detect_latency_threshold;
901 	int cpu_id = qdf_get_cpu();
902 
903 	/* Two kinds of checks here:
904 	 * from_timer==true:  check if the credit report has stalled
905 	 * from_timer==false: check if the credit report came late
906 	 */
907 
908 	if ((from_timer ?
909 	    qdf_system_time_after(credit_request_time,
910 				  credit_report_time) :
911 	    qdf_system_time_after(credit_report_time,
912 				  credit_request_time)) &&
913 	    qdf_system_time_after(
914 		curr_jiffies,
915 		credit_request_time +
916 		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
917 		hif_err("credit report latency: from_timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
918 			from_timer, curr_jiffies, credit_request_time,
919 			credit_report_time, detect_latency_threshold,
920 			scn->latency_detect.detect_latency_timer_timeout,
921 			cpu_id, (void *)_RET_IP_);
922 		goto latency;
923 	}
924 	return;
925 
926 latency:
927 	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
928 }
929 
930 /**
931  * hif_check_detection_latency(): check for tasklet/credit latency
933  * @scn: hif context
934  * @from_timer: if called from timer handler
935  * @bitmap_type: indicate if check tasklet or credit
936  *
937  * Return: none
938  */
939 void hif_check_detection_latency(struct hif_softc *scn,
940 				 bool from_timer,
941 				 uint32_t bitmap_type)
942 {
943 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
944 		return;
945 
946 	if (!scn->latency_detect.enable_detection)
947 		return;
948 
949 	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
950 		hif_tasklet_latency(scn, from_timer);
951 
952 	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
953 		hif_credit_latency(scn, from_timer);
954 }
955 
956 static void hif_latency_detect_timeout_handler(void *arg)
957 {
958 	struct hif_softc *scn = (struct hif_softc *)arg;
959 	int next_cpu;
960 
961 	hif_check_detection_latency(scn, true,
962 				    BIT(HIF_DETECT_TASKLET) |
963 				    BIT(HIF_DETECT_CREDIT));
964 
965 	/* We need to make sure the timer starts on a different CPU
966 	 * so it can detect a tasklet schedule stall. There is still
967 	 * a chance that, after the timer has been started, the
968 	 * irq/tasklet fires on the same CPU; the tasklet will then
969 	 * execute before the softirq timer, and if that tasklet
970 	 * stalls, the timer can't detect it. We accept this as a
971 	 * limitation: if the tasklet stalls, another place will
972 	 * detect it, just a little later.
973 	 */
974 	next_cpu = cpumask_any_but(
975 			cpu_active_mask,
976 			scn->latency_detect.ce2_tasklet_sched_cpuid);
977 
978 	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
979 		hif_debug("start timer on local CPU");
980 		/* no other available CPU found, start on the local CPU */
981 		qdf_timer_mod(
982 			&scn->latency_detect.detect_latency_timer,
983 			scn->latency_detect.detect_latency_timer_timeout);
984 	} else {
985 		qdf_timer_start_on(
986 			&scn->latency_detect.detect_latency_timer,
987 			scn->latency_detect.detect_latency_timer_timeout,
988 			next_cpu);
989 	}
990 }
991 
992 static void hif_latency_detect_timer_init(struct hif_softc *scn)
993 {
994 	if (!scn) {
995 		hif_info_high("scn is null");
996 		return;
997 	}
998 
999 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1000 		return;
1001 
1002 	scn->latency_detect.detect_latency_timer_timeout =
1003 		DETECTION_TIMER_TIMEOUT;
1004 	scn->latency_detect.detect_latency_threshold =
1005 		DETECTION_LATENCY_THRESHOLD;
1006 
1007 	hif_info("timer timeout %u, latency threshold %u",
1008 		 scn->latency_detect.detect_latency_timer_timeout,
1009 		 scn->latency_detect.detect_latency_threshold);
1010 
1011 	scn->latency_detect.is_timer_started = false;
1012 
1013 	qdf_timer_init(NULL,
1014 		       &scn->latency_detect.detect_latency_timer,
1015 		       &hif_latency_detect_timeout_handler,
1016 		       scn,
1017 		       QDF_TIMER_TYPE_SW_SPIN);
1018 }
1019 
1020 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
1021 {
1022 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1023 		return;
1024 
1025 	hif_info("deinit timer");
1026 	qdf_timer_free(&scn->latency_detect.detect_latency_timer);
1027 }
1028 
1029 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
1030 {
1031 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1032 
1033 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1034 		return;
1035 
1036 	hif_debug_rl("start timer");
1037 	if (scn->latency_detect.is_timer_started) {
1038 		hif_info("timer has been started");
1039 		return;
1040 	}
1041 
1042 	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
1043 			scn->latency_detect.detect_latency_timer_timeout);
1044 	scn->latency_detect.is_timer_started = true;
1045 }
1046 
1047 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
1048 {
1049 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1050 
1051 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1052 		return;
1053 
1054 	hif_debug_rl("stop timer");
1055 
1056 	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
1057 	scn->latency_detect.is_timer_started = false;
1058 }
1059 
1060 void hif_latency_detect_credit_record_time(
1061 	enum hif_credit_exchange_type type,
1062 	struct hif_opaque_softc *hif_ctx)
1063 {
1064 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1065 
1066 	if (!scn) {
1067 		hif_err("scn is null");
1068 		return;
1069 	}
1070 
1071 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1072 		return;
1073 
1074 	if (HIF_REQUEST_CREDIT == type)
1075 		scn->latency_detect.credit_request_time = qdf_system_ticks();
1076 	else if (HIF_PROCESS_CREDIT_REPORT == type)
1077 		scn->latency_detect.credit_report_time = qdf_system_ticks();
1078 
1079 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
1080 }
1081 
1082 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
1083 {
1084 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1085 
1086 	if (!scn) {
1087 		hif_err("scn is null");
1088 		return;
1089 	}
1090 
1091 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1092 		return;
1093 
1094 	scn->latency_detect.enable_detection = value;
1095 }
1096 #else
1097 static void hif_latency_detect_timer_init(struct hif_softc *scn)
1098 {}
1099 
1100 static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
1101 {}
1102 #endif
1103 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1104 				  uint32_t mode,
1105 				  enum qdf_bus_type bus_type,
1106 				  struct hif_driver_state_callbacks *cbk,
1107 				  struct wlan_objmgr_psoc *psoc)
1108 {
1109 	struct hif_softc *scn;
1110 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1111 	int bus_context_size = hif_bus_get_context_size(bus_type);
1112 
1113 	if (bus_context_size == 0) {
1114 		hif_err("context size 0 not allowed");
1115 		return NULL;
1116 	}
1117 
1118 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
1119 	if (!scn)
1120 		return GET_HIF_OPAQUE_HDL(scn);
1121 
1122 	scn->qdf_dev = qdf_ctx;
1123 	scn->hif_con_param = mode;
1124 	qdf_atomic_init(&scn->active_tasklet_cnt);
1125 
1126 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
1127 	qdf_atomic_init(&scn->link_suspended);
1128 	qdf_atomic_init(&scn->tasklet_from_intr);
1129 	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
1130 	qdf_mem_copy(&scn->callbacks, cbk,
1131 		     sizeof(struct hif_driver_state_callbacks));
1132 	scn->bus_type  = bus_type;
1133 
1134 	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
1135 	hif_get_cfg_from_psoc(scn, psoc);
1136 
1137 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
1138 	status = hif_bus_open(scn, bus_type);
1139 	if (status != QDF_STATUS_SUCCESS) {
1140 		hif_err("hif_bus_open error = %d, bus_type = %d",
1141 			status, bus_type);
1142 		qdf_mem_free(scn);
1143 		scn = NULL;
1144 		goto out;
1145 	}
1146 
1147 	hif_rtpm_lock_init(scn);
1148 
1149 	hif_cpuhp_register(scn);
1150 	hif_latency_detect_timer_init(scn);
1151 
1152 out:
1153 	return GET_HIF_OPAQUE_HDL(scn);
1154 }
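/*
 * Typical open/enable lifecycle, as a hedged sketch (error handling
 * elided; dev/bdev/bid come from the bus probe and are placeholders
 * here):
 *
 *	struct hif_opaque_softc *hif_ctx;
 *
 *	hif_ctx = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (!hif_ctx)
 *		return QDF_STATUS_E_NOMEM;
 *	if (QDF_IS_STATUS_ERROR(hif_enable(hif_ctx, dev, bdev, bid,
 *					   QDF_BUS_TYPE_PCI,
 *					   HIF_ENABLE_TYPE_PROBE)))
 *		hif_close(hif_ctx);
 */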
1155 
1156 #ifdef ADRASTEA_RRI_ON_DDR
1157 /**
1158  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
1159  * @scn: hif context
1160  *
1161  * Return: none
1162  */
1163 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
1164 {
1165 	if (scn->vaddr_rri_on_ddr)
1166 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1167 					(CE_COUNT * sizeof(uint32_t)),
1168 					scn->vaddr_rri_on_ddr,
1169 					scn->paddr_rri_on_ddr, 0);
1170 	scn->vaddr_rri_on_ddr = NULL;
1171 }
1172 #endif
1173 
1174 /**
1175  * hif_close(): hif_close
1176  * @hif_ctx: hif_ctx
1177  *
1178  * Return: n/a
1179  */
1180 void hif_close(struct hif_opaque_softc *hif_ctx)
1181 {
1182 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1183 
1184 	if (!scn) {
1185 		hif_err("hif_opaque_softc is NULL");
1186 		return;
1187 	}
1188 
1189 	hif_latency_detect_timer_deinit(scn);
1190 
1191 	if (scn->athdiag_procfs_inited) {
1192 		athdiag_procfs_remove();
1193 		scn->athdiag_procfs_inited = false;
1194 	}
1195 
1196 	if (scn->target_info.hw_name) {
1197 		char *hw_name = scn->target_info.hw_name;
1198 
1199 		scn->target_info.hw_name = "ErrUnloading";
1200 		qdf_mem_free(hw_name);
1201 	}
1202 
1203 	hif_uninit_rri_on_ddr(scn);
1204 	hif_cleanup_static_buf_to_target(scn);
1205 	hif_cpuhp_unregister(scn);
1206 	hif_rtpm_lock_deinit(scn);
1207 
1208 	hif_bus_close(scn);
1209 
1210 	qdf_mem_free(scn);
1211 }
1212 
1213 /**
1214  * hif_get_num_active_grp_tasklets() - get the number of active
1215  *		datapath group tasklets pending to be completed.
1216  * @scn: HIF context
1217  *
1218  * Returns: the number of datapath group tasklets which are active
1219  */
1220 static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
1221 {
1222 	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
1223 }
1224 
1225 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1226 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1227 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1228 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1229 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1230 	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
1231 /**
1232  * hif_get_num_pending_work() - get the number of entries in
1233  *		the workqueue pending to be completed.
1234  * @scn: HIF context
1235  *
1236  * Returns: the number of pending register write work items
1237  */
1238 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1239 {
1240 	return hal_get_reg_write_pending_work(scn->hal_soc);
1241 }
1242 #else
1243 
1244 static inline int hif_get_num_pending_work(struct hif_softc *scn)
1245 {
1246 	return 0;
1247 }
1248 #endif
1249 
1250 QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
1251 {
1252 	uint32_t task_drain_wait_cnt = 0;
1253 	int tasklet = 0, grp_tasklet = 0, work = 0;
1254 
1255 	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
1256 	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1257 	       (work = hif_get_num_pending_work(scn))) {
1258 		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1259 			hif_err("pending tasklets %d grp tasklets %d work %d",
1260 				tasklet, grp_tasklet, work);
1261 			return QDF_STATUS_E_FAULT;
1262 		}
1263 		hif_info("waiting for tasklets %d grp tasklets %d work %d",
1264 			 tasklet, grp_tasklet, work);
1265 		msleep(10);
1266 	}
1267 
1268 	return QDF_STATUS_SUCCESS;
1269 }
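/*
 * hif_try_complete_tasks() polls in 10 ms steps, so the total wait is
 * bounded by roughly HIF_TASK_DRAIN_WAIT_CNT * 10 ms before it gives
 * up with QDF_STATUS_E_FAULT. Caller sketch, e.g. on a suspend path
 * (illustrative):
 *
 *	if (QDF_IS_STATUS_ERROR(hif_try_complete_tasks(scn)))
 *		return -EBUSY;	// tasklets/work still pending
 */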
1270 
1271 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1272 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1273 {
1274 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1275 	uint32_t work_drain_wait_cnt = 0;
1276 	uint32_t wait_cnt = 0;
1277 	int work = 0;
1278 
1279 	qdf_atomic_set(&scn->dp_ep_vote_access,
1280 		       HIF_EP_VOTE_ACCESS_DISABLE);
1281 	qdf_atomic_set(&scn->ep_vote_access,
1282 		       HIF_EP_VOTE_ACCESS_DISABLE);
1283 
1284 	while ((work = hif_get_num_pending_work(scn))) {
1285 		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1286 			qdf_atomic_set(&scn->dp_ep_vote_access,
1287 				       HIF_EP_VOTE_ACCESS_ENABLE);
1288 			qdf_atomic_set(&scn->ep_vote_access,
1289 				       HIF_EP_VOTE_ACCESS_ENABLE);
1290 			hif_err("timeout waiting for pending work %d", work);
1291 			return QDF_STATUS_E_FAULT;
1292 		}
1293 		qdf_sleep(10);
1294 	}
1295 
1296 	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
1297 		return QDF_STATUS_SUCCESS;
1298 
1299 	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1300 		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
1301 			hif_err("Release EP vote is not processed by FW");
1302 			return QDF_STATUS_E_FAULT;
1303 		}
1304 		qdf_sleep(5);
1305 	}
1306 
1307 	return QDF_STATUS_SUCCESS;
1308 }
1309 
1310 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1311 {
1312 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1313 	uint8_t vote_access;
1314 
1315 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1316 
1317 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1318 		hif_info("EP vote changed from %u to intermediate state",
1319 			 vote_access);
1320 
1321 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1322 		QDF_BUG(0);
1323 
1324 	qdf_atomic_set(&scn->ep_vote_access,
1325 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1326 }
1327 
1328 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1329 {
1330 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1331 
1332 	qdf_atomic_set(&scn->dp_ep_vote_access,
1333 		       HIF_EP_VOTE_ACCESS_ENABLE);
1334 	qdf_atomic_set(&scn->ep_vote_access,
1335 		       HIF_EP_VOTE_ACCESS_ENABLE);
1336 }
1337 
1338 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1339 			    uint8_t type, uint8_t access)
1340 {
1341 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1342 
1343 	if (type == HIF_EP_VOTE_DP_ACCESS)
1344 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1345 	else
1346 		qdf_atomic_set(&scn->ep_vote_access, access);
1347 }
1348 
1349 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1350 			       uint8_t type)
1351 {
1352 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1353 
1354 	if (type == HIF_EP_VOTE_DP_ACCESS)
1355 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1356 	else
1357 		return qdf_atomic_read(&scn->ep_vote_access);
1358 }
1359 #endif
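/*
 * Summary of the EP vote states used above: HIF_EP_VOTE_ACCESS_ENABLE
 * permits endpoint access, HIF_EP_VOTE_ACCESS_DISABLE blocks it while
 * pending register-write work drains and the PCI EP goes to sleep, and
 * HIF_EP_VOTE_INTERMEDIATE_ACCESS is set by
 * hif_set_ep_intermediate_vote_access() once that drain has completed.
 */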
1360 
1361 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1362 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1363 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1364 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1365 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1366 	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
1367 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1368 {
1369 	if (ce_srng_based(scn)) {
1370 		scn->hal_soc = hal_attach(
1371 					hif_softc_to_hif_opaque_softc(scn),
1372 					scn->qdf_dev);
1373 		if (!scn->hal_soc)
1374 			return QDF_STATUS_E_FAILURE;
1375 	}
1376 
1377 	return QDF_STATUS_SUCCESS;
1378 }
1379 
1380 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1381 {
1382 	if (ce_srng_based(scn)) {
1383 		hal_detach(scn->hal_soc);
1384 		scn->hal_soc = NULL;
1385 	}
1386 
1387 	return QDF_STATUS_SUCCESS;
1388 }
1389 #else
1390 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1391 {
1392 	return QDF_STATUS_SUCCESS;
1393 }
1394 
1395 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1396 {
1397 	return QDF_STATUS_SUCCESS;
1398 }
1399 #endif
1400 
1401 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1402 {
1403 	int ret;
1404 
1405 	switch (bus_type) {
1406 	case QDF_BUS_TYPE_IPCI:
1407 		ret = qdf_set_dma_coherent_mask(dev,
1408 						DMA_COHERENT_MASK_DEFAULT);
1409 		if (ret) {
1410 			hif_err("Failed to set dma mask error = %d", ret);
1411 			return ret;
1412 		}
1413 
1414 		break;
1415 	default:
1416 		/* Follow the existing sequence for other targets */
1417 		break;
1418 	}
1419 
1420 	return 0;
1421 }
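/*
 * Illustrative call site, assuming an IPCI probe path (pdev is a
 * placeholder): the bus layer calls this before any consistent-memory
 * allocation so DMA_COHERENT_MASK_DEFAULT is already in effect:
 *
 *	ret = hif_init_dma_mask(&pdev->dev, QDF_BUS_TYPE_IPCI);
 *	if (ret)
 *		return ret;
 */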
1422 
1423 /**
1424  * hif_enable(): hif_enable
1425  * @hif_ctx: hif_ctx
1426  * @dev: dev
1427  * @bdev: bus dev
1428  * @bid: bus ID
1429  * @bus_type: bus type
1430  * @type: enable type
1431  *
1432  * Return: QDF_STATUS
1433  */
1434 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1435 					  void *bdev,
1436 					  const struct hif_bus_id *bid,
1437 					  enum qdf_bus_type bus_type,
1438 					  enum hif_enable_type type)
1439 {
1440 	QDF_STATUS status;
1441 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1442 
1443 	if (!scn) {
1444 		hif_err("hif_ctx = NULL");
1445 		return QDF_STATUS_E_NULL_VALUE;
1446 	}
1447 
1448 	status = hif_enable_bus(scn, dev, bdev, bid, type);
1449 	if (status != QDF_STATUS_SUCCESS) {
1450 		hif_err("hif_enable_bus error = %d", status);
1451 		return status;
1452 	}
1453 
1454 	status = hif_hal_attach(scn);
1455 	if (status != QDF_STATUS_SUCCESS) {
1456 		hif_err("hal attach failed");
1457 		goto disable_bus;
1458 	}
1459 
1460 	if (hif_bus_configure(scn)) {
1461 		hif_err("Target probe failed");
1462 		status = QDF_STATUS_E_FAILURE;
1463 		goto hal_detach;
1464 	}
1465 
1466 	hif_ut_suspend_init(scn);
1467 	hif_register_recovery_notifier(scn);
1468 	hif_latency_detect_timer_start(hif_ctx);
1469 
1470 	/*
1471 	 * Flag to avoid potential unallocated memory access from the MSI
1472 	 * interrupt handler, which could get scheduled as soon as MSI
1473 	 * is enabled, i.e. to take care of the race where MSI is
1474 	 * enabled before the memory used by the interrupt handlers
1475 	 * is allocated.
1476 	 */
1477 
1478 	scn->hif_init_done = true;
1479 
1480 	hif_debug("OK");
1481 
1482 	return QDF_STATUS_SUCCESS;
1483 
1484 hal_detach:
1485 	hif_hal_detach(scn);
1486 disable_bus:
1487 	hif_disable_bus(scn);
1488 	return status;
1489 }
1490 
1491 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
1492 {
1493 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1494 
1495 	if (!scn)
1496 		return;
1497 
1498 	hif_set_enable_detection(hif_ctx, false);
1499 	hif_latency_detect_timer_stop(hif_ctx);
1500 
1501 	hif_unregister_recovery_notifier(scn);
1502 
1503 	hif_nointrs(scn);
1504 	if (scn->hif_init_done == false)
1505 		hif_shutdown_device(hif_ctx);
1506 	else
1507 		hif_stop(hif_ctx);
1508 
1509 	hif_hal_detach(scn);
1510 
1511 	hif_disable_bus(scn);
1512 
1513 	hif_wlan_disable(scn);
1514 
1515 	scn->notice_send = false;
1516 
1517 	hif_debug("X");
1518 }
1519 
1520 #ifdef CE_TASKLET_DEBUG_ENABLE
1521 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
1522 {
1523 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1524 
1525 	if (!scn)
1526 		return;
1527 
1528 	scn->ce_latency_stats = val;
1529 }
1530 #endif
1531 
1532 void hif_display_stats(struct hif_opaque_softc *hif_ctx)
1533 {
1534 	hif_display_bus_stats(hif_ctx);
1535 }
1536 
1537 qdf_export_symbol(hif_display_stats);
1538 
1539 void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
1540 {
1541 	hif_clear_bus_stats(hif_ctx);
1542 }
1543 
1544 /**
1545  * hif_crash_shutdown_dump_bus_register() - dump bus registers
1546  * @hif_ctx: hif_ctx
1547  *
1548  * Return: n/a
1549  */
1550 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
1551 
1552 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
1553 {
1554 	struct hif_opaque_softc *scn = hif_ctx;
1555 
1556 	if (hif_check_soc_status(scn))
1557 		return;
1558 
1559 	if (hif_dump_registers(scn))
1560 		hif_err("Failed to dump bus registers!");
1561 }
1562 
1563 /**
1564  * hif_crash_shutdown(): hif_crash_shutdown
1565  *
1566  * This function is called by the platform driver to dump CE registers
1567  *
1568  * @hif_ctx: hif_ctx
1569  *
1570  * Return: n/a
1571  */
1572 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
1573 {
1574 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1575 
1576 	if (!hif_ctx)
1577 		return;
1578 
1579 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
1580 		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
1581 		return;
1582 	}
1583 
1584 	if (TARGET_STATUS_RESET == scn->target_status) {
1585 		hif_warn("Target is already asserted, ignore!");
1586 		return;
1587 	}
1588 
1589 	if (hif_is_load_or_unload_in_progress(scn)) {
1590 		hif_err("Load/unload is in progress, ignore!");
1591 		return;
1592 	}
1593 
1594 	hif_crash_shutdown_dump_bus_register(hif_ctx);
1595 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
1596 
1597 	if (ol_copy_ramdump(hif_ctx))
1598 		goto out;
1599 
1600 	hif_info("RAM dump collection completed!");
1601 
1602 out:
1603 	return;
1604 }
1605 #else
1606 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
1607 {
1608 	hif_debug("Collecting target RAM dump disabled");
1609 }
1610 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
1611 
1612 #ifdef QCA_WIFI_3_0
1613 /**
1614  * hif_check_fw_reg(): hif_check_fw_reg
1615  * @scn: scn
1616  *
1617  * Return: int
1618  */
1619 int hif_check_fw_reg(struct hif_opaque_softc *scn)
1620 {
1621 	return 0;
1622 }
1623 #endif
1624 
1625 /**
1626  * hif_read_phy_mem_base(): hif_read_phy_mem_base
1627  * @scn: scn
1628  * @phy_mem_base: physical mem base
1629  *
1630  * Return: n/a
1631  */
1632 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
1633 {
1634 	*phy_mem_base = scn->mem_pa;
1635 }
1636 qdf_export_symbol(hif_read_phy_mem_base);
1637 
1638 /**
1639  * hif_get_device_type(): hif_get_device_type
1640  * @device_id: device_id
1641  * @revision_id: revision_id
1642  * @hif_type: returned hif_type
1643  * @target_type: returned target_type
1644  *
1645  * Return: int
1646  */
1647 int hif_get_device_type(uint32_t device_id,
1648 			uint32_t revision_id,
1649 			uint32_t *hif_type, uint32_t *target_type)
1650 {
1651 	int ret = 0;
1652 
1653 	switch (device_id) {
1654 	case ADRASTEA_DEVICE_ID_P2_E12:
1655 
1656 		*hif_type = HIF_TYPE_ADRASTEA;
1657 		*target_type = TARGET_TYPE_ADRASTEA;
1658 		break;
1659 
1660 	case AR9888_DEVICE_ID:
1661 		*hif_type = HIF_TYPE_AR9888;
1662 		*target_type = TARGET_TYPE_AR9888;
1663 		break;
1664 
1665 	case AR6320_DEVICE_ID:
1666 		switch (revision_id) {
1667 		case AR6320_FW_1_1:
1668 		case AR6320_FW_1_3:
1669 			*hif_type = HIF_TYPE_AR6320;
1670 			*target_type = TARGET_TYPE_AR6320;
1671 			break;
1672 
1673 		case AR6320_FW_2_0:
1674 		case AR6320_FW_3_0:
1675 		case AR6320_FW_3_2:
1676 			*hif_type = HIF_TYPE_AR6320V2;
1677 			*target_type = TARGET_TYPE_AR6320V2;
1678 			break;
1679 
1680 		default:
1681 			hif_err("dev_id = 0x%x, rev_id = 0x%x",
1682 				device_id, revision_id);
1683 			ret = -ENODEV;
1684 			goto end;
1685 		}
1686 		break;
1687 
1688 	case AR9887_DEVICE_ID:
1689 		*hif_type = HIF_TYPE_AR9888;
1690 		*target_type = TARGET_TYPE_AR9888;
1691 		hif_info(" *********** AR9887 **************");
1692 		break;
1693 
1694 	case QCA9984_DEVICE_ID:
1695 		*hif_type = HIF_TYPE_QCA9984;
1696 		*target_type = TARGET_TYPE_QCA9984;
1697 		hif_info(" *********** QCA9984 *************");
1698 		break;
1699 
1700 	case QCA9888_DEVICE_ID:
1701 		*hif_type = HIF_TYPE_QCA9888;
1702 		*target_type = TARGET_TYPE_QCA9888;
1703 		hif_info(" *********** QCA9888 *************");
1704 		break;
1705 
1706 	case AR900B_DEVICE_ID:
1707 		*hif_type = HIF_TYPE_AR900B;
1708 		*target_type = TARGET_TYPE_AR900B;
1709 		hif_info(" *********** AR900B *************");
1710 		break;
1711 
1712 	case QCA8074_DEVICE_ID:
1713 		*hif_type = HIF_TYPE_QCA8074;
1714 		*target_type = TARGET_TYPE_QCA8074;
1715 		hif_info(" *********** QCA8074  *************");
1716 		break;
1717 
1718 	case QCA6290_EMULATION_DEVICE_ID:
1719 	case QCA6290_DEVICE_ID:
1720 		*hif_type = HIF_TYPE_QCA6290;
1721 		*target_type = TARGET_TYPE_QCA6290;
1722 		hif_info(" *********** QCA6290 *************");
1723 		break;
1724 
1725 	case QCN9000_DEVICE_ID:
1726 		*hif_type = HIF_TYPE_QCN9000;
1727 		*target_type = TARGET_TYPE_QCN9000;
1728 		hif_info(" *********** QCN9000 *************");
1729 		break;
1730 
1731 	case QCN9224_DEVICE_ID:
1732 		*hif_type = HIF_TYPE_QCN9224;
1733 		*target_type = TARGET_TYPE_QCN9224;
1734 		hif_info(" *********** QCN9224 *************");
1735 		break;
1736 
1737 	case QCN6122_DEVICE_ID:
1738 		*hif_type = HIF_TYPE_QCN6122;
1739 		*target_type = TARGET_TYPE_QCN6122;
1740 		hif_info(" *********** QCN6122 *************");
1741 		break;
1742 
1743 	case QCN9160_DEVICE_ID:
1744 		*hif_type = HIF_TYPE_QCN9160;
1745 		*target_type = TARGET_TYPE_QCN9160;
1746 		hif_info(" *********** QCN9160 *************");
1747 		break;
1748 
1749 	case QCN7605_DEVICE_ID:
1750 	case QCN7605_COMPOSITE:
1751 	case QCN7605_STANDALONE:
1752 	case QCN7605_STANDALONE_V2:
1753 	case QCN7605_COMPOSITE_V2:
1754 		*hif_type = HIF_TYPE_QCN7605;
1755 		*target_type = TARGET_TYPE_QCN7605;
1756 		hif_info(" *********** QCN7605 *************");
1757 		break;
1758 
1759 	case QCA6390_DEVICE_ID:
1760 	case QCA6390_EMULATION_DEVICE_ID:
1761 		*hif_type = HIF_TYPE_QCA6390;
1762 		*target_type = TARGET_TYPE_QCA6390;
1763 		hif_info(" *********** QCA6390 *************");
1764 		break;
1765 
1766 	case QCA6490_DEVICE_ID:
1767 	case QCA6490_EMULATION_DEVICE_ID:
1768 		*hif_type = HIF_TYPE_QCA6490;
1769 		*target_type = TARGET_TYPE_QCA6490;
1770 		hif_info(" *********** QCA6490 *************");
1771 		break;
1772 
1773 	case QCA6750_DEVICE_ID:
1774 	case QCA6750_EMULATION_DEVICE_ID:
1775 		*hif_type = HIF_TYPE_QCA6750;
1776 		*target_type = TARGET_TYPE_QCA6750;
1777 		hif_info(" *********** QCA6750 *************");
1778 		break;
1779 
1780 	case KIWI_DEVICE_ID:
1781 		*hif_type = HIF_TYPE_KIWI;
1782 		*target_type = TARGET_TYPE_KIWI;
1783 		hif_info(" *********** KIWI *************");
1784 		break;
1785 
1786 	case MANGO_DEVICE_ID:
1787 		*hif_type = HIF_TYPE_MANGO;
1788 		*target_type = TARGET_TYPE_MANGO;
1789 		hif_info(" *********** MANGO *************");
1790 		break;
1791 
1792 	case PEACH_DEVICE_ID:
1793 		*hif_type = HIF_TYPE_PEACH;
1794 		*target_type = TARGET_TYPE_PEACH;
1795 		hif_info(" *********** PEACH *************");
1796 		break;
1797 
1798 	case QCA8074V2_DEVICE_ID:
1799 		*hif_type = HIF_TYPE_QCA8074V2;
1800 		*target_type = TARGET_TYPE_QCA8074V2;
1801 		hif_info(" *********** QCA8074V2 *************");
1802 		break;
1803 
1804 	case QCA6018_DEVICE_ID:
1805 	case RUMIM2M_DEVICE_ID_NODE0:
1806 	case RUMIM2M_DEVICE_ID_NODE1:
1807 	case RUMIM2M_DEVICE_ID_NODE2:
1808 	case RUMIM2M_DEVICE_ID_NODE3:
1809 	case RUMIM2M_DEVICE_ID_NODE4:
1810 	case RUMIM2M_DEVICE_ID_NODE5:
1811 		*hif_type = HIF_TYPE_QCA6018;
1812 		*target_type = TARGET_TYPE_QCA6018;
1813 		hif_info(" *********** QCA6018 *************");
1814 		break;
1815 
1816 	case QCA5018_DEVICE_ID:
1817 		*hif_type = HIF_TYPE_QCA5018;
1818 		*target_type = TARGET_TYPE_QCA5018;
1819 		hif_info(" *********** QCA5018 *************");
1820 		break;
1821 
1822 	case QCA5332_DEVICE_ID:
1823 		*hif_type = HIF_TYPE_QCA5332;
1824 		*target_type = TARGET_TYPE_QCA5332;
1825 		hif_info(" *********** QCA5332 *************");
1826 		break;
1827 
1828 	case QCA9574_DEVICE_ID:
1829 		*hif_type = HIF_TYPE_QCA9574;
1830 		*target_type = TARGET_TYPE_QCA9574;
1831 		hif_info(" *********** QCA9574 *************");
1832 		break;
1833 
1834 	default:
1835 		hif_err("Unsupported device ID = 0x%x!", device_id);
1836 		ret = -ENODEV;
1837 		break;
1838 	}
1839 
1840 	if (*target_type == TARGET_TYPE_UNKNOWN) {
1841 		hif_err("Unsupported target_type!");
1842 		ret = -ENODEV;
1843 	}
1844 end:
1845 	return ret;
1846 }
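/*
 * Illustrative use of the lookup above during probe (pci_dev and
 * revision_id are placeholders); note the caller initializes
 * target_type, since only recognized device IDs assign it:
 *
 *	uint32_t hif_type, target_type = TARGET_TYPE_UNKNOWN;
 *
 *	if (hif_get_device_type(pci_dev->device, revision_id,
 *				&hif_type, &target_type))
 *		return -ENODEV;	// unsupported device/revision
 */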
1847 
1848 /**
1849  * hif_get_bus_type() - return the bus type
1850  * @hif_hdl: HIF Context
1851  *
1852  * Return: enum qdf_bus_type
1853  */
1854 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
1855 {
1856 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
1857 
1858 	return scn->bus_type;
1859 }
1860 
1861 /*
1862  * Target info and ini parameters are global to the driver.
1863  * Hence these structures are exposed to all the modules in
1864  * the driver; they don't need to maintain multiple copies
1865  * of the same info. Instead, get the handle from hif and
1866  * modify them in hif.
1867  */
1868 
1869 /**
1870  * hif_get_ini_handle() - API to get hif_config_param handle
1871  * @hif_ctx: HIF Context
1872  *
1873  * Return: pointer to hif_config_info
1874  */
1875 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
1876 {
1877 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1878 
1879 	return &sc->hif_config;
1880 }
1881 
1882 /**
1883  * hif_get_target_info_handle() - API to get hif_target_info handle
1884  * @hif_ctx: HIF context
1885  *
1886  * Return: Pointer to hif_target_info
1887  */
1888 struct hif_target_info *hif_get_target_info_handle(
1889 					struct hif_opaque_softc *hif_ctx)
1890 {
1891 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
1892 
1893 	return &sc->target_info;
1894 
1895 }
1896 qdf_export_symbol(hif_get_target_info_handle);
1897 
1898 #ifdef RECEIVE_OFFLOAD
1899 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1900 				 void (offld_flush_handler)(void *))
1901 {
1902 	if (hif_napi_enabled(scn, -1))
1903 		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
1904 	else
1905 		hif_err("NAPI not enabled");
1906 }
1907 qdf_export_symbol(hif_offld_flush_cb_register);
1908 
1909 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
1910 {
1911 	if (hif_napi_enabled(scn, -1))
1912 		hif_napi_rx_offld_flush_cb_deregister(scn);
1913 	else
1914 		hif_err("NAPI not enabled");
1915 }
1916 qdf_export_symbol(hif_offld_flush_cb_deregister);
1917 
1918 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
1919 {
1920 	if (hif_napi_enabled(hif_hdl, -1))
1921 		return NAPI_PIPE2ID(ctx_id);
1922 	else
1923 		return ctx_id;
1924 }
1925 #else /* RECEIVE_OFFLOAD */
1926 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
1927 {
1928 	return 0;
1929 }
1930 qdf_export_symbol(hif_get_rx_ctx_id);
1931 #endif /* RECEIVE_OFFLOAD */
1932 
1933 #if defined(FEATURE_LRO)
1934 
1935 /**
1936  * hif_get_lro_info - Returns LRO instance for instance ID
1937  * @ctx_id: LRO instance ID
1938  * @hif_hdl: HIF Context
1939  *
1940  * Return: Pointer to LRO instance.
1941  */
1942 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
1943 {
1944 	void *data;
1945 
1946 	if (hif_napi_enabled(hif_hdl, -1))
1947 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
1948 	else
1949 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
1950 
1951 	return data;
1952 }
1953 #endif
1954 
1955 /**
1956  * hif_get_target_status - API to get target status
1957  * @hif_ctx: HIF Context
1958  *
1959  * Return: enum hif_target_status
1960  */
1961 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
1962 {
1963 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1964 
1965 	return scn->target_status;
1966 }
1967 qdf_export_symbol(hif_get_target_status);
1968 
1969 /**
1970  * hif_set_target_status() - API to set target status
1971  * @hif_ctx: HIF Context
1972  * @status: Target Status
1973  *
1974  * Return: void
1975  */
1976 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1977 			   hif_target_status status)
1978 {
1979 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1980 
1981 	scn->target_status = status;
1982 }
1983 
1984 /**
1985  * hif_init_ini_config() - API to initialize HIF configuration parameters
1986  * @hif_ctx: HIF Context
1987  * @cfg: HIF Configuration
1988  *
1989  * Return: void
1990  */
1991 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1992 			 struct hif_config_info *cfg)
1993 {
1994 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1995 
1996 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1997 }
1998 
1999 /**
2000  * hif_get_conparam() - API to get driver mode in HIF
2001  * @scn: HIF Context
2002  *
2003  * Return: driver mode of operation
2004  */
2005 uint32_t hif_get_conparam(struct hif_softc *scn)
2006 {
2007 	if (!scn)
2008 		return 0;
2009 
2010 	return scn->hif_con_param;
2011 }

/**
 * hif_get_callbacks_handle() - API to get callbacks Handle
 * @scn: HIF Context
 *
 * Return: pointer to HIF Callbacks
 */
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn)
{
	return &scn->callbacks;
}

/**
 * hif_is_driver_unloading() - API to query upper layers if driver is unloading
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_driver_unloading(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_driver_unloading)
		return cbk->is_driver_unloading(cbk->context);

	return false;
}

/**
 * hif_is_load_or_unload_in_progress() - API to query upper layers if
 * load/unload is in progress
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_load_unload_in_progress)
		return cbk->is_load_unload_in_progress(cbk->context);

	return false;
}

/**
 * hif_is_recovery_in_progress() - API to query upper layers if recovery is in
 * progress
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_recovery_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_recovery_in_progress)
		return cbk->is_recovery_in_progress(cbk->context);

	return false;
}
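
/*
 * Illustrative sketch (hypothetical helper): long-running service loops
 * typically poll these state queries and bail out early so they do not
 * touch hardware while the upper layers tear the driver down.
 *
 *	bool example_can_access_target(struct hif_softc *scn)
 *	{
 *		if (hif_is_driver_unloading(scn) ||
 *		    hif_is_load_or_unload_in_progress(scn) ||
 *		    hif_is_recovery_in_progress(scn))
 *			return false;
 *
 *		return true;
 *	}
 */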

#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
    defined(HIF_IPCI)

/**
 * hif_update_pipe_callback() - API to register pipe specific callbacks
 * @osc: Opaque softc
 * @pipeid: pipe id
 * @callbacks: callbacks to register
 *
 * Return: void
 */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	QDF_BUG(pipeid < CE_COUNT_MAX);

	hif_debug("pipeid: %d", pipeid);

	pipe_info = &hif_state->pipe_info[pipeid];

	qdf_mem_copy(&pipe_info->pipe_callbacks,
		     callbacks, sizeof(pipe_info->pipe_callbacks));
}
qdf_export_symbol(hif_update_pipe_callback);
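
/*
 * Illustrative sketch (hypothetical caller): an upper layer fills a
 * hif_msg_callbacks structure (fields as declared in hif.h) and installs
 * it on the pipe it owns before enabling traffic.
 *
 *	void example_install_pipe_cbs(struct hif_opaque_softc *osc,
 *				      uint8_t pipe,
 *				      struct hif_msg_callbacks *my_cbs)
 *	{
 *		// my_cbs is fully initialized by the caller; HIF copies it,
 *		// so it may live on the caller's stack
 *		hif_update_pipe_callback(osc, pipe, my_cbs);
 *	}
 */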

/**
 * hif_is_target_ready() - API to query if target is in ready state
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_target_ready(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_target_ready)
		return cbk->is_target_ready(cbk->context);
	/*
	 * If the callback is not registered there is no way to determine
	 * whether the target is ready; in that case assume it is and
	 * return true.
	 */
	return true;
}
qdf_export_symbol(hif_is_target_ready);

int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->get_bandwidth_level)
		return cbk->get_bandwidth_level(cbk->context);

	return 0;
}
qdf_export_symbol(hif_get_bandwidth_level);

#ifdef DP_MEM_PRE_ALLOC
void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
					 qdf_size_t size,
					 qdf_dma_addr_t *paddr,
					 uint32_t ring_type,
					 uint8_t *is_mem_prealloc)
{
	void *vaddr = NULL;
	struct hif_driver_state_callbacks *cbk =
				hif_get_callbacks_handle(scn);

	*is_mem_prealloc = false;
	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
								   paddr,
								   ring_type);
		if (vaddr) {
			*is_mem_prealloc = true;
			goto end;
		}
	}

	/* Fall back to a dynamic DMA-coherent allocation when no
	 * pre-allocated buffer is available for this ring type.
	 */
	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
					 scn->qdf_dev->dev,
					 size,
					 paddr);
end:
	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
		(void *)*paddr, (int)size, ring_type);

	return vaddr;
}

void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
				       qdf_size_t size,
				       void *vaddr,
				       qdf_dma_addr_t paddr,
				       qdf_dma_context_t memctx,
				       uint8_t is_mem_prealloc)
{
	struct hif_driver_state_callbacks *cbk =
				hif_get_callbacks_handle(scn);

	if (is_mem_prealloc) {
		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
		} else {
			dp_warn("prealloc_put_consistent_mem_unaligned is NULL");
			QDF_BUG(0);
		}
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					size, vaddr, paddr, memctx);
	}
}
#endif
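
/*
 * Illustrative sketch (hypothetical ring setup/teardown): callers keep the
 * is_mem_prealloc flag from the alloc call and pass it back on free so the
 * buffer is returned to the right pool. A zero memctx is assumed here.
 *
 *	void *example_ring_base_alloc(struct hif_softc *scn, qdf_size_t bytes,
 *				      qdf_dma_addr_t *pa, uint32_t ring_type,
 *				      uint8_t *prealloc)
 *	{
 *		return hif_mem_alloc_consistent_unaligned(scn, bytes, pa,
 *							  ring_type, prealloc);
 *	}
 *
 *	void example_ring_base_free(struct hif_softc *scn, qdf_size_t bytes,
 *				    void *va, qdf_dma_addr_t pa,
 *				    uint8_t prealloc)
 *	{
 *		hif_mem_free_consistent_unaligned(scn, bytes, va, pa,
 *						  0, prealloc);
 *	}
 */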

/**
 * hif_batch_send() - API to access hif specific function
 * ce_batch_send.
 * @osc: HIF Context
 * @msdu: list of msdus to be sent
 * @transfer_id: transfer id
 * @len: downloaded length
 * @sendhead: flag passed through to ce_batch_send
 *
 * Return: list of msdus not sent
 */
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return NULL;

	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			len, sendhead);
}
qdf_export_symbol(hif_batch_send);

/**
 * hif_update_tx_ring() - API to access hif specific function
 * ce_update_tx_ring.
 * @osc: HIF Context
 * @num_htt_cmpls: number of htt completions received.
 *
 * Return: void
 */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
}
qdf_export_symbol(hif_update_tx_ring);

/**
 * hif_send_single() - API to access hif specific function
 * ce_send_single.
 * @osc: HIF Context
 * @msdu: msdu to be sent
 * @transfer_id: transfer id
 * @len: downloaded length
 *
 * Return: msdu sent status
 */
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			   uint32_t transfer_id, u_int32_t len)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return QDF_STATUS_E_NULL_VALUE;

	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			len);
}
qdf_export_symbol(hif_send_single);
#endif
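
/*
 * Illustrative sketch (hypothetical download path): a caller that already
 * owns a mapped qdf_nbuf_t checks the status and keeps ownership of the
 * buffer on failure.
 *
 *	QDF_STATUS example_download(struct hif_opaque_softc *osc,
 *				    qdf_nbuf_t nbuf, uint32_t xfer_id,
 *				    uint32_t len)
 *	{
 *		QDF_STATUS status = hif_send_single(osc, nbuf, xfer_id, len);
 *
 *		if (QDF_IS_STATUS_ERROR(status))
 *			;	// requeue or free nbuf; HIF did not take it
 *
 *		return status;
 *	}
 */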

/**
 * hif_reg_write() - API to access hif specific function
 * hif_write32_mb.
 * @hif_ctx: HIF Context
 * @offset: offset on which value has to be written
 * @value: value to be written
 *
 * Return: None
 */
void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		uint32_t value)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_write32_mb(scn, scn->mem + offset, value);
}
qdf_export_symbol(hif_reg_write);

/**
 * hif_reg_read() - API to access hif specific function
 * hif_read32_mb.
 * @hif_ctx: HIF Context
 * @offset: offset from which value has to be read
 *
 * Return: Read value
 */
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return hif_read32_mb(scn, scn->mem + offset);
}
qdf_export_symbol(hif_reg_read);
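
/*
 * Illustrative sketch (hypothetical register offset): a read-modify-write
 * of a device register goes through these mb-ordered accessors rather than
 * raw pointer dereferences. EXAMPLE_CTRL_OFFSET and EXAMPLE_ENABLE_BIT are
 * placeholders, not real register definitions.
 *
 *	void example_set_ctrl_bit(struct hif_opaque_softc *hif_ctx)
 *	{
 *		uint32_t val = hif_reg_read(hif_ctx, EXAMPLE_CTRL_OFFSET);
 *
 *		hif_reg_write(hif_ctx, EXAMPLE_CTRL_OFFSET,
 *			      val | EXAMPLE_ENABLE_BIT);
 *	}
 */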

/**
 * hif_ramdump_handler() - generic ramdump handler
 * @scn: struct hif_opaque_softc
 *
 * Return: None
 */
void hif_ramdump_handler(struct hif_opaque_softc *scn)
{
	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
		hif_usb_ramdump_handler(scn);
}

hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->wake_irq_type;
}

irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
{
	struct hif_softc *scn = context;

	hif_info("wake interrupt received on irq %d", irq);

	hif_rtpm_set_monitor_wake_intr(0);
	hif_rtpm_request_resume();

	if (scn->initial_wakeup_cb)
		scn->initial_wakeup_cb(scn->initial_wakeup_priv);

	if (hif_is_ut_suspended(scn))
		hif_ut_fw_resume(scn);

	qdf_pm_system_wakeup();

	return IRQ_HANDLED;
}

void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->initial_wakeup_cb = callback;
	scn->initial_wakeup_priv = priv;
}
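
/*
 * Illustrative sketch (hypothetical client): a platform layer registers a
 * callback that hif_wake_interrupt_handler() invokes whenever the wake
 * interrupt fires; priv is handed back to the callback unchanged.
 *
 *	static void example_on_wake(void *priv)
 *	{
 *		// kick the resume state machine owned by priv
 *	}
 *
 *	void example_register_wake_cb(struct hif_opaque_softc *hif_ctx,
 *				      void *my_ctx)
 *	{
 *		hif_set_initial_wakeup_cb(hif_ctx, example_on_wake, my_ctx);
 *	}
 */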

void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	/* stored pre-multiplied by 1000 to match the finer-grained time
	 * source used by the CE service loop when checking for yield
	 */
	hif_ctx->ce_service_max_yield_time =
		ce_service_max_yield_time * 1000;
}

unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return hif_ctx->ce_service_max_yield_time;
}

void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	/* clamp zero or out-of-range values to the MSG_FLUSH_NUM default */
	if (ce_service_max_rx_ind_flush == 0 ||
	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
	else
		hif_ctx->ce_service_max_rx_ind_flush =
						ce_service_max_rx_ind_flush;
}
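
/*
 * Illustrative sketch (hypothetical INI plumbing): both knobs are usually
 * applied together right after the configuration is parsed; the parameter
 * names are placeholders.
 *
 *	void example_apply_ce_tuning(struct hif_opaque_softc *hif,
 *				     uint32_t yield_time_cfg,
 *				     uint8_t rx_ind_flush_cfg)
 *	{
 *		hif_set_ce_service_max_yield_time(hif, yield_time_cfg);
 *		hif_set_ce_service_max_rx_ind_flush(hif, rx_ind_flush_cfg);
 *	}
 */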

#ifdef SYSTEM_PM_CHECK
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
}

int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return qdf_atomic_read(&hif_ctx->sys_pm_state);
}

int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
	int32_t sys_pm_state;

	if (!hif_ctx) {
		hif_err("hif context is null");
		return -EFAULT;
	}

	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
		hif_info("Triggering system wakeup");
		qdf_pm_system_wakeup();
		return -EAGAIN;
	}

	return 0;
}
#endif
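
/*
 * Illustrative sketch (hypothetical caller): bus accessors consult the PM
 * state check before touching the device; -EAGAIN means the bus is
 * suspending/suspended and a system wakeup has already been triggered.
 *
 *	int example_guarded_access(struct hif_opaque_softc *hif)
 *	{
 *		int ret = hif_system_pm_state_check(hif);
 *
 *		if (ret == -EAGAIN)
 *			return ret;	// retry after the system resumes
 *		if (ret)
 *			return ret;	// hard failure (e.g. -EFAULT)
 *
 *		// safe to touch the bus here
 *		return 0;
 *	}
 */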