xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.h (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * NB: Inappropriate references to "HTC" are used in this (and other)
22  * HIF implementations.  HTC is typically the calling layer, but it
23  * theoretically could be some alternative.
24  */
25 
26 /*
27  * This holds all state needed to process a pending send/recv interrupt.
28  * The information is saved here as soon as the interrupt occurs (thus
29  * allowing the underlying CE to re-use the ring descriptor). The
30  * information here is eventually processed by a completion processing
31  * thread.
32  */
33 
34 #ifndef __HIF_MAIN_H__
35 #define __HIF_MAIN_H__
36 
37 #include <qdf_atomic.h>         /* qdf_atomic_read */
38 #include "qdf_lock.h"
39 #include "cepci.h"
40 #include "hif.h"
41 #include "multibus.h"
42 #include "hif_unit_test_suspend_i.h"
43 #ifdef HIF_CE_LOG_INFO
44 #include "qdf_notifier.h"
45 #endif
46 
47 #define HIF_MIN_SLEEP_INACTIVITY_TIME_MS     50
48 #define HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS 60
49 
50 #define HIF_MAX_BUDGET 0xFFFF
51 
/*
 * HIF_STATS_INC() - add _delta to the named counter in _handle->stats.
 * Wrapped in do/while(0) so the macro expands to exactly one statement
 * and stays safe inside un-braced if/else; all arguments parenthesized
 * against operator-precedence surprises.
 */
#define HIF_STATS_INC(_handle, _field, _delta) \
	do { \
		(_handle)->stats._field += (_delta); \
	} while (0)
56 
57 /*
58  * This macro implementation is exposed for efficiency only.
59  * The implementation may change and callers should
60  * consider the targid to be a completely opaque handle.
61  */
62 #define TARGID_TO_PCI_ADDR(targid) (*((A_target_id_t *)(targid)))
63 
64 #ifdef QCA_WIFI_3_0
65 #define DISABLE_L1SS_STATES 1
66 #endif
67 
68 #define MAX_NUM_OF_RECEIVES HIF_NAPI_MAX_RECEIVES
69 
70 #ifdef QCA_WIFI_3_0_ADRASTEA
71 #define ADRASTEA_BU 1
72 #else
73 #define ADRASTEA_BU 0
74 #endif
75 
76 #ifdef QCA_WIFI_3_0
77 #define HAS_FW_INDICATOR 0
78 #else
79 #define HAS_FW_INDICATOR 1
80 #endif
81 
82 
83 #define AR9888_DEVICE_ID (0x003c)
84 #define AR6320_DEVICE_ID (0x003e)
85 #define AR6320_FW_1_1  (0x11)
86 #define AR6320_FW_1_3  (0x13)
87 #define AR6320_FW_2_0  (0x20)
88 #define AR6320_FW_3_0  (0x30)
89 #define AR6320_FW_3_2  (0x32)
90 #define QCA6290_EMULATION_DEVICE_ID (0xabcd)
91 #define QCA6290_DEVICE_ID (0x1100)
92 #define QCN9000_DEVICE_ID (0x1104)
93 #define QCN9224_DEVICE_ID (0x1109)
94 #define QCN6122_DEVICE_ID (0xFFFB)
95 #define QCN9160_DEVICE_ID (0xFFF8)
96 #define QCA6390_EMULATION_DEVICE_ID (0x0108)
97 #define QCA6390_DEVICE_ID (0x1101)
98 /* TODO: change IDs for HastingsPrime */
99 #define QCA6490_EMULATION_DEVICE_ID (0x010a)
100 #define QCA6490_DEVICE_ID (0x1103)
101 #define MANGO_DEVICE_ID (0x110a)
102 #define PEACH_DEVICE_ID (0x110e)
103 
104 /* TODO: change IDs for Moselle */
105 #define QCA6750_EMULATION_DEVICE_ID (0x010c)
106 #define QCA6750_DEVICE_ID (0x1105)
107 
108 /* TODO: change IDs for Hamilton */
109 #define KIWI_DEVICE_ID (0x1107)
110 
111 #define ADRASTEA_DEVICE_ID_P2_E12 (0x7021)
112 #define AR9887_DEVICE_ID    (0x0050)
113 #define AR900B_DEVICE_ID    (0x0040)
114 #define QCA9984_DEVICE_ID   (0x0046)
115 #define QCA9888_DEVICE_ID   (0x0056)
116 #define QCA8074_DEVICE_ID   (0xffff) /* Todo: replace this with
117 					actual number once available.
118 					currently defining this to 0xffff for
119 					emulation purpose */
120 #define QCA8074V2_DEVICE_ID (0xfffe) /* Todo: replace this with actual number */
121 #define QCA6018_DEVICE_ID (0xfffd) /* Todo: replace this with actual number */
122 #define QCA5018_DEVICE_ID (0xfffc) /* Todo: replace this with actual number */
123 #define QCA9574_DEVICE_ID (0xfffa)
124 #define QCA5332_DEVICE_ID (0xfff9)
125 /* Genoa */
126 #define QCN7605_DEVICE_ID  (0x1102) /* Genoa PCIe device ID*/
127 #define QCN7605_COMPOSITE  (0x9901)
128 #define QCN7605_STANDALONE  (0x9900)
129 #define QCN7605_STANDALONE_V2  (0x9902)
130 #define QCN7605_COMPOSITE_V2  (0x9903)
131 
132 #define RUMIM2M_DEVICE_ID_NODE0	0xabc0
133 #define RUMIM2M_DEVICE_ID_NODE1	0xabc1
134 #define RUMIM2M_DEVICE_ID_NODE2	0xabc2
135 #define RUMIM2M_DEVICE_ID_NODE3	0xabc3
136 #define RUMIM2M_DEVICE_ID_NODE4	0xaa10
137 #define RUMIM2M_DEVICE_ID_NODE5	0xaa11
138 
139 #define HIF_GET_PCI_SOFTC(scn) ((struct hif_pci_softc *)scn)
140 #define HIF_GET_IPCI_SOFTC(scn) ((struct hif_ipci_softc *)scn)
141 #define HIF_GET_CE_STATE(scn) ((struct HIF_CE_state *)scn)
142 #define HIF_GET_SDIO_SOFTC(scn) ((struct hif_sdio_softc *)scn)
143 #define HIF_GET_USB_SOFTC(scn) ((struct hif_usb_softc *)scn)
144 #define HIF_GET_USB_DEVICE(scn) ((struct HIF_DEVICE_USB *)scn)
145 #define HIF_GET_SOFTC(scn) ((struct hif_softc *)scn)
146 #define GET_HIF_OPAQUE_HDL(scn) ((struct hif_opaque_softc *)scn)
147 
148 #ifdef QCA_WIFI_QCN9224
149 #define NUM_CE_AVAILABLE 16
150 #else
151 #define NUM_CE_AVAILABLE 12
152 #endif
153 /* Add 1 here to store default configuration in index 0 */
154 #define NUM_CE_CONTEXT (NUM_CE_AVAILABLE + 1)
155 
156 #define CE_INTERRUPT_IDX(x) x
157 
/**
 * struct ce_int_assignment - per-copy-engine MSI interrupt index table
 * @msi_idx: MSI vector index assigned to each copy engine
 */
struct ce_int_assignment {
	uint8_t msi_idx[NUM_CE_AVAILABLE];
};
161 
/**
 * struct hif_ce_stats - pipe/copy-engine error counters held in hif_softc
 * @hif_pipe_no_resrc_count: times a pipe had no resources available
 * @ce_ring_delta_fail_count: times a CE ring delta computation failed
 */
struct hif_ce_stats {
	int hif_pipe_no_resrc_count;
	int ce_ring_delta_fail_count;
};
166 
#ifdef HIF_DETECTION_LATENCY_ENABLE
/**
 * struct hif_latency_detect - bookkeeping for CE2/credit latency detection
 * @detect_latency_timer: periodic timer driving the detection checks
 * @detect_latency_timer_timeout: timeout applied to @detect_latency_timer
 * @is_timer_started: whether @detect_latency_timer is currently running
 * @enable_detection: master enable for the latency-detection logic
 * @detect_latency_threshold: threshold when stall happens
 * @ce2_tasklet_sched_cpuid: CPU on which the CE2 tasklet was last scheduled
 * @ce2_tasklet_sched_time: timestamp when the CE2 tasklet was scheduled
 * @ce2_tasklet_exec_time: timestamp when the CE2 tasklet last executed
 * @credit_request_time: timestamp of the last credit request
 * @credit_report_time: timestamp of the last credit report
 */
struct hif_latency_detect {
	qdf_timer_t detect_latency_timer;
	uint32_t detect_latency_timer_timeout;
	bool is_timer_started;
	bool enable_detection;
	/* threshold when stall happens */
	uint32_t detect_latency_threshold;
	int ce2_tasklet_sched_cpuid;
	qdf_time_t ce2_tasklet_sched_time;
	qdf_time_t ce2_tasklet_exec_time;
	qdf_time_t credit_request_time;
	qdf_time_t credit_report_time;
};
#endif
182 
183 /*
184  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
185  * for defined here
186  */
187 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
188 
189 #define HIF_CE_MAX_LATEST_HIST 2
190 
/**
 * struct latest_evt_history - timing snapshot of the most recent irq /
 *	bottom-half activity for one CE (see hif_record_latest_evt())
 * @irq_entry_ts: timestamp at irq handler entry
 * @bh_entry_ts: timestamp at bottom-half entry
 * @bh_resched_ts: timestamp when the bottom half was rescheduled
 * @bh_exit_ts: timestamp at bottom-half exit
 * @bh_work_ts: timestamp when bottom-half work was performed
 * @cpu_id: CPU on which the event was recorded
 * @ring_hp: ring head pointer captured with the event
 * @ring_tp: ring tail pointer captured with the event
 */
struct latest_evt_history {
	uint64_t irq_entry_ts;
	uint64_t bh_entry_ts;
	uint64_t bh_resched_ts;
	uint64_t bh_exit_ts;
	uint64_t bh_work_ts;
	int cpu_id;
	uint32_t ring_hp;
	uint32_t ring_tp;
};
201 
/**
 * struct ce_desc_hist - per-copy-engine descriptor history for debug builds
 * @history_index: next write index into each CE's event history
 * @ce_id_hist_map: maps a CE id to its history slot
 * @enable: per-CE enable for descriptor event recording
 * @data_enable: per-CE enable for payload data capture
 * @ce_dbg_datamem_lock: protects each CE's debug data buffers
 * @hist_index: last recorded history index — TODO confirm exact semantics
 * @hist_id: last recorded history CE id — TODO confirm exact semantics
 * @hist_ev: per-CE pointer to the allocated event-history storage
 * @latest_evt: most recent irq/bottom-half timing snapshots
 */
struct ce_desc_hist {
	qdf_atomic_t history_index[CE_COUNT_MAX];
	uint8_t ce_id_hist_map[CE_COUNT_MAX];
	bool enable[CE_COUNT_MAX];
	bool data_enable[CE_COUNT_MAX];
	qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX];
	uint32_t hist_index;
	uint32_t hist_id;
	void *hist_ev[CE_COUNT_MAX];
	struct latest_evt_history latest_evt[HIF_CE_MAX_LATEST_HIST];
};
213 
214 void hif_record_latest_evt(struct ce_desc_hist *ce_hist,
215 			   uint8_t type,
216 			   int ce_id, uint64_t time,
217 			   uint32_t hp, uint32_t tp);
218 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)*/
219 
/**
 * struct hif_cfg - store ini config parameters in hif layer
 * @ce_status_ring_timer_threshold: ce status ring timer threshold
 * @ce_status_ring_batch_count_threshold: ce status ring batch count threshold
 * @disable_wake_irq: disable wake irq
 */
struct hif_cfg {
	uint16_t ce_status_ring_timer_threshold;
	uint8_t ce_status_ring_batch_count_threshold;
	bool disable_wake_irq;
};
231 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * struct hif_umac_reset_ctx - UMAC HW reset context at HIF layer
 * @intr_tq: Tasklet structure for the UMAC-reset interrupt
 * @cb_handler: Callback handler (presumably run from @intr_tq — confirm
 *	against the hif_main.c implementation)
 * @cb_ctx: Argument to be passed to @cb_handler
 * @os_irq: Interrupt number for this IRQ
 * @irq_configured: Whether the IRQ has been configured
 */
struct hif_umac_reset_ctx {
	struct tasklet_struct intr_tq;
	int (*cb_handler)(void *cb_ctx);
	void *cb_ctx;
	uint32_t os_irq;
	bool irq_configured;
};
#endif
249 
/**
 * struct hif_softc - common HIF state shared by every bus implementation
 *
 * The opaque handle handed to clients (struct hif_opaque_softc) is the
 * first member, which is what makes the HIF_GET_*_SOFTC() /
 * GET_HIF_OPAQUE_HDL() casts above legal; bus-specific softc structures
 * (PCI/IPCI/SDIO/USB) embed this one in turn.  Many fields are compiled
 * in only when the corresponding feature flag is defined.
 */
struct hif_softc {
	/* must stay the first member: clients cast between the two types */
	struct hif_opaque_softc osc;
	struct hif_config_info hif_config;
	struct hif_target_info target_info;
	/* register mappings: main BAR, CE, CMEM and PMM windows */
	void __iomem *mem;
	void __iomem *mem_ce;
	void __iomem *mem_cmem;
	void __iomem *mem_pmm_base;
	enum qdf_bus_type bus_type;
	/* per-bus function table (see multibus.h) */
	struct hif_bus_ops bus_ops;
	void *ce_id_to_state[CE_COUNT_MAX];
	qdf_device_t qdf_dev;
	bool hif_init_done;
	bool request_irq_done;
	bool ext_grp_irq_configured;
	bool free_irq_done;
	uint8_t ce_latency_stats;
	/* Packet statistics */
	struct hif_ce_stats pkt_stats;
	enum hif_target_status target_status;
	/* mask of event IDs recorded into the DP event history;
	 * set via hif_set_event_hist_mask()
	 */
	uint64_t event_enable_mask;

	/* register definition tables for the attached target */
	struct targetdef_s *targetdef;
	struct ce_reg_def *target_ce_def;
	struct hostdef_s *hostdef;
	struct host_shadow_regs_s *host_shadow_regs;

	bool recovery;
	bool notice_send;
	bool per_ce_irq;
	uint32_t ce_irq_summary;
	/* No of copy engines supported */
	unsigned int ce_count;
	/* CE-to-MSI-vector assignment table in use */
	struct ce_int_assignment *int_assignment;
	atomic_t active_tasklet_cnt;
	atomic_t active_grp_tasklet_cnt;
	atomic_t link_suspended;
	/* RRI (ring register index) shadow kept in DDR, when used;
	 * see hif_uninit_rri_on_ddr()
	 */
	uint32_t *vaddr_rri_on_ddr;
	qdf_dma_addr_t paddr_rri_on_ddr;
#ifdef CONFIG_BYPASS_QMI
	uint32_t *vaddr_qmi_bypass;
	qdf_dma_addr_t paddr_qmi_bypass;
#endif
	int linkstate_vote;
	bool fastpath_mode_on;
	atomic_t tasklet_from_intr;
	int htc_htt_tx_endpoint;
	qdf_dma_addr_t mem_pa;
	bool athdiag_procfs_inited;
#ifdef FEATURE_NAPI
	struct qca_napi_data napi_data;
#endif /* FEATURE_NAPI */
	/* stores ce_service_max_yield_time in ns */
	unsigned long long ce_service_max_yield_time;
	uint8_t ce_service_max_rx_ind_flush;
	/* driver-state callbacks, exposed via hif_get_callbacks_handle() */
	struct hif_driver_state_callbacks callbacks;
	uint32_t hif_con_param;
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
	uint32_t nss_wifi_ol_mode;
#endif
	/* opaque HAL SoC handle, exposed via hif_get_hal_handle() */
	void *hal_soc;
	struct hif_ut_suspend_context ut_suspend_ctx;
	uint32_t hif_attribute;
	/* standalone wake interrupt, when one is configured */
	int wake_irq;
	hif_pm_wake_irq_type wake_irq_type;
	void (*initial_wakeup_cb)(void *);
	void *initial_wakeup_priv;
#ifdef REMOVE_PKT_LOG
	/* Handle to pktlog device */
	void *pktlog_dev;
#endif
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
	/* Pointer to the srng event history */
	struct hif_event_history *evt_hist[HIF_NUM_INT_CONTEXTS];
#endif

/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
	struct ce_desc_hist hif_ce_desc_hist;
#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)*/
#ifdef IPA_OFFLOAD
	qdf_shared_mem_t *ipa_ce_ring;
#endif
	/* ini-derived configuration (see struct hif_cfg) */
	struct hif_cfg ini_cfg;
#ifdef HIF_CE_LOG_INFO
	qdf_notif_block hif_recovery_notifier;
#endif
#ifdef HIF_CPU_PERF_AFFINE_MASK
	/* The CPU hotplug event registration handle */
	struct qdf_cpuhp_handler *cpuhp_event_handle;
#endif
	/* Should the unlazy support for interrupt delivery be disabled */
	uint32_t irq_unlazy_disable;
	/* Flag to indicate whether bus is suspended */
	bool bus_suspended;
	bool pktlog_init;
#ifdef FEATURE_RUNTIME_PM
	/* Variable to track the link state change in RTPM */
	qdf_atomic_t pm_link_state;
#endif
#ifdef HIF_DETECTION_LATENCY_ENABLE
	struct hif_latency_detect latency_detect;
#endif
#ifdef FEATURE_RUNTIME_PM
	qdf_runtime_lock_t prevent_linkdown_lock;
#endif
#ifdef SYSTEM_PM_CHECK
	qdf_atomic_t sys_pm_state;
#endif
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
	qdf_atomic_t dp_ep_vote_access;
	qdf_atomic_t ep_vote_access;
#endif
	/* CMEM address target reserved for host usage */
	uint64_t cmem_start;
	/* CMEM size target reserved */
	uint64_t cmem_size;
#ifdef DP_UMAC_HW_RESET_SUPPORT
	struct hif_umac_reset_ctx umac_reset_ctx;
#endif
};
374 
375 static inline
376 void *hif_get_hal_handle(struct hif_opaque_softc *hif_hdl)
377 {
378 	struct hif_softc *sc = (struct hif_softc *)hif_hdl;
379 
380 	if (!sc)
381 		return NULL;
382 
383 	return sc->hal_soc;
384 }
385 
386 /**
387  * hif_get_cmem_info() - get CMEM address and size from HIF handle
388  * @hif_hdl: HIF handle pointer
389  * @cmem_start: pointer for CMEM address
390  * @cmem_size: pointer for CMEM size
391  *
392  * Return: None.
393  */
394 static inline
395 void hif_get_cmem_info(struct hif_opaque_softc *hif_hdl,
396 		       uint64_t *cmem_start,
397 		       uint64_t *cmem_size)
398 {
399 	struct hif_softc *sc = (struct hif_softc *)hif_hdl;
400 
401 	*cmem_start = sc->cmem_start;
402 	*cmem_size = sc->cmem_size;
403 }
404 
405 /**
406  * hif_get_num_active_tasklets() - get the number of active
407  *		tasklets pending to be completed.
408  * @scn: HIF context
409  *
410  * Returns: the number of tasklets which are active
411  */
412 static inline int hif_get_num_active_tasklets(struct hif_softc *scn)
413 {
414 	return qdf_atomic_read(&scn->active_tasklet_cnt);
415 }
416 
417 /*
418  * Max waiting time during Runtime PM suspend to finish all
419  * the tasks. This is in the multiple of 10ms.
420  */
421 #define HIF_TASK_DRAIN_WAIT_CNT 25
422 
423 /**
424  * hif_try_complete_tasks() - Try to complete all the pending tasks
425  * @scn: HIF context
426  *
427  * Try to complete all the pending datapath tasks, i.e. tasklets,
428  * DP group tasklets and works which are queued, in a given time
429  * slot.
430  *
431  * Returns: QDF_STATUS_SUCCESS if all the tasks were completed
432  *	QDF error code, if the time slot exhausted
433  */
434 QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn);
435 
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
/**
 * hif_is_nss_wifi_enabled() - check whether any NSS wifi offload mode is set
 * @sc: HIF context
 *
 * Return: true when nss_wifi_ol_mode is non-zero
 */
static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc)
{
	return sc->nss_wifi_ol_mode != 0;
}
#else
/* NSS wifi offload compiled out: never enabled */
static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc)
{
	(void)sc;
	return false;
}
#endif
447 
/**
 * hif_is_attribute_set() - check the configured hif attribute value
 * @sc: HIF context
 * @hif_attrib: attribute value to compare against
 *
 * NOTE(review): despite the "is_attribute_set" name, this is a whole-word
 * equality test, not a bitmask test (no '&') — confirm callers pass the
 * complete attribute word rather than an individual flag bit.
 *
 * Return: 1 if sc->hif_attribute exactly equals @hif_attrib, else 0
 */
static inline uint8_t hif_is_attribute_set(struct hif_softc *sc,
						uint32_t hif_attrib)
{
	return sc->hif_attribute == hif_attrib;
}
453 
454 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
455 static inline void hif_set_event_hist_mask(struct hif_opaque_softc *hif_handle)
456 {
457 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
458 
459 	scn->event_enable_mask = HIF_EVENT_HIST_ENABLE_MASK;
460 }
461 #else
462 static inline void hif_set_event_hist_mask(struct hif_opaque_softc *hif_handle)
463 {
464 }
465 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
466 
467 A_target_id_t hif_get_target_id(struct hif_softc *scn);
468 void hif_dump_pipe_debug_count(struct hif_softc *scn);
469 void hif_display_bus_stats(struct hif_opaque_softc *scn);
470 void hif_clear_bus_stats(struct hif_opaque_softc *scn);
471 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count);
472 void hif_shutdown_device(struct hif_opaque_softc *hif_ctx);
473 int hif_bus_configure(struct hif_softc *scn);
474 void hif_cancel_deferred_target_sleep(struct hif_softc *scn);
475 int hif_config_ce(struct hif_softc *scn);
476 int hif_config_ce_pktlog(struct hif_opaque_softc *hif_ctx);
477 int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num);
478 void hif_unconfig_ce(struct hif_softc *scn);
479 void hif_ce_prepare_config(struct hif_softc *scn);
480 QDF_STATUS hif_ce_open(struct hif_softc *scn);
481 void hif_ce_close(struct hif_softc *scn);
482 int athdiag_procfs_init(void *scn);
483 void athdiag_procfs_remove(void);
484 /* routine to modify the initial buffer count to be allocated on an os
485  * platform basis. Platform owner will need to modify this as needed
486  */
487 qdf_size_t init_buffer_count(qdf_size_t maxSize);
488 
489 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg);
490 int hif_get_device_type(uint32_t device_id,
491 			uint32_t revision_id,
492 			uint32_t *hif_type, uint32_t *target_type);
493 /*These functions are exposed to HDD*/
494 void hif_nointrs(struct hif_softc *scn);
495 void hif_bus_close(struct hif_softc *ol_sc);
496 QDF_STATUS hif_bus_open(struct hif_softc *ol_sc,
497 	enum qdf_bus_type bus_type);
498 QDF_STATUS hif_enable_bus(struct hif_softc *ol_sc, struct device *dev,
499 	void *bdev, const struct hif_bus_id *bid, enum hif_enable_type type);
500 void hif_disable_bus(struct hif_softc *scn);
501 void hif_bus_prevent_linkdown(struct hif_softc *scn, bool flag);
502 int hif_bus_get_context_size(enum qdf_bus_type bus_type);
503 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *bar_value);
504 uint32_t hif_get_conparam(struct hif_softc *scn);
505 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
506 							struct hif_softc *scn);
507 bool hif_is_driver_unloading(struct hif_softc *scn);
508 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn);
509 bool hif_is_recovery_in_progress(struct hif_softc *scn);
510 bool hif_is_target_ready(struct hif_softc *scn);
511 
512 /**
513  * hif_get_bandwidth_level() - API to get the current bandwidth level
514  * @hif_handle: HIF Context
515  *
516  * Return: PLD bandwidth level
517  */
518 int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle);
519 
520 void hif_wlan_disable(struct hif_softc *scn);
521 int hif_target_sleep_state_adjust(struct hif_softc *scn,
522 					 bool sleep_ok,
523 					 bool wait_for_it);
524 
525 #ifdef DP_MEM_PRE_ALLOC
526 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
527 					 qdf_size_t size,
528 					 qdf_dma_addr_t *paddr,
529 					 uint32_t ring_type,
530 					 uint8_t *is_mem_prealloc);
531 
532 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
533 				       qdf_size_t size,
534 				       void *vaddr,
535 				       qdf_dma_addr_t paddr,
536 				       qdf_dma_context_t memctx,
537 				       uint8_t is_mem_prealloc);
538 #else
539 static inline
540 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
541 					 qdf_size_t size,
542 					 qdf_dma_addr_t *paddr,
543 					 uint32_t ring_type,
544 					 uint8_t *is_mem_prealloc)
545 {
546 	return qdf_mem_alloc_consistent(scn->qdf_dev,
547 					scn->qdf_dev->dev,
548 					size,
549 					paddr);
550 }
551 
552 static inline
553 void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
554 				       qdf_size_t size,
555 				       void *vaddr,
556 				       qdf_dma_addr_t paddr,
557 				       qdf_dma_context_t memctx,
558 				       uint8_t is_mem_prealloc)
559 {
560 	return qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
561 				       size, vaddr, paddr, memctx);
562 }
563 #endif
564 
565 /**
566  * hif_get_rx_ctx_id() - Returns NAPI instance ID based on CE ID
567  * @ctx_id: Rx CE context ID
568  * @hif_hdl: HIF Context
569  *
570  * Return: Rx instance ID
571  */
572 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl);
573 void hif_ramdump_handler(struct hif_opaque_softc *scn);
#ifdef HIF_USB
void hif_usb_get_hw_info(struct hif_softc *scn);
void hif_usb_ramdump_handler(struct hif_opaque_softc *scn);
#else
/* USB support compiled out: HW-info query and ramdump handling are no-ops */
static inline void hif_usb_get_hw_info(struct hif_softc *scn) {}
static inline void hif_usb_ramdump_handler(struct hif_opaque_softc *scn) {}
#endif
581 
582 /**
583  * hif_wake_interrupt_handler() - interrupt handler for standalone wake irq
584  * @irq: the irq number that fired
585  * @context: the opaque pointer passed to request_irq()
586  *
587  * Return: an irq return type
588  */
589 irqreturn_t hif_wake_interrupt_handler(int irq, void *context);
590 
#ifdef HIF_SNOC
/**
 * hif_is_target_register_access_allowed() - check whether it is currently
 *	safe to touch target registers (implementation is SNOC-specific)
 * @hif_sc: HIF context
 *
 * Return: true if register access is allowed
 */
bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc);
#else
static inline
bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc)
{
	/* non-SNOC buses: access is always permitted */
	return true;
}
#endif
600 
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr() - tear down the DDR-resident ring register index
 *	shadow (vaddr_rri_on_ddr in struct hif_softc)
 * @scn: HIF context
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn);
#else
/* RRI-on-DDR not used on this target: nothing to tear down */
static inline
void hif_uninit_rri_on_ddr(struct hif_softc *scn) {}
#endif
607 void hif_cleanup_static_buf_to_target(struct hif_softc *scn);
608 
#ifdef FEATURE_RUNTIME_PM
/**
 * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
 * @scn: hif context
 * @is_get: prevent linkdown if true otherwise allow
 *
 * this api should only be called as part of bus prevent linkdown
 */
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get);
#else
/* Runtime PM compiled out: the link is never runtime-suspended */
static inline
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
}
#endif
624 
625 #endif /* __HIF_MAIN_H__ */
626