/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * NB: Inappropriate references to "HTC" are used in this (and other)
 * HIF implementations.  HTC is typically the calling layer, but it
 * theoretically could be some alternative.
 */

/*
 * This holds all state needed to process a pending send/recv interrupt.
 * The information is saved here as soon as the interrupt occurs (thus
 * allowing the underlying CE to re-use the ring descriptor). The
 * information here is eventually processed by a completion processing
 * thread.
 */

#ifndef __HIF_MAIN_H__
#define __HIF_MAIN_H__

#include <qdf_atomic.h>         /* qdf_atomic_read */
#include "qdf_lock.h"
#include "cepci.h"
#include "hif.h"
#include "multibus.h"
#include "hif_unit_test_suspend_i.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_notifier.h"
#endif
#include "pld_common.h"

#define HIF_MIN_SLEEP_INACTIVITY_TIME_MS     50
#define HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS 60

#define HIF_MAX_BUDGET 0xFFFF

#define HIF_STATS_INC(_handle, _field, _delta) \
	do { \
		(_handle)->stats._field += (_delta); \
	} while (0)
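
/*
 * Example usage (illustrative only): bump one of the counters from
 * struct hif_ce_stats below, e.g. on a pipe-resource failure:
 *
 *	HIF_STATS_INC(hif_state, hif_pipe_no_resrc_count, 1);
 *
 * where hif_state is assumed to be a handle whose type has a 'stats'
 * member containing the _field being incremented.
 */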

/*
 * This macro implementation is exposed for efficiency only.
 * The implementation may change and callers should
 * consider the targid to be a completely opaque handle.
 */
#define TARGID_TO_PCI_ADDR(targid) (*((A_target_id_t *)(targid)))

#ifdef QCA_WIFI_3_0
#define DISABLE_L1SS_STATES 1
#endif

#define MAX_NUM_OF_RECEIVES HIF_NAPI_MAX_RECEIVES

#ifdef QCA_WIFI_3_0_ADRASTEA
#define ADRASTEA_BU 1
#else
#define ADRASTEA_BU 0
#endif

#ifdef QCA_WIFI_3_0
#define HAS_FW_INDICATOR 0
#else
#define HAS_FW_INDICATOR 1
#endif


#define AR9888_DEVICE_ID (0x003c)
#define AR6320_DEVICE_ID (0x003e)
#define AR6320_FW_1_1  (0x11)
#define AR6320_FW_1_3  (0x13)
#define AR6320_FW_2_0  (0x20)
#define AR6320_FW_3_0  (0x30)
#define AR6320_FW_3_2  (0x32)
#define QCA6290_EMULATION_DEVICE_ID (0xabcd)
#define QCA6290_DEVICE_ID (0x1100)
#define QCN9000_DEVICE_ID (0x1104)
#define QCN9224_DEVICE_ID (0x1109)
#define QCN6122_DEVICE_ID (0xFFFB)
#define QCN9160_DEVICE_ID (0xFFF8)
#define QCN6432_DEVICE_ID (0xFFF7)
#define QCA6390_EMULATION_DEVICE_ID (0x0108)
#define QCA6390_DEVICE_ID (0x1101)
/* TODO: change IDs for HastingsPrime */
#define QCA6490_EMULATION_DEVICE_ID (0x010a)
#define QCA6490_DEVICE_ID (0x1103)
#define MANGO_DEVICE_ID (0x110a)
#define PEACH_DEVICE_ID (0x110e)

/* TODO: change IDs for Moselle */
#define QCA6750_EMULATION_DEVICE_ID (0x010c)
#define QCA6750_DEVICE_ID (0x1105)

/* TODO: change IDs for Hamilton */
#define KIWI_DEVICE_ID (0x1107)

/* TODO: change IDs for Evros */
#define WCN6450_DEVICE_ID (0x1108)

#define ADRASTEA_DEVICE_ID_P2_E12 (0x7021)
#define AR9887_DEVICE_ID    (0x0050)
#define AR900B_DEVICE_ID    (0x0040)
#define QCA9984_DEVICE_ID   (0x0046)
#define QCA9888_DEVICE_ID   (0x0056)
#define QCA8074_DEVICE_ID   (0xffff) /* TODO: replace this with the actual
					number once available; currently
					defined as 0xffff for emulation
					purposes */
#define QCA8074V2_DEVICE_ID (0xfffe) /* TODO: replace this with actual number */
#define QCA6018_DEVICE_ID (0xfffd) /* TODO: replace this with actual number */
#define QCA5018_DEVICE_ID (0xfffc) /* TODO: replace this with actual number */
#define QCA9574_DEVICE_ID (0xfffa)
#define QCA5332_DEVICE_ID (0xfff9)
/* Genoa */
#define QCN7605_DEVICE_ID  (0x1102) /* Genoa PCIe device ID */
#define QCN7605_COMPOSITE  (0x9901)
#define QCN7605_STANDALONE  (0x9900)
#define QCN7605_STANDALONE_V2  (0x9902)
#define QCN7605_COMPOSITE_V2  (0x9903)

#define RUMIM2M_DEVICE_ID_NODE0	0xabc0
#define RUMIM2M_DEVICE_ID_NODE1	0xabc1
#define RUMIM2M_DEVICE_ID_NODE2	0xabc2
#define RUMIM2M_DEVICE_ID_NODE3	0xabc3
#define RUMIM2M_DEVICE_ID_NODE4	0xaa10
#define RUMIM2M_DEVICE_ID_NODE5	0xaa11

#define HIF_GET_PCI_SOFTC(scn) ((struct hif_pci_softc *)scn)
#define HIF_GET_IPCI_SOFTC(scn) ((struct hif_ipci_softc *)scn)
#define HIF_GET_CE_STATE(scn) ((struct HIF_CE_state *)scn)
#define HIF_GET_SDIO_SOFTC(scn) ((struct hif_sdio_softc *)scn)
#define HIF_GET_USB_SOFTC(scn) ((struct hif_usb_softc *)scn)
#define HIF_GET_USB_DEVICE(scn) ((struct HIF_DEVICE_USB *)scn)
#define HIF_GET_SOFTC(scn) ((struct hif_softc *)scn)
#define GET_HIF_OPAQUE_HDL(scn) ((struct hif_opaque_softc *)scn)

#ifdef QCA_WIFI_QCN9224
#define NUM_CE_AVAILABLE 16
#else
#define NUM_CE_AVAILABLE 12
#endif
/* Add 1 here to store default configuration in index 0 */
#define NUM_CE_CONTEXT (NUM_CE_AVAILABLE + 1)

#define CE_INTERRUPT_IDX(x) x

#ifdef WLAN_64BIT_DATA_SUPPORT
#define RRI_ON_DDR_MEM_SIZE (CE_COUNT * sizeof(uint64_t))
#else
#define RRI_ON_DDR_MEM_SIZE (CE_COUNT * sizeof(uint32_t))
#endif

struct ce_int_assignment {
	uint8_t msi_idx[NUM_CE_AVAILABLE];
};

struct hif_ce_stats {
	int hif_pipe_no_resrc_count;
	int ce_ring_delta_fail_count;
};

#ifdef HIF_DETECTION_LATENCY_ENABLE
/**
 * struct hif_tasklet_running_info - running info of tasklet
 * @sched_cpuid: id of cpu on which the tasklet was scheduled
 * @sched_time: time when the tasklet was scheduled
 * @exec_time: time when the tasklet was executed
 */
struct hif_tasklet_running_info {
	int sched_cpuid;
	qdf_time_t sched_time;
	qdf_time_t exec_time;
};

#define HIF_TASKLET_IN_MONITOR CE_COUNT_MAX

struct hif_latency_detect {
	qdf_timer_t timer;
	uint32_t timeout;
	bool is_timer_started;
	bool enable_detection;
	/* threshold beyond which a stall is detected */
	uint32_t threshold;

	/*
	 * Bitmap indicating whether latency detection is enabled for
	 * each tasklet: bit X corresponds to the tasklet of WLAN_CE_X,
	 * and latency detection is enabled on that tasklet when the
	 * bit is set.
	 * The same bitmap also indicates the validity of elements in
	 * the array 'tasklet_info': bit X corresponds to index X, and
	 * the element is valid when the bit is set.
	 */
	qdf_bitmap(tasklet_bmap, HIF_TASKLET_IN_MONITOR);

	/*
	 * Array recording the running info of tasklets; the info of
	 * the tasklet for WLAN_CE_X is stored at index X.
	 */
	struct hif_tasklet_running_info tasklet_info[HIF_TASKLET_IN_MONITOR];
	qdf_time_t credit_request_time;
	qdf_time_t credit_report_time;
};
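
/*
 * Illustrative sketch (not part of this header): with the bitmap
 * convention above, enabling detection for CE 2 and recording its
 * schedule time could look like the following, assuming the
 * qdf_set_bit()/qdf_test_bit() helpers from qdf_util.h:
 *
 *	struct hif_latency_detect *detect = &scn->latency_detect;
 *
 *	qdf_set_bit(2, detect->tasklet_bmap);
 *	if (qdf_test_bit(2, detect->tasklet_bmap))
 *		detect->tasklet_info[2].sched_time = qdf_system_ticks();
 */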
#endif

/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be
 * checked as the condition for this definition
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#define HIF_CE_MAX_LATEST_HIST 2

struct latest_evt_history {
	uint64_t irq_entry_ts;
	uint64_t bh_entry_ts;
	uint64_t bh_resched_ts;
	uint64_t bh_exit_ts;
	uint64_t bh_work_ts;
	int cpu_id;
	uint32_t ring_hp;
	uint32_t ring_tp;
};

struct ce_desc_hist {
	qdf_atomic_t history_index[CE_COUNT_MAX];
	uint8_t ce_id_hist_map[CE_COUNT_MAX];
	bool enable[CE_COUNT_MAX];
	bool data_enable[CE_COUNT_MAX];
	qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX];
	uint32_t hist_index;
	uint32_t hist_id;
	void *hist_ev[CE_COUNT_MAX];
	struct latest_evt_history latest_evt[HIF_CE_MAX_LATEST_HIST];
};

void hif_record_latest_evt(struct ce_desc_hist *ce_hist,
			   uint8_t type,
			   int ce_id, uint64_t time,
			   uint32_t hp, uint32_t tp);
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

/**
 * struct hif_cfg - store ini config parameters in hif layer
 * @ce_status_ring_timer_threshold: ce status ring timer threshold
 * @ce_status_ring_batch_count_threshold: ce status ring batch count threshold
 * @disable_wake_irq: disable wake irq
 */
struct hif_cfg {
	uint16_t ce_status_ring_timer_threshold;
	uint8_t ce_status_ring_batch_count_threshold;
	bool disable_wake_irq;
};

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * struct hif_umac_reset_ctx - UMAC HW reset context at HIF layer
 * @intr_tq: Tasklet structure
 * @irq_handler: IRQ handler
 * @cb_handler: Callback handler
 * @cb_ctx: Argument to be passed to @cb_handler
 * @os_irq: Interrupt number for this IRQ
 * @irq_configured: Whether the IRQ has been configured
 */
struct hif_umac_reset_ctx {
	struct tasklet_struct intr_tq;
	bool (*irq_handler)(void *cb_ctx);
	int (*cb_handler)(void *cb_ctx);
	void *cb_ctx;
	uint32_t os_irq;
	bool irq_configured;
};
#endif
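
/*
 * Illustrative flow for struct hif_umac_reset_ctx (an assumption drawn
 * from the field types above, not mandated by this header): the ISR for
 * @os_irq calls @irq_handler(@cb_ctx); when it returns true, @intr_tq is
 * scheduled, and the tasklet in turn invokes @cb_handler(@cb_ctx).
 */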

#define MAX_SHADOW_REGS 40

#ifdef FEATURE_HIF_DELAYED_REG_WRITE
/**
 * enum hif_reg_sched_delay - ENUM for write sched delay histogram
 * @HIF_REG_WRITE_SCHED_DELAY_SUB_100us: index for delay < 100us
 * @HIF_REG_WRITE_SCHED_DELAY_SUB_1000us: index for delay < 1000us
 * @HIF_REG_WRITE_SCHED_DELAY_SUB_5000us: index for delay < 5000us
 * @HIF_REG_WRITE_SCHED_DELAY_GT_5000us: index for delay >= 5000us
 * @HIF_REG_WRITE_SCHED_DELAY_HIST_MAX: Max value (size of histogram array)
 */
enum hif_reg_sched_delay {
	HIF_REG_WRITE_SCHED_DELAY_SUB_100us,
	HIF_REG_WRITE_SCHED_DELAY_SUB_1000us,
	HIF_REG_WRITE_SCHED_DELAY_SUB_5000us,
	HIF_REG_WRITE_SCHED_DELAY_GT_5000us,
	HIF_REG_WRITE_SCHED_DELAY_HIST_MAX,
};

/**
 * struct hif_reg_write_soc_stats - soc stats to keep track of register writes
 * @enqueues: writes enqueued to delayed work
 * @dequeues: writes dequeued from delayed work (not written yet)
 * @coalesces: writes not enqueued since srng is already queued up
 * @direct: writes not enqueued and written to the register directly
 * @prevent_l1_fails: prevent l1 API failed
 * @q_depth: current queue depth in delayed register write queue
 * @max_q_depth: maximum queue depth for delayed register write queue
 * @sched_delay: histogram of kernel work sched delay + bus wakeup delay
 * @dequeue_delay: number of times the dequeue operation was delayed
 */
struct hif_reg_write_soc_stats {
	qdf_atomic_t enqueues;
	uint32_t dequeues;
	qdf_atomic_t coalesces;
	qdf_atomic_t direct;
	uint32_t prevent_l1_fails;
	qdf_atomic_t q_depth;
	uint32_t max_q_depth;
	uint32_t sched_delay[HIF_REG_WRITE_SCHED_DELAY_HIST_MAX];
	uint32_t dequeue_delay;
};
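
/*
 * Sketch of how a @sched_delay bucket could be selected from a delay in
 * microseconds (illustrative only; the helper name
 * hif_sched_delay_bucket is hypothetical and not part of the driver):
 *
 *	static inline enum hif_reg_sched_delay
 *	hif_sched_delay_bucket(uint64_t delay_us)
 *	{
 *		if (delay_us < 100)
 *			return HIF_REG_WRITE_SCHED_DELAY_SUB_100us;
 *		if (delay_us < 1000)
 *			return HIF_REG_WRITE_SCHED_DELAY_SUB_1000us;
 *		if (delay_us < 5000)
 *			return HIF_REG_WRITE_SCHED_DELAY_SUB_5000us;
 *		return HIF_REG_WRITE_SCHED_DELAY_GT_5000us;
 *	}
 *
 * A caller would then do: scn->wstats.sched_delay[bucket]++;
 */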

/**
 * struct hif_reg_write_q_elem - delayed register write queue element
 * @ce_state: CE state queued for a delayed write
 * @offset: offset of the CE register
 * @enqueue_val: register value at the time of delayed write enqueue
 * @dequeue_val: register value at the time of delayed write dequeue
 * @valid: whether this entry is valid or not
 * @enqueue_time: enqueue time (qdf_log_timestamp)
 * @work_scheduled_time: work scheduled time (qdf_log_timestamp)
 * @dequeue_time: dequeue time (qdf_log_timestamp)
 * @cpu_id: id of the cpu on which the work was scheduled
 */
struct hif_reg_write_q_elem {
	struct CE_state *ce_state;
	uint32_t offset;
	uint32_t enqueue_val;
	uint32_t dequeue_val;
	uint8_t valid;
	qdf_time_t enqueue_time;
	qdf_time_t work_scheduled_time;
	qdf_time_t dequeue_time;
	int cpu_id;
};
#endif
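
/*
 * Sketch of the producer side of the delayed-write queue (illustrative
 * only; assumes a queue-size macro, here called HIF_REG_WRITE_QUEUE_LEN,
 * which is hypothetical and not defined in this header):
 *
 *	uint32_t idx = qdf_atomic_inc_return(&scn->write_idx) %
 *		       HIF_REG_WRITE_QUEUE_LEN;
 *	struct hif_reg_write_q_elem *elem = &scn->reg_write_queue[idx];
 *
 *	elem->ce_state = ce_state;
 *	elem->offset = offset;
 *	elem->enqueue_val = val;
 *	elem->enqueue_time = qdf_get_log_timestamp();
 *	elem->valid = 1;
 *	qdf_queue_work(scn->qdf_dev, scn->reg_write_wq,
 *		       &scn->reg_write_work);
 */
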
358 
359 struct hif_softc {
360 	struct hif_opaque_softc osc;
361 	struct hif_config_info hif_config;
362 	struct hif_target_info target_info;
363 	void __iomem *mem;
364 	void __iomem *mem_ce;
365 	void __iomem *mem_cmem;
366 	void __iomem *mem_pmm_base;
367 	enum qdf_bus_type bus_type;
368 	struct hif_bus_ops bus_ops;
369 	void *ce_id_to_state[CE_COUNT_MAX];
370 	qdf_device_t qdf_dev;
371 	bool hif_init_done;
372 	bool request_irq_done;
373 	bool ext_grp_irq_configured;
374 	bool free_irq_done;
375 	uint8_t ce_latency_stats;
376 	/* Packet statistics */
377 	struct hif_ce_stats pkt_stats;
378 	enum hif_target_status target_status;
379 	uint64_t event_enable_mask;
380 
381 	struct targetdef_s *targetdef;
382 	struct ce_reg_def *target_ce_def;
383 	struct hostdef_s *hostdef;
384 	struct host_shadow_regs_s *host_shadow_regs;
385 
386 	bool recovery;
387 	bool notice_send;
388 	bool per_ce_irq;
389 	uint32_t ce_irq_summary;
390 	/* No of copy engines supported */
391 	unsigned int ce_count;
392 	struct ce_int_assignment *int_assignment;
393 	atomic_t active_tasklet_cnt;
394 	atomic_t active_grp_tasklet_cnt;
395 	atomic_t link_suspended;
396 	void *vaddr_rri_on_ddr;
397 	atomic_t active_wake_req_cnt;
398 	qdf_dma_addr_t paddr_rri_on_ddr;
399 #ifdef CONFIG_BYPASS_QMI
400 	uint32_t *vaddr_qmi_bypass;
401 	qdf_dma_addr_t paddr_qmi_bypass;
402 #endif
403 	int linkstate_vote;
404 	bool fastpath_mode_on;
405 	atomic_t tasklet_from_intr;
406 	int htc_htt_tx_endpoint;
407 	qdf_dma_addr_t mem_pa;
408 	bool athdiag_procfs_inited;
409 #ifdef FEATURE_NAPI
410 	struct qca_napi_data napi_data;
411 #endif /* FEATURE_NAPI */
412 	/* stores ce_service_max_yield_time in ns */
413 	unsigned long long ce_service_max_yield_time;
414 	uint8_t ce_service_max_rx_ind_flush;
415 	struct hif_driver_state_callbacks callbacks;
416 	uint32_t hif_con_param;
417 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
418 	uint32_t nss_wifi_ol_mode;
419 #endif
420 	void *hal_soc;
421 	struct hif_ut_suspend_context ut_suspend_ctx;
422 	uint32_t hif_attribute;
423 	int wake_irq;
424 	hif_pm_wake_irq_type wake_irq_type;
425 	void (*initial_wakeup_cb)(void *);
426 	void *initial_wakeup_priv;
427 #ifdef REMOVE_PKT_LOG
428 	/* Handle to pktlog device */
429 	void *pktlog_dev;
430 #endif
431 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
432 	/* Pointer to the srng event history */
433 	struct hif_event_history *evt_hist[HIF_NUM_INT_CONTEXTS];
434 #endif

/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be
 * checked as the condition for this definition
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
	struct ce_desc_hist hif_ce_desc_hist;
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
#ifdef IPA_OFFLOAD
	qdf_shared_mem_t *ipa_ce_ring;
#endif
#ifdef IPA_OPT_WIFI_DP
	qdf_atomic_t opt_wifi_dp_rtpm_cnt;
#endif
	struct hif_cfg ini_cfg;
#ifdef HIF_CE_LOG_INFO
	qdf_notif_block hif_recovery_notifier;
#endif
#ifdef HIF_CPU_PERF_AFFINE_MASK
	/* The CPU hotplug event registration handle */
	struct qdf_cpuhp_handler *cpuhp_event_handle;
#endif
	/* Should the unlazy support for interrupt delivery be disabled */
	uint32_t irq_unlazy_disable;
	/* Flag to indicate whether bus is suspended */
	bool bus_suspended;
	bool pktlog_init;
#ifdef FEATURE_RUNTIME_PM
	/* Variable to track the link state change in RTPM */
	qdf_atomic_t pm_link_state;
#endif
#ifdef HIF_DETECTION_LATENCY_ENABLE
	struct hif_latency_detect latency_detect;
#endif
#ifdef FEATURE_RUNTIME_PM
	qdf_runtime_lock_t prevent_linkdown_lock;
#endif
#ifdef SYSTEM_PM_CHECK
	qdf_atomic_t sys_pm_state;
#endif
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
	qdf_atomic_t dp_ep_vote_access;
	qdf_atomic_t ep_vote_access;
#endif
	/* CMEM address target reserved for host usage */
	uint64_t cmem_start;
	/* CMEM size target reserved */
	uint64_t cmem_size;
#ifdef DP_UMAC_HW_RESET_SUPPORT
	struct hif_umac_reset_ctx umac_reset_ctx;
#endif
#ifdef CONFIG_SHADOW_V3
	struct pld_shadow_reg_v3_cfg shadow_regs[MAX_SHADOW_REGS];
	int num_shadow_registers_configured;
#endif
#ifdef WLAN_FEATURE_AFFINITY_MGR
	/* CPU Affinity info of IRQs */
	bool affinity_mgr_supported;
	uint64_t time_threshold;
	struct hif_cpu_affinity ce_irq_cpu_mask[CE_COUNT_MAX];
	struct hif_cpu_affinity irq_cpu_mask[HIF_MAX_GROUP][HIF_MAX_GRP_IRQ];
	qdf_cpu_mask allowed_mask;
#endif
#ifdef FEATURE_DIRECT_LINK
	struct qdf_mem_multi_page_t dl_recv_pages;
	int dl_recv_pipe_num;
#endif
#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
	struct wbuff_mod_handle *wbuff_handle;
#endif
#ifdef FEATURE_HIF_DELAYED_REG_WRITE
	/* queue(array) to hold register writes */
	struct hif_reg_write_q_elem *reg_write_queue;
	/* delayed work to be queued into workqueue */
	qdf_work_t reg_write_work;
	/* workqueue for delayed register writes */
	qdf_workqueue_t *reg_write_wq;
	/* write index used by caller to enqueue delayed work */
	qdf_atomic_t write_idx;
	/* read index used by worker thread to dequeue/write registers */
	uint32_t read_idx;
	struct hif_reg_write_soc_stats wstats;
	qdf_atomic_t active_work_cnt;
#endif /* FEATURE_HIF_DELAYED_REG_WRITE */
};

#if defined(NUM_SOC_PERF_CLUSTER) && (NUM_SOC_PERF_CLUSTER > 1)
static inline uint16_t hif_get_perf_cluster_bitmap(void)
{
	return (BIT(CPU_CLUSTER_TYPE_PERF) | BIT(CPU_CLUSTER_TYPE_PERF2));
}
#else /* NUM_SOC_PERF_CLUSTER > 1 */
static inline uint16_t hif_get_perf_cluster_bitmap(void)
{
	return BIT(CPU_CLUSTER_TYPE_PERF);
}
#endif /* NUM_SOC_PERF_CLUSTER > 1 */

static inline
void *hif_get_hal_handle(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *sc = (struct hif_softc *)hif_hdl;

	if (!sc)
		return NULL;

	return sc->hal_soc;
}

/**
 * hif_get_cmem_info() - get CMEM address and size from HIF handle
 * @hif_hdl: HIF handle pointer
 * @cmem_start: pointer for CMEM address
 * @cmem_size: pointer for CMEM size
 *
 * Return: None.
 */
static inline
void hif_get_cmem_info(struct hif_opaque_softc *hif_hdl,
		       uint64_t *cmem_start,
		       uint64_t *cmem_size)
{
	struct hif_softc *sc = (struct hif_softc *)hif_hdl;

	*cmem_start = sc->cmem_start;
	*cmem_size = sc->cmem_size;
}

/**
 * hif_get_num_active_tasklets() - get the number of active
 *		tasklets pending to be completed.
 * @scn: HIF context
 *
 * Return: the number of tasklets which are active
 */
static inline int hif_get_num_active_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_tasklet_cnt);
}

/*
 * Max waiting time during Runtime PM suspend to finish all
 * the tasks. This is in multiples of 10 ms.
 */
#ifdef PANIC_ON_BUG
#define HIF_TASK_DRAIN_WAIT_CNT 200
#else
#define HIF_TASK_DRAIN_WAIT_CNT 25
#endif
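
/*
 * Informational: with the 10 ms granularity above, PANIC_ON_BUG builds
 * wait up to 200 * 10 ms = 2 s for tasks to drain, other builds up to
 * 25 * 10 ms = 250 ms.
 */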

/**
 * hif_try_complete_tasks() - Try to complete all the pending tasks
 * @scn: HIF context
 *
 * Try to complete all the pending datapath tasks, i.e. tasklets,
 * DP group tasklets and works which are queued, in a given time
 * slot.
 *
 * Return: QDF_STATUS_SUCCESS if all the tasks were completed
 *	QDF error code, if the time slot is exhausted
 */
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn);

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc)
{
	return !!(sc->nss_wifi_ol_mode);
}
#else
static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc)
{
	return false;
}
#endif

static inline uint8_t hif_is_attribute_set(struct hif_softc *sc,
					   uint32_t hif_attrib)
{
	return sc->hif_attribute == hif_attrib;
}

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
static inline void hif_set_event_hist_mask(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	scn->event_enable_mask = HIF_EVENT_HIST_ENABLE_MASK;
}
#else
static inline void hif_set_event_hist_mask(struct hif_opaque_softc *hif_handle)
{
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

A_target_id_t hif_get_target_id(struct hif_softc *scn);
void hif_dump_pipe_debug_count(struct hif_softc *scn);
void hif_display_bus_stats(struct hif_opaque_softc *scn);
void hif_clear_bus_stats(struct hif_opaque_softc *scn);
bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count);
void hif_shutdown_device(struct hif_opaque_softc *hif_ctx);
int hif_bus_configure(struct hif_softc *scn);
void hif_cancel_deferred_target_sleep(struct hif_softc *scn);
int hif_config_ce(struct hif_softc *scn);
int hif_config_ce_pktlog(struct hif_opaque_softc *hif_ctx);
int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num);
void hif_unconfig_ce(struct hif_softc *scn);
void hif_ce_prepare_config(struct hif_softc *scn);
QDF_STATUS hif_ce_open(struct hif_softc *scn);
void hif_ce_close(struct hif_softc *scn);
int athdiag_procfs_init(void *scn);
void athdiag_procfs_remove(void);
/* routine to modify the initial buffer count to be allocated on an OS
 * platform basis. Platform owner will need to modify this as needed
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize);

irqreturn_t hif_fw_interrupt_handler(int irq, void *arg);
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type);
/* These functions are exposed to HDD */
void hif_nointrs(struct hif_softc *scn);
void hif_bus_close(struct hif_softc *ol_sc);
QDF_STATUS hif_bus_open(struct hif_softc *ol_sc,
	enum qdf_bus_type bus_type);
QDF_STATUS hif_enable_bus(struct hif_softc *ol_sc, struct device *dev,
	void *bdev, const struct hif_bus_id *bid, enum hif_enable_type type);
void hif_disable_bus(struct hif_softc *scn);
void hif_bus_prevent_linkdown(struct hif_softc *scn, bool flag);
int hif_bus_get_context_size(enum qdf_bus_type bus_type);
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *bar_value);
uint32_t hif_get_conparam(struct hif_softc *scn);
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn);
bool hif_is_driver_unloading(struct hif_softc *scn);
bool hif_is_load_or_unload_in_progress(struct hif_softc *scn);
bool hif_is_recovery_in_progress(struct hif_softc *scn);
bool hif_is_target_ready(struct hif_softc *scn);

/**
 * hif_get_bandwidth_level() - API to get the current bandwidth level
 * @hif_handle: HIF Context
 *
 * Return: PLD bandwidth level
 */
int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle);

void hif_wlan_disable(struct hif_softc *scn);
int hif_target_sleep_state_adjust(struct hif_softc *scn,
				  bool sleep_ok,
				  bool wait_for_it);

#ifdef DP_MEM_PRE_ALLOC
void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
					 qdf_size_t size,
					 qdf_dma_addr_t *paddr,
					 uint32_t ring_type,
					 uint8_t *is_mem_prealloc);

void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
				       qdf_size_t size,
				       void *vaddr,
				       qdf_dma_addr_t paddr,
				       qdf_dma_context_t memctx,
				       uint8_t is_mem_prealloc);

/**
 * hif_prealloc_get_multi_pages() - gets pre-alloc DP multi-pages memory
 * @scn: HIF context
 * @desc_type: descriptor type
 * @elem_size: single element size
 * @elem_num: total number of elements that should be allocated
 * @pages: multi page information storage
 * @cacheable: coherent memory or cacheable memory
 *
 * Return: None
 */
void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  qdf_size_t elem_size, uint16_t elem_num,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable);

/**
 * hif_prealloc_put_multi_pages() - puts back pre-alloc DP multi-pages memory
 * @scn: HIF context
 * @desc_type: descriptor type
 * @pages: multi page information storage
 * @cacheable: coherent memory or cacheable memory
 *
 * Return: None
 */
void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable);
#else
static inline
void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
					 qdf_size_t size,
					 qdf_dma_addr_t *paddr,
					 uint32_t ring_type,
					 uint8_t *is_mem_prealloc)
{
	return qdf_mem_alloc_consistent(scn->qdf_dev,
					scn->qdf_dev->dev,
					size,
					paddr);
}

static inline
void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
				       qdf_size_t size,
				       void *vaddr,
				       qdf_dma_addr_t paddr,
				       qdf_dma_context_t memctx,
				       uint8_t is_mem_prealloc)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
				size, vaddr, paddr, memctx);
}

static inline
void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  qdf_size_t elem_size, uint16_t elem_num,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable)
{
	qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
				  elem_size, elem_num, 0, cacheable);
}

static inline
void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable)
{
	qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
				 cacheable);
}
#endif

/**
 * hif_get_rx_ctx_id() - Returns NAPI instance ID based on CE ID
 * @ctx_id: Rx CE context ID
 * @hif_hdl: HIF Context
 *
 * Return: Rx instance ID
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl);
void hif_ramdump_handler(struct hif_opaque_softc *scn);
#ifdef HIF_USB
void hif_usb_get_hw_info(struct hif_softc *scn);
void hif_usb_ramdump_handler(struct hif_opaque_softc *scn);
#else
static inline void hif_usb_get_hw_info(struct hif_softc *scn) {}
static inline void hif_usb_ramdump_handler(struct hif_opaque_softc *scn) {}
#endif

/**
 * hif_wake_interrupt_handler() - interrupt handler for standalone wake irq
 * @irq: the irq number that fired
 * @context: the opaque pointer passed to request_irq()
 *
 * Return: an irq return type
 */
irqreturn_t hif_wake_interrupt_handler(int irq, void *context);

#if defined(HIF_SNOC)
bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc);
#elif defined(HIF_IPCI)
static inline bool
hif_is_target_register_access_allowed(struct hif_softc *hif_sc)
{
	return !(hif_sc->recovery);
}
#else
static inline
bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc)
{
	return true;
}
#endif

#ifdef ADRASTEA_RRI_ON_DDR
void hif_uninit_rri_on_ddr(struct hif_softc *scn);
#else
static inline
void hif_uninit_rri_on_ddr(struct hif_softc *scn) {}
#endif
void hif_cleanup_static_buf_to_target(struct hif_softc *scn);

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_runtime_prevent_linkdown() - prevent or allow runtime PM link down
 * @scn: hif context
 * @is_get: prevent linkdown if true, otherwise allow
 *
 * This API should only be called as part of bus prevent linkdown
 */
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get);
#else
static inline
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
}
#endif

#ifdef HIF_HAL_REG_ACCESS_SUPPORT
void hif_reg_window_write(struct hif_softc *scn,
			  uint32_t offset, uint32_t value);
uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset);
#endif

#ifdef FEATURE_HIF_DELAYED_REG_WRITE
void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
			   uint32_t val);
#endif

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
static inline bool hif_is_ep_vote_access_disabled(struct hif_softc *scn)
{
	if ((qdf_atomic_read(&scn->dp_ep_vote_access) ==
	     HIF_EP_VOTE_ACCESS_DISABLE) &&
	    (qdf_atomic_read(&scn->ep_vote_access) ==
	     HIF_EP_VOTE_ACCESS_DISABLE))
		return true;

	return false;
}
#else
static inline bool hif_is_ep_vote_access_disabled(struct hif_softc *scn)
{
	return false;
}
#endif
#endif /* __HIF_MAIN_H__ */