/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * NB: Inappropriate references to "HTC" are used in this (and other)
 * HIF implementations.  HTC is typically the calling layer, but it
 * theoretically could be some alternative.
 */

/*
 * This holds all state needed to process a pending send/recv interrupt.
 * The information is saved here as soon as the interrupt occurs (thus
 * allowing the underlying CE to re-use the ring descriptor). The
 * information here is eventually processed by a completion processing
 * thread.
 */

#ifndef __HIF_MAIN_H__
#define __HIF_MAIN_H__

#include <qdf_atomic.h>         /* qdf_atomic_read */
#include "qdf_lock.h"
#include "cepci.h"
#include "hif.h"
#include "multibus.h"
#include "hif_unit_test_suspend_i.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_notifier.h"
#endif
#include "pld_common.h"

#define HIF_MIN_SLEEP_INACTIVITY_TIME_MS     50
#define HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS 60

#define HIF_MAX_BUDGET 0xFFFF

#define HIF_STATS_INC(_handle, _field, _delta) \
{ \
	(_handle)->stats._field += (_delta); \
}
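
/*
 * Example (illustrative sketch, not from the driver): for a handle
 * whose embedded 'stats' struct has a hypothetical 'pkts_sent'
 * counter,
 *
 *	HIF_STATS_INC(sc, pkts_sent, 1);
 *
 * expands to: { (sc)->stats.pkts_sent += (1); }
 */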

/*
 * This macro implementation is exposed for efficiency only.
 * The implementation may change and callers should
 * consider the targid to be a completely opaque handle.
 */
#define TARGID_TO_PCI_ADDR(targid) (*((A_target_id_t *)(targid)))

#ifdef QCA_WIFI_3_0
#define DISABLE_L1SS_STATES 1
#endif

#define MAX_NUM_OF_RECEIVES HIF_NAPI_MAX_RECEIVES

#ifdef QCA_WIFI_3_0_ADRASTEA
#define ADRASTEA_BU 1
#else
#define ADRASTEA_BU 0
#endif

#ifdef QCA_WIFI_3_0
#define HAS_FW_INDICATOR 0
#else
#define HAS_FW_INDICATOR 1
#endif

#define AR9888_DEVICE_ID (0x003c)
#define AR6320_DEVICE_ID (0x003e)
#define AR6320_FW_1_1  (0x11)
#define AR6320_FW_1_3  (0x13)
#define AR6320_FW_2_0  (0x20)
#define AR6320_FW_3_0  (0x30)
#define AR6320_FW_3_2  (0x32)
#define QCA6290_EMULATION_DEVICE_ID (0xabcd)
#define QCA6290_DEVICE_ID (0x1100)
#define QCN9000_DEVICE_ID (0x1104)
#define QCN9224_DEVICE_ID (0x1109)
#define QCN6122_DEVICE_ID (0xFFFB)
#define QCN9160_DEVICE_ID (0xFFF8)
#define QCN6432_DEVICE_ID (0xFFF7)
#define QCA6390_EMULATION_DEVICE_ID (0x0108)
#define QCA6390_DEVICE_ID (0x1101)
/* TODO: change IDs for HastingsPrime */
#define QCA6490_EMULATION_DEVICE_ID (0x010a)
#define QCA6490_DEVICE_ID (0x1103)
#define MANGO_DEVICE_ID (0x110a)
#define PEACH_DEVICE_ID (0x110e)

/* TODO: change IDs for Moselle */
#define QCA6750_EMULATION_DEVICE_ID (0x010c)
#define QCA6750_DEVICE_ID (0x1105)

/* TODO: change IDs for Hamilton */
#define KIWI_DEVICE_ID (0x1107)

/* TODO: change IDs for Evros */
#define WCN6450_DEVICE_ID (0x1108)

#define ADRASTEA_DEVICE_ID_P2_E12 (0x7021)
#define AR9887_DEVICE_ID    (0x0050)
#define AR900B_DEVICE_ID    (0x0040)
#define QCA9984_DEVICE_ID   (0x0046)
#define QCA9888_DEVICE_ID   (0x0056)
#define QCA8074_DEVICE_ID   (0xffff) /* TODO: replace this with the
					actual number once available;
					currently defined as 0xffff for
					emulation purposes */
#define QCA8074V2_DEVICE_ID (0xfffe) /* TODO: replace this with actual number */
#define QCA6018_DEVICE_ID (0xfffd) /* TODO: replace this with actual number */
#define QCA5018_DEVICE_ID (0xfffc) /* TODO: replace this with actual number */
#define QCA9574_DEVICE_ID (0xfffa)
#define QCA5332_DEVICE_ID (0xfff9)
/* Genoa */
#define QCN7605_DEVICE_ID  (0x1102) /* Genoa PCIe device ID */
#define QCN7605_COMPOSITE  (0x9901)
#define QCN7605_STANDALONE  (0x9900)
#define QCN7605_STANDALONE_V2  (0x9902)
#define QCN7605_COMPOSITE_V2  (0x9903)

#define RUMIM2M_DEVICE_ID_NODE0	0xabc0
#define RUMIM2M_DEVICE_ID_NODE1	0xabc1
#define RUMIM2M_DEVICE_ID_NODE2	0xabc2
#define RUMIM2M_DEVICE_ID_NODE3	0xabc3
#define RUMIM2M_DEVICE_ID_NODE4	0xaa10
#define RUMIM2M_DEVICE_ID_NODE5	0xaa11

#define HIF_GET_PCI_SOFTC(scn) ((struct hif_pci_softc *)scn)
#define HIF_GET_IPCI_SOFTC(scn) ((struct hif_ipci_softc *)scn)
#define HIF_GET_CE_STATE(scn) ((struct HIF_CE_state *)scn)
#define HIF_GET_SDIO_SOFTC(scn) ((struct hif_sdio_softc *)scn)
#define HIF_GET_USB_SOFTC(scn) ((struct hif_usb_softc *)scn)
#define HIF_GET_USB_DEVICE(scn) ((struct HIF_DEVICE_USB *)scn)
#define HIF_GET_SOFTC(scn) ((struct hif_softc *)scn)
#define GET_HIF_OPAQUE_HDL(scn) ((struct hif_opaque_softc *)scn)
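
/*
 * These accessors are plain casts: each bus-specific softc embeds the
 * common HIF state at the start of its allocation, so one pointer can
 * be viewed as any of the types above. Illustrative usage, assuming
 * 'hif_ctx' is a valid opaque handle:
 *
 *	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
 *	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
 */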

#ifdef QCA_WIFI_QCN9224
#define NUM_CE_AVAILABLE 16
#else
#define NUM_CE_AVAILABLE 12
#endif
/* Add 1 here to store default configuration in index 0 */
#define NUM_CE_CONTEXT (NUM_CE_AVAILABLE + 1)

#define CE_INTERRUPT_IDX(x) x

#ifdef WLAN_64BIT_DATA_SUPPORT
#define RRI_ON_DDR_MEM_SIZE ((CE_COUNT) * sizeof(uint64_t))
#else
#define RRI_ON_DDR_MEM_SIZE ((CE_COUNT) * sizeof(uint32_t))
#endif
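
/*
 * Example: on a target where CE_COUNT is 12, this evaluates to
 * 12 * 4 = 48 bytes for 32-bit RRI entries, or 12 * 8 = 96 bytes
 * when WLAN_64BIT_DATA_SUPPORT is enabled.
 */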

struct ce_int_assignment {
	uint8_t msi_idx[NUM_CE_AVAILABLE];
};

struct hif_ce_stats {
	int hif_pipe_no_resrc_count;
	int ce_ring_delta_fail_count;
};

#ifdef HIF_DETECTION_LATENCY_ENABLE
/**
 * struct hif_tasklet_running_info - running info of tasklet
 * @sched_cpuid: id of cpu on which the tasklet was scheduled
 * @sched_time: time when the tasklet was scheduled
 * @exec_time: time when the tasklet was executed
 */
struct hif_tasklet_running_info {
	int sched_cpuid;
	qdf_time_t sched_time;
	qdf_time_t exec_time;
};

#define HIF_TASKLET_IN_MONITOR CE_COUNT_MAX

struct hif_latency_detect {
	qdf_timer_t timer;
	uint32_t timeout;
	bool is_timer_started;
	bool enable_detection;
	/* threshold beyond which a stall is reported */
	uint32_t threshold;

	/*
	 * Bitmap indicating whether latency detection is enabled for
	 * each tasklet: bit X corresponds to the tasklet of WLAN_CE_X,
	 * and latency detection is enabled on that tasklet when the
	 * bit is set.
	 * The same bitmap also indicates the validity of the elements
	 * in the 'tasklet_info' array: the element at index X is valid
	 * when bit X is set.
	 */
	qdf_bitmap(tasklet_bmap, HIF_TASKLET_IN_MONITOR);

	/*
	 * Array recording running info of the tasklets; the info for
	 * the WLAN_CE_X tasklet is stored at index X.
	 */
	struct hif_tasklet_running_info tasklet_info[HIF_TASKLET_IN_MONITOR];
	qdf_time_t credit_request_time;
	qdf_time_t credit_report_time;
};
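
/*
 * Illustrative sketch (assumed usage, not a driver API): check the
 * bitmap before trusting the corresponding tasklet_info element.
 *
 *	if (qdf_test_bit(ce_id, detect->tasklet_bmap))
 *		sched_time = detect->tasklet_info[ce_id].sched_time;
 */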
#endif

/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to
 * be checked here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#define HIF_CE_MAX_LATEST_HIST 2
#define HIF_CE_MAX_LATEST_EVTS 2

struct latest_evt_history {
	uint64_t irq_entry_ts;
	uint64_t bh_entry_ts;
	uint64_t bh_resched_ts;
	uint64_t bh_exit_ts;
	uint64_t bh_work_ts;
	int cpu_id;
	uint32_t ring_hp;
	uint32_t ring_tp;
};

struct ce_desc_hist {
	qdf_atomic_t history_index[CE_COUNT_MAX];
	uint8_t ce_id_hist_map[CE_COUNT_MAX];
	bool enable[CE_COUNT_MAX];
	bool data_enable[CE_COUNT_MAX];
	qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX];
	uint32_t hist_index;
	uint32_t hist_id;
	void *hist_ev[CE_COUNT_MAX];
	struct latest_evt_history latest_evts[HIF_CE_MAX_LATEST_HIST]
					     [HIF_CE_MAX_LATEST_EVTS];
};

void hif_record_latest_evt(struct ce_desc_hist *ce_hist,
			   uint8_t type,
			   int ce_id, uint64_t time,
			   uint32_t hp, uint32_t tp);
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
/**
 * struct hif_cfg - store ini config parameters in hif layer
 * @ce_status_ring_timer_threshold: ce status ring timer threshold
 * @ce_status_ring_batch_count_threshold: ce status ring batch count threshold
 * @disable_wake_irq: disable wake irq
 */
struct hif_cfg {
	uint16_t ce_status_ring_timer_threshold;
	uint8_t ce_status_ring_batch_count_threshold;
	bool disable_wake_irq;
};

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * struct hif_umac_reset_ctx - UMAC HW reset context at HIF layer
 * @intr_tq: Tasklet structure
 * @irq_handler: IRQ handler
 * @cb_handler: Callback handler
 * @cb_ctx: Argument to be passed to @cb_handler
 * @os_irq: Interrupt number for this IRQ
 * @irq_configured: Whether the IRQ has been configured
 */
struct hif_umac_reset_ctx {
	struct tasklet_struct intr_tq;
	bool (*irq_handler)(void *cb_ctx);
	int (*cb_handler)(void *cb_ctx);
	void *cb_ctx;
	uint32_t os_irq;
	bool irq_configured;
};
#endif

#define MAX_SHADOW_REGS 40

#ifdef FEATURE_HIF_DELAYED_REG_WRITE
/**
 * enum hif_reg_sched_delay - ENUM for write sched delay histogram
 * @HIF_REG_WRITE_SCHED_DELAY_SUB_100us: index for delay < 100us
 * @HIF_REG_WRITE_SCHED_DELAY_SUB_1000us: index for delay < 1000us
 * @HIF_REG_WRITE_SCHED_DELAY_SUB_5000us: index for delay < 5000us
 * @HIF_REG_WRITE_SCHED_DELAY_GT_5000us: index for delay >= 5000us
 * @HIF_REG_WRITE_SCHED_DELAY_HIST_MAX: Max value (size of histogram array)
 */
enum hif_reg_sched_delay {
	HIF_REG_WRITE_SCHED_DELAY_SUB_100us,
	HIF_REG_WRITE_SCHED_DELAY_SUB_1000us,
	HIF_REG_WRITE_SCHED_DELAY_SUB_5000us,
	HIF_REG_WRITE_SCHED_DELAY_GT_5000us,
	HIF_REG_WRITE_SCHED_DELAY_HIST_MAX,
};
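
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * mapping a measured scheduling delay in microseconds to one of the
 * histogram buckets above.
 *
 *	static inline enum hif_reg_sched_delay
 *	hif_reg_sched_delay_bucket(uint64_t us)
 *	{
 *		if (us < 100)
 *			return HIF_REG_WRITE_SCHED_DELAY_SUB_100us;
 *		if (us < 1000)
 *			return HIF_REG_WRITE_SCHED_DELAY_SUB_1000us;
 *		if (us < 5000)
 *			return HIF_REG_WRITE_SCHED_DELAY_SUB_5000us;
 *		return HIF_REG_WRITE_SCHED_DELAY_GT_5000us;
 *	}
 */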

/**
 * struct hif_reg_write_soc_stats - soc stats to keep track of register writes
 * @enqueues: writes enqueued to delayed work
 * @dequeues: writes dequeued from delayed work (not written yet)
 * @coalesces: writes not enqueued since the srng is already queued up
 * @direct: writes not enqueued and written to the register directly
 * @prevent_l1_fails: prevent-L1 API failures
 * @q_depth: current queue depth in the delayed register write queue
 * @max_q_depth: maximum depth reached by the delayed register write queue
 * @sched_delay: histogram of kernel work sched delay + bus wakeup delay
 * @dequeue_delay: number of times the dequeue operation was delayed
 */
struct hif_reg_write_soc_stats {
	qdf_atomic_t enqueues;
	uint32_t dequeues;
	qdf_atomic_t coalesces;
	qdf_atomic_t direct;
	uint32_t prevent_l1_fails;
	qdf_atomic_t q_depth;
	uint32_t max_q_depth;
	uint32_t sched_delay[HIF_REG_WRITE_SCHED_DELAY_HIST_MAX];
	uint32_t dequeue_delay;
};

/**
 * struct hif_reg_write_q_elem - delayed register write queue element
 * @ce_state: CE state queued for a delayed write
 * @offset: offset of the CE register
 * @enqueue_val: register value at the time of delayed write enqueue
 * @dequeue_val: register value at the time of delayed write dequeue
 * @valid: whether this entry is valid or not
 * @enqueue_time: enqueue time (qdf_log_timestamp)
 * @work_scheduled_time: work scheduled time (qdf_log_timestamp)
 * @dequeue_time: dequeue time (qdf_log_timestamp)
 * @cpu_id: id of the cpu on which the work was scheduled
 */
struct hif_reg_write_q_elem {
	struct CE_state *ce_state;
	uint32_t offset;
	uint32_t enqueue_val;
	uint32_t dequeue_val;
	uint8_t valid;
	qdf_time_t enqueue_time;
	qdf_time_t work_scheduled_time;
	qdf_time_t dequeue_time;
	int cpu_id;
};
#endif
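
/*
 * Illustrative sketch (assumed producer-side usage under
 * FEATURE_HIF_DELAYED_REG_WRITE): the enqueue path records the value
 * and timestamp, and the worker later performs the actual register
 * write; enqueue_val and dequeue_val can differ when writes were
 * coalesced in between.
 *
 *	q_elem->ce_state = ce_state;
 *	q_elem->offset = offset;
 *	q_elem->enqueue_val = val;
 *	q_elem->enqueue_time = qdf_get_log_timestamp();
 *	q_elem->valid = true;
 */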

struct hif_softc {
	struct hif_opaque_softc osc;
	struct hif_config_info hif_config;
	struct hif_target_info target_info;
	void __iomem *mem;
	void __iomem *mem_ce;
	void __iomem *mem_cmem;
	void __iomem *mem_pmm_base;
	enum qdf_bus_type bus_type;
	struct hif_bus_ops bus_ops;
	void *ce_id_to_state[CE_COUNT_MAX];
	qdf_device_t qdf_dev;
	bool hif_init_done;
	bool request_irq_done;
	bool ext_grp_irq_configured;
	bool free_irq_done;
	uint8_t ce_latency_stats;
	/* Packet statistics */
	struct hif_ce_stats pkt_stats;
	enum hif_target_status target_status;
	uint64_t event_enable_mask;

	struct targetdef_s *targetdef;
	struct ce_reg_def *target_ce_def;
	struct hostdef_s *hostdef;
	struct host_shadow_regs_s *host_shadow_regs;

	bool recovery;
	bool notice_send;
	bool per_ce_irq;
	uint32_t ce_irq_summary;
	/* Number of copy engines supported */
	unsigned int ce_count;
	struct ce_int_assignment *int_assignment;
	atomic_t active_tasklet_cnt;
	atomic_t active_grp_tasklet_cnt;
	atomic_t active_oom_work_cnt;
	atomic_t link_suspended;
	void *vaddr_rri_on_ddr;
	atomic_t active_wake_req_cnt;
	qdf_dma_addr_t paddr_rri_on_ddr;
#ifdef CONFIG_BYPASS_QMI
	uint32_t *vaddr_qmi_bypass;
	qdf_dma_addr_t paddr_qmi_bypass;
#endif
	int linkstate_vote;
	bool fastpath_mode_on;
	atomic_t tasklet_from_intr;
	int htc_htt_tx_endpoint;
	qdf_dma_addr_t mem_pa;
	bool athdiag_procfs_inited;
#ifdef FEATURE_NAPI
	struct qca_napi_data napi_data;
#endif /* FEATURE_NAPI */
	/* stores ce_service_max_yield_time in ns */
	unsigned long long ce_service_max_yield_time;
	uint8_t ce_service_max_rx_ind_flush;
	struct hif_driver_state_callbacks callbacks;
	uint32_t hif_con_param;
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
	uint32_t nss_wifi_ol_mode;
#endif
	void *hal_soc;
	struct hif_ut_suspend_context ut_suspend_ctx;
	uint32_t hif_attribute;
	int wake_irq;
	hif_pm_wake_irq_type wake_irq_type;
	void (*initial_wakeup_cb)(void *);
	void *initial_wakeup_priv;
#ifdef REMOVE_PKT_LOG
	/* Handle to pktlog device */
	void *pktlog_dev;
#endif
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
	/* Pointer to the srng event history */
	struct hif_event_history *evt_hist[HIF_NUM_INT_CONTEXTS];
#endif

/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to
 * be checked here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
	struct ce_desc_hist hif_ce_desc_hist;
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
#ifdef IPA_OFFLOAD
	qdf_shared_mem_t *ipa_ce_ring;
#endif
#ifdef IPA_OPT_WIFI_DP
	qdf_atomic_t opt_wifi_dp_rtpm_cnt;
#endif
	struct hif_cfg ini_cfg;
#ifdef HIF_CE_LOG_INFO
	qdf_notif_block hif_recovery_notifier;
#endif
#if defined(HIF_CPU_PERF_AFFINE_MASK) || \
	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)

	/* The CPU hotplug event registration handle */
	struct qdf_cpuhp_handler *cpuhp_event_handle;
#endif
	/* Should the unlazy support for interrupt delivery be disabled */
	uint32_t irq_unlazy_disable;
	/* Flag to indicate whether bus is suspended */
	bool bus_suspended;
	bool pktlog_init;
#ifdef FEATURE_RUNTIME_PM
	/* Variable to track the link state change in RTPM */
	qdf_atomic_t pm_link_state;
#endif
#ifdef HIF_DETECTION_LATENCY_ENABLE
	struct hif_latency_detect latency_detect;
#endif
#ifdef FEATURE_RUNTIME_PM
	qdf_runtime_lock_t prevent_linkdown_lock;
#endif
#ifdef SYSTEM_PM_CHECK
	qdf_atomic_t sys_pm_state;
#endif
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
	qdf_atomic_t dp_ep_vote_access;
	qdf_atomic_t ep_vote_access;
#endif
	/* CMEM address target reserved for host usage */
	uint64_t cmem_start;
	/* CMEM size target reserved */
	uint64_t cmem_size;
#ifdef DP_UMAC_HW_RESET_SUPPORT
	struct hif_umac_reset_ctx umac_reset_ctx;
#endif
#ifdef CONFIG_SHADOW_V3
	struct pld_shadow_reg_v3_cfg shadow_regs[MAX_SHADOW_REGS];
	int num_shadow_registers_configured;
#endif
#ifdef WLAN_FEATURE_AFFINITY_MGR
	/* CPU Affinity info of IRQs */
	bool affinity_mgr_supported;
	uint64_t time_threshold;
	struct hif_cpu_affinity ce_irq_cpu_mask[CE_COUNT_MAX];
	struct hif_cpu_affinity irq_cpu_mask[HIF_MAX_GROUP][HIF_MAX_GRP_IRQ];
	qdf_cpu_mask allowed_mask;
#endif
#ifdef FEATURE_DIRECT_LINK
	struct qdf_mem_multi_page_t dl_recv_pages;
	int dl_recv_pipe_num;
#endif
#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
	struct wbuff_mod_handle *wbuff_handle;
#endif
#ifdef FEATURE_HIF_DELAYED_REG_WRITE
	/* queue (array) to hold register writes */
	struct hif_reg_write_q_elem *reg_write_queue;
	/* delayed work to be queued into workqueue */
	qdf_work_t reg_write_work;
	/* workqueue for delayed register writes */
	qdf_workqueue_t *reg_write_wq;
	/* write index used by caller to enqueue delayed work */
	qdf_atomic_t write_idx;
	/* read index used by worker thread to dequeue/write registers */
	uint32_t read_idx;
	struct hif_reg_write_soc_stats wstats;
	qdf_atomic_t active_work_cnt;
#endif /* FEATURE_HIF_DELAYED_REG_WRITE */
};

#if defined(NUM_SOC_PERF_CLUSTER) && (NUM_SOC_PERF_CLUSTER > 1)
static inline uint16_t hif_get_perf_cluster_bitmap(void)
{
	return (BIT(CPU_CLUSTER_TYPE_PERF) | BIT(CPU_CLUSTER_TYPE_PERF2));
}
#else /* NUM_SOC_PERF_CLUSTER > 1 */
static inline uint16_t hif_get_perf_cluster_bitmap(void)
{
	return BIT(CPU_CLUSTER_TYPE_PERF);
}
#endif /* NUM_SOC_PERF_CLUSTER > 1 */

static inline
void *hif_get_hal_handle(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *sc = (struct hif_softc *)hif_hdl;

	if (!sc)
		return NULL;

	return sc->hal_soc;
}

/**
 * hif_get_cmem_info() - get CMEM address and size from HIF handle
 * @hif_hdl: HIF handle pointer
 * @cmem_start: pointer for CMEM address
 * @cmem_size: pointer for CMEM size
 *
 * Return: None.
 */
static inline
void hif_get_cmem_info(struct hif_opaque_softc *hif_hdl,
		       uint64_t *cmem_start,
		       uint64_t *cmem_size)
{
	struct hif_softc *sc = (struct hif_softc *)hif_hdl;

	*cmem_start = sc->cmem_start;
	*cmem_size = sc->cmem_size;
}

/**
 * hif_get_num_active_tasklets() - get the number of active
 *		tasklets pending to be completed.
 * @scn: HIF context
 *
 * Return: the number of tasklets which are active
 */
static inline int hif_get_num_active_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_tasklet_cnt);
}

/**
 * hif_get_num_active_oom_work() - get the number of active
 *		oom work pending to be completed.
 * @scn: HIF context
 *
 * Return: the number of oom works which are active
 */
static inline int hif_get_num_active_oom_work(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_oom_work_cnt);
}

/*
 * Max waiting time during Runtime PM suspend to finish all
 * the tasks, in multiples of 10 ms (e.g. 200 => 2 s).
 */
#ifdef PANIC_ON_BUG
#define HIF_TASK_DRAIN_WAIT_CNT 200
#else
#define HIF_TASK_DRAIN_WAIT_CNT 25
#endif

/**
 * hif_try_complete_tasks() - Try to complete all the pending tasks
 * @scn: HIF context
 *
 * Try to complete all the pending datapath tasks, i.e. tasklets,
 * DP group tasklets and works which are queued, in a given time
 * slot.
 *
 * Return: QDF_STATUS_SUCCESS if all the tasks were completed;
 *	a QDF error code if the time slot was exhausted
 */
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn);
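
/*
 * Illustrative sketch (hypothetical caller): a suspend path draining
 * pending datapath tasks before allowing the bus to sleep.
 *
 *	if (QDF_IS_STATUS_ERROR(hif_try_complete_tasks(scn)))
 *		return -EBUSY;
 */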

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc)
{
	return !!(sc->nss_wifi_ol_mode);
}
#else
static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc)
{
	return false;
}
#endif

static inline uint8_t hif_is_attribute_set(struct hif_softc *sc,
					   uint32_t hif_attrib)
{
	return sc->hif_attribute == hif_attrib;
}

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
static inline void hif_set_event_hist_mask(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	scn->event_enable_mask = HIF_EVENT_HIST_ENABLE_MASK;
}
#else
static inline void hif_set_event_hist_mask(struct hif_opaque_softc *hif_handle)
{
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

A_target_id_t hif_get_target_id(struct hif_softc *scn);
void hif_dump_pipe_debug_count(struct hif_softc *scn);
void hif_display_bus_stats(struct hif_opaque_softc *scn);
void hif_clear_bus_stats(struct hif_opaque_softc *scn);
bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count);
void hif_shutdown_device(struct hif_opaque_softc *hif_ctx);
int hif_bus_configure(struct hif_softc *scn);
void hif_cancel_deferred_target_sleep(struct hif_softc *scn);
int hif_config_ce(struct hif_softc *scn);
int hif_config_ce_pktlog(struct hif_opaque_softc *hif_ctx);
int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num);
void hif_unconfig_ce(struct hif_softc *scn);
void hif_ce_prepare_config(struct hif_softc *scn);
QDF_STATUS hif_ce_open(struct hif_softc *scn);
void hif_ce_close(struct hif_softc *scn);
int athdiag_procfs_init(void *scn);
void athdiag_procfs_remove(void);
/* Routine to modify the initial buffer count to be allocated on an OS
 * platform basis. The platform owner will need to modify this as needed.
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize);

irqreturn_t hif_fw_interrupt_handler(int irq, void *arg);
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type);
/* These functions are exposed to HDD */
void hif_nointrs(struct hif_softc *scn);
void hif_bus_close(struct hif_softc *ol_sc);
QDF_STATUS hif_bus_open(struct hif_softc *ol_sc,
	enum qdf_bus_type bus_type);
QDF_STATUS hif_enable_bus(struct hif_softc *ol_sc, struct device *dev,
	void *bdev, const struct hif_bus_id *bid, enum hif_enable_type type);
void hif_disable_bus(struct hif_softc *scn);
void hif_bus_prevent_linkdown(struct hif_softc *scn, bool flag);
int hif_bus_get_context_size(enum qdf_bus_type bus_type);
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *bar_value);
uint32_t hif_get_conparam(struct hif_softc *scn);
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn);
bool hif_is_driver_unloading(struct hif_softc *scn);
bool hif_is_load_or_unload_in_progress(struct hif_softc *scn);
bool hif_is_recovery_in_progress(struct hif_softc *scn);
bool hif_is_target_ready(struct hif_softc *scn);

/**
 * hif_get_bandwidth_level() - API to get the current bandwidth level
 * @hif_handle: HIF Context
 *
 * Return: PLD bandwidth level
 */
int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle);

void hif_wlan_disable(struct hif_softc *scn);
int hif_target_sleep_state_adjust(struct hif_softc *scn,
					 bool sleep_ok,
					 bool wait_for_it);

#ifdef DP_MEM_PRE_ALLOC
void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
					 qdf_size_t size,
					 qdf_dma_addr_t *paddr,
					 uint32_t ring_type,
					 uint8_t *is_mem_prealloc);

void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
				       qdf_size_t size,
				       void *vaddr,
				       qdf_dma_addr_t paddr,
				       qdf_dma_context_t memctx,
				       uint8_t is_mem_prealloc);

/**
 * hif_prealloc_get_multi_pages() - gets pre-alloc DP multi-pages memory
 * @scn: HIF context
 * @desc_type: descriptor type
 * @elem_size: single element size
 * @elem_num: total number of elements to be allocated
 * @pages: multi page information storage
 * @cacheable: coherent memory or cacheable memory
 *
 * Return: None
 */
void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  qdf_size_t elem_size, uint16_t elem_num,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable);

/**
 * hif_prealloc_put_multi_pages() - puts back pre-alloc DP multi-pages memory
 * @scn: HIF context
 * @desc_type: descriptor type
 * @pages: multi page information storage
 * @cacheable: coherent memory or cacheable memory
 *
 * Return: None
 */
void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable);
#else
static inline
void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
					 qdf_size_t size,
					 qdf_dma_addr_t *paddr,
					 uint32_t ring_type,
					 uint8_t *is_mem_prealloc)
{
	return qdf_mem_alloc_consistent(scn->qdf_dev,
					scn->qdf_dev->dev,
					size,
					paddr);
}

static inline
void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
				       qdf_size_t size,
				       void *vaddr,
				       qdf_dma_addr_t paddr,
				       qdf_dma_context_t memctx,
				       uint8_t is_mem_prealloc)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
				size, vaddr, paddr, memctx);
}

static inline
void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  qdf_size_t elem_size, uint16_t elem_num,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable)
{
	qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
				  elem_size, elem_num, 0, cacheable);
}

static inline
void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable)
{
	qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
				 cacheable);
}
#endif
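
/*
 * Illustrative sketch (assumed caller context): pairing the unaligned
 * alloc with the matching free; 'ring_size' and 'ring_type' come from
 * the caller, and the memctx argument is left as 0 here.
 *
 *	uint8_t is_prealloc = 0;
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = hif_mem_alloc_consistent_unaligned(scn, ring_size, &paddr,
 *						   ring_type, &is_prealloc);
 *	...
 *	hif_mem_free_consistent_unaligned(scn, ring_size, vaddr, paddr,
 *					  0, is_prealloc);
 */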

/**
 * hif_get_rx_ctx_id() - Returns NAPI instance ID based on CE ID
 * @ctx_id: Rx CE context ID
 * @hif_hdl: HIF Context
 *
 * Return: Rx instance ID
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl);
void hif_ramdump_handler(struct hif_opaque_softc *scn);
#ifdef HIF_USB
void hif_usb_get_hw_info(struct hif_softc *scn);
void hif_usb_ramdump_handler(struct hif_opaque_softc *scn);
#else
static inline void hif_usb_get_hw_info(struct hif_softc *scn) {}
static inline void hif_usb_ramdump_handler(struct hif_opaque_softc *scn) {}
#endif

/**
 * hif_wake_interrupt_handler() - interrupt handler for standalone wake irq
 * @irq: the irq number that fired
 * @context: the opaque pointer passed to request_irq()
 *
 * Return: an irq return type
 */
irqreturn_t hif_wake_interrupt_handler(int irq, void *context);

#if defined(HIF_SNOC)
bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc);
#elif defined(HIF_IPCI)
static inline bool
hif_is_target_register_access_allowed(struct hif_softc *hif_sc)
{
	return !(hif_sc->recovery);
}
#else
static inline
bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc)
{
	return true;
}
#endif

#ifdef ADRASTEA_RRI_ON_DDR
void hif_uninit_rri_on_ddr(struct hif_softc *scn);
#else
static inline
void hif_uninit_rri_on_ddr(struct hif_softc *scn) {}
#endif
void hif_cleanup_static_buf_to_target(struct hif_softc *scn);

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_runtime_prevent_linkdown() - prevent or allow a runtime PM link down
 * @scn: hif context
 * @is_get: prevent linkdown if true, otherwise allow
 *
 * This API should only be called as part of bus prevent linkdown
 */
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get);
#else
static inline
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
}
#endif

#ifdef HIF_HAL_REG_ACCESS_SUPPORT
void hif_reg_window_write(struct hif_softc *scn,
			  uint32_t offset, uint32_t value);
uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset);
#endif

#ifdef FEATURE_HIF_DELAYED_REG_WRITE
void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
			   uint32_t val);
#endif

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
static inline bool hif_is_ep_vote_access_disabled(struct hif_softc *scn)
{
	if ((qdf_atomic_read(&scn->dp_ep_vote_access) ==
	     HIF_EP_VOTE_ACCESS_DISABLE) &&
	    (qdf_atomic_read(&scn->ep_vote_access) ==
	     HIF_EP_VOTE_ACCESS_DISABLE))
		return true;

	return false;
}
#else
static inline bool hif_is_ep_vote_access_disabled(struct hif_softc *scn)
{
	return false;
}
#endif
#endif /* __HIF_MAIN_H__ */