1  /*
2   * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3   * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4   *
5   * Permission to use, copy, modify, and/or distribute this software for
6   * any purpose with or without fee is hereby granted, provided that the
7   * above copyright notice and this permission notice appear in all
8   * copies.
9   *
10   * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11   * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12   * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13   * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14   * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15   * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16   * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17   * PERFORMANCE OF THIS SOFTWARE.
18   */
19  
20  #ifndef __CE_H__
21  #define __CE_H__
22  
23  #include "qdf_atomic.h"
24  #include "qdf_lock.h"
25  #include "hif_main.h"
26  #include "qdf_util.h"
27  #include "hif_exec.h"
28  
/*
 * DATA_CE_UPDATE_SWINDEX() - refresh a data-CE source-ring sw index
 *
 * Reads the source-ring read index from its DDR mirror into @x. When
 * DATA_CE_SW_INDEX_NO_INLINE_UPDATE is defined the macro expands to
 * nothing and the index is updated elsewhere.
 */
#ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
#define DATA_CE_UPDATE_SWINDEX(x, scn, addr)				\
		(x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr))
#else
#define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
#endif

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine, this count should be big enough. Once we hit
 * this threshold we'll not check for any Tx/Rx completion in same
 * interrupt handling. Note that this threshold is only used for
 * Rx interrupt processing, this can be used for Tx as well if we
 * suspect any infinite loop in checking for pending Tx completion.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

/* CE numbers carrying HTT traffic - presumably; confirm against pipe map */
#define CE_HTT_T2H_MSG 1
#define CE_HTT_H2T_MSG 4

/* CE register block offset/size - NOTE(review): confirm against target HW */
#define CE_OFFSET		0x00000400
#define CE_USEFUL_SIZE		0x00000058
/* Bitmap with one bit per CE ID (up to 16 CEs) */
#define CE_ALL_BITMAP  0xFFFF

/* Depth of the per-CE tasklet timing history (see struct ce_stats) */
#define HIF_REQUESTED_EVENTS 20
54  /*
55   * enum ce_id_type - Copy engine ID
56   */
57  enum ce_id_type {
58  	CE_ID_0,
59  	CE_ID_1,
60  	CE_ID_2,
61  	CE_ID_3,
62  	CE_ID_4,
63  	CE_ID_5,
64  	CE_ID_6,
65  	CE_ID_7,
66  	CE_ID_8,
67  	CE_ID_9,
68  	CE_ID_10,
69  	CE_ID_11,
70  #ifdef QCA_WIFI_QCN9224
71  	CE_ID_12,
72  	CE_ID_13,
73  	CE_ID_14,
74  	CE_ID_15,
75  #endif
76  	CE_ID_MAX
77  };
78  
#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * enum ce_buckets - CE tasklet latency histogram buckets
 * @CE_BUCKET_500_US: tasklet bucket to store 0-0.5 ms samples
 * @CE_BUCKET_1_MS: tasklet bucket to store 0.5-1 ms samples
 * @CE_BUCKET_2_MS: tasklet bucket to store 1-2 ms samples
 * @CE_BUCKET_5_MS: tasklet bucket to store 2-5 ms samples
 * @CE_BUCKET_10_MS: tasklet bucket to store 5-10 ms samples
 * @CE_BUCKET_BEYOND: tasklet bucket to store > 10 ms samples
 * @CE_BUCKET_MAX: enum max value; used to size bucket arrays
 */
enum ce_buckets {
	CE_BUCKET_500_US,
	CE_BUCKET_1_MS,
	CE_BUCKET_2_MS,
	CE_BUCKET_5_MS,
	CE_BUCKET_10_MS,
	CE_BUCKET_BEYOND,
	CE_BUCKET_MAX,
};
#endif
100  
/**
 * enum ce_target_type - flavor of copy-engine service implementation
 * @CE_SVC_LEGACY: legacy copy-engine service
 * @CE_SVC_SRNG: SRNG-based copy-engine service
 * @CE_MAX_TARGET_TYPE: number of service types; not a valid type
 */
enum ce_target_type {
	CE_SVC_LEGACY,
	CE_SVC_SRNG,
	CE_MAX_TARGET_TYPE
};
106  
/**
 * enum ol_ath_hif_pkt_ecodes - HIF packet error codes
 * @HIF_PIPE_NO_RESOURCE: no resource available on the pipe
 */
enum ol_ath_hif_pkt_ecodes {
	HIF_PIPE_NO_RESOURCE = 0
};
110  
111  struct HIF_CE_state;
112  
/**
 * struct HIF_CE_pipe_info - per-pipe HIF state
 *
 * One instance exists per copy-engine pipe; embedded in
 * struct HIF_CE_state (see pipe_info[] there).
 */
struct HIF_CE_pipe_info {
	/* Handle of underlying Copy Engine */
	struct CE_handle *ce_hdl;

	/* Our pipe number; facilitates use of pipe_info ptrs. */
	uint8_t pipe_num;

	/* Convenience back pointer to HIF_CE_state. */
	struct HIF_CE_state *HIF_CE_state;

	/* Instantaneous number of receive buffers that should be posted */
	atomic_t recv_bufs_needed;
	/* Rx buffer size for this pipe - presumably; confirm against usage */
	qdf_size_t buf_sz;
	/* Protects recv_bufs_needed */
	qdf_spinlock_t recv_bufs_needed_lock;

	/* Protects the pipe's completion free queue */
	qdf_spinlock_t completion_freeq_lock;
	/* Limit the number of outstanding send requests. */
	int num_sends_allowed;

	/* adding three counts for debugging ring buffer errors */
	uint32_t nbuf_alloc_err_count;
	uint32_t nbuf_dma_err_count;
	uint32_t nbuf_ce_enqueue_err_count;
	/* Message callbacks registered for this pipe */
	struct hif_msg_callbacks pipe_callbacks;
};
139  
140  /**
141   * struct ce_tasklet_entry
142   *
143   * @intr_tq: intr_tq
144   * @ce_id: ce_id
145   * @inited: inited
146   * @hi_tasklet_ce:
147   * @hif_ce_state: hif_ce_state
148   */
149  struct ce_tasklet_entry {
150  	struct tasklet_struct intr_tq;
151  	enum ce_id_type ce_id;
152  	bool inited;
153  	bool hi_tasklet_ce;
154  	void *hif_ce_state;
155  };
156  
/**
 * hif_dummy_grp_done() - no-op "work done" check for an exec context group
 * @grp_entry: execution context group (unused)
 * @work_done: amount of work completed (unused)
 *
 * Placeholder completion check that unconditionally reports the group
 * as done.
 *
 * Return: true
 */
static inline bool
hif_dummy_grp_done(struct hif_exec_context *grp_entry, int work_done)
{
	return true;
}
162  
/* Execution-context scheduling ops; definitions live outside this header */
extern struct hif_execution_ops tasklet_sched_ops;
extern struct hif_execution_ops napi_sched_ops;
165  
166  /**
167   * struct ce_stats
168   *
169   * @ce_per_cpu: Stats of the CEs running per CPU
170   * @record_index: Current index to store in time record
171   * @tasklet_sched_entry_ts: Timestamp when tasklet is scheduled
172   * @tasklet_exec_entry_ts: Timestamp when tasklet is started execuiton
173   * @tasklet_exec_time_record: Last N number of tasklets execution time
174   * @tasklet_sched_time_record: Last N number of tasklets scheduled time
175   * @ce_tasklet_exec_bucket: Tasklet execution time buckets
176   * @ce_tasklet_sched_bucket: Tasklet time in queue buckets
177   * @ce_tasklet_exec_last_update: Latest timestamp when bucket is updated
178   * @ce_tasklet_sched_last_update: Latest timestamp when bucket is updated
179   * @ce_ring_full_count:
180   * @ce_manual_tasklet_schedule_count:
181   * @ce_last_manual_tasklet_schedule_ts:
182   */
183  struct ce_stats {
184  	uint32_t ce_per_cpu[CE_COUNT_MAX][QDF_MAX_AVAILABLE_CPU];
185  #ifdef CE_TASKLET_DEBUG_ENABLE
186  	uint32_t record_index[CE_COUNT_MAX];
187  	uint64_t tasklet_sched_entry_ts[CE_COUNT_MAX];
188  	uint64_t tasklet_exec_entry_ts[CE_COUNT_MAX];
189  	uint64_t tasklet_exec_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS];
190  	uint64_t tasklet_sched_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS];
191  	uint64_t ce_tasklet_exec_bucket[CE_COUNT_MAX][CE_BUCKET_MAX];
192  	uint64_t ce_tasklet_sched_bucket[CE_COUNT_MAX][CE_BUCKET_MAX];
193  	uint64_t ce_tasklet_exec_last_update[CE_COUNT_MAX][CE_BUCKET_MAX];
194  	uint64_t ce_tasklet_sched_last_update[CE_COUNT_MAX][CE_BUCKET_MAX];
195  #ifdef CE_TASKLET_SCHEDULE_ON_FULL
196  	uint32_t ce_ring_full_count[CE_COUNT_MAX];
197  	uint32_t ce_manual_tasklet_schedule_count[CE_COUNT_MAX];
198  	uint64_t ce_last_manual_tasklet_schedule_ts[CE_COUNT_MAX];
199  #endif
200  #endif
201  };
202  
/**
 * struct HIF_CE_state - per-device copy-engine HIF state
 * @ol_sc: embedded generic HIF softc; first member, so the two types can
 *	be converted with a cast - NOTE(review): confirm callers rely on this
 * @started: HIF layer has been started
 * @tasklets: per-CE tasklet bookkeeping
 * @hif_ext_group: external (exec context) interrupt groups
 * @hif_num_extgroup: number of valid entries in @hif_ext_group
 * @keep_awake_lock: protects @keep_awake_count - TODO confirm
 * @irq_reg_lock: protects @ce_register_irq_done - TODO confirm
 * @keep_awake_count: nested requests to keep the target awake
 * @verified_awake: target has been verified awake
 * @fake_sleep: fake-sleep state is active
 * @sleep_timer: timer used while in fake sleep
 * @sleep_timer_init: @sleep_timer has been initialized
 * @sleep_ticks: tick count associated with sleep handling - TODO confirm
 * @ce_register_irq_done: per-CE IRQ registration state (bitmap, presumably)
 * @target_ce_config: CE pipe configuration for the target
 * @host_ce_config: CE attributes for the host side
 * @target_ce_config_sz: size of @target_ce_config
 * @pipe_info: per-pipe state
 * @msg_callbacks_pending: callbacks to be activated after BMI_DONE
 * @msg_callbacks_current: current msg callbacks in use
 * @fw_indicator_address: target address used to signal a pending firmware
 *	event
 * @ce_diag: copy engine used for diagnostic accesses
 * @stats: CE tasklet statistics
 * @ce_services: CE service ops (legacy vs SRNG)
 * @tgt_svc_map: service-to-pipe map in use
 * @sz_tgt_svc_map: size of @tgt_svc_map
 */
struct HIF_CE_state {
	struct hif_softc ol_sc;
	bool started;
	struct ce_tasklet_entry tasklets[CE_COUNT_MAX];
	struct hif_exec_context *hif_ext_group[HIF_MAX_GROUP];
	uint32_t hif_num_extgroup;
	qdf_spinlock_t keep_awake_lock;
	qdf_spinlock_t irq_reg_lock;
	unsigned int keep_awake_count;
	bool verified_awake;
	bool fake_sleep;
	qdf_timer_t sleep_timer;
	bool sleep_timer_init;
	qdf_time_t sleep_ticks;
	uint32_t ce_register_irq_done;

	struct CE_pipe_config *target_ce_config;
	struct CE_attr *host_ce_config;
	uint32_t target_ce_config_sz;
	/* Per-pipe state. */
	struct HIF_CE_pipe_info pipe_info[CE_COUNT_MAX];
	/* to be activated after BMI_DONE */
	struct hif_msg_callbacks msg_callbacks_pending;
	/* current msg callbacks in use */
	struct hif_msg_callbacks msg_callbacks_current;

	/* Target address used to signal a pending firmware event */
	uint32_t fw_indicator_address;

	/* Copy Engine used for Diagnostic Accesses */
	struct CE_handle *ce_diag;
	struct ce_stats stats;
	struct ce_ops *ce_services;
	struct service_to_pipe *tgt_svc_map;
	int sz_tgt_svc_map;
};
239  
240  /*
241   * HIA Map Definition
242   */
243  struct host_interest_area_t {
244  	uint32_t hi_interconnect_state;
245  	uint32_t hi_early_alloc;
246  	uint32_t hi_option_flag2;
247  	uint32_t hi_board_data;
248  	uint32_t hi_board_data_initialized;
249  	uint32_t hi_failure_state;
250  	uint32_t hi_rddi_msi_num;
251  	uint32_t hi_pcie_perst_couple_en;
252  	uint32_t hi_sw_protocol_version;
253  };
254  
/**
 * struct shadow_reg_cfg - shadow register configuration entry (v1)
 * @ce_id: copy engine the shadow register is associated with
 * @reg_offset: offset of the shadowed register
 */
struct shadow_reg_cfg {
	uint16_t ce_id;
	uint16_t reg_offset;
};
259  
/**
 * struct shadow_reg_v2_cfg - shadow register configuration entry (v2)
 * @reg_value: register value to shadow
 */
struct shadow_reg_v2_cfg {
	uint32_t reg_value;
};
263  
#ifdef CONFIG_BYPASS_QMI

/* Size of the memory region shared with firmware when QMI is bypassed */
#define FW_SHARED_MEM (2 * 1024 * 1024)

#ifdef QCN7605_SUPPORT
/**
 * struct msi_cfg - per-CE MSI vector assignment
 * @ce_id: copy engine ID
 * @msi_vector: MSI vector assigned to @ce_id
 */
struct msi_cfg {
	u16 ce_id;
	u16 msi_vector;
} qdf_packed;

/**
 * struct ce_info - CE setup info (packed; shared with firmware, presumably)
 * @rri_over_ddr_low_paddr: low 32 bits of the RRI-over-DDR physical address
 * @rri_over_ddr_high_paddr: high 32 bits of the RRI-over-DDR physical address
 * @cfg: MSI configuration for each copy engine
 */
struct ce_info {
	u32 rri_over_ddr_low_paddr;
	u32 rri_over_ddr_high_paddr;
	struct msi_cfg cfg[CE_COUNT_MAX];
} qdf_packed;
#endif
#endif
281  
282  /**
283   * struct ce_index
284   *
285   * @id: CE id
286   * @u: union of legacy_info and srng_info
287   * @sw_index: sw index
288   * @write_index: write index
289   * @hp: ring head pointer
290   * @tp: ring tail pointer
291   * @status_hp: status ring head pointer
292   * @status_tp: status ring tail pointer
293   */
294  struct ce_index {
295  	uint8_t id;
296  	union {
297  		struct {
298  			uint16_t sw_index;
299  			uint16_t write_index;
300  		} legacy_info;
301  		struct {
302  			uint16_t hp;
303  			uint16_t tp;
304  			uint16_t status_hp;
305  			uint16_t status_tp;
306  		} srng_info;
307  	} u;
308  } qdf_packed;
309  
310  /**
311   * struct hang_event_info
312   *
313   * @tlv_header: tlv header
314   * @active_tasklet_count: active tasklet count
315   * @active_grp_tasklet_cnt: active grp tasklet count
316   * @ce_count:
317   * @ce_info: CE info
318   */
319  struct hang_event_info {
320  	uint16_t tlv_header;
321  	uint8_t active_tasklet_count;
322  	uint8_t active_grp_tasklet_cnt;
323  	uint8_t ce_count;
324  	struct ce_index ce_info[CE_COUNT_MAX];
325  } qdf_packed;
326  
/**
 * hif_ce_stop() - stop copy-engine processing for this device
 * @scn: HIF context
 */
void hif_ce_stop(struct hif_softc *scn);

/**
 * hif_dump_ce_registers() - dump CE register state for debugging
 * @scn: HIF context
 *
 * Return: 0 on success, negative on failure - TODO confirm convention
 */
int hif_dump_ce_registers(struct hif_softc *scn);

/**
 * hif_ce_dump_target_memory() - dump target CE state into a ramdump buffer
 * @scn: HIF context
 * @ramdump_base: destination buffer
 * @address: target address to start from
 * @size: number of bytes to dump
 */
void
hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base,
			  uint32_t address, uint32_t size);
332  
#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get CE resources for IPA offload
 * @scn: HIF context
 * @ce_sr: shared memory describing the CE source ring (out, presumably)
 * @ce_sr_ring_size: CE source ring size (out, presumably)
 * @ce_reg_paddr: physical address of the CE registers (out, presumably)
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);
338  #else
339  static inline
hif_ce_ipa_get_ce_resource(struct hif_softc * scn,qdf_shared_mem_t ** ce_sr,uint32_t * ce_sr_ring_size,qdf_dma_addr_t * ce_reg_paddr)340  void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
341  			     qdf_shared_mem_t **ce_sr,
342  			     uint32_t *ce_sr_ring_size,
343  			     qdf_dma_addr_t *ce_reg_paddr)
344  {
345  }
346  
347  #endif
/**
 * hif_wlan_enable() - enable the WLAN target
 * @scn: HIF context
 *
 * Return: 0 on success, non-zero on failure - TODO confirm convention
 */
int hif_wlan_enable(struct hif_softc *scn);

/**
 * ce_enable_polling() - enable polled operation on a CE
 * @cestate: opaque CE state
 */
void ce_enable_polling(void *cestate);

/**
 * ce_disable_polling() - disable polled operation on a CE
 * @cestate: opaque CE state
 */
void ce_disable_polling(void *cestate);

/**
 * hif_wlan_disable() - disable the WLAN target
 * @scn: HIF context
 */
void hif_wlan_disable(struct hif_softc *scn);

/**
 * hif_get_target_ce_config() - fetch the target's CE configuration tables
 * @scn: HIF context
 * @target_ce_config_ret: out: CE pipe configuration
 * @target_ce_config_sz_ret: out: size of the CE pipe configuration
 * @target_service_to_ce_map_ret: out: service-to-pipe map
 * @target_service_to_ce_map_sz_ret: out: size of the service-to-pipe map
 * @target_shadow_reg_cfg_v1_ret: out: v1 shadow register configuration
 * @shadow_cfg_v1_sz_ret: out: size of the v1 shadow register configuration
 */
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_v1_ret,
		uint32_t *shadow_cfg_v1_sz_ret);
359  
#ifdef WLAN_FEATURE_EPPING
/**
 * hif_ce_prepare_epping_config() - prepare the CE config for epping mode
 * @scn: HIF context
 * @hif_state: HIF CE state to configure
 */
void hif_ce_prepare_epping_config(struct hif_softc *scn,
				  struct HIF_CE_state *hif_state);

/**
 * hif_select_epping_service_to_pipe_map() - select the epping
 *	service-to-pipe map
 * @tgt_svc_map_to_use: out: map to use
 * @sz_tgt_svc_map_to_use: out: size of the selected map
 */
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use);

367  #else
368  static inline
hif_ce_prepare_epping_config(struct hif_softc * scn,struct HIF_CE_state * hif_state)369  void hif_ce_prepare_epping_config(struct hif_softc *scn,
370  				  struct HIF_CE_state *hif_state)
371  { }
372  static inline
hif_select_epping_service_to_pipe_map(struct service_to_pipe ** tgt_svc_map_to_use,uint32_t * sz_tgt_svc_map_to_use)373  void hif_select_epping_service_to_pipe_map(struct service_to_pipe
374  					   **tgt_svc_map_to_use,
375  					   uint32_t *sz_tgt_svc_map_to_use)
376  { }
377  #endif
378  
/**
 * ce_service_register_module() - register a CE service implementation
 * @target_type: which service flavor is being registered (legacy or SRNG)
 * @ce_attach: constructor returning the ce_ops for @target_type
 */
void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void));
381  
#ifdef CONFIG_SHADOW_V3
/**
 * hif_get_shadow_reg_config_v3() - retrieve the v3 shadow register config
 * @scn: HIF context
 * @shadow_config: out: shadow register configuration
 * @num_shadow_registers_configured: out: number of configured entries
 */
void hif_get_shadow_reg_config_v3(struct hif_softc *scn,
				  struct pld_shadow_reg_v3_cfg **shadow_config,
				  int *num_shadow_registers_configured);

/*
 * NOTE(review): "preare" looks like a typo for "prepare", but renaming
 * would break existing callers; fix only together with all call sites.
 */
void hif_preare_shadow_register_cfg_v3(struct hif_softc *scn);
#endif
388  #endif /* __CE_H__ */
389