xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.h (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef __CE_H__
21 #define __CE_H__
22 
23 #include "qdf_atomic.h"
24 #include "qdf_lock.h"
25 #include "hif_main.h"
26 #include "qdf_util.h"
27 #include "hif_exec.h"
28 
#ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
/*
 * Refresh the cached SW source-ring read index @x for a data copy engine
 * from the read index value the target mirrors into DDR.  When
 * DATA_CE_SW_INDEX_NO_INLINE_UPDATE is defined, this inline refresh is
 * compiled out and the macro expands to nothing.
 */
#define DATA_CE_UPDATE_SWINDEX(x, scn, addr)				\
		(x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr))
#else
#define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
#endif

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine, this count should be big enough. Once we hit
 * this threshold we'll not check for any Tx/Rx completion in same
 * interrupt handling. Note that this threshold is only used for
 * Rx interrupt processing, this can be used for Tx as well if we
 * suspect any infinite loop in checking for pending Tx completion.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

/* HTT message endpoint ids: T2H = target-to-host, H2T = host-to-target */
#define CE_HTT_T2H_MSG 1
#define CE_HTT_H2T_MSG 4

/*
 * NOTE(review): presumably the per-CE register block offset/size in the
 * target's address map — confirm against the target register layout.
 */
#define CE_OFFSET		0x00000400
#define CE_USEFUL_SIZE		0x00000058
/* Bitmap selecting all copy engines */
#define CE_ALL_BITMAP  0xFFFF

/* Depth of the per-CE tasklet exec/sched time history (see struct ce_stats) */
#define HIF_REQUESTED_EVENTS 20
/**
 * enum ce_id_type - Copy engine (CE) identifier
 *
 * One enumerator per copy engine.  CE_ID_12 through CE_ID_15 exist only
 * on QCA_WIFI_QCN9224 builds.  @CE_ID_MAX is the number of ids and is
 * not itself a valid copy engine id.
 */
enum ce_id_type {
	CE_ID_0,
	CE_ID_1,
	CE_ID_2,
	CE_ID_3,
	CE_ID_4,
	CE_ID_5,
	CE_ID_6,
	CE_ID_7,
	CE_ID_8,
	CE_ID_9,
	CE_ID_10,
	CE_ID_11,
#ifdef QCA_WIFI_QCN9224
	CE_ID_12,
	CE_ID_13,
	CE_ID_14,
	CE_ID_15,
#endif
	CE_ID_MAX
};
80 
/**
 * enum ce_buckets - CE tasklet latency histogram buckets
 *
 * Compiled in only when CE_TASKLET_DEBUG_ENABLE is defined.
 * @CE_BUCKET_500_US: tasklet bucket to store 0-0.5ms
 * @CE_BUCKET_1_MS: tasklet bucket to store 0.5-1ms
 * @CE_BUCKET_2_MS: tasklet bucket to store 1-2ms
 * @CE_BUCKET_5_MS: tasklet bucket to store 2-5ms
 * @CE_BUCKET_10_MS: tasklet bucket to store 5-10ms
 * @CE_BUCKET_BEYOND: tasklet bucket to store > 10ms
 * @CE_BUCKET_MAX: enum max value (number of buckets)
 */
#ifdef CE_TASKLET_DEBUG_ENABLE
enum ce_buckets {
	CE_BUCKET_500_US,
	CE_BUCKET_1_MS,
	CE_BUCKET_2_MS,
	CE_BUCKET_5_MS,
	CE_BUCKET_10_MS,
	CE_BUCKET_BEYOND,
	CE_BUCKET_MAX,
};
#endif
104 
/*
 * Flavour of CE service implementation a target uses:
 * legacy CE rings vs SRNG (shared ring) based CE.
 */
enum ce_target_type {
	CE_SVC_LEGACY,
	CE_SVC_SRNG,
	CE_MAX_TARGET_TYPE
};
110 
/* HIF packet error codes */
enum ol_ath_hif_pkt_ecodes {
	HIF_PIPE_NO_RESOURCE = 0	/* no resource available on the pipe */
};

struct HIF_CE_state;	/* forward declaration; full definition below */
116 
/* Per-pipe state. */
struct HIF_CE_pipe_info {
	/* Handle of underlying Copy Engine */
	struct CE_handle *ce_hdl;

	/* Our pipe number; facilitates use of pipe_info ptrs. */
	uint8_t pipe_num;

	/* Convenience back pointer to HIF_CE_state. */
	struct HIF_CE_state *HIF_CE_state;

	/* Instantaneous number of receive buffers that should be posted */
	atomic_t recv_bufs_needed;
	/* Size of each receive buffer posted on this pipe */
	qdf_size_t buf_sz;
	/* Protects recv_bufs_needed */
	qdf_spinlock_t recv_bufs_needed_lock;

	/* Protects the send-completion free queue */
	qdf_spinlock_t completion_freeq_lock;
	/* Limit the number of outstanding send requests. */
	int num_sends_allowed;

	/* adding three counts for debugging ring buffer errors */
	uint32_t nbuf_alloc_err_count;		/* nbuf allocation failures */
	uint32_t nbuf_dma_err_count;		/* nbuf DMA map failures */
	uint32_t nbuf_ce_enqueue_err_count;	/* CE enqueue failures */
	/* Message callbacks registered for this pipe */
	struct hif_msg_callbacks pipe_callbacks;
};
143 
/**
 * struct ce_tasklet_entry - per-copy-engine tasklet bookkeeping
 *
 * @intr_tq: tasklet that services this copy engine's interrupts
 * @ce_id: id of the copy engine this tasklet serves
 * @inited: true once the tasklet has been initialized
 * @hi_tasklet_ce: schedule as a high-priority tasklet —
 *                 NOTE(review): inferred from name, confirm at init site
 * @hif_ce_state: opaque back pointer to the owning HIF_CE_state
 */
struct ce_tasklet_entry {
	struct tasklet_struct intr_tq;
	enum ce_id_type ce_id;
	bool inited;
	bool hi_tasklet_ce;
	void *hif_ce_state;
};
160 
struct hif_exec_context;

/**
 * hif_dummy_grp_done() - no-op group completion check
 * @grp_entry: execution group context (unused)
 * @work_done: number of items processed in this pass (unused)
 *
 * Placeholder completion predicate for execution groups that need no
 * completion tracking of their own.
 *
 * Return: true unconditionally
 */
static inline bool hif_dummy_grp_done(struct hif_exec_context *grp_entry,
				      int work_done)
{
	(void)grp_entry;
	(void)work_done;

	return true;
}
166 
/* Execution scheduling ops: tasklet-based and NAPI-based variants */
extern struct hif_execution_ops tasklet_sched_ops;
extern struct hif_execution_ops napi_sched_ops;
169 
/**
 * struct ce_stats - per-CE execution statistics
 *
 * @ce_per_cpu: Stats of the CEs running per CPU
 * @record_index: Current index to store in time record
 * @tasklet_sched_entry_ts: Timestamp when tasklet is scheduled
 * @tasklet_exec_entry_ts: Timestamp when tasklet starts execution
 * @tasklet_exec_time_record: Last N number of tasklets execution time
 * @tasklet_sched_time_record: Last N number of tasklets scheduled time
 * @ce_tasklet_exec_bucket: Tasklet execution time buckets
 * @ce_tasklet_sched_bucket: Tasklet time in queue buckets
 * @ce_tasklet_exec_last_update: Latest timestamp when exec bucket is updated
 * @ce_tasklet_sched_last_update: Latest timestamp when sched bucket is updated
 * @ce_ring_full_count: per-CE count of ring-full events
 * @ce_manual_tasklet_schedule_count: per-CE count of manually scheduled
 *                                    tasklets (on ring full)
 * @ce_last_manual_tasklet_schedule_ts: timestamp of the last manual schedule
 */
struct ce_stats {
	uint32_t ce_per_cpu[CE_COUNT_MAX][QDF_MAX_AVAILABLE_CPU];
#ifdef CE_TASKLET_DEBUG_ENABLE
	uint32_t record_index[CE_COUNT_MAX];
	uint64_t tasklet_sched_entry_ts[CE_COUNT_MAX];
	uint64_t tasklet_exec_entry_ts[CE_COUNT_MAX];
	uint64_t tasklet_exec_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS];
	uint64_t tasklet_sched_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS];
	uint64_t ce_tasklet_exec_bucket[CE_COUNT_MAX][CE_BUCKET_MAX];
	uint64_t ce_tasklet_sched_bucket[CE_COUNT_MAX][CE_BUCKET_MAX];
	uint64_t ce_tasklet_exec_last_update[CE_COUNT_MAX][CE_BUCKET_MAX];
	uint64_t ce_tasklet_sched_last_update[CE_COUNT_MAX][CE_BUCKET_MAX];
#ifdef CE_TASKLET_SCHEDULE_ON_FULL
	uint32_t ce_ring_full_count[CE_COUNT_MAX];
	uint32_t ce_manual_tasklet_schedule_count[CE_COUNT_MAX];
	uint64_t ce_last_manual_tasklet_schedule_ts[CE_COUNT_MAX];
#endif
#endif
};
203 
/*
 * Top-level HIF state for copy-engine based targets.
 *
 * NOTE(review): hif_softc is the first member, which looks intended to
 * allow casting between struct hif_softc * and struct HIF_CE_state * —
 * confirm against hif_main.c before relying on it.
 */
struct HIF_CE_state {
	struct hif_softc ol_sc;		/* embedded base softc; keep first */
	bool started;			/* CE pipes have been started */
	struct ce_tasklet_entry tasklets[CE_COUNT_MAX];	/* one per CE */
	struct hif_exec_context *hif_ext_group[HIF_MAX_GROUP];
	uint32_t hif_num_extgroup;	/* valid entries in hif_ext_group */
	qdf_spinlock_t keep_awake_lock;	/* guards keep_awake_count */
	qdf_spinlock_t irq_reg_lock;	/* guards ce_register_irq_done */
	unsigned int keep_awake_count;	/* nested keep-awake requests */
	bool verified_awake;		/* target wakeup has been confirmed */
	bool fake_sleep;		/* sleep is simulated, not yet real */
	qdf_timer_t sleep_timer;
	bool sleep_timer_init;		/* sleep_timer was initialized */
	qdf_time_t sleep_ticks;
	/* nonzero once CE IRQ registration is done —
	 * NOTE(review): flag vs per-CE bitmap, confirm at the set site
	 */
	uint32_t ce_register_irq_done;

	struct CE_pipe_config *target_ce_config;	/* target CE table */
	struct CE_attr *host_ce_config;			/* host CE attrs */
	uint32_t target_ce_config_sz;	/* size of target_ce_config */
	/* Per-pipe state. */
	struct HIF_CE_pipe_info pipe_info[CE_COUNT_MAX];
	/* to be activated after BMI_DONE */
	struct hif_msg_callbacks msg_callbacks_pending;
	/* current msg callbacks in use */
	struct hif_msg_callbacks msg_callbacks_current;

	/* Target address used to signal a pending firmware event */
	uint32_t fw_indicator_address;

	/* Copy Engine used for Diagnostic Accesses */
	struct CE_handle *ce_diag;
	struct ce_stats stats;		/* CE execution statistics */
	struct ce_ops *ce_services;	/* active CE service ops table */
	struct service_to_pipe *tgt_svc_map;	/* service-to-pipe map in use */
	int sz_tgt_svc_map;		/* size of tgt_svc_map */
};
240 
/*
 * HIA (Host Interest Area) Map Definition
 *
 * NOTE(review): this layout appears to be shared with target firmware;
 * confirm against the target-side definition before reordering fields.
 */
struct host_interest_area_t {
	uint32_t hi_interconnect_state;
	uint32_t hi_early_alloc;
	uint32_t hi_option_flag2;
	uint32_t hi_board_data;
	uint32_t hi_board_data_initialized;
	uint32_t hi_failure_state;
	uint32_t hi_rddi_msi_num;
	uint32_t hi_pcie_perst_couple_en;
	uint32_t hi_sw_protocol_version;
};
255 
/* Maps a copy engine id to its shadow register offset */
struct shadow_reg_cfg {
	uint16_t ce_id;
	uint16_t reg_offset;
};

/* Shadow register v2 configuration entry: a single register value */
struct shadow_reg_v2_cfg {
	uint32_t reg_value;
};
264 
#ifdef CONFIG_BYPASS_QMI

/* Size of the firmware shared memory region used when QMI is bypassed */
#define FW_SHARED_MEM (2 * 1024 * 1024)

#ifdef QCN7605_SUPPORT
/* Per-CE MSI vector assignment (packed —
 * NOTE(review): looks like a firmware-shared layout, confirm)
 */
struct msi_cfg {
	u16 ce_id;
	u16 msi_vector;
} qdf_packed;

/* CE setup info: RRI-over-DDR base address plus per-CE MSI map (packed) */
struct ce_info {
	u32 rri_over_ddr_low_paddr;
	u32 rri_over_ddr_high_paddr;
	struct msi_cfg cfg[CE_COUNT_MAX];
} qdf_packed;
#endif
#endif
282 
/**
 * struct ce_index - snapshot of one CE's ring indices (packed)
 *
 * Either @u.legacy_info (legacy CE) or @u.srng_info (SRNG CE) is valid,
 * depending on the target's CE service type.
 *
 * @id: CE id
 * @u.legacy_info.sw_index: sw index
 * @u.legacy_info.write_index: write index
 * @u.srng_info.hp: ring head pointer
 * @u.srng_info.tp: ring tail pointer
 * @u.srng_info.status_hp: status ring head pointer
 * @u.srng_info.status_tp: status ring tail pointer
 */
struct ce_index {
	uint8_t id;
	union {
		struct {
			uint16_t sw_index;
			uint16_t write_index;
		} legacy_info;
		struct {
			uint16_t hp;
			uint16_t tp;
			uint16_t status_hp;
			uint16_t status_tp;
		} srng_info;
	} u;
} qdf_packed;
309 
/**
 * struct hang_event_info - CE state captured into a hang event TLV (packed)
 *
 * @tlv_header: tlv header
 * @active_tasklet_count: active tasklet count
 * @active_grp_tasklet_cnt: active grp tasklet count
 * @ce_count: number of valid entries in @ce_info
 * @ce_info: per-CE ring index snapshots
 */
struct hang_event_info {
	uint16_t tlv_header;
	uint8_t active_tasklet_count;
	uint8_t active_grp_tasklet_cnt;
	uint8_t ce_count;
	struct ce_index ce_info[CE_COUNT_MAX];
} qdf_packed;
325 
/* Stop CE processing and tear down CE state for @scn */
void hif_ce_stop(struct hif_softc *scn);
/* Dump CE register state for debugging; returns a status code */
int hif_dump_ce_registers(struct hif_softc *scn);
/* Copy @size bytes of target memory at @address into @ramdump_base */
void
hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base,
			  uint32_t address, uint32_t size);
331 
#ifdef IPA_OFFLOAD
/*
 * Export CE source-ring resources needed for IPA offload:
 * @ce_sr (ring shared memory), @ce_sr_ring_size and @ce_reg_paddr
 * are out parameters filled by the implementation.
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);
#else
/* IPA offload disabled: empty stub keeps call sites unconditional */
static inline
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr)
{
}

#endif
/* Enable the WLAN target for this hif context; returns a status code */
int hif_wlan_enable(struct hif_softc *scn);
/* Enable polling on the given CE (@cestate: opaque CE state pointer) */
void ce_enable_polling(void *cestate);
/* Disable polling on the given CE */
void ce_disable_polling(void *cestate);
/* Disable the WLAN target for this hif context */
void hif_wlan_disable(struct hif_softc *scn);
/**
 * hif_get_target_ce_config() - fetch the target-specific CE tables
 * @scn: hif context
 * @target_ce_config_ret: out: CE pipe configuration table
 * @target_ce_config_sz_ret: out: size of the CE pipe configuration table
 * @target_service_to_ce_map_ret: out: service-to-CE map table
 * @target_service_to_ce_map_sz_ret: out: size of the service-to-CE map
 * @target_shadow_reg_cfg_v1_ret: out: shadow register v1 config table
 * @shadow_cfg_v1_sz_ret: out: size of the shadow register v1 config table
 */
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_v1_ret,
		uint32_t *shadow_cfg_v1_sz_ret);
358 
#ifdef WLAN_FEATURE_EPPING
/* Install the epping (endpoint-ping test mode) CE config into @hif_state */
void hif_ce_prepare_epping_config(struct hif_softc *scn,
				  struct HIF_CE_state *hif_state);
/* Select the epping service-to-pipe map; table and size returned via outs */
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use);

#else
/* Stubs so callers need no #ifdef when epping support is compiled out */
static inline
void hif_ce_prepare_epping_config(struct hif_softc *scn,
				  struct HIF_CE_state *hif_state)
{ }
static inline
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{ }
#endif
377 
/**
 * ce_service_register_module() - register a CE service implementation
 * @target_type: which service flavour is registered (legacy or SRNG)
 * @ce_attach: constructor returning the ce_ops table for that flavour
 */
void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void));

#endif /* __CE_H__ */
382