xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.h (revision ffc4a9de53c8817b86d03f8fb3c9a829bfec09d5)
/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __CE_H__
#define __CE_H__

#include "qdf_atomic.h"
#include "qdf_lock.h"
#include "hif_main.h"
#include "qdf_util.h"
#include "hif_exec.h"

#ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
#define DATA_CE_UPDATE_SWINDEX(x, scn, addr)				\
		(x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr))
#else
#define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
#endif
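
/*
 * Illustrative usage sketch (an assumption for documentation only, not code
 * taken from this driver): a data-path send-completion handler could refresh
 * its cached source-ring software index from the copy the target maintains
 * in DDR:
 *
 *	unsigned int sw_index = src_ring->sw_index;
 *
 *	DATA_CE_UPDATE_SWINDEX(sw_index, scn, rri_paddr);
 *	src_ring->sw_index = sw_index;
 *
 * Here src_ring and rri_paddr are hypothetical names for the caller's ring
 * state and the DDR address holding the read index. When
 * DATA_CE_SW_INDEX_NO_INLINE_UPDATE is defined, the macro expands to nothing
 * and the cached index is left as-is.
 */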

/*
 * Number of times to check for any pending Tx/Rx completion on a copy
 * engine; this count should be big enough. Once we hit this threshold we
 * stop checking for further Tx/Rx completions within the same interrupt
 * handling pass. Note that this threshold is only used for Rx interrupt
 * processing; it can be applied to Tx as well if we suspect an infinite
 * loop while checking for pending Tx completions.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20
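
/*
 * Illustrative sketch (an assumption, not code from this driver): the
 * threshold is meant to bound how many times an interrupt handler re-checks
 * for newly arrived completions before returning, e.g.:
 *
 *	int checks = 0;
 *
 *	while (ce_has_pending_completions(ce_state) &&
 *	       checks++ < CE_TXRX_COMP_CHECK_THRESHOLD)
 *		ce_process_completions(ce_state);
 *
 * where ce_has_pending_completions() and ce_process_completions() are
 * hypothetical helpers standing in for the real per-CE service routines.
 */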

#define CE_HTT_T2H_MSG 1
#define CE_HTT_H2T_MSG 4

#define CE_OFFSET		0x00000400
#define CE_USEFUL_SIZE		0x00000058
#define CE_ALL_BITMAP  0xFFFF

#define HIF_REQUESTED_EVENTS 20
/**
 * enum ce_id_type
 *
 * @ce_id_type: Copy engine ID
 */
enum ce_id_type {
	CE_ID_0,
	CE_ID_1,
	CE_ID_2,
	CE_ID_3,
	CE_ID_4,
	CE_ID_5,
	CE_ID_6,
	CE_ID_7,
	CE_ID_8,
	CE_ID_9,
	CE_ID_10,
	CE_ID_11,
	CE_ID_MAX
};

/**
 * enum ce_buckets
 *
 * @ce_buckets: CE tasklet time buckets
 * @CE_BUCKET_500_US: tasklet bucket to store 0-0.5ms
 * @CE_BUCKET_1_MS: tasklet bucket to store 0.5-1ms
 * @CE_BUCKET_2_MS: tasklet bucket to store 1-2ms
 * @CE_BUCKET_5_MS: tasklet bucket to store 2-5ms
 * @CE_BUCKET_10_MS: tasklet bucket to store 5-10ms
 * @CE_BUCKET_BEYOND: tasklet bucket to store > 10ms
 * @CE_BUCKET_MAX: enum max value
 */
#ifdef CE_TASKLET_DEBUG_ENABLE
enum ce_buckets {
	CE_BUCKET_500_US,
	CE_BUCKET_1_MS,
	CE_BUCKET_2_MS,
	CE_BUCKET_5_MS,
	CE_BUCKET_10_MS,
	CE_BUCKET_BEYOND,
	CE_BUCKET_MAX,
};
#endif
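
#ifdef CE_TASKLET_DEBUG_ENABLE
/*
 * Illustrative helper (an assumption, not part of the original driver): map
 * a tasklet latency in microseconds to one of the buckets above, using the
 * boundaries documented for enum ce_buckets (0.5 ms, 1 ms, 2 ms, 5 ms,
 * 10 ms, beyond).
 */
static inline enum ce_buckets ce_latency_to_bucket_example(uint64_t usecs)
{
	if (usecs < 500)
		return CE_BUCKET_500_US;
	if (usecs < 1000)
		return CE_BUCKET_1_MS;
	if (usecs < 2000)
		return CE_BUCKET_2_MS;
	if (usecs < 5000)
		return CE_BUCKET_5_MS;
	if (usecs < 10000)
		return CE_BUCKET_10_MS;
	return CE_BUCKET_BEYOND;
}
#endif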

enum ce_target_type {
	CE_SVC_LEGACY,
	CE_SVC_SRNG,
	CE_MAX_TARGET_TYPE
};

enum ol_ath_hif_pkt_ecodes {
	HIF_PIPE_NO_RESOURCE = 0
};

struct HIF_CE_state;

/* Per-pipe state. */
struct HIF_CE_pipe_info {
	/* Handle of underlying Copy Engine */
	struct CE_handle *ce_hdl;

	/* Our pipe number; facilitates use of pipe_info ptrs. */
	uint8_t pipe_num;

	/* Convenience back pointer to HIF_CE_state. */
	struct HIF_CE_state *HIF_CE_state;

	/* Instantaneous number of receive buffers that should be posted */
	atomic_t recv_bufs_needed;
	qdf_size_t buf_sz;
	qdf_spinlock_t recv_bufs_needed_lock;

	qdf_spinlock_t completion_freeq_lock;
	/* Limit the number of outstanding send requests. */
	int num_sends_allowed;

	/* Three counters for debugging ring buffer errors */
	uint32_t nbuf_alloc_err_count;
	uint32_t nbuf_dma_err_count;
	uint32_t nbuf_ce_enqueue_err_count;
	struct hif_msg_callbacks pipe_callbacks;
};

/**
 * struct ce_tasklet_entry
 *
 * @intr_tq: tasklet that services this copy engine's interrupts
 * @ce_id: copy engine ID
 * @inited: whether the tasklet has been initialized
 * @hi_tasklet_ce: whether this CE's tasklet is scheduled at high priority
 * @hif_ce_state: opaque pointer back to the owning HIF_CE_state
 */
struct ce_tasklet_entry {
	struct tasklet_struct intr_tq;
	enum ce_id_type ce_id;
	bool inited;
	bool hi_tasklet_ce;
	void *hif_ce_state;
};

static inline bool hif_dummy_grp_done(struct hif_exec_context *grp_entry,
				      int work_done)
{
	return true;
}

extern struct hif_execution_ops tasklet_sched_ops;
extern struct hif_execution_ops napi_sched_ops;

/**
 * struct ce_stats
 *
 * @ce_per_cpu: Per-CPU count of how often each CE was serviced
 * @record_index: Current index into the time-record arrays
 * @tasklet_sched_entry_ts: Timestamp when the tasklet was scheduled
 * @tasklet_exec_entry_ts: Timestamp when the tasklet started execution
 * @tasklet_exec_time_record: Last N tasklet execution times
 * @tasklet_sched_time_record: Last N tasklet scheduling times
 * @ce_tasklet_exec_bucket: Tasklet execution time buckets
 * @ce_tasklet_sched_bucket: Tasklet time-in-queue buckets
 * @ce_tasklet_exec_last_update: Latest timestamp when an execution bucket was updated
 * @ce_tasklet_sched_last_update: Latest timestamp when a scheduling bucket was updated
 */
struct ce_stats {
	uint32_t ce_per_cpu[CE_COUNT_MAX][QDF_MAX_AVAILABLE_CPU];
#ifdef CE_TASKLET_DEBUG_ENABLE
	uint32_t record_index[CE_COUNT_MAX];
	uint64_t tasklet_sched_entry_ts[CE_COUNT_MAX];
	uint64_t tasklet_exec_entry_ts[CE_COUNT_MAX];
	uint64_t tasklet_exec_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS];
	uint64_t tasklet_sched_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS];
	uint64_t ce_tasklet_exec_bucket[CE_COUNT_MAX][CE_BUCKET_MAX];
	uint64_t ce_tasklet_sched_bucket[CE_COUNT_MAX][CE_BUCKET_MAX];
	uint64_t ce_tasklet_exec_last_update[CE_COUNT_MAX][CE_BUCKET_MAX];
	uint64_t ce_tasklet_sched_last_update[CE_COUNT_MAX][CE_BUCKET_MAX];
#endif
};
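
#ifdef CE_TASKLET_DEBUG_ENABLE
/*
 * Illustrative sketch (an assumption, not code from this driver): record a
 * per-CE tasklet execution time in the circular tasklet_exec_time_record
 * buffer, wrapping record_index at HIF_REQUESTED_EVENTS entries.
 */
static inline void
ce_stats_record_exec_time_example(struct ce_stats *stats, uint8_t ce_id,
				  uint64_t exec_time)
{
	uint32_t idx = stats->record_index[ce_id];

	stats->tasklet_exec_time_record[ce_id][idx] = exec_time;
	stats->record_index[ce_id] = (idx + 1) % HIF_REQUESTED_EVENTS;
}
#endif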

struct HIF_CE_state {
	struct hif_softc ol_sc;
	bool started;
	struct ce_tasklet_entry tasklets[CE_COUNT_MAX];
	struct hif_exec_context *hif_ext_group[HIF_MAX_GROUP];
	uint32_t hif_num_extgroup;
	qdf_spinlock_t keep_awake_lock;
	qdf_spinlock_t irq_reg_lock;
	unsigned int keep_awake_count;
	bool verified_awake;
	bool fake_sleep;
	qdf_timer_t sleep_timer;
	bool sleep_timer_init;
	qdf_time_t sleep_ticks;
	uint32_t ce_register_irq_done;

	struct CE_pipe_config *target_ce_config;
	struct CE_attr *host_ce_config;
	uint32_t target_ce_config_sz;
	/* Per-pipe state. */
	struct HIF_CE_pipe_info pipe_info[CE_COUNT_MAX];
	/* to be activated after BMI_DONE */
	struct hif_msg_callbacks msg_callbacks_pending;
	/* current msg callbacks in use */
	struct hif_msg_callbacks msg_callbacks_current;

	/* Target address used to signal a pending firmware event */
	uint32_t fw_indicator_address;

	/* Copy Engine used for Diagnostic Accesses */
	struct CE_handle *ce_diag;
	struct ce_stats stats;
	struct ce_ops *ce_services;
	struct service_to_pipe *tgt_svc_map;
	int sz_tgt_svc_map;
};

/*
 * HIA Map Definition
 */
struct host_interest_area_t {
	uint32_t hi_interconnect_state;
	uint32_t hi_early_alloc;
	uint32_t hi_option_flag2;
	uint32_t hi_board_data;
	uint32_t hi_board_data_initialized;
	uint32_t hi_failure_state;
	uint32_t hi_rddi_msi_num;
	uint32_t hi_pcie_perst_couple_en;
	uint32_t hi_sw_protocol_version;
};

struct shadow_reg_cfg {
	uint16_t ce_id;
	uint16_t reg_offset;
};

struct shadow_reg_v2_cfg {
	uint32_t reg_value;
};

#ifdef CONFIG_BYPASS_QMI

#define FW_SHARED_MEM (2 * 1024 * 1024)

#ifdef QCN7605_SUPPORT
struct msi_cfg {
	u16 ce_id;
	u16 msi_vector;
} qdf_packed;

struct ce_info {
	u32 rri_over_ddr_low_paddr;
	u32 rri_over_ddr_high_paddr;
	struct msi_cfg cfg[CE_COUNT_MAX];
} qdf_packed;
#endif
#endif

/**
 * struct ce_index
 *
 * @id: CE id
 * @sw_index: sw index
 * @write_index: write index
 * @hp: ring head pointer
 * @tp: ring tail pointer
 * @status_hp: status ring head pointer
 * @status_tp: status ring tail pointer
 */
struct ce_index {
	uint8_t id;
	union {
		struct {
			uint16_t sw_index;
			uint16_t write_index;
		} legacy_info;
		struct {
			uint16_t hp;
			uint16_t tp;
			uint16_t status_hp;
			uint16_t status_tp;
		} srng_info;
	} u;
} qdf_packed;

/**
 * struct hang_event_info
 *
 * @tlv_header: TLV header
 * @active_tasklet_count: active tasklet count
 * @active_grp_tasklet_cnt: active group tasklet count
 * @ce_count: number of valid entries in @ce_info
 * @ce_info: per-CE ring index snapshot
 */
struct hang_event_info {
	uint16_t tlv_header;
	uint8_t active_tasklet_count;
	uint8_t active_grp_tasklet_cnt;
	uint8_t ce_count;
	struct ce_index ce_info[CE_COUNT_MAX];
} qdf_packed;

void hif_ce_stop(struct hif_softc *scn);
int hif_dump_ce_registers(struct hif_softc *scn);
void
hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base,
			  uint32_t address, uint32_t size);

#ifdef IPA_OFFLOAD
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_shared_mem_t **ce_sr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr);
#else
static inline
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_shared_mem_t **ce_sr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr)
{
}

#endif
int hif_wlan_enable(struct hif_softc *scn);
void ce_enable_polling(void *cestate);
void ce_disable_polling(void *cestate);
void hif_wlan_disable(struct hif_softc *scn);
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_v1_ret,
		uint32_t *shadow_cfg_v1_sz_ret);

#ifdef WLAN_FEATURE_EPPING
void hif_ce_prepare_epping_config(struct hif_softc *scn,
				  struct HIF_CE_state *hif_state);
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use);

#else
static inline
void hif_ce_prepare_epping_config(struct hif_softc *scn,
				  struct HIF_CE_state *hif_state)
{ }
static inline
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{ }
#endif

void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void));
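
/*
 * Illustrative usage sketch (an assumption, not code from this driver): a CE
 * service backend registers its attach callback for one of the target types
 * in enum ce_target_type, e.g.:
 *
 *	static struct ce_ops *my_ce_attach(void)
 *	{
 *		return &my_backend_ce_ops;
 *	}
 *
 *	ce_service_register_module(CE_SVC_LEGACY, my_ce_attach);
 *
 * my_backend_ce_ops and my_ce_attach() are hypothetical names standing in
 * for the backend's filled-in struct ce_ops instance and its attach routine.
 */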

#endif /* __CE_H__ */