xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_internal.h (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef __COPY_ENGINE_INTERNAL_H__
20 #define __COPY_ENGINE_INTERNAL_H__
21 
22 #include <hif.h>                /* A_TARGET_WRITE */
23 
/* Copy Engine operational state */
enum CE_op_state {
	CE_UNUSED,	/* CE not in use */
	CE_PAUSED,	/* CE operation suspended */
	CE_RUNNING,	/* CE active and servicing descriptors */
	CE_PENDING,	/* NOTE(review): presumably a transitional state — confirm at use sites */
};
31 
/* HIF CE error codes, used for error accounting */
enum ol_ath_hif_ce_ecodes {
	/* failed to obtain free space (delta) in a CE ring — presumed
	 * from name; confirm against the code that records this error
	 */
	CE_RING_DELTA_FAIL = 0
};
35 
36 struct CE_src_desc;
37 
/* Copy Engine Ring internal state */
struct CE_ring_state {

	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	/* presumably nentries - 1, used as a cheap modulo mask — confirm at init */
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after it was/is received into.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	unsigned int write_index;       /* cached copy */
	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	unsigned int hw_index;  /* cached copy */

	/* Start of DMA-coherent area reserved for descriptors */
	void *base_addr_owner_space_unaligned;  /* Host address space */
	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	void *base_addr_owner_space;    /* Host address space */
	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	char *shadow_base_unaligned;
	struct CE_src_desc *shadow_base;

	/* Watermark thresholds, expressed in ring entries */
	unsigned int low_water_mark_nentries;
	unsigned int high_water_mark_nentries;
	/* Opaque SRNG ring handle — presumably used on srng-based targets
	 * instead of the legacy register interface; confirm against the
	 * srng CE service code.
	 */
	void *srng_ctx;
	/* Per-descriptor caller contexts (e.g. nbuf pointers), one slot
	 * per ring entry — TODO confirm allocation size at ring setup.
	 */
	void **per_transfer_context;
	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */
};
94 
/* Copy Engine internal state */
struct CE_state {
	struct hif_softc *scn;		/* back-pointer to owning HIF context */
	unsigned int id;		/* copy engine number */
	unsigned int attr_flags;  /* CE_ATTR_* */
	uint32_t ctrl_addr;       /* relative to BAR */
	enum CE_op_state state;

#ifdef WLAN_FEATURE_FASTPATH
	fastpath_msg_handler fastpath_handler;	/* fastpath message callback */
	void *context;				/* argument for fastpath_handler */
#endif /* WLAN_FEATURE_FASTPATH */
	/* deferred work — presumably replenishes rx buffers after an
	 * allocation failure (see HIF_RX_NBUF_ALLOC_FAILURE); confirm
	 */
	qdf_work_t oom_allocation_work;

	ce_send_cb send_cb;		/* send-completion callback */
	void *send_context;		/* argument passed to send_cb */

	CE_recv_cb recv_cb;		/* receive-completion callback */
	void *recv_context;		/* argument passed to recv_cb */

	/* misc_cbs - are any callbacks besides send and recv enabled? */
	uint8_t misc_cbs;

	CE_watermark_cb watermark_cb;	/* watermark-crossing callback */
	void *wm_context;		/* argument passed to watermark_cb */

	/*Record the state of the copy compl interrupt */
	int disable_copy_compl_intr;

	unsigned int src_sz_max;	/* max source buffer size, in bytes */
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	struct CE_ring_state *status_ring;	/* srng targets only — confirm */
	atomic_t rx_pending;

	qdf_spinlock_t ce_index_lock;	/* serializes ring index updates */
	/* Flag to indicate whether to break out the DPC context */
	bool force_break;

	/* time in nanoseconds to yield control of napi poll */
	unsigned long long ce_service_yield_time;
	/* CE service start time in nanoseconds */
	unsigned long long ce_service_start_time;
	/* Num Of Receive Buffers handled for one interrupt DPC routine */
	unsigned int receive_count;
	/* epping */
	bool timer_inited;		/* true once poll_timer is initialized */
	qdf_timer_t poll_timer;		/* polling timer for epping mode */

	/* datapath - for faster access, use bools instead of a bitmap */
	bool htt_tx_data;		/* CE carries HTT tx data */
	bool htt_rx_data;		/* CE carries HTT rx data */
	qdf_lro_ctx_t lro_data;		/* LRO context for this CE */
};
149 
150 /* Descriptor rings must be aligned to this boundary */
151 #define CE_DESC_RING_ALIGN 8
152 #define CLOCK_OVERRIDE 0x2
153 
154 #ifdef QCA_WIFI_3_0
155 #define HIF_CE_DESC_ADDR_TO_DMA(desc) \
156 	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
157 	((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
158 #else
159 #define HIF_CE_DESC_ADDR_TO_DMA(desc) \
160 	(qdf_dma_addr_t)((desc)->buffer_addr)
161 #endif
162 
#ifdef QCA_WIFI_3_0
/*
 * Source ring descriptor, QCA_WIFI_3_0 targets.
 * Field order and widths mirror the hardware register map and must not
 * be changed.  buffer_addr_hi:5 extends the DMA address to 37 bits
 * (see HIF_CE_DESC_ADDR_TO_DMA, which masks it with 0x1F).
 */
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		dest_byte_swap:1,
		byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
		uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		byte_swap:1,          /* src_byte_swap */
		dest_byte_swap:1,
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
		uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
205 
/*
 * Destination ring descriptor, QCA_WIFI_3_0 targets.
 * Same hardware-defined layout as CE_src_desc except the swap bits:
 * here byte_swap is the dest swap and src_byte_swap is explicit.
 * Field order and widths must not be changed.
 */
struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		byte_swap:1,
		src_byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
		uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;          /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		src_byte_swap:1,
		byte_swap:1,         /* dest_byte_swap */
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
		uint32_t nbytes:16, /* length in register map */
		meta_data:16;    /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
247 #else
/*
 * Source ring descriptor, pre-3.0 targets: 32-bit DMA address,
 * 12-bit firmware metadata.  Hardware-defined layout — do not reorder.
 */
struct CE_src_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else

	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
267 
/*
 * Destination ring descriptor, pre-3.0 targets.  Identical bit layout
 * to the legacy CE_src_desc.  Hardware-defined — do not reorder.
 */
struct CE_dest_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
286 #endif /* QCA_WIFI_3_0 */
287 
/*
 * CE source ring descriptor for srng-based targets.
 * Hardware-defined layout — field order and widths must not change.
 * buffer_addr_hi:8 extends the DMA address to 40 bits.
 */
struct ce_srng_src_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 buffer_addr_hi:8;
	uint32_t rsvd1:16,
		 meta_data:16;
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t buffer_addr_hi:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};
/*
 * CE destination ring descriptor for srng-based targets.
 * Completion status is reported separately via
 * struct ce_srng_dest_status_desc.  Hardware-defined layout.
 */
struct ce_srng_dest_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd1:12,
		 buffer_addr_hi:8;
#else
	uint32_t buffer_addr_hi:8,
		 rsvd1:12,
		 ring_id:8,
		 loop_count:4;
#endif
};
/*
 * CE destination status ring descriptor for srng-based targets:
 * completion information (nbytes, metadata, hash) for a dest-ring
 * transfer.  Hardware-defined layout — do not reorder.
 */
struct ce_srng_dest_status_desc {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 rsvd0:8;
	uint32_t rsvd1:16,
		 meta_data:16;
#else
	uint32_t rsvd0:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
#endif
	uint32_t toeplitz_hash;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};
365 
366 #define CE_SENDLIST_ITEMS_MAX 12
367 
/**
 * union ce_desc - unified data type for ce descriptors
 *
 * Both src and destination descriptors follow the same format.
 * They use different data structures for different access semantics.
 * Here we provide a unifying data type.
 */
union ce_desc {
	struct CE_src_desc src_desc;
	struct CE_dest_desc dest_desc;
};
379 
/**
 * enum hif_ce_event_type - HIF copy engine event type
 * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
 * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
 * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
 * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
 * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write
 *	index in a normal tx
 * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
 * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the RX ring in fastpath
 * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the RX ring in fastpath
 * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the TX ring in fastpath
 * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: recorded when dropping a write to
 *	the write index in fastpath
 * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the TX ring in fastpath
 * @RESUME_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	on bus resume (presumed from name — confirm at use site)
 * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
 * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
 * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
 * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
 * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
 * @HIF_CE_REAP_EXIT:  records when we process completion outside of a bh
 * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
 * @NAPI_POLL_ENTER: records the start of the napi poll function
 * @NAPI_COMPLETE: records when interrupts are reenabled
 * @NAPI_POLL_EXIT: records when the napi poll function returns
 * @HIF_RX_NBUF_ALLOC_FAILURE: records an rx nbuf allocation failure
 * @HIF_RX_NBUF_MAP_FAILURE: records an rx nbuf DMA-map failure
 * @HIF_RX_NBUF_ENQUEUE_FAILURE: records a failure to enqueue an rx nbuf
 *	to the RX ring
 */
enum hif_ce_event_type {
	HIF_RX_DESC_POST,
	HIF_RX_DESC_COMPLETION,
	HIF_TX_GATHER_DESC_POST,
	HIF_TX_DESC_POST,
	HIF_TX_DESC_SOFTWARE_POST,
	HIF_TX_DESC_COMPLETION,
	FAST_RX_WRITE_INDEX_UPDATE,
	FAST_RX_SOFTWARE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
	FAST_TX_SOFTWARE_INDEX_UPDATE,
	RESUME_WRITE_INDEX_UPDATE,

	HIF_IRQ_EVENT = 0x10,
	HIF_CE_TASKLET_ENTRY,
	HIF_CE_TASKLET_RESCHEDULE,
	HIF_CE_TASKLET_EXIT,
	HIF_CE_REAP_ENTRY,
	HIF_CE_REAP_EXIT,
	NAPI_SCHEDULE,
	NAPI_POLL_ENTER,
	NAPI_COMPLETE,
	NAPI_POLL_EXIT,

	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
	HIF_RX_NBUF_MAP_FAILURE,
	HIF_RX_NBUF_ENQUEUE_FAILURE,
};
439 
440 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size);
441 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id);
442 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
443 			      enum hif_ce_event_type type,
444 			      union ce_desc *descriptor, void *memory,
445 			      int index, int len);
446 
/* Type tag for a ce_sendlist_item */
enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,		/* item references a single DMA buffer */
	/* TBDXXX: CE_RX_DESC_LIST, */
};
451 
/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter.  The latter is the actual structure
 * definition and it is only used internally.  The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.
 */
struct ce_sendlist_s {
	unsigned int num_items;		/* count of valid entries in item[] */
	struct ce_sendlist_item {
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;        /* e.g. buffer or desc list */
		union {
			unsigned int nbytes;    /* simple buffer */
			unsigned int ndesc;     /* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags
		 */
		uint32_t flags;
		uint32_t user_flags;	/* caller-supplied, passed through */
	} item[CE_SENDLIST_ITEMS_MAX];
};
477 
478 bool hif_ce_service_should_yield(struct hif_softc *scn, struct CE_state
479 				 *ce_state);
480 
481 #ifdef WLAN_FEATURE_FASTPATH
482 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
483 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
484 #else
/* No-op stub: nothing to clean up when WLAN_FEATURE_FASTPATH is disabled */
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}
488 
/* No-op stub: nothing to clean up when WLAN_FEATURE_FASTPATH is disabled */
static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
492 #endif
493 
494 /* which ring of a CE? */
495 #define CE_RING_SRC  0
496 #define CE_RING_DEST 1
497 #define CE_RING_STATUS 2
498 
499 #define CDC_WAR_MAGIC_STR   0xceef0000
500 #define CDC_WAR_DATA_CE     4
501 
502 /* Additional internal-only ce_send flags */
503 #define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */
504 
505 /**
506  * hif_get_wake_ce_id() - gets the copy engine id used for waking up
507  * @scn: The hif context to use
508  * @ce_id: a pointer where the copy engine Id should be populated
509  *
510  * Return: errno
511  */
512 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);
513 
514 /*
515  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
516  * for defined here
517  */
518 #if HIF_CE_DEBUG_DATA_BUF
519 #define HIF_CE_HISTORY_MAX 512
520 
521 #define CE_DEBUG_MAX_DATA_BUF_SIZE 64
/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @index: location of the descriptor in the ce ring
 * @type: what the event was
 * @time: when it happened
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @data: data pointed by descriptor (HIF_CE_DEBUG_DATA_BUF builds only)
 * @actual_data_len: length of the data (HIF_CE_DEBUG_DATA_BUF builds only)
 */
struct hif_ce_desc_event {
	uint16_t index;
	enum hif_ce_event_type type;
	uint64_t time;
	union ce_desc descriptor;
	void *memory;
#if HIF_CE_DEBUG_DATA_BUF
	uint8_t *data;
	ssize_t actual_data_len;
#endif
};
543 
544 #if HIF_CE_DEBUG_DATA_BUF
545 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
546 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
547 #endif /*HIF_CE_DEBUG_DATA_BUF*/
#endif /* HIF_CE_DEBUG_DATA_BUF */
549 #endif /* __COPY_ENGINE_INTERNAL_H__ */
550