xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_internal.h (revision 3149adf58a329e17232a4c0e58d460d025edd55a) !
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 #ifndef __COPY_ENGINE_INTERNAL_H__
28 #define __COPY_ENGINE_INTERNAL_H__
29 
30 #include <hif.h>                /* A_TARGET_WRITE */
31 
/* Copy Engine operational state */
enum CE_op_state {
	CE_UNUSED,	/* CE not initialized, or already shut down */
	CE_PAUSED,	/* CE processing suspended -- NOTE(review): confirm pause semantics at call sites */
	CE_RUNNING,	/* CE initialized and servicing its rings */
	CE_PENDING,	/* state transition requested but not yet complete -- TODO confirm */
};
39 
/* Copy Engine error codes */
enum ol_ath_hif_ce_ecodes {
	/* ring free-slot (delta) check failed -- name-derived; verify at call site */
	CE_RING_DELTA_FAIL = 0
};
43 
44 struct CE_src_desc;
45 
/* Copy Engine Ring internal state */
struct CE_ring_state {

	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	/* nentries - 1; lets ring indices wrap with a cheap bitwise AND */
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after it was/is received into.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	unsigned int write_index;       /* cached copy */
	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	unsigned int hw_index;  /* cached copy */

	/* Start of DMA-coherent area reserved for descriptors */
	void *base_addr_owner_space_unaligned;  /* Host address space */
	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	void *base_addr_owner_space;    /* Host address space */
	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	char *shadow_base_unaligned;
	struct CE_src_desc *shadow_base;

	/* Watermark thresholds, in ring entries (see CE_watermark_cb) */
	unsigned int low_water_mark_nentries;
	unsigned int high_water_mark_nentries;
	/* Opaque SRNG context -- presumably for SRNG-based targets; confirm */
	void *srng_ctx;
	/* Per-entry caller-supplied context, handed back on completion */
	void **per_transfer_context;
	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */
};
102 
/* Copy Engine internal state */
struct CE_state {
	struct hif_softc *scn;	  /* back-pointer to owning HIF instance */
	unsigned int id;	  /* copy engine number */
	unsigned int attr_flags;  /* CE_ATTR_* */
	uint32_t ctrl_addr;       /* relative to BAR */
	enum CE_op_state state;

#ifdef WLAN_FEATURE_FASTPATH
	fastpath_msg_handler fastpath_handler;
	void *context;		/* argument passed to fastpath_handler */
#endif /* WLAN_FEATURE_FASTPATH */
	/* deferred work, presumably to retry allocation after OOM
	 * (cf. HIF_RX_NBUF_ALLOC_FAILURE) -- confirm against scheduler
	 */
	qdf_work_t oom_allocation_work;

	/* send-completion callback and its caller-supplied argument */
	ce_send_cb send_cb;
	void *send_context;

	/* receive-completion callback and its caller-supplied argument */
	CE_recv_cb recv_cb;
	void *recv_context;

	/* misc_cbs - are any callbacks besides send and recv enabled? */
	uint8_t misc_cbs;

	/* watermark callback and its caller-supplied argument */
	CE_watermark_cb watermark_cb;
	void *wm_context;

	/* Record the state of the copy compl interrupt */
	int disable_copy_compl_intr;

	unsigned int src_sz_max;	/* max source buffer size in bytes -- TODO confirm units */
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	struct CE_ring_state *status_ring;
	atomic_t rx_pending;		/* RX completion work outstanding -- name-derived */

	/* protects ring index updates -- NOTE(review): confirm exact scope */
	qdf_spinlock_t ce_index_lock;
	/* Flag to indicate whether to break out the DPC context */
	bool force_break;

	/* time in nanoseconds to yield control of napi poll */
	unsigned long long ce_service_yield_time;
	/* Num Of Receive Buffers handled for one interrupt DPC routine */
	unsigned int receive_count;
	/* epping */
	bool timer_inited;
	qdf_timer_t poll_timer;

	/* datapath - for faster access, use bools instead of a bitmap */
	bool htt_tx_data;
	bool htt_rx_data;
	qdf_lro_ctx_t lro_data;		/* LRO (large receive offload) context */
};
155 
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
/* NOTE(review): meaning of CLOCK_OVERRIDE is not visible in this header */
#define CLOCK_OVERRIDE 0x2

#ifdef QCA_WIFI_3_0
/*
 * Reassemble a descriptor's DMA address: low 32 bits come from
 * buffer_addr, the upper 5 bits from the buffer_addr_hi bit-field
 * (masked to 5 bits, shifted into bits 36:32).
 */
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
	((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
#else
/* Legacy targets carry the full DMA address in the 32-bit buffer_addr */
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)((desc)->buffer_addr)
#endif
168 
169 #ifdef QCA_WIFI_3_0
/*
 * Source-ring descriptor (QCA_WIFI_3_0 targets).
 * Layout is hardware-defined; the two endian variants must describe the
 * same in-memory words. Do not reorder or resize fields.
 */
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		dest_byte_swap:1,
		byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		byte_swap:1,          /* src_byte_swap */
		dest_byte_swap:1,
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
211 
/*
 * Destination-ring descriptor (QCA_WIFI_3_0 targets).
 * Mirrors struct CE_src_desc; here byte_swap means dest_byte_swap.
 * Layout is hardware-defined; do not reorder or resize fields.
 */
struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		byte_swap:1,
		src_byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;          /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		src_byte_swap:1,
		byte_swap:1,         /* dest_byte_swap */
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16;    /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
253 #else
/*
 * Source-ring descriptor (pre-QCA_WIFI_3_0 targets): 32-bit address
 * plus one control word. Layout is hardware-defined; do not reorder.
 */
struct CE_src_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else

	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
273 
/*
 * Destination-ring descriptor (pre-QCA_WIFI_3_0 targets).
 * Same control-word layout as the legacy CE_src_desc above.
 */
struct CE_dest_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
292 #endif /* QCA_WIFI_3_0 */
293 
/*
 * SRNG-based CE source descriptor (four 32-bit words).
 * Layout is hardware-defined; endian variants must describe the same
 * in-memory words. Do not reorder or resize fields.
 */
struct ce_srng_src_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 buffer_addr_hi:8;
	uint32_t rsvd1:16,
		 meta_data:16;
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t buffer_addr_hi:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};
/*
 * SRNG-based CE destination descriptor (two 32-bit words).
 * Layout is hardware-defined; do not reorder or resize fields.
 */
struct ce_srng_dest_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd1:12,
		 buffer_addr_hi:8;
#else
	uint32_t buffer_addr_hi:8,
		 rsvd1:12,
		 ring_id:8,
		 loop_count:4;
#endif
};
/*
 * SRNG-based CE destination-status descriptor (four 32-bit words);
 * written back by hardware with completion info (nbytes, hash, ...).
 * Layout is hardware-defined; do not reorder or resize fields.
 */
struct ce_srng_dest_status_desc {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 rsvd0:8;
	uint32_t rsvd1:16,
		 meta_data:16;
#else
	uint32_t rsvd0:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
#endif
	uint32_t toeplitz_hash;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};
371 
/* Maximum number of items in one ce_sendlist (see struct ce_sendlist_s) */
#define CE_SENDLIST_ITEMS_MAX 12

/**
 * union ce_desc - unified data type for ce descriptors
 *
 * Both src and destination descriptors follow the same format.
 * They use different data structures for different access semantics.
 * Here we provide a unifying data type.
 */
union ce_desc {
	struct CE_src_desc src_desc;
	struct CE_dest_desc dest_desc;
};
385 
/**
 * enum hif_ce_event_type - HIF copy engine event type
 * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
 * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
 * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
 * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
 * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write
 *	index in a normal tx
 * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
 * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the RX ring in fastpath
 * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the RX ring in fastpath
 * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the TX ring in fastpath
 * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: recorded when dropping a write to
 *	the write index in fastpath
 * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the TX ring in fastpath
 * @RESUME_WRITE_INDEX_UPDATE: event recorded when the write index is updated
 *	as part of resume -- name-derived; confirm at call site
 * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
 * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
 * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
 * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
 * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
 * @HIF_CE_REAP_EXIT:  records when we process completion outside of a bh
 * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
 * @NAPI_POLL_ENTER: records the start of the napi poll function
 * @NAPI_COMPLETE: records when interrupts are reenabled
 * @NAPI_POLL_EXIT: records when the napi poll function returns
 * @HIF_RX_NBUF_ALLOC_FAILURE: recorded when an RX nbuf allocation fails
 * @HIF_RX_NBUF_MAP_FAILURE: recorded when DMA-mapping an RX nbuf fails
 * @HIF_RX_NBUF_ENQUEUE_FAILURE: recorded when posting an RX nbuf to the
 *	ring fails
 */
enum hif_ce_event_type {
	HIF_RX_DESC_POST,
	HIF_RX_DESC_COMPLETION,
	HIF_TX_GATHER_DESC_POST,
	HIF_TX_DESC_POST,
	HIF_TX_DESC_SOFTWARE_POST,
	HIF_TX_DESC_COMPLETION,
	FAST_RX_WRITE_INDEX_UPDATE,
	FAST_RX_SOFTWARE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
	FAST_TX_SOFTWARE_INDEX_UPDATE,
	RESUME_WRITE_INDEX_UPDATE,

	HIF_IRQ_EVENT = 0x10,
	HIF_CE_TASKLET_ENTRY,
	HIF_CE_TASKLET_RESCHEDULE,
	HIF_CE_TASKLET_EXIT,
	HIF_CE_REAP_ENTRY,
	HIF_CE_REAP_EXIT,
	NAPI_SCHEDULE,
	NAPI_POLL_ENTER,
	NAPI_COMPLETE,
	NAPI_POLL_EXIT,

	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
	HIF_RX_NBUF_MAP_FAILURE,
	HIF_RX_NBUF_ENQUEUE_FAILURE,
};
445 
/* Set up / tear down the per-CE descriptor event history (debug tracing) */
void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size);
void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id);
/* Record one descriptor event (enum hif_ce_event_type) in CE @ce_id's log */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len);
452 
/* Type of a ce_sendlist item (see struct ce_sendlist_s) */
enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,	/* item->data is a single DMA buffer */
	/* TBDXXX: CE_RX_DESC_LIST, */
};
457 
/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter.  The latter is the actual structure
 * definition and it is only used internally.  The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.
 */
struct ce_sendlist_s {
	/* number of valid entries in item[]; <= CE_SENDLIST_ITEMS_MAX */
	unsigned int num_items;
	struct ce_sendlist_item {
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;        /* e.g. buffer or desc list */
		union {
			unsigned int nbytes;    /* simple buffer */
			unsigned int ndesc;     /* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags
		 */
		uint32_t flags;
		uint32_t user_flags;
	} item[CE_SENDLIST_ITEMS_MAX];
};
483 
/*
 * hif_ce_service_should_yield() - decide whether the CE service loop
 * should yield the CPU (cf. force_break / ce_service_yield_time in
 * struct CE_state). Return: true when the caller should stop processing.
 */
bool hif_ce_service_should_yield(struct hif_softc *scn, struct CE_state
				 *ce_state);

#ifdef WLAN_FEATURE_FASTPATH
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
/* Fastpath disabled: cleanup hooks become no-ops */
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif
499 
/* which ring of a CE? */
#define CE_RING_SRC  0
#define CE_RING_DEST 1
#define CE_RING_STATUS 2

/* CDC workaround constants; usage lives outside this header -- TODO confirm */
#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */
510 
/**
 * hif_get_wake_ce_id() - gets the copy engine id used for waking up
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine Id should be populated
 *
 * Return: errno (presumably 0 on success -- confirm in implementation)
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);
519 
/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if HIF_CE_DEBUG_DATA_BUF
/* Depth of the per-CE descriptor event history ring */
#define HIF_CE_HISTORY_MAX 512

/* Max bytes of descriptor payload captured per event */
#define CE_DEBUG_MAX_DATA_BUF_SIZE 64
/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @index: location of the descriptor in the ce ring
 * @type: what the event was
 * @time: when it happened
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @data: data pointed by descriptor
 * @actual_data_len: length of the data
 */
struct hif_ce_desc_event {
	uint16_t index;
	enum hif_ce_event_type type;
	uint64_t time;
	union ce_desc descriptor;
	void *memory;
#if HIF_CE_DEBUG_DATA_BUF
	uint8_t *data;
	ssize_t actual_data_len;
#endif
};

#if HIF_CE_DEBUG_DATA_BUF
/* Allocate / free the data-capture buffers backing the history above */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
#endif /*HIF_CE_DEBUG_DATA_BUF*/
#endif /* HIF_CE_DEBUG_DATA_BUF */
555 #endif /* __COPY_ENGINE_INTERNAL_H__ */
556