xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_internal.h (revision cd39549564686e1d60a410c477b7c6e9e19791fd)
1 /*
2  * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 #ifndef __COPY_ENGINE_INTERNAL_H__
28 #define __COPY_ENGINE_INTERNAL_H__
29 
30 #include <hif.h>                /* A_TARGET_WRITE */
31 
/* Copy Engine operational state */
enum CE_op_state {
	CE_UNUSED,	/* CE not initialized / not in service */
	CE_PAUSED,	/* CE initialized but temporarily paused */
	CE_RUNNING,	/* CE initialized and processing transfers */
};
38 
/* Copy Engine error codes */
enum ol_ath_hif_ce_ecodes {
	/* ring delta check failed - presumably no space between write
	 * and sw indices; confirm against callers in ce_service */
	CE_RING_DELTA_FAIL = 0
};
42 
43 struct CE_src_desc;
44 
/* Copy Engine Ring internal state */
struct CE_ring_state {

	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;	/* index wrap mask; presumably nentries - 1 */

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after it was/is received into.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	unsigned int write_index;       /* cached copy */
	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	unsigned int hw_index;  /* cached copy */

	/* Start of DMA-coherent area reserved for descriptors */
	void *base_addr_owner_space_unaligned;  /* Host address space */
	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	void *base_addr_owner_space;    /* Host address space */
	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	char *shadow_base_unaligned;
	struct CE_src_desc *shadow_base;

	/* Watermark thresholds, in ring entries */
	unsigned int low_water_mark_nentries;
	unsigned int high_water_mark_nentries;
	/* Opaque SRNG ring context - presumably the HAL SRNG handle for
	 * srng-based targets; set up outside this file (verify) */
	void *srng_ctx;
	/* Per-entry caller cookies; one slot per ring descriptor */
	void **per_transfer_context;
	OS_DMA_MEM_CONTEXT(ce_dmacontext) /* OS Specific DMA context */
};
101 
/* Copy Engine internal state */
struct CE_state {
	struct hif_softc *scn;		/* back-pointer to owning HIF context */
	unsigned int id;		/* copy engine number */
	unsigned int attr_flags;  /* CE_ATTR_* */
	uint32_t ctrl_addr;       /* relative to BAR */
	enum CE_op_state state;

#ifdef WLAN_FEATURE_FASTPATH
	fastpath_msg_handler fastpath_handler;	/* fastpath message callback */
	void *context;				/* argument for fastpath_handler */
#endif /* WLAN_FEATURE_FASTPATH */

	/* send-completion callback and its caller-supplied cookie */
	ce_send_cb send_cb;
	void *send_context;

	/* receive-completion callback and its caller-supplied cookie */
	CE_recv_cb recv_cb;
	void *recv_context;

	/* misc_cbs - are any callbacks besides send and recv enabled? */
	uint8_t misc_cbs;

	/* watermark callback and its caller-supplied cookie */
	CE_watermark_cb watermark_cb;
	void *wm_context;

	/*Record the state of the copy compl interrupt */
	int disable_copy_compl_intr;

	unsigned int src_sz_max;	/* max source buffer size, bytes */
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	struct CE_ring_state *status_ring;
	atomic_t rx_pending;		/* RX completions pending processing */

	/* presumably serializes sw/write index updates - confirm in service */
	qdf_spinlock_t ce_index_lock;
	bool force_break;	/* Flag to indicate whether to
				 * break out the DPC context */

	/* deadline used to decide when CE servicing should yield */
	qdf_time_t ce_service_yield_time;
	unsigned int receive_count;	/* count Num Of Receive Buffers
					 * handled for one interrupt
					 * DPC routine */
	/* epping */
	bool timer_inited;		/* true once poll_timer is initialized */
	qdf_timer_t poll_timer;		/* polling timer used in epping mode */

	/* datapath - for faster access, use bools instead of a bitmap */
	bool htt_tx_data;
	bool htt_rx_data;
	/* LRO flush hook + its argument; lock guards flush vs. unload */
	void (*lro_flush_cb)(void *);
	void *lro_data;
	qdf_spinlock_t lro_unloading_lock;
};
155 
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
/* NOTE(review): consumer/semantics not visible in this header - confirm */
#define CLOCK_OVERRIDE 0x2

/*
 * Reassemble a DMA address from a descriptor.
 * QCA_WIFI_3_0 targets carry the low 32 bits in buffer_addr and bits
 * 32..36 in the 5-bit buffer_addr_hi field (hence the 0x1F mask);
 * older targets use a plain 32-bit buffer_addr.
 */
#ifdef QCA_WIFI_3_0
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
	((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
#else
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)((desc)->buffer_addr)
#endif
168 
#ifdef QCA_WIFI_3_0
/*
 * Source-ring (host to target) descriptor, QCA_WIFI_3_0 layout.
 * Bit-field order is mirrored between big- and little-endian builds
 * so the in-memory layout seen by hardware stays the same.
 */
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		dest_byte_swap:1,
		byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
		uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		byte_swap:1,          /* src_byte_swap */
		dest_byte_swap:1,
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
		uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
211 
/*
 * Destination-ring (target to host) descriptor, QCA_WIFI_3_0 layout.
 * Same mirrored bit-field scheme as struct CE_src_desc above.
 */
struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		byte_swap:1,
		src_byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
		uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;          /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		src_byte_swap:1,
		byte_swap:1,         /* dest_byte_swap */
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
		uint32_t nbytes:16, /* length in register map */
		meta_data:16;    /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
#else
/*
 * Pre-3.0 targets: 32-bit buffer address plus a single control word.
 */
struct CE_src_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else

	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};

struct CE_dest_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
#endif /* QCA_WIFI_3_0 */
293 
294 struct ce_srng_src_desc {
295 	uint32_t buffer_addr_lo;
296 #if _BYTE_ORDER == _BIG_ENDIAN
297 	uint32_t nbytes:16,
298 		 rsvd:4,
299 		 gather:1,
300 		 dest_swap:1,
301 		 byte_swap:1,
302 		 toeplitz_hash_enable:1,
303 		 buffer_addr_hi:8;
304 	uint32_t rsvd1:16,
305 		 meta_data:16;
306 	uint32_t loop_count:4,
307 		 ring_id:8,
308 		 rsvd3:20;
309 #else
310 	uint32_t buffer_addr_hi:8,
311 		 toeplitz_hash_enable:1,
312 		 byte_swap:1,
313 		 dest_swap:1,
314 		 gather:1,
315 		 rsvd:4,
316 		 nbytes:16;
317 	uint32_t meta_data:16,
318 		 rsvd1:16;
319 	uint32_t rsvd3:20,
320 		 ring_id:8,
321 		 loop_count:4;
322 #endif
323 };
/* SRNG-based destination-ring descriptor: buffer pointer only;
 * completion details arrive via ce_srng_dest_status_desc */
struct ce_srng_dest_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd1:12,
		 buffer_addr_hi:8;
#else
	uint32_t buffer_addr_hi:8,
		 rsvd1:12,
		 ring_id:8,
		 loop_count:4;
#endif
};
/* SRNG-based destination status-ring descriptor: per-buffer
 * completion info (length, metadata, hash) for the dest ring */
struct ce_srng_dest_status_desc {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 rsvd0:8;
	uint32_t rsvd1:16,
		 meta_data:16;
#else
	uint32_t rsvd0:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
#endif
	uint32_t toeplitz_hash;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};
371 
/* Maximum items in one sendlist (see struct ce_sendlist_s below) */
#define CE_SENDLIST_ITEMS_MAX 12
373 
/**
 * union ce_desc - unified data type for ce descriptors
 *
 * Both src and destination descriptors follow the same format.
 * They use different data structures for different access semantics.
 * Here we provide a unifying data type.
 */
union ce_desc {
	struct CE_src_desc src_desc;
	struct CE_dest_desc dest_desc;
};
385 
/**
 * enum hif_ce_event_type - HIF copy engine event type
 * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
 * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
 * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
 * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
 * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
 * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the RX ring in fastpath
 * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the RX ring in fastpath
 * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the TX ring in fastpath
 * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the TX ring in fastpath
 *
 * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
 * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
 * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
 * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
 * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
 * @HIF_CE_REAP_EXIT:  records when we process completion outside of a bh
 * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
 * @NAPI_POLL_ENTER: records the start of the napi poll function
 * @NAPI_COMPLETE: records when interrupts are reenabled
 * @NAPI_POLL_EXIT: records when the napi poll function returns
 */
enum hif_ce_event_type {
	HIF_RX_DESC_POST,
	HIF_RX_DESC_COMPLETION,
	HIF_TX_GATHER_DESC_POST,
	HIF_TX_DESC_POST,
	HIF_TX_DESC_COMPLETION,
	FAST_RX_WRITE_INDEX_UPDATE,
	FAST_RX_SOFTWARE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_UPDATE,
	FAST_TX_SOFTWARE_INDEX_UPDATE,

	HIF_IRQ_EVENT = 0x10,
	HIF_CE_TASKLET_ENTRY,
	HIF_CE_TASKLET_RESCHEDULE,
	HIF_CE_TASKLET_EXIT,
	HIF_CE_REAP_ENTRY,
	HIF_CE_REAP_EXIT,
	NAPI_SCHEDULE,
	NAPI_POLL_ENTER,
	NAPI_COMPLETE,
	NAPI_POLL_EXIT,
};
435 
/* Initialize the descriptor event history for copy engine @ce_id */
void ce_init_ce_desc_event_log(int ce_id, int size);

/**
 * hif_record_ce_desc_event() - record a copy engine descriptor event
 * @scn: HIF context
 * @ce_id: copy engine the event belongs to
 * @type: event being recorded (enum hif_ce_event_type)
 * @descriptor: descriptor involved in the event
 * @memory: per-transfer context associated with the descriptor
 * @index: ring index at which the event occurred
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index);
441 
/* Type of a sendlist item; only simple buffers are implemented so far */
enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,
	/* TBDXXX: CE_RX_DESC_LIST, */
};
446 
/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter.  The latter is the actual structure
 * definition and it is only used internally.  The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.
 */
struct ce_sendlist_s {
	unsigned int num_items;		/* valid entries in item[] */
	struct ce_sendlist_item {
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;        /* e.g. buffer or desc list */
		union {
			unsigned int nbytes;    /* simple buffer */
			unsigned int ndesc;     /* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags */
		uint32_t flags;
		uint32_t user_flags;
	} item[CE_SENDLIST_ITEMS_MAX];
};
471 
/**
 * hif_ce_service_should_yield() - check whether CE servicing should stop
 * @scn: HIF context
 * @ce_state: copy engine being serviced
 *
 * Return: true when the service loop should yield its execution context
 * (NOTE(review): implementation lives elsewhere - presumably based on
 * force_break / ce_service_yield_time; confirm in ce_service.c)
 */
bool hif_ce_service_should_yield(struct hif_softc *scn, struct CE_state
				 *ce_state);
474 
#ifdef WLAN_FEATURE_FASTPATH
/* Clean up fastpath state for the H2T TX / T2H message copy engines */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
/* Fastpath disabled: cleanup hooks compile away to no-ops */
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif
487 
/* which ring of a CE? */
#define CE_RING_SRC  0		/* source ring */
#define CE_RING_DEST 1		/* destination ring */
#define CE_RING_STATUS 2	/* status ring */

/* CDC workaround constants.
 * NOTE(review): consumers/semantics live outside this file - confirm */
#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */
#endif /* __COPY_ENGINE_INTERNAL_H__ */
498 #endif /* __COPY_ENGINE_INTERNAL_H__ */
499