xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_internal.h (revision 1e1def755013f8d953fce9b9cbe4d8836497c0d6)
1 /*
2  * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 #ifndef __COPY_ENGINE_INTERNAL_H__
28 #define __COPY_ENGINE_INTERNAL_H__
29 
30 #include <hif.h>                /* A_TARGET_WRITE */
31 
32 /* Copy Engine operational state */
33 enum CE_op_state {
34 	CE_UNUSED,	/* CE not in use */
35 	CE_PAUSED,	/* CE servicing is paused */
36 	CE_RUNNING,	/* CE initialized and servicing transfers */
37 	CE_PENDING,	/* state transition pending -- TODO confirm exact meaning */
38 };
39 
/* Copy-engine error codes (used for error accounting) */
40 enum ol_ath_hif_ce_ecodes {
41 	CE_RING_DELTA_FAIL = 0 /* no room in CE ring (ring-delta check failed) -- TODO confirm */
42 };
43 
44 struct CE_src_desc;
45 
46 /* Copy Engine Ring internal state */
47 struct CE_ring_state {
48 
49 	/* Number of entries in this ring; must be power of 2 */
50 	unsigned int nentries;
51 	unsigned int nentries_mask; /* index wrap mask; presumably nentries - 1 -- confirm in ring init */
52 
53 	/*
54 	 * For dest ring, this is the next index to be processed
55 	 * by software after it was/is received into.
56 	 *
57 	 * For src ring, this is the last descriptor that was sent
58 	 * and completion processed by software.
59 	 *
60 	 * Regardless of src or dest ring, this is an invariant
61 	 * (modulo ring size):
62 	 *     write index >= read index >= sw_index
63 	 */
64 	unsigned int sw_index;
65 	unsigned int write_index;       /* cached copy */
66 	/*
67 	 * For src ring, this is the next index not yet processed by HW.
68 	 * This is a cached copy of the real HW index (read index), used
69 	 * for avoiding reading the HW index register more often than
70 	 * necessary.
71 	 * This extends the invariant:
72 	 *     write index >= read index >= hw_index >= sw_index
73 	 *
74 	 * For dest ring, this is currently unused.
75 	 */
76 	unsigned int hw_index;  /* cached copy */
77 
78 	/* Start of DMA-coherent area reserved for descriptors */
79 	void *base_addr_owner_space_unaligned;  /* Host address space */
80 	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */
81 
82 	/*
83 	 * Actual start of descriptors.
84 	 * Aligned to descriptor-size boundary.
85 	 * Points into reserved DMA-coherent area, above.
86 	 */
87 	void *base_addr_owner_space;    /* Host address space */
88 	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
89 	/*
90 	 * Start of shadow copy of descriptors, within regular memory.
91 	 * Aligned to descriptor-size boundary.
92 	 */
93 	char *shadow_base_unaligned;
94 	struct CE_src_desc *shadow_base; /* NOTE(review): typed as src desc even though ring state is generic -- confirm usage */
95 
96 	unsigned int low_water_mark_nentries;  /* watermark thresholds, in ring entries */
97 	unsigned int high_water_mark_nentries;
98 	void *srng_ctx; /* SRNG ring handle, for SRNG-based targets -- TODO confirm type */
99 	void **per_transfer_context; /* per-entry caller context, indexed like the ring -- TODO confirm sizing */
100 	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */
101 };
102 
103 /* Copy Engine internal state */
104 struct CE_state {
105 	struct hif_softc *scn;    /* HIF context this CE belongs to */
106 	unsigned int id;          /* copy engine number */
107 	unsigned int attr_flags;  /* CE_ATTR_* */
108 	uint32_t ctrl_addr;       /* relative to BAR */
109 	enum CE_op_state state;   /* current operational state */
110 
111 #ifdef WLAN_FEATURE_FASTPATH
112 	fastpath_msg_handler fastpath_handler; /* fastpath rx message handler */
113 	void *context;                         /* opaque arg for fastpath_handler */
114 #endif /* WLAN_FEATURE_FASTPATH */
115 	qdf_work_t oom_allocation_work; /* deferred work; presumably retries buffer allocation after OOM -- TODO confirm */
116 
117 	ce_send_cb send_cb;     /* send-completion callback */
118 	void *send_context;     /* opaque arg for send_cb */
119 
120 	CE_recv_cb recv_cb;     /* receive-completion callback */
121 	void *recv_context;     /* opaque arg for recv_cb */
122 
123 	/* misc_cbs - are any callbacks besides send and recv enabled? */
124 	uint8_t misc_cbs;
125 
126 	CE_watermark_cb watermark_cb; /* ring watermark callback */
127 	void *wm_context;             /* opaque arg for watermark_cb */
128 
129 	/*Record the state of the copy compl interrupt */
130 	int disable_copy_compl_intr;
131 
132 	unsigned int src_sz_max;  /* max source buffer size, presumably bytes -- TODO confirm */
133 	struct CE_ring_state *src_ring;    /* source ring (may be absent for this CE) */
134 	struct CE_ring_state *dest_ring;   /* destination ring */
135 	struct CE_ring_state *status_ring; /* status ring (SRNG-based targets) -- TODO confirm */
136 	atomic_t rx_pending;      /* nonzero while rx processing is outstanding -- TODO confirm */
137 
138 	qdf_spinlock_t ce_index_lock; /* presumably serializes ring index updates -- confirm scope */
139 	/* Flag to indicate whether to break out the DPC context */
140 	bool force_break;
141 
142 	/* time in nanoseconds to yield control of napi poll */
143 	unsigned long long ce_service_yield_time;
144 	/* Num Of Receive Buffers handled for one interrupt DPC routine */
145 	unsigned int receive_count;
146 	/* epping */
147 	bool timer_inited;       /* true once poll_timer has been initialized */
148 	qdf_timer_t poll_timer;  /* poll timer used in epping mode */
149 
150 	/* datapath - for faster access, use bools instead of a bitmap */
151 	bool htt_tx_data;  /* CE carries HTT tx data */
152 	bool htt_rx_data;  /* CE carries HTT rx data */
153 	qdf_lro_ctx_t lro_data; /* LRO (large receive offload) context for rx */
154 };
155 
156 /* Descriptor rings must be aligned to this boundary */
157 #define CE_DESC_RING_ALIGN 8
/* NOTE(review): clock-override value; exact register usage not visible here -- TODO confirm */
158 #define CLOCK_OVERRIDE 0x2
159 
/*
 * Reassemble a descriptor's full DMA address.  QCA_WIFI_3_0 targets
 * support wider-than-32-bit addressing: the low 32 bits come from
 * buffer_addr and address bits [36:32] from the 5-bit buffer_addr_hi
 * bitfield (hence the 0x1F mask).  Older targets use a plain 32-bit
 * address.
 */
160 #ifdef QCA_WIFI_3_0
161 #define HIF_CE_DESC_ADDR_TO_DMA(desc) \
162 	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
163 	((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
164 #else
165 #define HIF_CE_DESC_ADDR_TO_DMA(desc) \
166 	(qdf_dma_addr_t)((desc)->buffer_addr)
167 #endif
168 
169 #ifdef QCA_WIFI_3_0
/*
 * Source-ring descriptor, QCA_WIFI_3_0 targets.
 * The bitfields mirror the hardware descriptor layout; the two
 * endianness variants list the same fields in opposite order so the
 * in-memory bit positions match the hardware either way.  Do not
 * reorder or resize fields.  buffer_addr_hi:5 carries DMA address
 * bits [36:32] (see HIF_CE_DESC_ADDR_TO_DMA).
 */
170 struct CE_src_desc {
171 	uint32_t buffer_addr:32;
172 #if _BYTE_ORDER == _BIG_ENDIAN
173 	uint32_t gather:1,
174 		enable_11h:1,
175 		meta_data_low:2, /* fw_metadata_low */
176 		packet_result_offset:12,
177 		toeplitz_hash_enable:1,
178 		addr_y_search_disable:1,
179 		addr_x_search_disable:1,
180 		misc_int_disable:1,
181 		target_int_disable:1,
182 		host_int_disable:1,
183 		dest_byte_swap:1,
184 		byte_swap:1,
185 		type:2,
186 		tx_classify:1,
187 		buffer_addr_hi:5;
188 		uint32_t meta_data:16, /* fw_metadata_high */
189 		nbytes:16;       /* length in register map */
190 #else
191 	uint32_t buffer_addr_hi:5,
192 		tx_classify:1,
193 		type:2,
194 		byte_swap:1,          /* src_byte_swap */
195 		dest_byte_swap:1,
196 		host_int_disable:1,
197 		target_int_disable:1,
198 		misc_int_disable:1,
199 		addr_x_search_disable:1,
200 		addr_y_search_disable:1,
201 		toeplitz_hash_enable:1,
202 		packet_result_offset:12,
203 		meta_data_low:2, /* fw_metadata_low */
204 		enable_11h:1,
205 		gather:1;
206 		uint32_t nbytes:16, /* length in register map */
207 		meta_data:16; /* fw_metadata_high */
208 #endif
209 	uint32_t toeplitz_hash_result:32;
210 };
211 
/*
 * Destination-ring descriptor, QCA_WIFI_3_0 targets.
 * Hardware-mapped bitfield layout (see note on struct CE_src_desc);
 * do not reorder or resize fields.
 */
212 struct CE_dest_desc {
213 	uint32_t buffer_addr:32;
214 #if _BYTE_ORDER == _BIG_ENDIAN
215 	uint32_t gather:1,
216 		enable_11h:1,
217 		meta_data_low:2, /* fw_metadata_low */
218 		packet_result_offset:12,
219 		toeplitz_hash_enable:1,
220 		addr_y_search_disable:1,
221 		addr_x_search_disable:1,
222 		misc_int_disable:1,
223 		target_int_disable:1,
224 		host_int_disable:1,
225 		byte_swap:1,
226 		src_byte_swap:1,
227 		type:2,
228 		tx_classify:1,
229 		buffer_addr_hi:5;
230 		uint32_t meta_data:16, /* fw_metadata_high */
231 		nbytes:16;          /* length in register map */
232 #else
233 	uint32_t buffer_addr_hi:5,
234 		tx_classify:1,
235 		type:2,
236 		src_byte_swap:1,
237 		byte_swap:1,         /* dest_byte_swap */
238 		host_int_disable:1,
239 		target_int_disable:1,
240 		misc_int_disable:1,
241 		addr_x_search_disable:1,
242 		addr_y_search_disable:1,
243 		toeplitz_hash_enable:1,
244 		packet_result_offset:12,
245 		meta_data_low:2, /* fw_metadata_low */
246 		enable_11h:1,
247 		gather:1;
248 		uint32_t nbytes:16, /* length in register map */
249 		meta_data:16;    /* fw_metadata_high */
250 #endif
251 	uint32_t toeplitz_hash_result:32;
252 };
253 #else
/*
 * Source-ring descriptor for pre-QCA_WIFI_3_0 targets (plain 32-bit
 * DMA address).  Hardware-mapped bitfield layout; do not reorder.
 */
254 struct CE_src_desc {
255 	uint32_t buffer_addr;
256 #if _BYTE_ORDER == _BIG_ENDIAN
257 	uint32_t  meta_data:12,
258 		  target_int_disable:1,
259 		  host_int_disable:1,
260 		  byte_swap:1,
261 		  gather:1,
262 		  nbytes:16;
263 #else
264 
265 	uint32_t nbytes:16,
266 		 gather:1,
267 		 byte_swap:1,
268 		 host_int_disable:1,
269 		 target_int_disable:1,
270 		 meta_data:12;
271 #endif
272 };
273 
/*
 * Destination-ring descriptor for pre-QCA_WIFI_3_0 targets (plain
 * 32-bit DMA address).  Hardware-mapped bitfield layout; do not
 * reorder.
 */
274 struct CE_dest_desc {
275 	uint32_t buffer_addr;
276 #if _BYTE_ORDER == _BIG_ENDIAN
277 	uint32_t  meta_data:12,
278 		  target_int_disable:1,
279 		  host_int_disable:1,
280 		  byte_swap:1,
281 		  gather:1,
282 		  nbytes:16;
283 #else
284 	uint32_t nbytes:16,
285 		 gather:1,
286 		 byte_swap:1,
287 		 host_int_disable:1,
288 		 target_int_disable:1,
289 		 meta_data:12;
290 #endif
291 };
292 #endif /* QCA_WIFI_3_0 */
293 
/*
 * Source-ring descriptor for SRNG-based copy engines (newer targets).
 * Hardware-mapped bitfields; buffer_addr_hi:8 extends the DMA address
 * beyond 32 bits.  Do not reorder.
 */
294 struct ce_srng_src_desc {
295 	uint32_t buffer_addr_lo;
296 #if _BYTE_ORDER == _BIG_ENDIAN
297 	uint32_t nbytes:16,
298 		 rsvd:4,
299 		 gather:1,
300 		 dest_swap:1,
301 		 byte_swap:1,
302 		 toeplitz_hash_enable:1,
303 		 buffer_addr_hi:8;
304 	uint32_t rsvd1:16,
305 		 meta_data:16;
306 	uint32_t loop_count:4,
307 		 ring_id:8,
308 		 rsvd3:20;
309 #else
310 	uint32_t buffer_addr_hi:8,
311 		 toeplitz_hash_enable:1,
312 		 byte_swap:1,
313 		 dest_swap:1,
314 		 gather:1,
315 		 rsvd:4,
316 		 nbytes:16;
317 	uint32_t meta_data:16,
318 		 rsvd1:16;
319 	uint32_t rsvd3:20,
320 		 ring_id:8,
321 		 loop_count:4;
322 #endif
323 };
/*
 * Destination-ring descriptor for SRNG-based copy engines.
 * Hardware-mapped bitfields; do not reorder.
 */
324 struct ce_srng_dest_desc {
325 	uint32_t buffer_addr_lo;
326 #if _BYTE_ORDER == _BIG_ENDIAN
327 	uint32_t loop_count:4,
328 		 ring_id:8,
329 		 rsvd1:12,
330 		 buffer_addr_hi:8;
331 #else
332 	uint32_t buffer_addr_hi:8,
333 		 rsvd1:12,
334 		 ring_id:8,
335 		 loop_count:4;
336 #endif
337 };
/*
 * Destination-status-ring descriptor for SRNG-based copy engines:
 * completion status (length, metadata, toeplitz hash) for a received
 * buffer.  Hardware-mapped bitfields; do not reorder.
 */
338 struct ce_srng_dest_status_desc {
339 #if _BYTE_ORDER == _BIG_ENDIAN
340 	uint32_t nbytes:16,
341 		 rsvd:4,
342 		 gather:1,
343 		 dest_swap:1,
344 		 byte_swap:1,
345 		 toeplitz_hash_enable:1,
346 		 rsvd0:8;
347 	uint32_t rsvd1:16,
348 		 meta_data:16;
349 #else
350 	uint32_t rsvd0:8,
351 		 toeplitz_hash_enable:1,
352 		 byte_swap:1,
353 		 dest_swap:1,
354 		 gather:1,
355 		 rsvd:4,
356 		 nbytes:16;
357 	uint32_t meta_data:16,
358 		 rsvd1:16;
359 #endif
360 	uint32_t toeplitz_hash;
361 #if _BYTE_ORDER == _BIG_ENDIAN
362 	uint32_t loop_count:4,
363 		 ring_id:8,
364 		 rsvd3:20;
365 #else
366 	uint32_t rsvd3:20,
367 		 ring_id:8,
368 		 loop_count:4;
369 #endif
370 };
371 
/* Maximum number of items in a single ce_sendlist (see struct ce_sendlist_s) */
372 #define CE_SENDLIST_ITEMS_MAX 12
373 
374 /**
375  * union ce_desc - unified data type for ce descriptors
376  *
377  * Both src and destination descriptors follow the same format.
378  * They use different data structures for different access semantics.
379  * Here we provide a unifying data type.
380  */
381 union ce_desc {
382 	struct CE_src_desc src_desc;
383 	struct CE_dest_desc dest_desc;
384 };
385 
386 /**
387  * enum hif_ce_event_type - HIF copy engine event type
388  * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
389  * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
390  * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
391  * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
392  * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write
393  *	index in a normal tx
394  * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
395  * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
396  *	of the RX ring in fastpath
397  * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
398  *	index of the RX ring in fastpath
399  * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
400  *	of the TX ring in fastpath
401  * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: recorded when dropping a write to
402  *	the write index in fastpath
403  * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
404  *	index of the TX ring in fastpath
 * @RESUME_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	(presumably on bus resume -- TODO confirm)
405  * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
406  * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
407  * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
408  * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
409  * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
410  * @HIF_CE_REAP_EXIT:  records when we process completion outside of a bh
411  * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
412  * @NAPI_POLL_ENTER: records the start of the napi poll function
413  * @NAPI_COMPLETE: records when interrupts are reenabled
414  * @NAPI_POLL_EXIT: records when the napi poll function returns
 * @HIF_RX_NBUF_ALLOC_FAILURE: records a failure to allocate an rx nbuf
 * @HIF_RX_NBUF_MAP_FAILURE: records a failure to DMA-map an rx nbuf
 * @HIF_RX_NBUF_ENQUEUE_FAILURE: records a failure to enqueue an rx nbuf
415  */
416 enum hif_ce_event_type {
417 	HIF_RX_DESC_POST,
418 	HIF_RX_DESC_COMPLETION,
419 	HIF_TX_GATHER_DESC_POST,
420 	HIF_TX_DESC_POST,
421 	HIF_TX_DESC_SOFTWARE_POST,
422 	HIF_TX_DESC_COMPLETION,
423 	FAST_RX_WRITE_INDEX_UPDATE,
424 	FAST_RX_SOFTWARE_INDEX_UPDATE,
425 	FAST_TX_WRITE_INDEX_UPDATE,
426 	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
427 	FAST_TX_SOFTWARE_INDEX_UPDATE,
428 	RESUME_WRITE_INDEX_UPDATE,
429 
430 	HIF_IRQ_EVENT = 0x10,
431 	HIF_CE_TASKLET_ENTRY,
432 	HIF_CE_TASKLET_RESCHEDULE,
433 	HIF_CE_TASKLET_EXIT,
434 	HIF_CE_REAP_ENTRY,
435 	HIF_CE_REAP_EXIT,
436 	NAPI_SCHEDULE,
437 	NAPI_POLL_ENTER,
438 	NAPI_COMPLETE,
439 	NAPI_POLL_EXIT,
440 
441 	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
442 	HIF_RX_NBUF_MAP_FAILURE,
443 	HIF_RX_NBUF_ENQUEUE_FAILURE,
444 };
445 
/* Initialize the descriptor-event history for CE @ce_id with @size entries -- TODO confirm semantics of size */
446 void ce_init_ce_desc_event_log(int ce_id, int size);
/* Record one descriptor event (see enum hif_ce_event_type) for CE @ce_id */
447 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
448 			      enum hif_ce_event_type type,
449 			      union ce_desc *descriptor, void *memory,
450 			      int index);
451 
/* Kind of payload carried by one ce_sendlist item */
452 enum ce_sendlist_type_e {
453 	CE_SIMPLE_BUFFER_TYPE,	/* single contiguous buffer: address + nbytes */
454 	/* TBDXXX: CE_RX_DESC_LIST, */
455 };
456 
457 /*
458  * There's a public "ce_sendlist" and a private "ce_sendlist_s".
459  * The former is an opaque structure with sufficient space
460  * to hold the latter.  The latter is the actual structure
461  * definition and it is only used internally.  The opaque version
462  * of the structure allows callers to allocate an instance on the
463  * run-time stack without knowing any of the details of the
464  * structure layout.
465  */
466 struct ce_sendlist_s {
467 	unsigned int num_items; /* number of valid entries in item[] */
468 	struct ce_sendlist_item {
469 		enum ce_sendlist_type_e send_type;
470 		dma_addr_t data;        /* e.g. buffer or desc list */
471 		union {
472 			unsigned int nbytes;    /* simple buffer */
473 			unsigned int ndesc;     /* Rx descriptor list */
474 		} u;
475 		/* flags: externally-specified flags;
476 		 * OR-ed with internal flags
477 		 */
478 		uint32_t flags;
479 		uint32_t user_flags;
480 	} item[CE_SENDLIST_ITEMS_MAX];
481 };
482 
/**
 * hif_ce_service_should_yield() - check if CE servicing should yield
 * @scn: hif context
 * @ce_state: CE currently being serviced
 *
 * Return: true when the service loop should yield the CPU (presumably
 * based on ce_service_yield_time / force_break -- TODO confirm)
 */
483 bool hif_ce_service_should_yield(struct hif_softc *scn, struct CE_state
484 				 *ce_state);
485 
/*
 * Fastpath-only ring cleanup for the host->target tx CE and the
 * target->host message CE.  When WLAN_FEATURE_FASTPATH is disabled
 * these compile to empty inline no-ops so callers need no #ifdefs.
 */
486 #ifdef WLAN_FEATURE_FASTPATH
487 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
488 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
489 #else
490 static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
491 {
492 }
493 
494 static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
495 {
496 }
497 #endif
498 
499 /* which ring of a CE? */
500 #define CE_RING_SRC  0
501 #define CE_RING_DEST 1
502 #define CE_RING_STATUS 2
503 
/* CDC workaround: magic marker value and the data CE it applies to -- TODO confirm semantics */
504 #define CDC_WAR_MAGIC_STR   0xceef0000
505 #define CDC_WAR_DATA_CE     4
506 
507 /* Additional internal-only ce_send flags */
508 #define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */
509 
510 /**
511  * hif_get_wake_ce_id() - gets the copy engine id used for waking up
512  * @scn: The hif context to use
513  * @ce_id: a pointer where the copy engine Id should be populated
514  *
515  * Return: 0 on success, otherwise an errno value
516  */
517 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);
518 #endif /* __COPY_ENGINE_INTERNAL_H__ */
519