/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_INTERNAL_H__
#define __COPY_ENGINE_INTERNAL_H__

#include <hif.h>                /* A_TARGET_WRITE */

/* Copy Engine operational state */
enum CE_op_state {
	CE_UNUSED,
	CE_PAUSED,
	CE_RUNNING,
	CE_PENDING,
};

enum ol_ath_hif_ce_ecodes {
	CE_RING_DELTA_FAIL = 0
};

struct CE_src_desc;

/* Copy Engine Ring internal state */
struct CE_ring_state {

	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;

	/*
	 * For the dest ring, this is the next index to be processed
	 * by software after data was/is received into it.
	 *
	 * For the src ring, this is the last descriptor that was sent
	 * and whose completion has been processed by software.
	 *
	 * Regardless of src or dest ring, this invariant holds
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	unsigned int write_index;       /* cached copy */
	/*
	 * For the src ring, this is the next index not yet processed by HW.
	 * It is a cached copy of the real HW index (read index), used
	 * to avoid reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For the dest ring, this is currently unused.
	 */
	unsigned int hw_index;  /* cached copy */
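
	/*
	 * The invariants above make the usual power-of-2 ring arithmetic
	 * well defined. A minimal sketch (illustrative only; the driver
	 * keeps its own ring-delta helpers in the CE service code):
	 *
	 *	used = (write_index - sw_index) & nentries_mask;
	 *	free = (sw_index - write_index - 1) & nentries_mask;
	 */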

	/* Start of DMA-coherent area reserved for descriptors */
	void *base_addr_owner_space_unaligned;  /* Host address space */
	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	void *base_addr_owner_space;    /* Host address space */
	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
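
	/*
	 * How the aligned base is typically derived from the unaligned
	 * allocation (a sketch, assuming the standard round-up-to-
	 * alignment idiom; the real setup lives in the CE init code):
	 *
	 *	base_addr_CE_space = (base_addr_CE_space_unaligned +
	 *			      CE_DESC_RING_ALIGN - 1) &
	 *			     ~(CE_DESC_RING_ALIGN - 1);
	 */
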
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	char *shadow_base_unaligned;
	struct CE_src_desc *shadow_base;

	unsigned int low_water_mark_nentries;
	unsigned int high_water_mark_nentries;
	void *srng_ctx;
	void **per_transfer_context;
	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */
};

/* Copy Engine internal state */
struct CE_state {
	struct hif_softc *scn;
	unsigned int id;
	unsigned int attr_flags;  /* CE_ATTR_* */
	uint32_t ctrl_addr;       /* relative to BAR */
	enum CE_op_state state;

#ifdef WLAN_FEATURE_FASTPATH
	fastpath_msg_handler fastpath_handler;
	void *context;
#endif /* WLAN_FEATURE_FASTPATH */
	qdf_work_t oom_allocation_work;

	ce_send_cb send_cb;
	void *send_context;

	CE_recv_cb recv_cb;
	void *recv_context;

	/* misc_cbs - are any callbacks besides send and recv enabled? */
	uint8_t misc_cbs;

	CE_watermark_cb watermark_cb;
	void *wm_context;

	/* Record the state of the copy completion interrupt */
	int disable_copy_compl_intr;

	unsigned int src_sz_max;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	struct CE_ring_state *status_ring;
	atomic_t rx_pending;

	qdf_spinlock_t ce_index_lock;
	/* Flag to indicate whether to break out of the DPC context */
	bool force_break;

	/* time in nanoseconds to yield control of napi poll */
	unsigned long long ce_service_yield_time;
	/* CE service start time in nanoseconds */
	unsigned long long ce_service_start_time;
	/* Number of receive buffers handled in one interrupt DPC routine */
	unsigned int receive_count;
	/* epping */
	bool timer_inited;
	qdf_timer_t poll_timer;

	/* datapath - for faster access, use bools instead of a bitmap */
	bool htt_tx_data;
	bool htt_rx_data;
	qdf_lro_ctx_t lro_data;

	void (*service)(struct hif_softc *scn, int CE_id);
};

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CLOCK_OVERRIDE 0x2

#ifdef QCA_WIFI_3_0
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
	((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
#else
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)((desc)->buffer_addr)
#endif
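
/*
 * Illustrative inverse of HIF_CE_DESC_ADDR_TO_DMA for QCA_WIFI_3_0
 * parts (a sketch only; descriptor programming actually happens in the
 * CE service code): a 37-bit DMA address is split across the 32-bit
 * buffer_addr field and the low 5 bits of buffer_addr_hi.
 *
 *	desc->buffer_addr    = (uint32_t)(dma_addr & 0xffffffff);
 *	desc->buffer_addr_hi = (uint32_t)((dma_addr >> 32) & 0x1f);
 */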

#ifdef QCA_WIFI_3_0
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		dest_byte_swap:1,
		byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		byte_swap:1,          /* src_byte_swap */
		dest_byte_swap:1,
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};

struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		byte_swap:1,
		src_byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;          /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		src_byte_swap:1,
		byte_swap:1,         /* dest_byte_swap */
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16;    /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
#else
struct CE_src_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};

struct CE_dest_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
#endif /* QCA_WIFI_3_0 */

struct ce_srng_src_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 buffer_addr_hi:8;
	uint32_t rsvd1:16,
		 meta_data:16;
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t buffer_addr_hi:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd1:12,
		 buffer_addr_hi:8;
#else
	uint32_t buffer_addr_hi:8,
		 rsvd1:12,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_status_desc {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 rsvd0:8;
	uint32_t rsvd1:16,
		 meta_data:16;
#else
	uint32_t rsvd0:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
#endif
	uint32_t toeplitz_hash;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

#define CE_SENDLIST_ITEMS_MAX 12

/**
 * union ce_desc - unified data type for ce descriptors
 * @src_desc: source ring descriptor
 * @dest_desc: destination ring descriptor
 *
 * Both source and destination descriptors follow the same format.
 * They use different data structures for different access semantics.
 * Here we provide a unifying data type.
 */
union ce_desc {
	struct CE_src_desc src_desc;
	struct CE_dest_desc dest_desc;
};

/**
 * union ce_srng_desc - unified data type for ce srng descriptors
 * @src_desc: ce srng Source ring descriptor
 * @dest_desc: ce srng destination ring descriptor
 * @dest_status_desc: ce srng status ring descriptor
 */
union ce_srng_desc {
	struct ce_srng_src_desc src_desc;
	struct ce_srng_dest_desc dest_desc;
	struct ce_srng_dest_status_desc dest_status_desc;
};

/**
 * enum hif_ce_event_type - HIF copy engine event type
 * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
 * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
 * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
 * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
 * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write
 *	index in a normal tx
 * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
 * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the RX ring in fastpath
 * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the RX ring in fastpath
 * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the TX ring in fastpath
 * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: event recorded when dropping a write
 *	to the write index in fastpath
 * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the TX ring in fastpath
 * @RESUME_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	on resume
 * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
 * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
 * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
 * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
 * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
 * @HIF_CE_REAP_EXIT: records when we process completion outside of a bh
 * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
 * @NAPI_POLL_ENTER: records the start of the napi poll function
 * @NAPI_COMPLETE: records when interrupts are re-enabled
 * @NAPI_POLL_EXIT: records when the napi poll function returns
 * @HIF_RX_NBUF_ALLOC_FAILURE: record the packet when nbuf allocation fails
 * @HIF_RX_NBUF_MAP_FAILURE: record the packet when dma map fails
 * @HIF_RX_NBUF_ENQUEUE_FAILURE: record the packet when enqueue to ce fails
 * @HIF_CE_SRC_RING_BUFFER_POST: record the packet when a buffer is posted to
 *	the ce src ring
 * @HIF_CE_DEST_RING_BUFFER_POST: record the packet when a buffer is posted to
 *	the ce dst ring
 * @HIF_CE_DEST_RING_BUFFER_REAP: record the packet when a buffer is reaped
 *	from the ce dst ring
 * @HIF_CE_DEST_STATUS_RING_REAP: record the packet when the status ring is
 *	reaped
 * @HIF_RX_DESC_PRE_NBUF_ALLOC: record the packet before nbuf allocation
 * @HIF_RX_DESC_PRE_NBUF_MAP: record the packet before nbuf map
 * @HIF_RX_DESC_POST_NBUF_MAP: record the packet after nbuf map
 * @HIF_EVENT_TYPE_MAX: marker for the number of event types
 */
enum hif_ce_event_type {
	HIF_RX_DESC_POST,
	HIF_RX_DESC_COMPLETION,
	HIF_TX_GATHER_DESC_POST,
	HIF_TX_DESC_POST,
	HIF_TX_DESC_SOFTWARE_POST,
	HIF_TX_DESC_COMPLETION,
	FAST_RX_WRITE_INDEX_UPDATE,
	FAST_RX_SOFTWARE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
	FAST_TX_SOFTWARE_INDEX_UPDATE,
	RESUME_WRITE_INDEX_UPDATE,

	HIF_IRQ_EVENT = 0x10,
	HIF_CE_TASKLET_ENTRY,
	HIF_CE_TASKLET_RESCHEDULE,
	HIF_CE_TASKLET_EXIT,
	HIF_CE_REAP_ENTRY,
	HIF_CE_REAP_EXIT,
	NAPI_SCHEDULE,
	NAPI_POLL_ENTER,
	NAPI_COMPLETE,
	NAPI_POLL_EXIT,

	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
	HIF_RX_NBUF_MAP_FAILURE,
	HIF_RX_NBUF_ENQUEUE_FAILURE,

	HIF_CE_SRC_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_REAP,
	HIF_CE_DEST_STATUS_RING_REAP,

	HIF_RX_DESC_PRE_NBUF_ALLOC,
	HIF_RX_DESC_PRE_NBUF_MAP,
	HIF_RX_DESC_POST_NBUF_MAP,

	HIF_EVENT_TYPE_MAX,
};

void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size);
void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id);
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len);
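
/*
 * Example call (an illustrative sketch, not a prescription): recording
 * a legacy source-ring descriptor at post time, where "src_desc",
 * "per_transfer_ctx", "write_index" and "nbytes" are assumed to come
 * from the surrounding send path.
 *
 *	hif_record_ce_desc_event(scn, CE_state->id, HIF_TX_DESC_POST,
 *				 (union ce_desc *)src_desc,
 *				 per_transfer_ctx, write_index, nbytes);
 */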

enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,
	/* TBDXXX: CE_RX_DESC_LIST, */
};

/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter.  The latter is the actual structure
 * definition and it is only used internally.  The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.
 */
struct ce_sendlist_s {
	unsigned int num_items;
	struct ce_sendlist_item {
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;        /* e.g. buffer or desc list */
		union {
			unsigned int nbytes;    /* simple buffer */
			unsigned int ndesc;     /* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags
		 */
		uint32_t flags;
		uint32_t user_flags;
	} item[CE_SENDLIST_ITEMS_MAX];
};
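
/*
 * The opaque/private pairing described above is used like this (a
 * sketch; callers normally go through the public ce_sendlist_init()
 * and ce_sendlist_buf_add() helpers rather than casting by hand):
 *
 *	struct ce_sendlist sendlist;	// opaque, on the caller's stack
 *	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)&sendlist;
 *
 *	sl->num_items = 0;	// internal code populates item[] from here
 */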

bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state);

#ifdef WLAN_FEATURE_FASTPATH
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif

/* which ring of a CE? */
#define CE_RING_SRC  0
#define CE_RING_DEST 1
#define CE_RING_STATUS 2

#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */

/**
 * hif_get_wake_ce_id() - gets the copy engine id used for waking up
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine Id should be populated
 *
 * Return: errno
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);
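
/*
 * Typical call pattern (an illustrative sketch; error handling is up
 * to the caller):
 *
 *	uint8_t wake_ce_id;
 *
 *	if (hif_get_wake_ce_id(scn, &wake_ce_id))
 *		return;		// no wake CE could be determined
 *	// wake_ce_id now names the CE whose irq can wake the host
 */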

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#ifndef HIF_CE_HISTORY_MAX
#define HIF_CE_HISTORY_MAX 1024
#endif

#define CE_DEBUG_MAX_DATA_BUF_SIZE 64

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @index: location of the descriptor in the ce ring
 * @type: what the event was
 * @time: when it happened
 * @cpu_id: cpu on which the event was recorded
 * @current_hp: holds the current ring hp value
 * @current_tp: holds the current ring tp value
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @dma_addr: physical/iova address based on smmu status
 * @dma_to_phy: physical address from iova address
 * @virt_to_phy: physical address from virtual address
 * @actual_data_len: length of the data
 * @data: data pointed by descriptor
 */
struct hif_ce_desc_event {
	int index;
	enum hif_ce_event_type type;
	uint64_t time;
	int cpu_id;
#ifdef HELIUMPLUS
	union ce_desc descriptor;
#else
	uint32_t current_hp;
	uint32_t current_tp;
	union ce_srng_desc descriptor;
#endif
	void *memory;

#ifdef HIF_RECORD_PADDR
	/* iova/pa based on smmu status */
	qdf_dma_addr_t dma_addr;
	/* store pa from iova address */
	qdf_dma_addr_t dma_to_phy;
	/* store pa */
	qdf_dma_addr_t virt_to_phy;
#endif /* HIF_RECORD_PADDR */

#ifdef HIF_CE_DEBUG_DATA_BUF
	size_t actual_data_len;
	uint8_t *data;
#endif /* HIF_CE_DEBUG_DATA_BUF */
};
#else
struct hif_ce_desc_event;
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array.  If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 *
 * Return: the reserved index into the circular buffer
 */
int get_next_record_index(qdf_atomic_t *table_index, int array_size);
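
/*
 * A minimal sketch of one wrap-safe way to implement the contract
 * above (illustrative only; the actual definition lives in the CE
 * source files):
 *
 *	int record_index = qdf_atomic_inc_return(table_index);
 *
 *	// pull the shared counter back once per lap so it never
 *	// overflows, then fold the reserved value into range
 *	if (record_index == array_size)
 *		qdf_atomic_sub(array_size, table_index);
 *	while (record_index >= array_size)
 *		record_index -= array_size;
 *	return record_index;
 */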

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * hif_record_ce_srng_desc_event() - record an event for a CE srng descriptor
 * @scn: hif context
 * @ce_id: which ce the event is occurring on
 * @type: event_type
 * @descriptor: ce src/dest/status ring descriptor
 * @memory: nbuf
 * @index: current sw/write index
 * @len: len of the buffer
 * @hal_ring: ce hw ring
 *
 * Return: None
 */
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring);
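
/*
 * Example call (a sketch, assuming it is made from the dest-ring post
 * path; "dest_desc", "per_transfer_ctx" and the ring pointers are
 * stand-ins for whatever the caller has in scope):
 *
 *	hif_record_ce_srng_desc_event(scn, CE_state->id,
 *				      HIF_CE_DEST_RING_BUFFER_POST,
 *				      (union ce_srng_desc *)dest_desc,
 *				      per_transfer_ctx,
 *				      dest_ring->write_index, 0,
 *				      dest_ring->srng_ctx);
 */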

/**
 * hif_clear_ce_desc_debug_data() - Clear the contents of hif_ce_desc_event
 * up to the data field before reusing it.
 *
 * @event: the event record to clear
 *
 * Return: None
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event);
#else
static inline
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
}

static inline
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_ce_desc_data_record() - Record data pointed by the CE descriptor
 * @event: structure detailing a ce event
 * @len: length of the data
 *
 * Return: None
 */
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len);

QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
#else
static inline
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) { }

static inline
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
{
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
 * @nbytes: nbytes value being written into a send descriptor
 * @ce_state: context of the copy engine
 *
 * nbytes should be non-zero and must not exceed the max configured for the
 * copy engine
 *
 * Return: none
 */
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
	if (!nbytes || nbytes > ce_state->src_sz_max)
		QDF_BUG(0);
}
#else
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

#if defined(HIF_RECORD_PADDR)
/**
 * hif_ce_desc_record_rx_paddr() - record the physical address (IOMMU IOVA
 * addr and MMU virtual addr) of an Rx buffer
 * @scn: hif_softc
 * @event: structure detailing a ce event
 * @nbuf: buffer posted to fw
 *
 * Record the physical address for the ce_event_type values
 * HIF_RX_DESC_POST and HIF_RX_DESC_COMPLETION.
 *
 * Return: none
 */
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf);
#else
static inline
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf)
{
}
#endif /* HIF_RECORD_PADDR */
#endif /* __COPY_ENGINE_INTERNAL_H__ */