/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_INTERNAL_H__
#define __COPY_ENGINE_INTERNAL_H__

#include <hif.h>                /* A_TARGET_WRITE */

/* Copy Engine operational state */
enum CE_op_state {
	CE_UNUSED,
	CE_PAUSED,
	CE_RUNNING,
	CE_PENDING,
};

enum ol_ath_hif_ce_ecodes {
	CE_RING_DELTA_FAIL = 0
};

struct CE_src_desc;

/* Copy Engine Ring internal state */
struct CE_ring_state {

	/* Number of entries in this ring; must be a power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;

	/*
	 * For the dest ring, this is the next index to be processed
	 * by software after the hardware has received data into it.
	 *
	 * For the src ring, this is the last descriptor that was sent
	 * and whose completion has been processed by software.
	 *
	 * Regardless of src or dest ring, this invariant holds
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	unsigned int write_index;       /* cached copy */
	/*
	 * For the src ring, this is the next index not yet processed by HW.
	 * It is a cached copy of the real HW index (read index), used to
	 * avoid reading the HW index register more often than necessary.
	 * It extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For the dest ring, this is currently unused.
	 */
	unsigned int hw_index;  /* cached copy */

	/* Start of DMA-coherent area reserved for descriptors */
	void *base_addr_owner_space_unaligned;  /* Host address space */
	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	void *base_addr_owner_space;    /* Host address space */
	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	char *shadow_base_unaligned;
	struct CE_src_desc *shadow_base;

	unsigned int low_water_mark_nentries;
	unsigned int high_water_mark_nentries;
	void *srng_ctx;
	void **per_transfer_context;
	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */
};
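
/*
 * Illustrative sketch (hypothetical helpers, not part of the driver API):
 * because nentries is a power of 2, distances between ring indices can be
 * computed modulo the ring size with nentries_mask.  Under the invariant
 * documented above (write index >= read index >= sw_index, modulo ring
 * size), the number of descriptors posted but not yet reaped would be
 * derived as follows.
 */
static inline unsigned int
ce_ring_delta_example(struct CE_ring_state *ring,
		      unsigned int from_idx, unsigned int to_idx)
{
	/* entries between from_idx and to_idx, wrapping at nentries */
	return (to_idx - from_idx) & ring->nentries_mask;
}

static inline unsigned int
ce_ring_filled_example(struct CE_ring_state *ring)
{
	/* descriptors published by software but not yet completed */
	return ce_ring_delta_example(ring, ring->sw_index, ring->write_index);
}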

/* Copy Engine internal state */
struct CE_state {
	struct hif_softc *scn;
	unsigned int id;
	unsigned int attr_flags;  /* CE_ATTR_* */
	uint32_t ctrl_addr;       /* relative to BAR */
	enum CE_op_state state;

#ifdef WLAN_FEATURE_FASTPATH
	fastpath_msg_handler fastpath_handler;
	void *context;
#endif /* WLAN_FEATURE_FASTPATH */
	qdf_work_t oom_allocation_work;

	ce_send_cb send_cb;
	void *send_context;

	CE_recv_cb recv_cb;
	void *recv_context;

	/* misc_cbs - are any callbacks besides send and recv enabled? */
	uint8_t misc_cbs;

	CE_watermark_cb watermark_cb;
	void *wm_context;

	/* Record the state of the copy completion interrupt */
	int disable_copy_compl_intr;

	unsigned int src_sz_max;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	struct CE_ring_state *status_ring;
	atomic_t rx_pending;

	qdf_spinlock_t ce_index_lock;
	/* Flag to indicate whether to break out of the DPC context */
	bool force_break;

	/* time in nanoseconds to yield control of napi poll */
	unsigned long long ce_service_yield_time;
	/* CE service start time in nanoseconds */
	unsigned long long ce_service_start_time;
	/* Number of receive buffers handled in one interrupt DPC routine */
	unsigned int receive_count;
	/* epping */
	bool timer_inited;
	qdf_timer_t poll_timer;

	/* datapath - for faster access, use bools instead of a bitmap */
	bool htt_tx_data;
	bool htt_rx_data;
	qdf_lro_ctx_t lro_data;

	void (*service)(struct hif_softc *scn, int CE_id);
};

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CLOCK_OVERRIDE 0x2

#ifdef QCA_WIFI_3_0
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
	((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
#else
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)((desc)->buffer_addr)
#endif

#ifdef QCA_WIFI_3_0
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		dest_byte_swap:1,
		byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		byte_swap:1,          /* src_byte_swap */
		dest_byte_swap:1,
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};

struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		byte_swap:1,
		src_byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;          /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		src_byte_swap:1,
		byte_swap:1,         /* dest_byte_swap */
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16;    /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
#else
struct CE_src_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};

struct CE_dest_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
#endif /* QCA_WIFI_3_0 */
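
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * HIF_CE_DESC_ADDR_TO_DMA() hides the two descriptor layouts above.  On
 * QCA_WIFI_3_0 targets the DMA address is wider than 32 bits, so the low
 * 32 bits live in buffer_addr and the next 5 bits in buffer_addr_hi; on
 * older targets buffer_addr holds the whole address.
 */
static inline qdf_dma_addr_t
ce_src_desc_dma_addr_example(struct CE_src_desc *desc)
{
	/* works for both layouts because the macro handles the split */
	return HIF_CE_DESC_ADDR_TO_DMA(desc);
}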

struct ce_srng_src_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 buffer_addr_hi:8;
	uint32_t rsvd1:16,
		 meta_data:16;
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t buffer_addr_hi:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd1:12,
		 buffer_addr_hi:8;
#else
	uint32_t buffer_addr_hi:8,
		 rsvd1:12,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_status_desc {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 rsvd0:8;
	uint32_t rsvd1:16,
		 meta_data:16;
#else
	uint32_t rsvd0:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
#endif
	uint32_t toeplitz_hash;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

#define CE_SENDLIST_ITEMS_MAX 12

/**
 * union ce_desc - unified data type for ce descriptors
 *
 * Both source and destination descriptors follow the same format.
 * They use different data structures for different access semantics.
 * Here we provide a unifying data type.
 */
union ce_desc {
	struct CE_src_desc src_desc;
	struct CE_dest_desc dest_desc;
};

/**
 * union ce_srng_desc - unified data type for ce srng descriptors
 * @src_desc: ce srng source ring descriptor
 * @dest_desc: ce srng destination ring descriptor
 * @dest_status_desc: ce srng status ring descriptor
 */
union ce_srng_desc {
	struct ce_srng_src_desc src_desc;
	struct ce_srng_dest_desc dest_desc;
	struct ce_srng_dest_status_desc dest_status_desc;
};

/**
 * enum hif_ce_event_type - HIF copy engine event type
 * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
 * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
 * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
 * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
 * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the
 *	write index in a normal tx
 * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
 * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the RX ring in fastpath
 * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the RX ring in fastpath
 * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the TX ring in fastpath
 * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: event recorded when dropping a write
 *	to the write index in fastpath
 * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the TX ring in fastpath
 * @RESUME_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	on resume
 * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
 * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
 * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
 * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
 * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
 * @HIF_CE_REAP_EXIT: records when we process completion outside of a bh
 * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
 * @NAPI_POLL_ENTER: records the start of the napi poll function
 * @NAPI_COMPLETE: records when interrupts are reenabled
 * @NAPI_POLL_EXIT: records when the napi poll function returns
 * @HIF_RX_NBUF_ALLOC_FAILURE: record the packet when nbuf allocation fails
 * @HIF_RX_NBUF_MAP_FAILURE: record the packet when dma map fails
 * @HIF_RX_NBUF_ENQUEUE_FAILURE: record the packet when enqueue to ce fails
 * @HIF_CE_SRC_RING_BUFFER_POST: record the packet when a buffer is posted to
 *	the ce src ring
 * @HIF_CE_DEST_RING_BUFFER_POST: record the packet when a buffer is posted to
 *	the ce dest ring
 * @HIF_CE_DEST_RING_BUFFER_REAP: record the packet when a buffer is reaped
 *	from the ce dest ring
 * @HIF_CE_DEST_STATUS_RING_REAP: record the packet when the status ring is
 *	reaped
 * @HIF_RX_DESC_PRE_NBUF_ALLOC: record the packet before nbuf allocation
 * @HIF_RX_DESC_PRE_NBUF_MAP: record the packet before nbuf map
 * @HIF_RX_DESC_POST_NBUF_MAP: record the packet after nbuf map
 */
enum hif_ce_event_type {
	HIF_RX_DESC_POST,
	HIF_RX_DESC_COMPLETION,
	HIF_TX_GATHER_DESC_POST,
	HIF_TX_DESC_POST,
	HIF_TX_DESC_SOFTWARE_POST,
	HIF_TX_DESC_COMPLETION,
	FAST_RX_WRITE_INDEX_UPDATE,
	FAST_RX_SOFTWARE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
	FAST_TX_SOFTWARE_INDEX_UPDATE,
	RESUME_WRITE_INDEX_UPDATE,

	HIF_IRQ_EVENT = 0x10,
	HIF_CE_TASKLET_ENTRY,
	HIF_CE_TASKLET_RESCHEDULE,
	HIF_CE_TASKLET_EXIT,
	HIF_CE_REAP_ENTRY,
	HIF_CE_REAP_EXIT,
	NAPI_SCHEDULE,
	NAPI_POLL_ENTER,
	NAPI_COMPLETE,
	NAPI_POLL_EXIT,

	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
	HIF_RX_NBUF_MAP_FAILURE,
	HIF_RX_NBUF_ENQUEUE_FAILURE,

	HIF_CE_SRC_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_REAP,
	HIF_CE_DEST_STATUS_RING_REAP,

	HIF_RX_DESC_PRE_NBUF_ALLOC,
	HIF_RX_DESC_PRE_NBUF_MAP,
	HIF_RX_DESC_POST_NBUF_MAP,
};

void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size);
void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id);
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len);
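
/*
 * Illustrative usage sketch (hypothetical helper; the call site and the
 * zero length are assumptions): the event types above are logged around
 * ring index updates.  A send path would typically record HIF_TX_DESC_POST
 * just before publishing a new write index, using union ce_desc so source
 * and destination descriptors can share one logging API.
 */
static inline void
ce_record_tx_desc_post_example(struct hif_softc *scn,
			       struct CE_state *ce_state,
			       struct CE_src_desc *src_desc,
			       void *per_transfer_ctx,
			       unsigned int write_index)
{
	hif_record_ce_desc_event(scn, ce_state->id, HIF_TX_DESC_POST,
				 (union ce_desc *)src_desc, per_transfer_ctx,
				 write_index, 0 /* payload length unknown */);
}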

enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,
	/* TBDXXX: CE_RX_DESC_LIST, */
};

/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter.  The latter is the actual structure
 * definition and it is only used internally.  The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.
 */
struct ce_sendlist_s {
	unsigned int num_items;
	struct ce_sendlist_item {
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;        /* e.g. buffer or desc list */
		union {
			unsigned int nbytes;    /* simple buffer */
			unsigned int ndesc;     /* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags
		 */
		uint32_t flags;
		uint32_t user_flags;
	} item[CE_SENDLIST_ITEMS_MAX];
};
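
/*
 * Illustrative sketch (hypothetical helper; assumes the opaque struct
 * ce_sendlist declared in hif.h reserves at least
 * sizeof(struct ce_sendlist_s) bytes): callers allocate the opaque type on
 * the stack, while internal code views the same memory through the private
 * type.
 */
static inline struct ce_sendlist_s *
ce_sendlist_priv_example(struct ce_sendlist *sendlist)
{
	return (struct ce_sendlist_s *)sendlist;
}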

bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state);

#ifdef WLAN_FEATURE_FASTPATH
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif

/* which ring of a CE? */
#define CE_RING_SRC  0
#define CE_RING_DEST 1
#define CE_RING_STATUS 2

#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */

/**
 * hif_get_wake_ce_id() - gets the copy engine id used for waking up
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine id should be populated
 *
 * Return: errno
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);
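
/*
 * Illustrative usage sketch (hypothetical caller): check the errno-style
 * return before trusting the id written through the out-parameter.
 */
static inline int hif_wake_ce_id_example(struct hif_softc *scn)
{
	uint8_t wake_ce_id;
	int errno = hif_get_wake_ce_id(scn, &wake_ce_id);

	if (errno)
		return errno;

	/* wake_ce_id now holds the copy engine used for wake interrupts */
	return wake_ce_id;
}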

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#ifndef HIF_CE_HISTORY_MAX
#define HIF_CE_HISTORY_MAX 1024
#endif

#define CE_DEBUG_MAX_DATA_BUF_SIZE 64

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @index: location of the descriptor in the ce ring
 * @type: what the event was
 * @time: when it happened
 * @current_hp: holds the current ring head pointer (hp) value
 * @current_tp: holds the current ring tail pointer (tp) value
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @dma_addr: physical/iova address based on smmu status
 * @dma_to_phy: physical address from iova address
 * @virt_to_phy: physical address from virtual address
 * @actual_data_len: length of the data
 * @data: data pointed to by the descriptor
 */
struct hif_ce_desc_event {
	int index;
	enum hif_ce_event_type type;
	uint64_t time;
#ifdef HELIUMPLUS
	union ce_desc descriptor;
#else
	uint32_t current_hp;
	uint32_t current_tp;
	union ce_srng_desc descriptor;
#endif
	void *memory;

#ifdef HIF_RECORD_PADDR
	/* iova/pa based on smmu status */
	qdf_dma_addr_t dma_addr;
	/* store pa from iova address */
	qdf_dma_addr_t dma_to_phy;
	/* store pa */
	qdf_dma_addr_t virt_to_phy;
#endif /* HIF_RECORD_PADDR */

#ifdef HIF_CE_DEBUG_DATA_BUF
	size_t actual_data_len;
	uint8_t *data;
#endif /* HIF_CE_DEBUG_DATA_BUF */
};
#else
struct hif_ce_desc_event;
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array.  If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 *
 * Return: the reserved index into the circular buffer
 */
int get_next_record_index(qdf_atomic_t *table_index, int array_size);
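
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/*
 * Illustrative usage sketch (hypothetical history table and helper): each
 * recording context reserves a slot with get_next_record_index() and then
 * fills it, relying on the wrap-around and thread-safety properties
 * described above.
 */
static inline struct hif_ce_desc_event *
hif_ce_reserve_event_slot_example(struct hif_ce_desc_event *hist,
				  qdf_atomic_t *table_index)
{
	int record_index = get_next_record_index(table_index,
						 HIF_CE_HISTORY_MAX);

	return &hist[record_index];
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */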

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * hif_record_ce_srng_desc_event() - Record the data pointed to by the CE
 * SRNG descriptor
 * @scn: hif context
 * @ce_id: copy engine id
 * @type: event type
 * @descriptor: ce src/dest/status ring descriptor
 * @memory: nbuf
 * @index: current sw/write index
 * @len: length of the buffer
 * @hal_ring: ce hw ring
 *
 * Return: None
 */
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring);

/**
 * hif_clear_ce_desc_debug_data() - Clear the contents of a hif_ce_desc_event,
 * up to the data field, before reusing it
 * @event: the event record to clear
 *
 * Return: None
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event);
#else
static inline
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
}

static inline
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_ce_desc_data_record() - Record the data pointed to by the CE descriptor
 * @event: structure detailing a ce event
 * @len: length of the data
 *
 * Return: None
 */
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len);

QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
#else
static inline
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) { }

static inline
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
{
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
 * @nbytes: nbytes value being written into a send descriptor
 * @ce_state: context of the copy engine
 *
 * nbytes should be non-zero and must not exceed the maximum configured for
 * the copy engine.
 *
 * Return: none
 */
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
	if (nbytes <= 0 || nbytes > ce_state->src_sz_max)
		QDF_BUG(0);
}
#else
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

#if defined(HIF_RECORD_PADDR)
/**
 * hif_ce_desc_record_rx_paddr() - record the physical addresses behind the
 * IOMMU IOVA addr and the MMU virtual addr for Rx
 * @scn: hif context
 * @event: structure detailing a ce event
 * @nbuf: buffer posted to fw
 *
 * Record the physical address for ce_event_type HIF_RX_DESC_POST and
 * HIF_RX_DESC_COMPLETION.
 *
 * Return: none
 */
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf);
#else
static inline
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf)
{
}
#endif /* HIF_RECORD_PADDR */
#endif /* __COPY_ENGINE_INTERNAL_H__ */