/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_INTERNAL_H__
#define __COPY_ENGINE_INTERNAL_H__

#include <hif.h>                /* A_TARGET_WRITE */

/* Copy Engine operational state */
enum CE_op_state {
	CE_UNUSED,
	CE_PAUSED,
	CE_RUNNING,
	CE_PENDING,
};

enum ol_ath_hif_ce_ecodes {
	CE_RING_DELTA_FAIL = 0
};

struct CE_src_desc;

/* Copy Engine Ring internal state */
struct CE_ring_state {

	/* Number of entries in this ring; must be a power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;     /* nentries - 1 */

	/*
	 * For a dest ring, this is the next index software will
	 * process after data has been received into it.
	 *
	 * For a src ring, this is the last descriptor whose send
	 * completion software has processed.
	 *
	 * Regardless of src or dest ring, this invariant holds
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 * (See the illustrative sketch following this structure.)
	 */
	unsigned int sw_index;
	unsigned int write_index;       /* cached copy */
	/*
	 * For a src ring, this is the next index not yet processed by HW.
	 * It is a cached copy of the real HW index (read index), kept to
	 * avoid reading the HW index register more often than necessary.
	 * It extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For a dest ring, this is currently unused.
	 */
	unsigned int hw_index;  /* cached copy */

	/* Start of DMA-coherent area reserved for descriptors */
	void *base_addr_owner_space_unaligned;  /* Host address space */
	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	void *base_addr_owner_space;    /* Host address space */
	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	char *shadow_base_unaligned;
	struct CE_src_desc *shadow_base;

	unsigned int low_water_mark_nentries;
	unsigned int high_water_mark_nentries;
	void *srng_ctx;
	void **per_transfer_context;

	/* HAL CE ring type */
	uint32_t hal_ring_type;
	/* ring memory prealloc */
	uint8_t is_ring_prealloc;

	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */
};
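
/*
 * Illustrative sketch only (these helpers are hypothetical, not part of
 * the driver): because nentries is a power of 2, the index invariant
 * documented in struct CE_ring_state lets ring occupancy be computed
 * with mask arithmetic:
 *
 *	static inline unsigned int
 *	ce_ring_filled_sketch(struct CE_ring_state *ring)
 *	{
 *		return (ring->write_index - ring->sw_index) &
 *			ring->nentries_mask;
 *	}
 *
 *	static inline unsigned int
 *	ce_ring_free_sketch(struct CE_ring_state *ring)
 *	{
 *		// one slot is kept empty so a full ring is
 *		// distinguishable from an empty one
 *		return ring->nentries_mask - ce_ring_filled_sketch(ring);
 *	}
 */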

/* Copy Engine internal state */
struct CE_state {
	struct hif_softc *scn;
	unsigned int id;
	unsigned int attr_flags;  /* CE_ATTR_* */
	uint32_t ctrl_addr;       /* relative to BAR */
	enum CE_op_state state;

#ifdef WLAN_FEATURE_FASTPATH
	fastpath_msg_handler fastpath_handler;
	void *context;
#endif /* WLAN_FEATURE_FASTPATH */
	qdf_work_t oom_allocation_work;

	ce_send_cb send_cb;
	void *send_context;

	CE_recv_cb recv_cb;
	void *recv_context;

	/* misc_cbs - are any callbacks besides send and recv enabled? */
	uint8_t misc_cbs;

	CE_watermark_cb watermark_cb;
	void *wm_context;

	/* Record the state of the copy-complete interrupt */
	int disable_copy_compl_intr;

	unsigned int src_sz_max;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	struct CE_ring_state *status_ring;
	atomic_t rx_pending;

	qdf_spinlock_t ce_index_lock;
	/* Flag to indicate whether to break out of the DPC context */
	bool force_break;

	/* time in nanoseconds to yield control of napi poll */
	unsigned long long ce_service_yield_time;
	/* CE service start time in nanoseconds */
	unsigned long long ce_service_start_time;
	/* Number of receive buffers handled in one interrupt DPC routine */
	unsigned int receive_count;
	/* epping */
	bool timer_inited;
	qdf_timer_t poll_timer;

	/* datapath - for faster access, use bools instead of a bitmap */
	bool htt_tx_data;
	bool htt_rx_data;
	qdf_lro_ctx_t lro_data;

	void (*service)(struct hif_softc *scn, int CE_id);
};

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CLOCK_OVERRIDE 0x2

#ifdef QCA_WIFI_3_0
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
	((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
#else
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)((desc)->buffer_addr)
#endif
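
/*
 * Example (illustrative): with QCA_WIFI_3_0, a descriptor whose
 * buffer_addr is 0x12345678 and whose buffer_addr_hi is 0x3 yields a
 * 37-bit DMA address:
 *
 *	qdf_dma_addr_t paddr = HIF_CE_DESC_ADDR_TO_DMA(&desc);
 *	// paddr == 0x312345678; only the low 5 bits of buffer_addr_hi
 *	// contribute, per the 0x1F mask above
 */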

#ifdef QCA_WIFI_3_0
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		dest_byte_swap:1,
		byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		byte_swap:1,          /* src_byte_swap */
		dest_byte_swap:1,
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};

struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		byte_swap:1,
		src_byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;          /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		src_byte_swap:1,
		byte_swap:1,         /* dest_byte_swap */
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16;    /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
#else
struct CE_src_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};

struct CE_dest_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
#endif /* QCA_WIFI_3_0 */
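
/*
 * Illustrative sketch only (simplified and hypothetical; the real send
 * path also handles gather fragments, byte swapping, and watermarks):
 * posting a buffer on a pre-QCA_WIFI_3_0 source ring amounts to filling
 * the next CE_src_desc and advancing the cached write index:
 *
 *	struct CE_src_desc *desc =
 *		&((struct CE_src_desc *)
 *		  ring->base_addr_owner_space)[ring->write_index];
 *
 *	desc->buffer_addr = (uint32_t)buffer_paddr;
 *	desc->nbytes = nbytes;
 *	desc->gather = 0;		// sole/final fragment
 *	desc->meta_data = transfer_id;
 *	ring->write_index =
 *		(ring->write_index + 1) & ring->nentries_mask;
 */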

struct ce_srng_src_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 buffer_addr_hi:8;
	uint32_t rsvd1:16,
		 meta_data:16;
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t buffer_addr_hi:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd1:12,
		 buffer_addr_hi:8;
#else
	uint32_t buffer_addr_hi:8,
		 rsvd1:12,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_status_desc {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 rsvd0:8;
	uint32_t rsvd1:16,
		 meta_data:16;
#else
	uint32_t rsvd0:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
#endif
	uint32_t toeplitz_hash;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

#define CE_SENDLIST_ITEMS_MAX 12

/**
 * union ce_desc - unified data type for ce descriptors
 * @src_desc: source ring descriptor
 * @dest_desc: destination ring descriptor
 *
 * Both src and destination descriptors follow the same format.
 * They use different data structures for different access semantics.
 * Here we provide a unifying data type.
 */
union ce_desc {
	struct CE_src_desc src_desc;
	struct CE_dest_desc dest_desc;
};

/**
 * union ce_srng_desc - unified data type for ce srng descriptors
 * @src_desc: ce srng Source ring descriptor
 * @dest_desc: ce srng destination ring descriptor
 * @dest_status_desc: ce srng status ring descriptor
 */
union ce_srng_desc {
	struct ce_srng_src_desc src_desc;
	struct ce_srng_dest_desc dest_desc;
	struct ce_srng_dest_status_desc dest_status_desc;
};

/**
 * enum hif_ce_event_type - HIF copy engine event type
 * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
 * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
 * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
 * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
 * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the
 *	write index in a normal tx
 * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
 * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the RX ring in fastpath
 * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the RX ring in fastpath
 * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the TX ring in fastpath
 * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: event recorded when dropping a write
 *	to the write index in fastpath
 * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the TX ring in fastpath
 * @RESUME_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	while resuming
 * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
 * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
 * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
 * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
 * @HIF_CE_REAP_ENTRY: records when we start processing completions outside
 *	of a bh
 * @HIF_CE_REAP_EXIT: records when we are done processing completions outside
 *	of a bh
 * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
 * @NAPI_POLL_ENTER: records the start of the napi poll function
 * @NAPI_COMPLETE: records when interrupts are re-enabled
 * @NAPI_POLL_EXIT: records when the napi poll function returns
 * @HIF_RX_NBUF_ALLOC_FAILURE: records the packet when nbuf allocation fails
 * @HIF_RX_NBUF_MAP_FAILURE: records the packet when dma mapping fails
 * @HIF_RX_NBUF_ENQUEUE_FAILURE: records the packet when enqueue to ce fails
 * @HIF_CE_SRC_RING_BUFFER_POST: records the packet when a buffer is posted to
 *	the ce src ring
 * @HIF_CE_DEST_RING_BUFFER_POST: records the packet when a buffer is posted to
 *	the ce dst ring
 * @HIF_CE_DEST_RING_BUFFER_REAP: records the packet when a buffer is reaped
 *	from the ce dst ring
 * @HIF_CE_DEST_STATUS_RING_REAP: records the packet when the status ring is
 *	reaped
 * @HIF_RX_DESC_PRE_NBUF_ALLOC: records the packet before nbuf allocation
 * @HIF_RX_DESC_PRE_NBUF_MAP: records the packet before nbuf map
 * @HIF_RX_DESC_POST_NBUF_MAP: records the packet after nbuf map
 * @HIF_EVENT_TYPE_MAX: number of event types defined
 */
enum hif_ce_event_type {
	HIF_RX_DESC_POST,
	HIF_RX_DESC_COMPLETION,
	HIF_TX_GATHER_DESC_POST,
	HIF_TX_DESC_POST,
	HIF_TX_DESC_SOFTWARE_POST,
	HIF_TX_DESC_COMPLETION,
	FAST_RX_WRITE_INDEX_UPDATE,
	FAST_RX_SOFTWARE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
	FAST_TX_SOFTWARE_INDEX_UPDATE,
	RESUME_WRITE_INDEX_UPDATE,

	HIF_IRQ_EVENT = 0x10,
	HIF_CE_TASKLET_ENTRY,
	HIF_CE_TASKLET_RESCHEDULE,
	HIF_CE_TASKLET_EXIT,
	HIF_CE_REAP_ENTRY,
	HIF_CE_REAP_EXIT,
	NAPI_SCHEDULE,
	NAPI_POLL_ENTER,
	NAPI_COMPLETE,
	NAPI_POLL_EXIT,

	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
	HIF_RX_NBUF_MAP_FAILURE,
	HIF_RX_NBUF_ENQUEUE_FAILURE,

	HIF_CE_SRC_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_REAP,
	HIF_CE_DEST_STATUS_RING_REAP,

	HIF_RX_DESC_PRE_NBUF_ALLOC,
	HIF_RX_DESC_PRE_NBUF_MAP,
	HIF_RX_DESC_POST_NBUF_MAP,

	HIF_EVENT_TYPE_MAX,
};

void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size);
void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id);
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len);
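
/*
 * Example (illustrative, modeled on the send path): a legacy source
 * descriptor is viewed through the unifying union ce_desc type when the
 * post is recorded:
 *
 *	hif_record_ce_desc_event(scn, CE_state->id, HIF_TX_DESC_POST,
 *				 (union ce_desc *)src_desc,
 *				 per_transfer_context,
 *				 src_ring->write_index, nbytes);
 */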

enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,
	/* TBDXXX: CE_RX_DESC_LIST, */
};

/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter.  The latter is the actual structure
 * definition and it is only used internally.  The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.  (An illustrative sketch follows the
 * definition below.)
 */
struct ce_sendlist_s {
	unsigned int num_items;
	struct ce_sendlist_item {
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;        /* e.g. buffer or desc list */
		union {
			unsigned int nbytes;    /* simple buffer */
			unsigned int ndesc;     /* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags
		 */
		uint32_t flags;
		uint32_t user_flags;
	} item[CE_SENDLIST_ITEMS_MAX];
};
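
/*
 * Illustrative sketch of the opaque/private pairing (the public type
 * lives in the CE API header; the names below are assumptions, not the
 * actual definition): the caller-visible struct only reserves raw
 * storage, and a compile-time check keeps the two in sync:
 *
 *	struct ce_sendlist {			// public, opaque
 *		unsigned int dummy[CE_SENDLIST_BUF_SZ];
 *	};
 *
 *	QDF_COMPILE_TIME_ASSERT(ce_sendlist_size_ok,
 *				sizeof(struct ce_sendlist) >=
 *				sizeof(struct ce_sendlist_s));
 */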

bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state);

#ifdef WLAN_FEATURE_FASTPATH
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif

/* which ring of a CE? */
#define CE_RING_SRC  0
#define CE_RING_DEST 1
#define CE_RING_STATUS 2

#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */

/**
 * hif_get_wake_ce_id() - gets the copy engine id used for waking up
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine id should be populated
 *
 * Return: errno
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);

/**
 * hif_get_fw_diag_ce_id() - gets the copy engine id used for FW diag
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine id should be populated
 *
 * Return: errno
 */
int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id);

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#ifndef HIF_CE_HISTORY_MAX
#define HIF_CE_HISTORY_MAX 1024
#endif

#define CE_DEBUG_MAX_DATA_BUF_SIZE 64

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @index: location of the descriptor in the ce ring
 * @type: what the event was
 * @time: when it happened
 * @cpu_id: cpu on which the event was recorded
 * @current_hp: holds the current ring hp value
 * @current_tp: holds the current ring tp value
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @dma_addr: physical/iova address based on smmu status
 * @dma_to_phy: physical address from iova address
 * @virt_to_phy: physical address from virtual address
 * @actual_data_len: length of the data
 * @data: data pointed to by the descriptor
 */
struct hif_ce_desc_event {
	int index;
	enum hif_ce_event_type type;
	uint64_t time;
	int cpu_id;
#ifdef HELIUMPLUS
	union ce_desc descriptor;
#else
	uint32_t current_hp;
	uint32_t current_tp;
	union ce_srng_desc descriptor;
#endif
	void *memory;

#ifdef HIF_RECORD_PADDR
	/* iova/pa based on smmu status */
	qdf_dma_addr_t dma_addr;
	/* pa computed from the iova address */
	qdf_dma_addr_t dma_to_phy;
	/* pa computed from the virtual address */
	qdf_dma_addr_t virt_to_phy;
#endif /* HIF_RECORD_PADDR */

#ifdef HIF_CE_DEBUG_DATA_BUF
	size_t actual_data_len;
	uint8_t *data;
#endif /* HIF_CE_DEBUG_DATA_BUF */
};
#else
struct hif_ce_desc_event;
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array.  If more than array_size contexts try to
 * access the array, full locking of the recording process would be
 * needed to have sane logging.
 *
 * Return: the reserved record index, in the range [0, array_size)
 */
int get_next_record_index(qdf_atomic_t *table_index, int array_size);
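
/*
 * Minimal sketch of a conforming implementation (illustrative; the
 * actual definition lives in the CE source files). It assumes
 * qdf_atomic_inc_return() returns the post-increment value:
 *
 *	int get_next_record_index(qdf_atomic_t *table_index, int array_size)
 *	{
 *		int record_index = qdf_atomic_inc_return(table_index);
 *
 *		if (record_index == array_size)
 *			qdf_atomic_sub(array_size, table_index);
 *
 *		while (record_index >= array_size)
 *			record_index -= array_size;
 *		return record_index;
 *	}
 */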

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * hif_record_ce_srng_desc_event() - Record a CE srng descriptor event
 * @scn: hif context
 * @ce_id: copy engine id for which the event is recorded
 * @type: event type
 * @descriptor: ce src/dest/status ring descriptor
 * @memory: nbuf
 * @index: current sw/write index
 * @len: len of the buffer
 * @hal_ring: ce hw ring
 *
 * Return: None
 */
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring);

/**
 * hif_clear_ce_desc_debug_data() - Clear the contents of hif_ce_desc_event
 * up to the data field before reusing it.
 * @event: the CE event record to be cleared
 *
 * Return: None
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event);
#else
static inline
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
}

static inline
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_ce_desc_data_record() - Record the data pointed to by the CE descriptor
 * @event: structure detailing a ce event
 * @len: length of the data
 *
 * Return: None
 */
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len);

QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
#else
static inline
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) { }

static inline
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
{
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
 * @nbytes: nbytes value being written into a send descriptor
 * @ce_state: context of the copy engine
 *
 * nbytes should be non-zero and no greater than the max configured for
 * the copy engine.
 *
 * Return: none
 */
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
	if (!nbytes || nbytes > ce_state->src_sz_max)
		QDF_BUG(0);
}
#else
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

#if defined(HIF_RECORD_PADDR)
/**
 * hif_ce_desc_record_rx_paddr() - record the physical address, IOMMU
 * IOVA address, and MMU virtual address for an Rx buffer
 * @scn: hif_softc
 * @event: the CE event record to update
 * @nbuf: buffer posted to fw
 *
 * Records the physical address for the ce_event_type events
 * HIF_RX_DESC_POST and HIF_RX_DESC_COMPLETION.
 *
 * Return: none
 */
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf);
#else
static inline
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf)
{
}
#endif /* HIF_RECORD_PADDR */
#endif /* __COPY_ENGINE_INTERNAL_H__ */