xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_internal.h (revision 0ce3a8e5493253d9ed0eb4d5cc91ce576740ae1e)
/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_INTERNAL_H__
#define __COPY_ENGINE_INTERNAL_H__

#include <hif.h>                /* A_TARGET_WRITE */

/* Copy Engine operational state */
enum CE_op_state {
	CE_UNUSED,
	CE_PAUSED,
	CE_RUNNING,
	CE_PENDING,
};

enum ol_ath_hif_ce_ecodes {
	CE_RING_DELTA_FAIL = 0
};

struct CE_src_desc;

/* Copy Engine Ring internal state */
struct CE_ring_state {

	/* Number of entries in this ring; must be a power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index software will process
	 * after data has been received into the ring.
	 *
	 * For src ring, this is the last descriptor that was sent and
	 * whose completion software has processed.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	unsigned int write_index;       /* cached copy */
	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * to avoid reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	unsigned int hw_index;  /* cached copy */

	/* Start of DMA-coherent area reserved for descriptors */
	void *base_addr_owner_space_unaligned;  /* Host address space */
	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	void *base_addr_owner_space;    /* Host address space */
	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	char *shadow_base_unaligned;
	struct CE_src_desc *shadow_base;

	unsigned int low_water_mark_nentries;
	unsigned int high_water_mark_nentries;
	void *srng_ctx;
	void **per_transfer_context;

	/* HAL CE ring type */
	uint32_t hal_ring_type;
	/* ring memory prealloc */
	uint8_t is_ring_prealloc;

	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */
};
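
/*
 * Illustrative sketch (not part of this header; the helper name is
 * hypothetical): because nentries is a power of 2, deltas between ring
 * indices can be computed modulo the ring size with nentries_mask.
 * Under the invariant documented above, the space still available for
 * software to post into a source ring is the distance from write_index
 * back around to one slot before sw_index.
 */
static inline unsigned int
ce_ring_space_sketch(const struct CE_ring_state *ring)
{
	/* entries posted by software and not yet completion-processed */
	unsigned int used = (ring->write_index - ring->sw_index) &
			    ring->nentries_mask;

	/* one slot stays empty so a full ring is distinct from an empty one */
	return ring->nentries_mask - used;
}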

/* Copy Engine internal state */
struct CE_state {
	struct hif_softc *scn;
	unsigned int id;
	unsigned int attr_flags;  /* CE_ATTR_* */
	uint32_t ctrl_addr;       /* relative to BAR */
	enum CE_op_state state;

#ifdef WLAN_FEATURE_FASTPATH
	fastpath_msg_handler fastpath_handler;
	void *context;
#endif /* WLAN_FEATURE_FASTPATH */
	qdf_work_t oom_allocation_work;

	ce_send_cb send_cb;
	void *send_context;

	CE_recv_cb recv_cb;
	void *recv_context;

	/* misc_cbs - are any callbacks besides send and recv enabled? */
	uint8_t misc_cbs;

	CE_watermark_cb watermark_cb;
	void *wm_context;

	/* Record the state of the copy completion interrupt */
	int disable_copy_compl_intr;

	unsigned int src_sz_max;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	struct CE_ring_state *status_ring;
	atomic_t rx_pending;

	qdf_spinlock_t ce_index_lock;
#ifdef CE_TASKLET_SCHEDULE_ON_FULL
	qdf_spinlock_t ce_interrupt_lock;
#endif
	/* Flag to indicate whether to break out of the DPC context */
	bool force_break;

	/* time in nanoseconds to yield control of napi poll */
	unsigned long long ce_service_yield_time;
	/* CE service start time in nanoseconds */
	unsigned long long ce_service_start_time;
	/* Number of receive buffers handled in one interrupt DPC routine */
	unsigned int receive_count;
	/* epping */
	bool timer_inited;
	qdf_timer_t poll_timer;

	/* datapath - for faster access, use bools instead of a bitmap */
	bool htt_tx_data;
	bool htt_rx_data;
	qdf_lro_ctx_t lro_data;

	void (*service)(struct hif_softc *scn, int CE_id);
#ifdef WLAN_TRACEPOINTS
	/* CE tasklet sched time in nanoseconds */
	unsigned long long ce_tasklet_sched_time;
#endif
};

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CLOCK_OVERRIDE 0x2

#ifdef QCA_WIFI_3_0
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
	((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
#else
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)((desc)->buffer_addr)
#endif
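
/*
 * Worked example (illustrative): with QCA_WIFI_3_0 the DMA address is
 * 37 bits wide, split across buffer_addr (low 32 bits) and the low 5
 * bits of buffer_addr_hi. For dma = 0x12_3456_789A:
 *
 *     desc->buffer_addr    = 0x3456789A    (bits 31:0)
 *     desc->buffer_addr_hi = 0x12          (bits 36:32, masked by 0x1F)
 *
 * HIF_CE_DESC_ADDR_TO_DMA() then reassembles 0x123456789A.
 */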

#ifdef QCA_WIFI_3_0
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		dest_byte_swap:1,
		byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		byte_swap:1,          /* src_byte_swap */
		dest_byte_swap:1,
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};

struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		byte_swap:1,
		src_byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;          /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		src_byte_swap:1,
		byte_swap:1,         /* dest_byte_swap */
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16;    /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
#else
struct CE_src_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};

struct CE_dest_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
#endif /* QCA_WIFI_3_0 */

struct ce_srng_src_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 buffer_addr_hi:8;
	uint32_t rsvd1:16,
		 meta_data:16;
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t buffer_addr_hi:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd1:12,
		 buffer_addr_hi:8;
#else
	uint32_t buffer_addr_hi:8,
		 rsvd1:12,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_status_desc {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 rsvd0:8;
	uint32_t rsvd1:16,
		 meta_data:16;
#else
	uint32_t rsvd0:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
#endif
	uint32_t toeplitz_hash;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};
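
/*
 * Illustrative note: SRNG descriptors carry a 40-bit DMA address, split
 * across buffer_addr_lo (32 bits) and buffer_addr_hi (8 bits), e.g.
 *
 *     dma = ((uint64_t)desc->buffer_addr_hi << 32) | desc->buffer_addr_lo;
 */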

#define CE_SENDLIST_ITEMS_MAX 12

/**
 * union ce_desc - unified data type for ce descriptors
 * @src_desc: source descriptor
 * @dest_desc: destination descriptor
 *
 * Both src and destination descriptors follow the same format.
 * They use different data structures for different access semantics.
 * Here we provide a unifying data type.
 */
union ce_desc {
	struct CE_src_desc src_desc;
	struct CE_dest_desc dest_desc;
};

/**
 * union ce_srng_desc - unified data type for ce srng descriptors
 * @src_desc: ce srng source ring descriptor
 * @dest_desc: ce srng destination ring descriptor
 * @dest_status_desc: ce srng status ring descriptor
 */
union ce_srng_desc {
	struct ce_srng_src_desc src_desc;
	struct ce_srng_dest_desc dest_desc;
	struct ce_srng_dest_status_desc dest_status_desc;
};

/**
 * enum hif_ce_event_type - HIF copy engine event type
 * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
 * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
 * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
 * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
 * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write
 *	index in a normal tx
 * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
 * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the RX ring in fastpath
 * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the RX ring in fastpath
 * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the TX ring in fastpath
 * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: recorded when dropping a write to
 *	the write index in fastpath
 * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the TX ring in fastpath
 * @RESUME_WRITE_INDEX_UPDATE: event recorded before restoring a dropped
 *	write index on resume
 * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
 * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
 * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
 * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
 * @HIF_CE_REAP_ENTRY: records entry to completion processing outside of a bh
 * @HIF_CE_REAP_EXIT: records exit from completion processing outside of a bh
 * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
 * @NAPI_POLL_ENTER: records the start of the napi poll function
 * @NAPI_COMPLETE: records when interrupts are re-enabled
 * @NAPI_POLL_EXIT: records when the napi poll function returns
 * @HIF_RX_NBUF_ALLOC_FAILURE: record the packet when nbuf allocation fails
 * @HIF_RX_NBUF_MAP_FAILURE: record the packet when dma mapping fails
 * @HIF_RX_NBUF_ENQUEUE_FAILURE: record the packet when enqueue to ce fails
 * @HIF_CE_SRC_RING_BUFFER_POST: record the packet when a buffer is posted to
 *	the ce src ring
 * @HIF_CE_DEST_RING_BUFFER_POST: record the packet when a buffer is posted to
 *	the ce dest ring
 * @HIF_CE_DEST_RING_BUFFER_REAP: record the packet when a buffer is reaped
 *	from the ce dest ring
 * @HIF_CE_DEST_STATUS_RING_REAP: record the packet when the status ring is
 *	reaped
 * @HIF_RX_DESC_PRE_NBUF_ALLOC: record the packet before nbuf allocation
 * @HIF_RX_DESC_PRE_NBUF_MAP: record the packet before nbuf map
 * @HIF_RX_DESC_POST_NBUF_MAP: record the packet after nbuf map
 * @HIF_EVENT_TYPE_MAX: max event
 */
enum hif_ce_event_type {
	HIF_RX_DESC_POST,
	HIF_RX_DESC_COMPLETION,
	HIF_TX_GATHER_DESC_POST,
	HIF_TX_DESC_POST,
	HIF_TX_DESC_SOFTWARE_POST,
	HIF_TX_DESC_COMPLETION,
	FAST_RX_WRITE_INDEX_UPDATE,
	FAST_RX_SOFTWARE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
	FAST_TX_SOFTWARE_INDEX_UPDATE,
	RESUME_WRITE_INDEX_UPDATE,

	HIF_IRQ_EVENT = 0x10,
	HIF_CE_TASKLET_ENTRY,
	HIF_CE_TASKLET_RESCHEDULE,
	HIF_CE_TASKLET_EXIT,
	HIF_CE_REAP_ENTRY,
	HIF_CE_REAP_EXIT,
	NAPI_SCHEDULE,
	NAPI_POLL_ENTER,
	NAPI_COMPLETE,
	NAPI_POLL_EXIT,

	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
	HIF_RX_NBUF_MAP_FAILURE,
	HIF_RX_NBUF_ENQUEUE_FAILURE,

	HIF_CE_SRC_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_REAP,
	HIF_CE_DEST_STATUS_RING_REAP,

	HIF_RX_DESC_PRE_NBUF_ALLOC,
	HIF_RX_DESC_PRE_NBUF_MAP,
	HIF_RX_DESC_POST_NBUF_MAP,

	HIF_EVENT_TYPE_MAX,
};

void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size);
void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id);
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len);
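
/*
 * Usage sketch (illustrative): callers record the descriptor state just
 * before publishing an index update, matching the event semantics
 * documented above, e.g. in a legacy send path (variable names here are
 * only for illustration):
 *
 *     hif_record_ce_desc_event(scn, CE_state->id, HIF_TX_DESC_POST,
 *                              (union ce_desc *)shadow_src_desc,
 *                              per_transfer_context, write_index, nbytes);
 */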

enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,
	/* TBDXXX: CE_RX_DESC_LIST, */
};

/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter.  The latter is the actual structure
 * definition and it is only used internally.  The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.
 */
struct ce_sendlist_s {
	unsigned int num_items;
	struct ce_sendlist_item {
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;        /* e.g. buffer or desc list */
		union {
			unsigned int nbytes;    /* simple buffer */
			unsigned int ndesc;     /* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags
		 */
		uint32_t flags;
		uint32_t user_flags;
	} item[CE_SENDLIST_ITEMS_MAX];
};
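
/*
 * Minimal sketch of the opaque/private pairing described above (the
 * real public type is struct ce_sendlist, declared in hif.h; the name
 * below is hypothetical):
 *
 *     struct ce_sendlist_sketch {
 *             unsigned int word[64];   (opaque storage, sized to cover
 *                                       sizeof(struct ce_sendlist_s))
 *     };
 *
 * The CE implementation casts the caller's opaque instance back to the
 * private type:
 *
 *     struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
 */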

bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state);
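
/*
 * Usage sketch (illustrative; qdf_sched_clock() from qdf_time.h is
 * assumed as the nanosecond clock source): a CE service routine stamps
 * ce_service_start_time on entry, then polls until it either runs out
 * of work or hif_ce_service_should_yield() reports the yield budget is
 * spent.
 */
static inline void ce_service_loop_sketch(struct hif_softc *scn,
					  struct CE_state *ce_state)
{
	ce_state->ce_service_start_time = qdf_sched_clock();

	do {
		/* reap completions, refill rx buffers, etc. */
	} while (!hif_ce_service_should_yield(scn, ce_state));
}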

#ifdef WLAN_FEATURE_FASTPATH
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif

/* which ring of a CE? */
#define CE_RING_SRC  0
#define CE_RING_DEST 1
#define CE_RING_STATUS 2

#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */

/**
 * hif_get_wake_ce_id() - gets the copy engine id used for waking up
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine Id should be populated
 *
 * Return: errno
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);
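
/*
 * Usage sketch (illustrative, assuming the usual 0-on-success errno
 * convention implied by "Return: errno"; configure_wake_irq() is a
 * hypothetical consumer):
 *
 *     uint8_t wake_ce_id;
 *
 *     if (!hif_get_wake_ce_id(scn, &wake_ce_id))
 *             configure_wake_irq(wake_ce_id);
 */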

/**
 * hif_get_fw_diag_ce_id() - gets the copy engine id used for FW diag
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine Id should be populated
 *
 * Return: errno
 */
int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id);

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#ifndef HIF_CE_HISTORY_MAX
#define HIF_CE_HISTORY_MAX 1024
#endif

#define CE_DEBUG_MAX_DATA_BUF_SIZE 64

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @index: location of the descriptor in the ce ring
 * @type: what the event was
 * @time: when it happened
 * @cpu_id: cpu on which the event was recorded
 * @current_hp: holds the current ring hp value
 * @current_tp: holds the current ring tp value
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @dma_addr: physical/iova address based on smmu status
 * @dma_to_phy: physical address from iova address
 * @virt_to_phy: physical address from virtual address
 * @actual_data_len: length of the data
 * @data: data pointed to by the descriptor
 */
struct hif_ce_desc_event {
	int index;
	enum hif_ce_event_type type;
	uint64_t time;
	int cpu_id;
#ifdef HELIUMPLUS
	union ce_desc descriptor;
#else
	uint32_t current_hp;
	uint32_t current_tp;
	union ce_srng_desc descriptor;
#endif
	void *memory;

#ifdef HIF_RECORD_PADDR
	/* iova/pa based on smmu status */
	qdf_dma_addr_t dma_addr;
	/* store pa from iova address */
	qdf_dma_addr_t dma_to_phy;
	/* store pa */
	qdf_dma_addr_t virt_to_phy;
#endif /* HIF_RECORD_PADDR */

#ifdef HIF_CE_DEBUG_DATA_BUF
	size_t actual_data_len;
	uint8_t *data;
#endif /* HIF_CE_DEBUG_DATA_BUF */
};
#else
struct hif_ce_desc_event;
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array.  If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 *
 * Return: the reserved record index
 */
int get_next_record_index(qdf_atomic_t *table_index, int array_size);
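
/*
 * Minimal sketch of the reservation step described above (the real
 * implementation lives in the CE source files; this simplified version
 * assumes array_size is a power of 2 and the helper name is
 * hypothetical): concurrent writers each get a distinct slot because
 * the increment is atomic.
 */
static inline int get_next_record_index_sketch(qdf_atomic_t *table_index,
					       int array_size)
{
	/* atomically reserve a monotonically increasing sequence number */
	int record_index = qdf_atomic_inc_return(table_index);

	/* wrap into the circular buffer */
	return record_index & (array_size - 1);
}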

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * hif_record_ce_srng_desc_event() - record data pointed to by the CE descriptor
 * @scn: hif context
 * @ce_id: copy engine id
 * @type: event type
 * @descriptor: ce src/dest/status ring descriptor
 * @memory: nbuf
 * @index: current sw/write index
 * @len: length of the buffer
 * @hal_ring: ce hw ring
 *
 * Return: None
 */
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring);

/**
 * hif_clear_ce_desc_debug_data() - clear the contents of hif_ce_desc_event
 * up to the data field before reusing it
 *
 * @event: CE event record to be cleared
 *
 * Return: None
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event);
#else
static inline
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
}

static inline
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_ce_desc_data_record() - record data pointed to by the CE descriptor
 * @event: structure detailing a ce event
 * @len: length of the data
 *
 * Return: None
 */
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len);

QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
#else
static inline
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) { }

static inline
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
{
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
 * @nbytes: nbytes value being written into a send descriptor
 * @ce_state: context of the copy engine
 *
 * nbytes should be non-zero and less than max configured for the copy engine
 *
 * Return: none
 */
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
	/* nbytes is unsigned, so zero and over-max are the invalid cases */
	if (nbytes == 0 || nbytes > ce_state->src_sz_max)
		QDF_BUG(0);
}
#else
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

#if defined(HIF_RECORD_PADDR)
/**
 * hif_ce_desc_record_rx_paddr() - record physical address for IOMMU
 * IOVA addr and MMU virtual addr for Rx
 * @scn: hif_softc
 * @event: event details
 * @nbuf: buffer posted to fw
 *
 * record physical address for ce_event_type HIF_RX_DESC_POST and
 * HIF_RX_DESC_COMPLETION
 *
 * Return: none
 */
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf);
#else
static inline
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf)
{
}
#endif /* HIF_RECORD_PADDR */
#endif /* __COPY_ENGINE_INTERNAL_H__ */