xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_internal.h (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_INTERNAL_H__
#define __COPY_ENGINE_INTERNAL_H__

#include <hif.h>                /* A_TARGET_WRITE */

/* Copy Engine operational state */
enum CE_op_state {
	CE_UNUSED,
	CE_PAUSED,
	CE_RUNNING,
	CE_PENDING,
};

enum ol_ath_hif_ce_ecodes {
	CE_RING_DELTA_FAIL = 0
};

struct CE_src_desc;

/* Copy Engine Ring internal state */
struct CE_ring_state {

	/* Number of entries in this ring; must be a power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;

	/*
	 * For the dest ring, this is the next index to be processed
	 * by software after data was/is received into it.
	 *
	 * For the src ring, this is the last descriptor that was sent
	 * and whose completion was processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	unsigned int write_index;       /* cached copy */
	/*
	 * For the src ring, this is the next index not yet processed by HW.
	 * It is a cached copy of the real HW index (read index), used
	 * to avoid reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For the dest ring, this is currently unused.
	 */
	unsigned int hw_index;  /* cached copy */

	/* Start of DMA-coherent area reserved for descriptors */
	void *base_addr_owner_space_unaligned;  /* Host address space */
	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */

	/*
	 * Actual start of descriptors.
	 * Aligned to a descriptor-size boundary.
	 * Points into the reserved DMA-coherent area, above.
	 */
	void *base_addr_owner_space;    /* Host address space */
	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
	/*
	 * Start of the shadow copy of descriptors, within regular memory.
	 * Aligned to a descriptor-size boundary.
	 */
	char *shadow_base_unaligned;
	struct CE_src_desc *shadow_base;

	unsigned int low_water_mark_nentries;
	unsigned int high_water_mark_nentries;
	void *srng_ctx;
	void **per_transfer_context;

	/* HAL CE ring type */
	uint32_t hal_ring_type;
	/* ring memory prealloc */
	uint8_t is_ring_prealloc;

	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS-specific DMA context */
};
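
/*
 * Illustrative sketch (not part of the driver): because nentries is a
 * power of 2, ring occupancy under the invariants documented above can
 * be computed with masked arithmetic. The helper names here are
 * hypothetical; the driver expresses equivalent logic with its
 * CE_RING_DELTA() style macros.
 *
 *   static inline unsigned int
 *   ce_ring_filled_sketch(struct CE_ring_state *ring)
 *   {
 *       // Entries published by the producer but not yet consumed by
 *       // software; the mask handles index wrap-around.
 *       return (ring->write_index - ring->sw_index) & ring->nentries_mask;
 *   }
 *
 *   static inline unsigned int
 *   ce_ring_space_sketch(struct CE_ring_state *ring)
 *   {
 *       // One slot is kept empty so that a full ring can be
 *       // distinguished from an empty one.
 *       return (ring->sw_index - ring->write_index - 1) &
 *              ring->nentries_mask;
 *   }
 */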

/* Copy Engine internal state */
struct CE_state {
	struct hif_softc *scn;
	unsigned int id;
	unsigned int attr_flags;  /* CE_ATTR_* */
	uint32_t ctrl_addr;       /* relative to BAR */
	enum CE_op_state state;

#ifdef WLAN_FEATURE_FASTPATH
	fastpath_msg_handler fastpath_handler;
	void *context;
#endif /* WLAN_FEATURE_FASTPATH */
	qdf_work_t oom_allocation_work;

	ce_send_cb send_cb;
	void *send_context;

	CE_recv_cb recv_cb;
	void *recv_context;

	/* misc_cbs - are any callbacks besides send and recv enabled? */
	uint8_t misc_cbs;

	CE_watermark_cb watermark_cb;
	void *wm_context;

	/* Record the state of the copy completion interrupt */
	int disable_copy_compl_intr;

	unsigned int src_sz_max;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	struct CE_ring_state *status_ring;
	atomic_t rx_pending;

	qdf_spinlock_t ce_index_lock;
	/* Flag to indicate whether to break out of the DPC context */
	bool force_break;

	/* time in nanoseconds to yield control of napi poll */
	unsigned long long ce_service_yield_time;
	/* CE service start time in nanoseconds */
	unsigned long long ce_service_start_time;
	/* Number of receive buffers handled in one interrupt DPC routine */
	unsigned int receive_count;
	/* epping */
	bool timer_inited;
	qdf_timer_t poll_timer;

	/* datapath - for faster access, use bools instead of a bitmap */
	bool htt_tx_data;
	bool htt_rx_data;
	qdf_lro_ctx_t lro_data;

	void (*service)(struct hif_softc *scn, int CE_id);
#ifdef WLAN_TRACEPOINTS
	/* CE tasklet sched time in nanoseconds */
	unsigned long long ce_tasklet_sched_time;
#endif
};

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CLOCK_OVERRIDE 0x2
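
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * rounding the unaligned DMA-coherent allocation up to the
 * CE_DESC_RING_ALIGN boundary, mirroring the relationship between the
 * *_unaligned and aligned base-address fields in struct CE_ring_state.
 *
 *   static inline void
 *   ce_ring_align_base_sketch(struct CE_ring_state *ring)
 *   {
 *       uintptr_t addr =
 *           (uintptr_t)ring->base_addr_owner_space_unaligned;
 *       uintptr_t aligned = (addr + CE_DESC_RING_ALIGN - 1) &
 *                           ~((uintptr_t)CE_DESC_RING_ALIGN - 1);
 *
 *       // Apply the same offset to the CE (bus) address so the host
 *       // and device views of the descriptor area stay in sync.
 *       ring->base_addr_owner_space = (void *)aligned;
 *       ring->base_addr_CE_space =
 *           ring->base_addr_CE_space_unaligned + (aligned - addr);
 *   }
 */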

#ifdef QCA_WIFI_3_0
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
	((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
#else
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)((desc)->buffer_addr)
#endif
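
/*
 * Example (illustrative only): on QCA_WIFI_3_0 targets the CE carries a
 * 37-bit bus address split across two descriptor fields: the low 32 bits
 * in buffer_addr and the top 5 bits in buffer_addr_hi (hence the 0x1F
 * mask above). The inverse split when posting a buffer would be:
 *
 *   desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
 *   desc->buffer_addr_hi = (uint32_t)((dma_addr >> 32) & 0x1F);
 *
 * after which HIF_CE_DESC_ADDR_TO_DMA(desc) reconstructs dma_addr.
 */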

#ifdef QCA_WIFI_3_0
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		dest_byte_swap:1,
		byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		byte_swap:1,          /* src_byte_swap */
		dest_byte_swap:1,
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};

struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		byte_swap:1,
		src_byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;          /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		src_byte_swap:1,
		byte_swap:1,         /* dest_byte_swap */
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16;    /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
#else
struct CE_src_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};

struct CE_dest_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
#endif /* QCA_WIFI_3_0 */

struct ce_srng_src_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 buffer_addr_hi:8;
	uint32_t rsvd1:16,
		 meta_data:16;
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t buffer_addr_hi:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd1:12,
		 buffer_addr_hi:8;
#else
	uint32_t buffer_addr_hi:8,
		 rsvd1:12,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_status_desc {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 rsvd0:8;
	uint32_t rsvd1:16,
		 meta_data:16;
#else
	uint32_t rsvd0:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
#endif
	uint32_t toeplitz_hash;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

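/*
 * Illustrative sketch (hypothetical helper): populating an SRNG source
 * descriptor from a 40-bit DMA address. buffer_addr_lo takes the low
 * 32 bits, the 8-bit buffer_addr_hi field takes bits 32-39, and
 * meta_data is echoed back in the matching dest-status descriptor.
 *
 *   static inline void
 *   ce_srng_src_desc_fill_sketch(struct ce_srng_src_desc *desc,
 *                                qdf_dma_addr_t dma_addr,
 *                                uint16_t nbytes, uint16_t meta_data)
 *   {
 *       desc->buffer_addr_lo = (uint32_t)(dma_addr & 0xFFFFFFFF);
 *       desc->buffer_addr_hi = (uint32_t)((dma_addr >> 32) & 0xFF);
 *       desc->nbytes = nbytes;        // length in bytes (16-bit field)
 *       desc->meta_data = meta_data;  // returned via the status ring
 *       desc->gather = 0;             // single-buffer transfer
 *   }
 */
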
#define CE_SENDLIST_ITEMS_MAX 12

/**
 * union ce_desc - unified data type for ce descriptors
 * @src_desc: source ring descriptor
 * @dest_desc: destination ring descriptor
 *
 * Both source and destination descriptors follow the same format.
 * They use different data structures for different access semantics.
 * Here we provide a unifying data type.
 */
union ce_desc {
	struct CE_src_desc src_desc;
	struct CE_dest_desc dest_desc;
};

/**
 * union ce_srng_desc - unified data type for ce srng descriptors
 * @src_desc: ce srng source ring descriptor
 * @dest_desc: ce srng destination ring descriptor
 * @dest_status_desc: ce srng status ring descriptor
 */
union ce_srng_desc {
	struct ce_srng_src_desc src_desc;
	struct ce_srng_dest_desc dest_desc;
	struct ce_srng_dest_status_desc dest_status_desc;
};

/**
 * enum hif_ce_event_type - HIF copy engine event type
 * @HIF_RX_DESC_POST: event recorded before updating the write index of
 *	the RX ring.
 * @HIF_RX_DESC_COMPLETION: event recorded before updating the sw index of
 *	the RX ring.
 * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
 * @HIF_TX_DESC_POST: event recorded before updating the write index of
 *	the TX ring.
 * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the
 *	write index in a normal tx
 * @HIF_TX_DESC_COMPLETION: event recorded before updating the sw index of
 *	the TX ring.
 * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the RX ring in fastpath
 * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the RX ring in fastpath
 * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the TX ring in fastpath
 * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: recorded when dropping a write to
 *	the write index in fastpath
 * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the TX ring in fastpath
 * @RESUME_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	on resume
 * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
 * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
 * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
 * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
 * @HIF_CE_REAP_ENTRY: records when we start processing completions outside
 *	of a bh
 * @HIF_CE_REAP_EXIT: records when we finish processing completions outside
 *	of a bh
 * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
 * @NAPI_POLL_ENTER: records the start of the napi poll function
 * @NAPI_COMPLETE: records when interrupts are re-enabled
 * @NAPI_POLL_EXIT: records when the napi poll function returns
 * @HIF_RX_NBUF_ALLOC_FAILURE: record the packet when nbuf allocation fails
 * @HIF_RX_NBUF_MAP_FAILURE: record the packet when dma map fails
 * @HIF_RX_NBUF_ENQUEUE_FAILURE: record the packet when enqueue to ce fails
 * @HIF_CE_SRC_RING_BUFFER_POST: record the packet when a buffer is posted to
 *	the ce src ring
 * @HIF_CE_DEST_RING_BUFFER_POST: record the packet when a buffer is posted to
 *	the ce dest ring
 * @HIF_CE_DEST_RING_BUFFER_REAP: record the packet when a buffer is reaped
 *	from the ce dest ring
 * @HIF_CE_DEST_STATUS_RING_REAP: record the packet when the status ring is
 *	reaped
 * @HIF_RX_DESC_PRE_NBUF_ALLOC: record the packet before nbuf allocation
 * @HIF_RX_DESC_PRE_NBUF_MAP: record the packet before nbuf map
 * @HIF_RX_DESC_POST_NBUF_MAP: record the packet after nbuf map
 * @HIF_EVENT_TYPE_MAX: sentinel marking the number of event types
 */
enum hif_ce_event_type {
	HIF_RX_DESC_POST,
	HIF_RX_DESC_COMPLETION,
	HIF_TX_GATHER_DESC_POST,
	HIF_TX_DESC_POST,
	HIF_TX_DESC_SOFTWARE_POST,
	HIF_TX_DESC_COMPLETION,
	FAST_RX_WRITE_INDEX_UPDATE,
	FAST_RX_SOFTWARE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
	FAST_TX_SOFTWARE_INDEX_UPDATE,
	RESUME_WRITE_INDEX_UPDATE,

	HIF_IRQ_EVENT = 0x10,
	HIF_CE_TASKLET_ENTRY,
	HIF_CE_TASKLET_RESCHEDULE,
	HIF_CE_TASKLET_EXIT,
	HIF_CE_REAP_ENTRY,
	HIF_CE_REAP_EXIT,
	NAPI_SCHEDULE,
	NAPI_POLL_ENTER,
	NAPI_COMPLETE,
	NAPI_POLL_EXIT,

	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
	HIF_RX_NBUF_MAP_FAILURE,
	HIF_RX_NBUF_ENQUEUE_FAILURE,

	HIF_CE_SRC_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_REAP,
	HIF_CE_DEST_STATUS_RING_REAP,

	HIF_RX_DESC_PRE_NBUF_ALLOC,
	HIF_RX_DESC_PRE_NBUF_MAP,
	HIF_RX_DESC_POST_NBUF_MAP,

	HIF_EVENT_TYPE_MAX,
};

void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size);
void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id);
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len);
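
/*
 * Hypothetical usage sketch (argument values are illustrative): a send
 * path might record a TX post event just before updating the write
 * index, e.g.:
 *
 *   hif_record_ce_desc_event(scn, CE_state->id, HIF_TX_DESC_POST,
 *                            (union ce_desc *)shadow_src_desc,
 *                            per_transfer_context, write_index, nbytes);
 */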

enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,
	/* TBDXXX: CE_RX_DESC_LIST, */
};

/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter. The latter is the actual structure
 * definition and is only used internally. The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.
 */
struct ce_sendlist_s {
	unsigned int num_items;
	struct ce_sendlist_item {
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;        /* e.g. buffer or desc list */
		union {
			unsigned int nbytes;    /* simple buffer */
			unsigned int ndesc;     /* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags
		 */
		uint32_t flags;
		uint32_t user_flags;
	} item[CE_SENDLIST_ITEMS_MAX];
};
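
/*
 * Illustrative note: the public struct ce_sendlist only needs to reserve
 * enough opaque storage to hold this private layout. A hedged sketch of
 * the compile-time guard that could keep the two in sync (the assertion
 * name is hypothetical):
 *
 *   QDF_COMPILE_TIME_ASSERT(ce_sendlist_size_check,
 *                           sizeof(struct ce_sendlist) >=
 *                           sizeof(struct ce_sendlist_s));
 */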

bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state);

#ifdef WLAN_FEATURE_FASTPATH
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif

/* which ring of a CE? */
#define CE_RING_SRC  0
#define CE_RING_DEST 1
#define CE_RING_STATUS 2

#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */

/**
 * hif_get_wake_ce_id() - gets the copy engine id used for waking up
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine Id should be populated
 *
 * Return: errno
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);

/**
 * hif_get_fw_diag_ce_id() - gets the copy engine id used for FW diag
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine Id should be populated
 *
 * Return: errno
 */
int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id);

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#ifndef HIF_CE_HISTORY_MAX
#define HIF_CE_HISTORY_MAX 1024
#endif

#define CE_DEBUG_MAX_DATA_BUF_SIZE 64

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @index: location of the descriptor in the ce ring
 * @type: what the event was
 * @time: when it happened
 * @cpu_id: cpu on which the event was recorded
 * @current_hp: holds the current ring hp value
 * @current_tp: holds the current ring tp value
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @dma_addr: physical/iova address based on smmu status
 * @dma_to_phy: physical address from iova address
 * @virt_to_phy: physical address from virtual address
 * @actual_data_len: length of the data
 * @data: data pointed to by the descriptor
 */
struct hif_ce_desc_event {
	int index;
	enum hif_ce_event_type type;
	uint64_t time;
	int cpu_id;
#ifdef HELIUMPLUS
	union ce_desc descriptor;
#else
	uint32_t current_hp;
	uint32_t current_tp;
	union ce_srng_desc descriptor;
#endif
	void *memory;

#ifdef HIF_RECORD_PADDR
	/* iova/pa based on smmu status */
	qdf_dma_addr_t dma_addr;
	/* store pa from iova address */
	qdf_dma_addr_t dma_to_phy;
	/* store pa */
	qdf_dma_addr_t virt_to_phy;
#endif /* HIF_RECORD_PADDR */

#ifdef HIF_CE_DEBUG_DATA_BUF
	size_t actual_data_len;
	uint8_t *data;
#endif /* HIF_CE_DEBUG_DATA_BUF */
};
#else
struct hif_ce_desc_event;
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array. If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 *
 * Return: the reserved record index
 */
int get_next_record_index(qdf_atomic_t *table_index, int array_size);
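
/*
 * A minimal sketch of the reservation logic described above, assuming
 * qdf_atomic_inc_return()/qdf_atomic_sub() behave like the Linux
 * atomics they wrap (the real definition lives in the CE service code):
 *
 *   int get_next_record_index(qdf_atomic_t *table_index, int array_size)
 *   {
 *       int record_index = qdf_atomic_inc_return(table_index);
 *
 *       // Pull the shared counter back once it steps past the end so
 *       // it never overflows; then fold the reserved value into range.
 *       if (record_index == array_size)
 *           qdf_atomic_sub(array_size, table_index);
 *       while (record_index >= array_size)
 *           record_index -= array_size;
 *       return record_index;
 *   }
 */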

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * hif_record_ce_srng_desc_event() - Record data pointed to by the CE
 * descriptor
 * @scn: hif context
 * @ce_id: which ce the event occurred on
 * @type: event type
 * @descriptor: ce src/dest/status ring descriptor
 * @memory: nbuf
 * @index: current sw/write index
 * @len: len of the buffer
 * @hal_ring: ce hw ring
 *
 * Return: None
 */
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring);

/**
 * hif_clear_ce_desc_debug_data() - Clear the contents of hif_ce_desc_event
 * up to the data field before reusing it.
 * @event: the event record to clear
 *
 * Return: None
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event);
#else
static inline
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
}

static inline
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_ce_desc_data_record() - Record data pointed to by the CE descriptor
 * @event: structure detailing a ce event
 * @len: length of the data
 *
 * Return: None
 */
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len);

QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
#else
static inline
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) { }

static inline
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
{
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
 * @nbytes: nbytes value being written into a send descriptor
 * @ce_state: context of the copy engine
 *
 * nbytes should be non-zero and no more than the max configured for the
 * copy engine.
 *
 * Return: none
 */
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
	/* nbytes is unsigned, so test for zero explicitly */
	if (!nbytes || nbytes > ce_state->src_sz_max)
		QDF_BUG(0);
}
#else
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

#if defined(HIF_RECORD_PADDR)
/**
 * hif_ce_desc_record_rx_paddr() - record the physical address (IOMMU
 * IOVA addr and MMU virtual addr) for Rx
 * @scn: hif_softc
 * @event: event record being filled in
 * @nbuf: buffer posted to fw
 *
 * record physical address for ce_event_type HIF_RX_DESC_POST and
 * HIF_RX_DESC_COMPLETION
 *
 * Return: none
 */
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf);
#else
static inline
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf)
{
}
#endif /* HIF_RECORD_PADDR */
#endif /* __COPY_ENGINE_INTERNAL_H__ */