/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_INTERNAL_H__
#define __COPY_ENGINE_INTERNAL_H__

#include <hif.h>                /* A_TARGET_WRITE */

#ifndef QCA_WIFI_WCN6450
/* Mask for packet offset in the CE descriptor */
#define CE_DESC_PKT_OFFSET_BIT_M 0x0FFF0000

/* Packet offset start bit position in CE descriptor */
#define CE_DESC_PKT_OFFSET_BIT_S 16

/* Packet type start bit position in CE descriptor */
#define CE_DESC_PKT_TYPE_BIT_S 6

/* Tx classify start bit position in CE descriptor */
#define CE_DESC_TX_CLASSIFY_BIT_S 5
#else
/* Mask for packet offset in the CE descriptor */
#define CE_DESC_PKT_OFFSET_BIT_M 0x7FF80000

/* Packet offset start bit position in CE descriptor */
#define CE_DESC_PKT_OFFSET_BIT_S 19

/* Packet type start bit position in CE descriptor */
#define CE_DESC_PKT_TYPE_BIT_S 9

/* Tx classify start bit position in CE descriptor */
#define CE_DESC_TX_CLASSIFY_BIT_S 8
#endif
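
/*
 * Illustrative sketch, not part of the driver API: how a mask/shift pair
 * like the above is typically applied to a descriptor control word.  The
 * helper names below are hypothetical.
 */
static inline uint32_t ce_example_set_pkt_offset(uint32_t ctrl,
						 uint32_t offset)
{
	/* Clear the offset field, then place the new value at its position */
	ctrl &= ~CE_DESC_PKT_OFFSET_BIT_M;
	ctrl |= (offset << CE_DESC_PKT_OFFSET_BIT_S) & CE_DESC_PKT_OFFSET_BIT_M;

	return ctrl;
}

static inline uint32_t ce_example_get_pkt_offset(uint32_t ctrl)
{
	return (ctrl & CE_DESC_PKT_OFFSET_BIT_M) >> CE_DESC_PKT_OFFSET_BIT_S;
}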

/* Copy Engine operational state */
enum CE_op_state {
	CE_UNUSED,
	CE_PAUSED,
	CE_RUNNING,
	CE_PENDING,
};

enum ol_ath_hif_ce_ecodes {
	CE_RING_DELTA_FAIL = 0
};

struct CE_src_desc;

/* CE ring BIT mask
 * CE_RING_FLUSH_EVENT: flush ce ring index in case of link down
 */
#define CE_RING_FLUSH_EVENT BIT(0)

/* Copy Engine Ring internal state */
struct CE_ring_state {

	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after data has been received into it.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	unsigned int write_index;       /* cached copy */
	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	unsigned int hw_index;  /* cached copy */

	/* Start of DMA-coherent area reserved for descriptors */
	void *base_addr_owner_space_unaligned;  /* Host address space */
	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	void *base_addr_owner_space;    /* Host address space */
	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	char *shadow_base_unaligned;
	struct CE_src_desc *shadow_base;

	unsigned int low_water_mark_nentries;
	unsigned int high_water_mark_nentries;
	void *srng_ctx;
	void **per_transfer_context;

	/* HAL CE ring type */
	uint32_t hal_ring_type;
	/* ring memory prealloc */
	uint8_t is_ring_prealloc;

	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */

	uint32_t flush_count;
	/* ce ring event */
	unsigned long event;
	/* last flushed time stamp */
	uint64_t last_flush_ts;
};
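
/*
 * Illustrative sketch, assuming hypothetical helper names: because
 * nentries is a power of 2, distances on the ring reduce to a mask
 * operation instead of a modulo, which is how the invariant above is
 * typically exploited.
 */
static inline unsigned int
ce_example_ring_delta(unsigned int nentries_mask,
		      unsigned int from, unsigned int to)
{
	/* Number of steps from 'from' to 'to', modulo the ring size */
	return (to - from) & nentries_mask;
}

static inline unsigned int
ce_example_ring_free_slots(struct CE_ring_state *ring)
{
	/* One slot stays unused so a full ring is distinguishable from empty */
	return (ring->sw_index - 1 - ring->write_index) & ring->nentries_mask;
}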

#ifdef FEATURE_HIF_DELAYED_REG_WRITE
/**
 * struct ce_reg_write_stats - stats to keep track of register writes
 * @enqueues: writes enqueued to delayed work
 * @dequeues: writes dequeued from delayed work (not written yet)
 * @coalesces: writes not enqueued since srng is already queued up
 * @direct: writes not enqueued and written to register directly
 * @dequeue_delay: number of times the dequeue operation was delayed
 */
struct ce_reg_write_stats {
	uint32_t enqueues;
	uint32_t dequeues;
	uint32_t coalesces;
	uint32_t direct;
	uint32_t dequeue_delay;
};
#endif
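
#ifdef FEATURE_HIF_DELAYED_REG_WRITE
/*
 * Illustrative sketch, not the actual write path: how the counters above
 * relate to the delayed-write decision.  If a write for this CE is already
 * queued its value coalesces into the pending one; otherwise it is either
 * enqueued to the delayed-work context or written out directly.  The
 * helper below is hypothetical.
 */
static inline void
ce_example_account_reg_write(struct ce_reg_write_stats *stats,
			     bool already_queued, bool allow_delay)
{
	if (already_queued)
		stats->coalesces++;	/* folded into a pending write */
	else if (allow_delay)
		stats->enqueues++;	/* handed off to delayed work */
	else
		stats->direct++;	/* written to the register now */
}
#endif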

/* Copy Engine internal state */
struct CE_state {
	struct hif_softc *scn;
	unsigned int id;
	unsigned int attr_flags;  /* CE_ATTR_* */
	uint32_t ctrl_addr;       /* relative to BAR */
	enum CE_op_state state;

#ifdef WLAN_FEATURE_FASTPATH
	fastpath_msg_handler fastpath_handler;
	void *context;
#endif /* WLAN_FEATURE_FASTPATH */
	qdf_work_t oom_allocation_work;

	ce_send_cb send_cb;
	void *send_context;

	CE_recv_cb recv_cb;
	void *recv_context;

	/* misc_cbs - are any callbacks besides send and recv enabled? */
	uint8_t misc_cbs;

	CE_watermark_cb watermark_cb;
	void *wm_context;

#ifdef CUSTOM_CB_SCHEDULER_SUPPORT
	qdf_atomic_t custom_cb_pending;
	void (*custom_cb)(void *arg);
	void *custom_cb_context;
#endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
	/* Record the state of the copy compl interrupt */
	int disable_copy_compl_intr;

	unsigned int src_sz_max;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	struct CE_ring_state *status_ring;
	atomic_t rx_pending;

	qdf_spinlock_t ce_index_lock;
#ifdef CE_TASKLET_SCHEDULE_ON_FULL
	qdf_spinlock_t ce_interrupt_lock;
#endif
	/* Flag to indicate whether to break out of the DPC context */
	bool force_break;

	/* time in nanoseconds to yield control of napi poll */
	unsigned long long ce_service_yield_time;
	/* CE service start time in nanoseconds */
	unsigned long long ce_service_start_time;
	/* Number of receive buffers handled in one interrupt DPC routine */
	unsigned int receive_count;
	/* epping */
	bool timer_inited;
	qdf_timer_t poll_timer;

	/* datapath - for faster access, use bools instead of a bitmap */
	bool htt_tx_data;
	bool htt_rx_data;
	qdf_lro_ctx_t lro_data;

	void (*service)(struct hif_softc *scn, int CE_id);
#ifdef WLAN_TRACEPOINTS
	/* CE tasklet sched time in nanoseconds */
	unsigned long long ce_tasklet_sched_time;
#endif
	bool msi_supported;
	bool batch_intr_supported;
#ifdef FEATURE_HIF_DELAYED_REG_WRITE
	struct ce_reg_write_stats wstats;
	uint8_t reg_write_in_progress;
	qdf_time_t last_dequeue_time;
#endif
	uint32_t ce_wrt_idx_offset;
};

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CLOCK_OVERRIDE 0x2

#ifdef QCA_WIFI_3_0
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
	((uint64_t)((desc)->buffer_addr_hi & CE_RING_BASE_ADDR_HIGH_MASK) << \
	 32)))
#else
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
	(qdf_dma_addr_t)((desc)->buffer_addr)
#endif
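
/*
 * Example (illustrative only): recovering the full DMA address recorded
 * in a completed descriptor, e.g. before unmapping the buffer.  On
 * QCA_WIFI_3_0 targets the low 32 bits live in buffer_addr and the upper
 * bits in buffer_addr_hi; on other targets buffer_addr alone is the
 * address:
 *
 *	struct CE_src_desc *desc = ...;
 *	qdf_dma_addr_t paddr = HIF_CE_DESC_ADDR_TO_DMA(desc);
 */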

#if defined(QCA_WIFI_WCN6450)
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		 packet_result_offset:12,
		 toeplitz_hash_enable:1, /* reserved */
		 addr_x_search_disable:1, /* reserved */
		 addr_y_search_disable:1, /* reserved */
		 misc_int_disable:1,
		 target_int_disable:1,
		 host_int_disable:1,
		 dest_byte_swap:1,
		 byte_swap:1,
		 type:2, /* reserved */
		 tx_classify:1,
		 buffer_addr_hi:8;
	uint32_t meta_data:16,
		 nbytes:16;
#else
	uint32_t buffer_addr_hi:8,
		 tx_classify:1,
		 type:2, /* reserved */
		 byte_swap:1, /* src_byte_swap */
		 dest_byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 misc_int_disable:1,
		 addr_y_search_disable:1, /* reserved */
		 addr_x_search_disable:1, /* reserved */
		 toeplitz_hash_enable:1, /* reserved */
		 packet_result_offset:12,
		 gather:1;
	uint32_t nbytes:16,
		 meta_data:16;
#endif
	uint32_t toeplitz_hash_result:32;
};

struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		 packet_result_offset:12,
		 toeplitz_hash_enable:1, /* reserved */
		 addr_x_search_disable:1, /* reserved */
		 addr_y_search_disable:1, /* reserved */
		 misc_int_disable:1,
		 target_int_disable:1,
		 host_int_disable:1,
		 byte_swap:1, /* dest_byte_swap */
		 src_byte_swap:1,
		 type:2, /* reserved */
		 tx_classify:1,
		 buffer_addr_hi:8;
	uint32_t meta_data:16,
		 nbytes:16;
#else
	uint32_t buffer_addr_hi:8,
		 tx_classify:1,
		 type:2, /* reserved */
		 src_byte_swap:1,
		 byte_swap:1, /* dest_byte_swap */
		 host_int_disable:1,
		 target_int_disable:1,
		 misc_int_disable:1,
		 addr_y_search_disable:1, /* reserved */
		 addr_x_search_disable:1, /* reserved */
		 toeplitz_hash_enable:1, /* reserved */
		 packet_result_offset:12,
		 gather:1;
	uint32_t nbytes:16,
		 meta_data:16;
#endif
	uint32_t toeplitz_hash_result:32;
};
#elif defined(QCA_WIFI_3_0)
struct CE_src_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		dest_byte_swap:1,
		byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;       /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		byte_swap:1,          /* src_byte_swap */
		dest_byte_swap:1,
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16; /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};

struct CE_dest_desc {
	uint32_t buffer_addr:32;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t gather:1,
		enable_11h:1,
		meta_data_low:2, /* fw_metadata_low */
		packet_result_offset:12,
		toeplitz_hash_enable:1,
		addr_y_search_disable:1,
		addr_x_search_disable:1,
		misc_int_disable:1,
		target_int_disable:1,
		host_int_disable:1,
		byte_swap:1,
		src_byte_swap:1,
		type:2,
		tx_classify:1,
		buffer_addr_hi:5;
	uint32_t meta_data:16, /* fw_metadata_high */
		nbytes:16;          /* length in register map */
#else
	uint32_t buffer_addr_hi:5,
		tx_classify:1,
		type:2,
		src_byte_swap:1,
		byte_swap:1,         /* dest_byte_swap */
		host_int_disable:1,
		target_int_disable:1,
		misc_int_disable:1,
		addr_x_search_disable:1,
		addr_y_search_disable:1,
		toeplitz_hash_enable:1,
		packet_result_offset:12,
		meta_data_low:2, /* fw_metadata_low */
		enable_11h:1,
		gather:1;
	uint32_t nbytes:16, /* length in register map */
		meta_data:16;    /* fw_metadata_high */
#endif
	uint32_t toeplitz_hash_result:32;
};
#else
struct CE_src_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};

struct CE_dest_desc {
	uint32_t buffer_addr;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t  meta_data:12,
		  target_int_disable:1,
		  host_int_disable:1,
		  byte_swap:1,
		  gather:1,
		  nbytes:16;
#else
	uint32_t nbytes:16,
		 gather:1,
		 byte_swap:1,
		 host_int_disable:1,
		 target_int_disable:1,
		 meta_data:12;
#endif
};
#endif /* QCA_WIFI_WCN6450 */

struct ce_srng_src_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 buffer_addr_hi:8;
	uint32_t rsvd1:16,
		 meta_data:16;
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t buffer_addr_hi:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_desc {
	uint32_t buffer_addr_lo;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd1:12,
		 buffer_addr_hi:8;
#else
	uint32_t buffer_addr_hi:8,
		 rsvd1:12,
		 ring_id:8,
		 loop_count:4;
#endif
};

struct ce_srng_dest_status_desc {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t nbytes:16,
		 rsvd:4,
		 gather:1,
		 dest_swap:1,
		 byte_swap:1,
		 toeplitz_hash_enable:1,
		 rsvd0:8;
	uint32_t rsvd1:16,
		 meta_data:16;
#else
	uint32_t rsvd0:8,
		 toeplitz_hash_enable:1,
		 byte_swap:1,
		 dest_swap:1,
		 gather:1,
		 rsvd:4,
		 nbytes:16;
	uint32_t meta_data:16,
		 rsvd1:16;
#endif
	uint32_t toeplitz_hash;
#if _BYTE_ORDER == _BIG_ENDIAN
	uint32_t loop_count:4,
		 ring_id:8,
		 rsvd3:20;
#else
	uint32_t rsvd3:20,
		 ring_id:8,
		 loop_count:4;
#endif
};
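
/*
 * Illustrative sketch, assuming a hypothetical helper name: splitting a
 * DMA address across buffer_addr_lo/buffer_addr_hi when building a source
 * ring descriptor.  Real descriptor construction goes through the HAL
 * layer; this only shows how the bit fields fit together.
 */
static inline void
ce_example_fill_srng_src(struct ce_srng_src_desc *desc,
			 qdf_dma_addr_t paddr, uint16_t nbytes)
{
	desc->buffer_addr_lo = (uint32_t)paddr;
	desc->buffer_addr_hi = (uint32_t)(((uint64_t)paddr >> 32) & 0xFF);
	desc->nbytes = nbytes;
	desc->gather = 0;	/* single-buffer transfer */
}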

#define CE_SENDLIST_ITEMS_MAX 12

/**
 * union ce_desc - unified data type for ce descriptors
 * @src_desc: source descriptor
 * @dest_desc: destination descriptor
 *
 * Both src and destination descriptors follow the same format.
 * They use different data structures for different access semantics.
 * Here we provide a unifying data type.
 */
union ce_desc {
	struct CE_src_desc src_desc;
	struct CE_dest_desc dest_desc;
};

/**
 * union ce_srng_desc - unified data type for ce srng descriptors
 * @src_desc: ce srng Source ring descriptor
 * @dest_desc: ce srng destination ring descriptor
 * @dest_status_desc: ce srng status ring descriptor
 */
union ce_srng_desc {
	struct ce_srng_src_desc src_desc;
	struct ce_srng_dest_desc dest_desc;
	struct ce_srng_dest_status_desc dest_status_desc;
};

/**
 * enum hif_ce_event_type - HIF copy engine event type
 * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
 * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
 * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
 * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
 * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write
 *	index in a normal tx
 * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
 * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the RX ring in fastpath
 * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the RX ring in fastpath
 * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
 *	of the TX ring in fastpath
 * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: recorded when dropping a write to
 *	the write index in fastpath
 * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
 *	index of the TX ring in fastpath
 * @RESUME_WRITE_INDEX_UPDATE: event recorded when a deferred write index
 *	update is applied on resume
 * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
 * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
 * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
 * @HIF_CE_TASKLET_REAP_REPOLL: records the repoll of the wlan_tasklet
 * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
 * @HIF_CE_REAP_ENTRY: records when we start processing completions outside a bh
 * @HIF_CE_REAP_EXIT: records when we finish processing completions outside a bh
 * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
 * @NAPI_POLL_ENTER: records the start of the napi poll function
 * @NAPI_COMPLETE: records when interrupts are re-enabled
 * @NAPI_POLL_EXIT: records when the napi poll function returns
 * @HIF_RX_NBUF_ALLOC_FAILURE: record the packet when nbuf fails to allocate
 * @HIF_RX_NBUF_MAP_FAILURE: record the packet when dma map fails
 * @HIF_RX_NBUF_ENQUEUE_FAILURE: record the packet when enqueue to ce fails
 * @HIF_CE_SRC_RING_BUFFER_POST: record the packet when buffer is posted to ce src ring
 * @HIF_CE_DEST_RING_BUFFER_POST: record the packet when buffer is posted to ce dst ring
 * @HIF_CE_DEST_RING_BUFFER_REAP: record the packet when buffer is reaped from ce dst ring
 * @HIF_CE_DEST_STATUS_RING_REAP: record the packet when status ring is reaped
 * @HIF_RX_DESC_PRE_NBUF_ALLOC: record the packet before nbuf allocation
 * @HIF_RX_DESC_PRE_NBUF_MAP: record the packet before nbuf map
 * @HIF_RX_DESC_POST_NBUF_MAP: record the packet after nbuf map
 * @HIF_EVENT_TYPE_MAX: max event
 */
enum hif_ce_event_type {
	HIF_RX_DESC_POST,
	HIF_RX_DESC_COMPLETION,
	HIF_TX_GATHER_DESC_POST,
	HIF_TX_DESC_POST,
	HIF_TX_DESC_SOFTWARE_POST,
	HIF_TX_DESC_COMPLETION,
	FAST_RX_WRITE_INDEX_UPDATE,
	FAST_RX_SOFTWARE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_UPDATE,
	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
	FAST_TX_SOFTWARE_INDEX_UPDATE,
	RESUME_WRITE_INDEX_UPDATE,

	HIF_IRQ_EVENT = 0x10,
	HIF_CE_TASKLET_ENTRY,
	HIF_CE_TASKLET_RESCHEDULE,
	HIF_CE_TASKLET_REAP_REPOLL,
	HIF_CE_TASKLET_EXIT,
	HIF_CE_REAP_ENTRY,
	HIF_CE_REAP_EXIT,
	NAPI_SCHEDULE,
	NAPI_POLL_ENTER,
	NAPI_COMPLETE,
	NAPI_POLL_EXIT,

	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
	HIF_RX_NBUF_MAP_FAILURE,
	HIF_RX_NBUF_ENQUEUE_FAILURE,

	HIF_CE_SRC_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_POST,
	HIF_CE_DEST_RING_BUFFER_REAP,
	HIF_CE_DEST_STATUS_RING_REAP,

	HIF_RX_DESC_PRE_NBUF_ALLOC,
	HIF_RX_DESC_PRE_NBUF_MAP,
	HIF_RX_DESC_POST_NBUF_MAP,

	HIF_EVENT_TYPE_MAX,
};

void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size);
void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id);
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len);

enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,
	/* TBDXXX: CE_RX_DESC_LIST, */
};

/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter.  The latter is the actual structure
 * definition and it is only used internally.  The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.
 */
struct ce_sendlist_s {
	unsigned int num_items;
	struct ce_sendlist_item {
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;        /* e.g. buffer or desc list */
		union {
			unsigned int nbytes;    /* simple buffer */
			unsigned int ndesc;     /* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags
		 */
		uint32_t flags;
		uint32_t user_flags;
	} item[CE_SENDLIST_ITEMS_MAX];
};
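
/*
 * Typical usage goes through the public API declared in ce_api.h; the
 * sketch below is illustrative and assumes the usual sendlist calls.
 * Callers only ever see the opaque struct ce_sendlist on their stack:
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, buffer_paddr, nbytes, flags, 0);
 *	ce_sendlist_send(copyeng, per_transfer_context, &sendlist,
 *			 transfer_id);
 */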

bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state);

#ifdef WLAN_FEATURE_FASTPATH
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif

/* which ring of a CE? */
#define CE_RING_SRC  0
#define CE_RING_DEST 1
#define CE_RING_STATUS 2

#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */

/**
 * hif_get_wake_ce_id() - gets the copy engine id used for waking up
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine Id should be populated
 *
 * Return: errno
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);

/**
 * hif_get_fw_diag_ce_id() - gets the copy engine id used for FW diag
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine Id should be populated
 *
 * Return: errno
 */
int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id);

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

#ifndef HIF_CE_HISTORY_MAX
#if defined(CONFIG_SLUB_DEBUG_ON)
#define HIF_CE_HISTORY_MAX 1024
#else
#define HIF_CE_HISTORY_MAX 768
#endif /* CONFIG_SLUB_DEBUG_ON */
#endif /* !HIF_CE_HISTORY_MAX */

#define CE_DEBUG_MAX_DATA_BUF_SIZE 64

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @index: location of the descriptor in the ce ring
 * @type: what the event was
 * @time: when it happened
 * @cpu_id: CPU on which the event was recorded
 * @current_hp: holds the current ring hp value
 * @current_tp: holds the current ring tp value
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @dma_addr: physical/iova address based on smmu status
 * @dma_to_phy: physical address from iova address
 * @virt_to_phy: physical address from virtual address
 * @actual_data_len: length of the data
 * @data: data pointed by descriptor
 */
struct hif_ce_desc_event {
	int index;
	enum hif_ce_event_type type;
	uint64_t time;
	int cpu_id;
#ifdef HELIUMPLUS
	union ce_desc descriptor;
#else
	uint32_t current_hp;
	uint32_t current_tp;
	union ce_srng_desc descriptor;
#endif
	void *memory;

#ifdef HIF_RECORD_PADDR
	/* iova/pa based on smmu status */
	qdf_dma_addr_t dma_addr;
	/* store pa from iova address */
	qdf_dma_addr_t dma_to_phy;
	/* store pa */
	qdf_dma_addr_t virt_to_phy;
#endif /* HIF_RECORD_PADDR */

#ifdef HIF_CE_DEBUG_DATA_BUF
	size_t actual_data_len;
	uint8_t *data;
#endif /* HIF_CE_DEBUG_DATA_BUF */
};
#else
struct hif_ce_desc_event;
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array.  If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 */
int get_next_record_index(qdf_atomic_t *table_index, int array_size);
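
/*
 * A minimal sketch of the wrap handling (the real implementation lives
 * in the CE service code): the atomic counter grows monotonically, so
 * concurrent callers each reserve a distinct value, which is then folded
 * back into [0, array_size):
 *
 *	int idx = qdf_atomic_inc_return(table_index);
 *
 *	if (idx == array_size)
 *		qdf_atomic_sub(array_size, table_index);
 *
 *	while (idx >= array_size)
 *		idx -= array_size;
 *
 *	return idx;
 */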

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * hif_record_ce_srng_desc_event() - record a ce srng descriptor event
 * @scn: the hif context
 * @ce_id: copy engine id
 * @type: event type
 * @descriptor: ce src/dest/status ring descriptor
 * @memory: nbuf
 * @index: current sw/write index
 * @len: length of the buffer
 * @hal_ring: ce hw ring
 *
 * Return: None
 */
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring);

/**
 * hif_clear_ce_desc_debug_data() - clear the contents of hif_ce_desc_event
 * @event: the CE event record to be cleared
 *
 * Clear the contents of the event, up to the data field, before reusing it.
 *
 * Return: None
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event);
#else
static inline
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
}

static inline
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_ce_desc_data_record() - Record data pointed by the CE descriptor
 * @event: structure detailing a ce event
 * @len: length of the data
 *
 * Return: None
 */
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len);

QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
#else
static inline
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) { }

static inline
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
{
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
 * @nbytes: nbytes value being written into a send descriptor
 * @ce_state: context of the copy engine
 *
 * nbytes should be non-zero and less than max configured for the copy engine
 *
 * Return: none
 */
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
	if (!nbytes || nbytes > ce_state->src_sz_max)
		QDF_BUG(0);
}
#else
static inline void ce_validate_nbytes(uint32_t nbytes,
				      struct CE_state *ce_state)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

#if defined(HIF_RECORD_PADDR)
/**
 * hif_ce_desc_record_rx_paddr() - record physical address for IOMMU
 * IOVA addr and MMU virtual addr for Rx
 * @scn: hif_softc
 * @event: event details
 * @nbuf: buffer posted to fw
 *
 * record physical address for ce_event_type HIF_RX_DESC_POST and
 * HIF_RX_DESC_COMPLETION
 *
 * Return: none
 */
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf);
#else
static inline
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf)
{
}
#endif /* HIF_RECORD_PADDR */

static inline void ce_ring_aquire_lock(struct CE_handle *handle)
{
	struct CE_state *ce_state = (struct CE_state *)handle;

	qdf_spin_lock_bh(&ce_state->ce_index_lock);
}

static inline void ce_ring_release_lock(struct CE_handle *handle)
{
	struct CE_state *ce_state = (struct CE_state *)handle;

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
}

/**
 * ce_ring_clear_event() - clear ring event
 * @ring: ring pointer
 * @event: ring event type
 */
static inline void ce_ring_clear_event(struct CE_ring_state *ring, int event)
{
	qdf_atomic_clear_bit(event, &ring->event);
}

/**
 * ce_ring_set_event() - set ring event
 * @ring: ring pointer
 * @event: ring event type
 */
static inline void ce_ring_set_event(struct CE_ring_state *ring, int event)
{
	qdf_atomic_set_bit(event, &ring->event);
}

/**
 * ce_ring_get_clear_event() - clear ring event and return the old value
 * @ring: ring pointer
 * @event: ring event type
 */
static inline int ce_ring_get_clear_event(struct CE_ring_state *ring, int event)
{
	return qdf_atomic_test_and_clear_bit(event, &ring->event);
}

static inline void ce_ring_inc_flush_cnt(struct CE_ring_state *ring)
{
	ring->flush_count++;
}
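
/*
 * Illustrative flow for CE_RING_FLUSH_EVENT using the helpers above
 * (the call sites shown are hypothetical): the event bit is set when the
 * link goes down and is consumed at most once per request by the flush
 * path:
 *
 *	ce_ring_set_event(ring, CE_RING_FLUSH_EVENT);	// on link down
 *
 *	if (ce_ring_get_clear_event(ring, CE_RING_FLUSH_EVENT)) {
 *		ce_ring_inc_flush_cnt(ring);	// ring indices flushed
 *		ring->last_flush_ts = qdf_get_log_timestamp();
 *	}
 */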
#endif /* __COPY_ENGINE_INTERNAL_H__ */