/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may differ for each invocation of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (if more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);

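/*
 * Usage sketch (illustrative only; "example_send_done" and "pipe_ctx"
 * are hypothetical, and treating the per-transfer context as an nbuf
 * is a caller convention, not an API guarantee): a minimal send
 * completion callback, registered with interrupts enabled.
 *
 *	static void example_send_done(struct CE_handle *copyeng,
 *				      void *per_ce_send_context,
 *				      void *per_transfer_send_context,
 *				      qdf_dma_addr_t buffer,
 *				      unsigned int nbytes,
 *				      unsigned int transfer_id,
 *				      unsigned int sw_index,
 *				      unsigned int hw_index,
 *				      uint32_t toeplitz_hash_result)
 *	{
 *		qdf_nbuf_t nbuf = per_transfer_send_context;
 *
 *		if (nbuf)
 *			qdf_nbuf_free(nbuf);
 *	}
 *
 *	ce_send_cb_register(copyeng, example_send_done, pipe_ctx, 0);
 */
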
/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
		   void *per_CE_recv_context,
		   void *per_transfer_recv_context,
		   qdf_dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags);

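/*
 * Usage sketch (illustrative only; "example_recv" and
 * "example_deliver_up" are hypothetical): a minimal receive callback
 * that hands the completed buffer up the stack. As above, the nbuf
 * convention for per_transfer_recv_context is the caller's choice.
 *
 *	static void example_recv(struct CE_handle *copyeng,
 *				 void *per_CE_recv_context,
 *				 void *per_transfer_recv_context,
 *				 qdf_dma_addr_t buffer,
 *				 unsigned int nbytes,
 *				 unsigned int transfer_id,
 *				 unsigned int flags)
 *	{
 *		bool swapped = flags & CE_RECV_FLAG_SWAPPED;
 *
 *		example_deliver_up(per_CE_recv_context,
 *				   per_transfer_recv_context,
 *				   nbytes, swapped);
 *	}
 */
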
/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   space is available and/or running short in a source ring
 *   buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback.  See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH   1
#define CE_WM_FLAG_SEND_LOW    2
#define CE_WM_FLAG_RECV_HIGH   4
#define CE_WM_FLAG_RECV_LOW    8
#define CE_HTT_TX_CE           4

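/*
 * Usage sketch (illustrative only; "example_wm", "example_throttle" and
 * "example_unthrottle" are hypothetical): a watermark callback that
 * throttles its send queue when the source ring runs short on space
 * and unthrottles it once completions drain the ring again.
 *
 *	static void example_wm(struct CE_handle *copyeng,
 *			       void *per_CE_wm_context, unsigned int flags)
 *	{
 *		if (flags & CE_WM_FLAG_SEND_HIGH)
 *			example_throttle(per_CE_wm_context);
 *		if (flags & CE_WM_FLAG_SEND_LOW)
 *			example_unthrottle(per_CE_wm_context);
 *	}
 */
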
/**
 * ce_service_srng_init() - Initialization routine for CE services
 *                          in SRNG based targets
 * Return: None
 */
void ce_service_srng_init(void);

/**
 * ce_service_legacy_init() - Initialization routine for CE services
 *                            in legacy targets
 * Return: None
 */
void ce_service_legacy_init(void);

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE        1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   copyeng         - which copy engine to use
 *   per_transfer_send_context - context echoed back to the send
 *                     completion callback for this transfer
 *   buffer          - address of buffer
 *   nbytes          - number of bytes to send
 *   transfer_id     - arbitrary ID; reflected to destination
 *   flags           - CE_SEND_FLAG_* values
 *   user_flags      - user flags
 * Returns QDF_STATUS.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
QDF_STATUS ce_send(struct CE_handle *copyeng,
		   void *per_transfer_send_context,
		   qdf_dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags,
		   unsigned int user_flags);

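/*
 * Usage sketch (illustrative only; "nbuf" and "pipe_id" are
 * hypothetical caller state): queue one DMA-mapped nbuf with the CE's
 * default swap mode (no flags), passing the nbuf as the per-transfer
 * context so the send-completion callback can free it.
 *
 *	QDF_STATUS status;
 *
 *	status = ce_send(copyeng, nbuf,
 *			 qdf_nbuf_get_frag_paddr(nbuf, 0),
 *			 qdf_nbuf_len(nbuf), pipe_id, 0, 0);
 *	if (status != QDF_STATUS_SUCCESS)
 *		qdf_nbuf_free(nbuf);
 */
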
#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
	unsigned int transfer_id, uint32_t download_len);
#endif

/**
 * ce_enqueue_desc() - enqueue a descriptor to the CE ring
 * @copyeng: which copy engine to use
 * @msdu: data buffer
 * @transfer_id: arbitrary ID; reflected to destination
 * @download_len: length of the packet to download to FW
 */
int ce_enqueue_desc(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		    unsigned int transfer_id, uint32_t download_len);

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
		qdf_nbuf_t msdu,
		uint32_t transfer_id,
		uint32_t len,
		uint32_t sendhead);

QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl,
			  qdf_nbuf_t msdu,
			  uint32_t transfer_id,
			  uint32_t len);
/*
 * Register a Send Callback function.
 * The registered function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested.  In that case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/**
 * ce_sendlist_buf_add() - Append a simple buffer (address/length) to a sendlist
 * @sendlist: Sendlist
 * @buffer: buffer
 * @nbytes: number of bytes to append
 * @flags: CE_SEND_FLAG_* values (OR-ed with internal flags)
 * @user_flags: user flags
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_sendlist_buf_add(struct ce_sendlist *sendlist,
			       qdf_dma_addr_t buffer,
			       unsigned int nbytes,
			       uint32_t flags,
			       uint32_t user_flags);

/**
 * ce_sendlist_send() - Queue a "sendlist" of buffers to be sent using gather to
 * a single anonymous destination buffer
 * @copyeng: which copy engine to use
 * @per_transfer_send_context: Per transfer send context
 * @sendlist: list of simple buffers to send using gather
 * @transfer_id: arbitrary ID; reflected to destination
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_sendlist_send(struct CE_handle *copyeng,
			    void *per_transfer_send_context,
			    struct ce_sendlist *sendlist,
			    unsigned int transfer_id);

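/*
 * Usage sketch (illustrative only; the paddr/len variables and
 * "pipe_id" are hypothetical): gather a header fragment and a payload
 * fragment into a single transfer. The opaque ce_sendlist is sized so
 * it can live on the stack.
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	if (ce_sendlist_send(copyeng, nbuf, &sl, pipe_id) !=
 *	    QDF_STATUS_SUCCESS)
 *		qdf_nbuf_free(nbuf);
 */
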
/*==================Recv=====================================================*/

/**
 * ce_recv_buf_enqueue() - Make a buffer available to receive. The buffer must
 * be at least the minimal size appropriate for this copy engine (src_sz_max
 * attribute).
 * @copyeng: which copy engine to use
 * @per_transfer_recv_context: context passed back to caller's recv_cb
 * @buffer: address of buffer in CE space
 *
 * Implementation note: Pushes a buffer to Dest ring.
 *
 * Return: QDF_STATUS.
 */
QDF_STATUS ce_recv_buf_enqueue(struct CE_handle *copyeng,
			       void *per_transfer_recv_context,
			       qdf_dma_addr_t buffer);

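/*
 * Usage sketch (illustrative only; "example_alloc_rx_nbuf" is a
 * hypothetical helper that allocates and DMA-maps an nbuf of at least
 * src_sz_max bytes): keep the destination ring stocked by pre-posting
 * receive buffers while descriptor space remains.
 *
 *	qdf_dma_addr_t paddr;
 *	qdf_nbuf_t nbuf;
 *
 *	while (ce_recv_entries_avail(copyeng)) {
 *		nbuf = example_alloc_rx_nbuf(&paddr);
 *		if (!nbuf || ce_recv_buf_enqueue(copyeng, nbuf, paddr) !=
 *		    QDF_STATUS_SUCCESS)
 *			break;
 *	}
 */
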
/*
 * Register a Receive Callback function.
 * The registered function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * The registered function is called as soon as a watermark level
 * is crossed.  A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			  CE_watermark_cb fn_ptr,
			  void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

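/*
 * Usage sketch (illustrative only; "example_wm", "pipe_ctx" and the
 * mark values are hypothetical): register a watermark callback, then
 * arm the source ring so the callback can fire near full (high mark)
 * and again once it drains (low mark). Recall that marks of 0 and
 * nentries are never hit.
 *
 *	ce_watermark_cb_register(copyeng, example_wm, pipe_ctx);
 *	ce_send_watermarks_set(copyeng, 2, attr->src_nentries - 2);
 */
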
/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For a source ring, this does not imply that destination-side
 * buffers are available; it merely indicates descriptor space
 * in the source ring.
 *
 * For a destination ring, this does not imply that previously
 * received buffers have been processed; it merely indicates
 * descriptor space in the destination ring.
 *
 * Mainly for use with the CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED            1

/**
 * ce_completed_recv_next() - Supply data for the next completed unprocessed
 * receive descriptor.
 * @copyeng: which copy engine to use
 * @per_CE_contextp: CE context
 * @per_transfer_contextp: Transfer context
 * @bufferp: buffer pointer
 * @nbytesp: number of bytes
 * @transfer_idp: transfer ID
 * @flagsp: flags
 *
 * For use
 *    with the CE Watermark callback,
 *    in a recv_cb function when processing buf_lists, or
 *    in a recv_cb function in order to mitigate recv_cb invocations.
 *
 * Implementation note: Pops buffer from Dest ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_completed_recv_next(struct CE_handle *copyeng,
				  void **per_CE_contextp,
				  void **per_transfer_contextp,
				  qdf_dma_addr_t *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp);

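/*
 * Usage sketch (illustrative only; "example_deliver_up" is
 * hypothetical): drain every completed receive in one pass, e.g. from
 * a watermark callback, instead of taking one recv_cb per buffer.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int len, id, flags;
 *
 *	while (ce_completed_recv_next(copyeng, &ce_ctx, &xfer_ctx,
 *				      &paddr, &len, &id, &flags) ==
 *	       QDF_STATUS_SUCCESS)
 *		example_deliver_up(ce_ctx, xfer_ctx, len,
 *				   flags & CE_RECV_FLAG_SWAPPED);
 */
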
/**
 * ce_completed_send_next() - Supply data for the next completed unprocessed
 * send descriptor.
 * @copyeng: which copy engine to use
 * @per_CE_contextp: CE context
 * @per_transfer_contextp: Transfer context
 * @bufferp: buffer pointer
 * @nbytesp: number of bytes
 * @transfer_idp: transfer ID
 * @sw_idx: SW index
 * @hw_idx: HW index
 * @toeplitz_hash_result: toeplitz hash result
 *
 * For use
 *    with the CE Watermark callback, or
 *    in a send_cb function in order to mitigate send_cb invocations.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_completed_send_next(struct CE_handle *copyeng,
				  void **per_CE_contextp,
				  void **per_transfer_contextp,
				  qdf_dma_addr_t *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *sw_idx,
				  unsigned int *hw_idx,
				  uint32_t *toeplitz_hash_result);

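/*
 * Usage sketch (illustrative only; treating the transfer context as an
 * nbuf is a caller convention): reap completed sends in a batch rather
 * than taking a send_cb per descriptor. See CE_SENDLIST_ITEM_CTXT
 * below for the gather case.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int len, id, sw, hw;
 *	uint32_t hash;
 *
 *	while (ce_completed_send_next(copyeng, &ce_ctx, &xfer_ctx,
 *				      &paddr, &len, &id, &sw, &hw,
 *				      &hash) == QDF_STATUS_SUCCESS)
 *		qdf_nbuf_free((qdf_nbuf_t)xfer_ctx);
 */
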
#ifdef CUSTOM_CB_SCHEDULER_SUPPORT
/*==================CE custom callbacks=================================*/

/**
 * ce_register_custom_cb() - Helper API to register the custom callback
 * @copyeng: Pointer to CE handle
 * @custom_cb: Custom callback function pointer
 * @custom_cb_context: Custom callback context
 *
 * Return: void
 */
void
ce_register_custom_cb(struct CE_handle *copyeng, void (*custom_cb)(void *),
		      void *custom_cb_context);

/**
 * ce_unregister_custom_cb() - Helper API to unregister the custom callback
 * @copyeng: Pointer to CE handle
 *
 * Return: void
 */
void
ce_unregister_custom_cb(struct CE_handle *copyeng);

/**
 * ce_enable_custom_cb() - Helper API to enable the custom callback
 * @copyeng: Pointer to CE handle
 *
 * Return: void
 */
void
ce_enable_custom_cb(struct CE_handle *copyeng);

/**
 * ce_disable_custom_cb() - Helper API to disable the custom callback
 * @copyeng: Pointer to CE handle
 *
 * Return: void
 */
void
ce_disable_custom_cb(struct CE_handle *copyeng);
#endif /* CUSTOM_CB_SCHEDULER_SUPPORT */

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

/**
 * hif_ce_desc_history_log_register() - Register hif_ce_desc_history buffers
 * to SSR driver dump.
 *
 * Return: None
 */
void hif_ce_desc_history_log_register(void);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);

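/*
 * Shutdown sketch (illustrative only; the nbuf convention for the
 * transfer contexts is the caller's): with target DMA stopped, reclaim
 * all outstanding receive and send buffers before calling ce_fini().
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int len, id;
 *	uint32_t hash;
 *
 *	while (ce_revoke_recv_next(copyeng, &ce_ctx, &xfer_ctx,
 *				   &paddr) == QDF_STATUS_SUCCESS)
 *		qdf_nbuf_free((qdf_nbuf_t)xfer_ctx);
 *	while (ce_cancel_send_next(copyeng, &ce_ctx, &xfer_ctx, &paddr,
 *				   &len, &id, &hash) == QDF_STATUS_SUCCESS)
 *		qdf_nbuf_free((qdf_nbuf_t)xfer_ctx);
 */
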
void ce_fini(struct CE_handle *copyeng);

/**
 * hif_ce_desc_history_log_unregister() - Unregister hif_ce_desc_history
 * buffers from SSR driver dump.
 *
 * Return: None
 */
void hif_ce_desc_history_log_unregister(void);

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes have
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/**
 * war_ce_src_ring_write_idx_set() - Set write index for CE source ring
 * @scn: HIF context
 * @ctrl_addr: address
 * @write_index: write index
 *
 * Return: None
 */
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP             0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA       0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS  0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR         0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL          0x10 /* poll for residue descriptors */
#define CE_ATTR_DIAG                 0x20 /* Diag CE */
#define CE_ATTR_INIT_ON_DEMAND       0x40 /* Initialized on demand */
#define CE_ATTR_HI_TASKLET           0x80 /* HI_TASKLET CE */

/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags:         CE_ATTR_* values
 * @priority:      TBD
 * @src_nentries:  #entries in source ring - Must be a power of 2
 * @src_sz_max:    Max source send size for this CE. This is also the minimum
 *                 size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved:      Future Use
 */
struct CE_attr {
	unsigned int flags;
	unsigned int priority;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
	void *reserved;
};

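/*
 * Configuration sketch (illustrative only; the sizes are hypothetical,
 * not recommended values): a bidirectional CE with power-of-2 rings
 * whose receive buffers must be at least 2048 bytes.
 *
 *	static struct CE_attr example_attr = {
 *		.flags = 0,
 *		.priority = 0,
 *		.src_nentries = 32,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 32,
 *	};
 *
 *	struct CE_handle *ce = ce_init(scn, 1, &example_attr);
 */
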
/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send_next will report success
 * for each fragment sent, with CE_SENDLIST_ITEM_CTXT as the transfer
 * context for all but the final fragment. Upper layers can use this to
 * identify the status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT   ((void *)0xcecebeef)

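/*
 * Usage sketch (illustrative only): a send-completion path that frees
 * only the real per-transfer context, skipping the placeholder used
 * for intermediate gather fragments.
 *
 *	if (xfer_ctx != CE_SENDLIST_ITEM_CTXT)
 *		qdf_nbuf_free((qdf_nbuf_t)xfer_ctx);
 */
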
/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack.  TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED  0x0000  /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED    0x0001  /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE  0x0002  /* for shared IRQs */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * The copy engine should release these resources to the micro controller.
 * The micro controller needs:
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);
/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
		uint32_t ce_id, struct CE_ring_state *ring,
		struct CE_attr *attr);
	void (*ce_srng_cleanup)(struct hif_softc *scn,
				struct CE_state *CE_state, uint8_t ring_type);
	QDF_STATUS (*ce_send_nolock)(struct CE_handle *copyeng,
				     void *per_transfer_context,
				     qdf_dma_addr_t buffer,
				     uint32_t nbytes,
				     uint32_t transfer_id,
				     uint32_t flags,
				     uint32_t user_flags);
	QDF_STATUS (*ce_sendlist_send)(struct CE_handle *copyeng,
				       void *per_transfer_context,
				       struct ce_sendlist *sendlist,
				       unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
			void **per_CE_contextp, void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp, unsigned int *nbytesp,
			unsigned int *transfer_idp,
			uint32_t *toeplitz_hash_result);
	QDF_STATUS (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
					  void *per_recv_context,
					  qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	QDF_STATUS (*ce_completed_recv_next_nolock)(
			struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *flagsp);
	QDF_STATUS (*ce_completed_send_next_nolock)(
			struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *sw_idx,
			unsigned int *hw_idx,
			uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
			struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
			    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
			     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			    struct pld_shadow_reg_v2_cfg **shadow_config,
			    int *num_shadow_registers_configured);
	int (*ce_get_index_info)(struct hif_softc *scn, void *ce_state,
				 struct ce_index *info);
#ifdef CONFIG_SHADOW_V3
	void (*ce_prepare_shadow_register_v3_cfg)(struct hif_softc *scn,
			    struct pld_shadow_reg_v3_cfg **shadow_config,
			    int *num_shadow_registers_configured);
#endif
#ifdef FEATURE_DIRECT_LINK
	QDF_STATUS (*ce_set_irq_config_by_ceid)(struct hif_softc *scn,
						uint8_t ce_id, uint64_t addr,
						uint32_t data);
	uint16_t (*ce_get_direct_link_dest_buffers)(struct hif_softc *scn,
						    uint64_t **dma_addr,
						    uint32_t *buf_size);
	QDF_STATUS (*ce_get_direct_link_ring_info)(struct hif_softc *scn,
					   struct hif_direct_link_ce_info *info,
					   uint8_t max_ce_info_len);
#endif
};

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);

/**
 * ce_engine_service_reg() - service a copy engine through the regular
 * interrupt path
 * @scn: hif_context
 * @CE_id: Copy engine ID
 *
 * Called from ce_per_engine_service and goes through the regular interrupt
 * handling that does not involve the WLAN fast path feature.
 *
 * Return: void
 */
void ce_engine_service_reg(struct hif_softc *scn, int CE_id);

/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
 * @scn: hif_context
 * @ce_id: Copy engine ID
 *
 * Return: void
 */
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id);

void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
					 int coalesce);

/**
 * ce_flush_tx_ring_write_idx() - CE handler to flush the TX ring write index
 * @ce_tx_hdl: ce handle
 * @force_flush: force flush the write idx if set to true
 *
 * Return: void
 */
void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush);
#endif /* __COPY_ENGINE_API_H__ */