/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may differ for each invocation of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (if more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
		   void *per_CE_recv_context,
		   void *per_transfer_recv_context,
		   qdf_dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   - space is available and/or running short in a source ring
 *   - buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback.  See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH   1
#define CE_WM_FLAG_SEND_LOW    2
#define CE_WM_FLAG_RECV_HIGH   4
#define CE_WM_FLAG_RECV_LOW    8
#define CE_HTT_TX_CE           4

/**
 * ce_service_srng_init() - Initialization routine for CE services
 *                          in SRNG based targets
 * Return: None
 */
void ce_service_srng_init(void);

/**
 * ce_service_legacy_init() - Initialization routine for CE services
 *                            in legacy targets
 * Return: None
 */
void ce_service_legacy_init(void);

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE        1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   copyeng                   - which copy engine to use
 *   per_transfer_send_context - context passed back to the caller's
 *                               send completion callback
 *   buffer                    - address of buffer
 *   nbytes                    - number of bytes to send
 *   transfer_id               - arbitrary ID; reflected to destination
 *   flags                     - CE_SEND_FLAG_* values
 * Returns QDF_STATUS.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
QDF_STATUS ce_send(struct CE_handle *copyeng,
		   void *per_transfer_send_context,
		   qdf_dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags,
		   unsigned int user_flags);
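
/*
 * Example (illustrative sketch only): queueing one DMA-mapped buffer.
 * 'ce', 'my_ctx', 'paddr', 'len' and MY_XFER_ID are hypothetical
 * caller-side names, not part of this API.
 *
 *   QDF_STATUS status;
 *
 *   status = ce_send(ce, my_ctx, paddr, len, MY_XFER_ID, 0, 0);
 *   if (QDF_IS_STATUS_ERROR(status))
 *           unmap and free the buffer; the CE did not accept it
 */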

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
	unsigned int transfer_id, uint32_t download_len);

#endif

/*
 * ce_enqueue_desc() - enqueue a descriptor to the CE ring.
 * @copyeng: which copy engine to use
 * @msdu: data buffer
 * @transfer_id: arbitrary ID; reflected to destination
 * @download_len: length of the packet to download to FW
 */
int ce_enqueue_desc(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		    unsigned int transfer_id, uint32_t download_len);

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
		qdf_nbuf_t msdu,
		uint32_t transfer_id,
		uint32_t len,
		uint32_t sendhead);

QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl,
			  qdf_nbuf_t msdu,
			  uint32_t transfer_id,
			  uint32_t len);
/*
 * Register a Send Callback function.
 * The registered function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested.  In that case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);

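/*
 * Example (sketch): a minimal send-completion handler and its
 * registration; my_send_done() and my_pipe_ctx are hypothetical.
 *
 *   static void my_send_done(struct CE_handle *copyeng,
 *                            void *per_ce_ctx, void *per_xfer_ctx,
 *                            qdf_dma_addr_t buffer, unsigned int nbytes,
 *                            unsigned int transfer_id,
 *                            unsigned int sw_index, unsigned int hw_index,
 *                            uint32_t toeplitz_hash_result)
 *   {
 *           reclaim the buffer identified by per_xfer_ctx
 *   }
 *
 *   ce_send_cb_register(ce, my_send_done, my_pipe_ctx, 0);
 */
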
/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/**
 * ce_sendlist_buf_add() - Append a simple buffer (address/length) to a sendlist
 * @sendlist: Sendlist
 * @buffer: buffer
 * @nbytes: number of bytes to append
 * @flags: flags, OR-ed with internal flags
 * @user_flags: user flags
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_sendlist_buf_add(struct ce_sendlist *sendlist,
			       qdf_dma_addr_t buffer,
			       unsigned int nbytes,
			       uint32_t flags,
			       uint32_t user_flags);

/**
 * ce_sendlist_send() - Queue a "sendlist" of buffers to be sent using gather
 * to a single anonymous destination buffer
 * @copyeng: which copy engine to use
 * @per_transfer_send_context: Per transfer send context
 * @sendlist: list of simple buffers to send using gather
 * @transfer_id: arbitrary ID; reflected to destination
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_sendlist_send(struct CE_handle *copyeng,
			    void *per_transfer_send_context,
			    struct ce_sendlist *sendlist,
			    unsigned int transfer_id);

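/*
 * Example (sketch): gathering two fragments into a single transfer.
 * The addresses, lengths and IDs are hypothetical.
 *
 *   struct ce_sendlist sl;
 *
 *   ce_sendlist_init(&sl);
 *   ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *   ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *   if (ce_sendlist_send(ce, my_ctx, &sl, MY_XFER_ID) !=
 *       QDF_STATUS_SUCCESS)
 *           nothing was queued; the caller still owns both fragments
 */
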
/*==================Recv=====================================================*/

/**
 * ce_recv_buf_enqueue() - Make a buffer available to receive. The buffer must
 * be at least as large as this copy engine's minimal receive size (the
 * src_sz_max attribute).
 * @copyeng: which copy engine to use
 * @per_transfer_recv_context: context passed back to caller's recv_cb
 * @buffer: address of buffer in CE space
 *
 * Implementation note: Pushes a buffer to Dest ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_recv_buf_enqueue(struct CE_handle *copyeng,
			       void *per_transfer_recv_context,
			       qdf_dma_addr_t buffer);

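/*
 * Example (sketch): pre-posting receive buffers after CE init.  The
 * allocator and count are hypothetical; each buffer must be at least
 * src_sz_max bytes.
 *
 *   for (i = 0; i < num_rx_bufs; i++) {
 *           qdf_dma_addr_t paddr = alloc_rx_buf(&rx_ctx[i]);
 *
 *           if (ce_recv_buf_enqueue(ce, rx_ctx[i], paddr) !=
 *               QDF_STATUS_SUCCESS)
 *                   break;
 *   }
 */
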
/*
 * Register a Receive Callback function.
 * The registered function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * The registered function is called as soon as a watermark level
 * is crossed.  A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			  CE_watermark_cb fn_ptr,
			  void *per_CE_wm_context);

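/*
 * Example (sketch): a watermark handler that decodes the trigger
 * condition; my_wm_cb() and my_pipe_ctx are hypothetical.
 *
 *   static void my_wm_cb(struct CE_handle *copyeng,
 *                        void *per_CE_wm_context, unsigned int flags)
 *   {
 *           if (flags & CE_WM_FLAG_SEND_HIGH)
 *                   source ring crossed its high mark
 *           if (flags & CE_WM_FLAG_SEND_LOW)
 *                   source ring drained below its low mark
 *           if (flags & CE_WM_FLAG_RECV_HIGH)
 *                   destination ring crossed its high mark
 *           if (flags & CE_WM_FLAG_RECV_LOW)
 *                   destination ring dropped below its low mark
 *   }
 *
 *   ce_watermark_cb_register(ce, my_wm_cb, my_pipe_ctx);
 */
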
/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of a copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

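/*
 * Example (sketch): arm only a high watermark on the source ring.
 * Per the notes above, a low mark of 0 never fires; the margin of 16
 * entries is a hypothetical choice.
 *
 *   ce_send_watermarks_set(ce, 0, attr.src_nentries - 16);
 */
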
/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For a source ring, this does not imply that destination-side
 * buffers are available; it merely indicates descriptor space
 * in the source ring.
 *
 * For a destination ring, this does not imply that previously
 * received buffers have been processed; it merely indicates
 * descriptor space in the destination ring.
 *
 * Mainly for use with the CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED            1

/**
 * ce_completed_recv_next() - Supply data for the next completed unprocessed
 * receive descriptor.
 * @copyeng: which copy engine to use
 * @per_CE_contextp: CE context
 * @per_transfer_contextp: Transfer context
 * @bufferp: buffer pointer
 * @nbytesp: number of bytes
 * @transfer_idp: Transfer ID
 * @flagsp: flags
 *
 * For use
 *    - with the CE Watermark callback,
 *    - in a recv_cb function when processing buf_lists,
 *    - in a recv_cb function in order to mitigate recv_cb invocations.
 *
 * Implementation note: Pops buffer from Dest ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_completed_recv_next(struct CE_handle *copyeng,
				  void **per_CE_contextp,
				  void **per_transfer_contextp,
				  qdf_dma_addr_t *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp);

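/*
 * Example (sketch): draining every completed receive in one pass,
 * e.g. from a watermark callback; process_rx() is hypothetical.
 *
 *   void *ce_ctx, *xfer_ctx;
 *   qdf_dma_addr_t paddr;
 *   unsigned int nbytes, id, flags;
 *
 *   while (ce_completed_recv_next(ce, &ce_ctx, &xfer_ctx, &paddr,
 *                                 &nbytes, &id, &flags) ==
 *          QDF_STATUS_SUCCESS)
 *           process_rx(xfer_ctx, nbytes, flags);
 */
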
/**
 * ce_completed_send_next() - Supply data for the next completed unprocessed
 * send descriptor.
 * @copyeng: which copy engine to use
 * @per_CE_contextp: CE context
 * @per_transfer_contextp: Transfer context
 * @bufferp: buffer pointer
 * @nbytesp: number of bytes
 * @transfer_idp: Transfer ID
 * @sw_idx: SW index
 * @hw_idx: HW index
 * @toeplitz_hash_result: toeplitz hash result
 *
 * For use
 *    - with the CE Watermark callback,
 *    - in a send_cb function in order to mitigate send_cb invocations.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_completed_send_next(struct CE_handle *copyeng,
				  void **per_CE_contextp,
				  void **per_transfer_contextp,
				  qdf_dma_addr_t *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *sw_idx,
				  unsigned int *hw_idx,
				  uint32_t *toeplitz_hash_result);

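/*
 * Example (sketch): reaping completed sends by polling, as might be
 * done for a CE created with CE_ATTR_DISABLE_INTR; complete_tx() is
 * hypothetical.
 *
 *   while (ce_completed_send_next(ce, &ce_ctx, &xfer_ctx, &paddr,
 *                                 &nbytes, &id, &sw_idx, &hw_idx,
 *                                 &hash) == QDF_STATUS_SUCCESS)
 *           complete_tx(xfer_ctx);
 */
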
#ifdef CUSTOM_CB_SCHEDULER_SUPPORT
/*==================CE custom callbacks=================================*/

/**
 * ce_register_custom_cb() - Helper API to register the custom callback
 * @copyeng: Pointer to CE handle
 * @custom_cb: Custom callback function pointer
 * @custom_cb_context: Custom callback context
 *
 * Return: void
 */
void
ce_register_custom_cb(struct CE_handle *copyeng, void (*custom_cb)(void *),
		      void *custom_cb_context);

/**
 * ce_unregister_custom_cb() - Helper API to unregister the custom callback
 * @copyeng: Pointer to CE handle
 *
 * Return: void
 */
void
ce_unregister_custom_cb(struct CE_handle *copyeng);

/**
 * ce_enable_custom_cb() - Helper API to enable the custom callback
 * @copyeng: Pointer to CE handle
 *
 * Return: void
 */
void
ce_enable_custom_cb(struct CE_handle *copyeng);

/**
 * ce_disable_custom_cb() - Helper API to disable the custom callback
 * @copyeng: Pointer to CE handle
 *
 * Return: void
 */
void
ce_disable_custom_cb(struct CE_handle *copyeng);
#endif /* CUSTOM_CB_SCHEDULER_SUPPORT */

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

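/*
 * Example (sketch): bringing up one copy engine.  The CE id and ring
 * sizes are hypothetical; real values come from the target's CE
 * configuration.
 *
 *   struct CE_attr attr = {
 *           .flags = 0,
 *           .src_nentries = 256,
 *           .src_sz_max = 2048,
 *           .dest_nentries = 256,
 *   };
 *   struct CE_handle *ce = ce_init(scn, 1, &attr);
 *
 *   if (!ce)
 *           handle the failure (assuming a NULL return on error)
 */
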
/**
 * hif_ce_desc_history_log_register() - Register hif_ce_desc_history buffers
 * to SSR driver dump.
 * @scn: HIF context
 *
 * Return: None
 */
void hif_ce_desc_history_log_register(struct hif_softc *scn);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);

void ce_fini(struct CE_handle *copyeng);

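/*
 * Example (sketch): clean shutdown order, assuming target DMA has
 * already been stopped; free_rx_buf()/free_tx_buf() are hypothetical.
 *
 *   while (ce_revoke_recv_next(ce, &ce_ctx, &xfer_ctx, &paddr) ==
 *          QDF_STATUS_SUCCESS)
 *           free_rx_buf(xfer_ctx);
 *
 *   while (ce_cancel_send_next(ce, &ce_ctx, &xfer_ctx, &paddr,
 *                              &nbytes, &id, &hash) ==
 *          QDF_STATUS_SUCCESS)
 *           free_tx_buf(xfer_ctx);
 *
 *   ce_fini(ce);
 */
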
/**
 * hif_ce_desc_history_log_unregister() - unregister hif_ce_desc_history
 * buffers from SSR driver dump.
 *
 * Return: None
 */
void hif_ce_desc_history_log_unregister(void);

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/**
 * war_ce_src_ring_write_idx_set() - Set write index for CE source ring
 * @scn: HIF context
 * @ctrl_addr: address
 * @write_index: write index
 *
 * Return: None
 */
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP             0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA       0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS  0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR         0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL          0x10 /* poll for residue descriptors */
#define CE_ATTR_DIAG                 0x20 /* Diag CE */
#define CE_ATTR_INIT_ON_DEMAND       0x40 /* Initialized on demand */
#define CE_ATTR_HI_TASKLET           0x80 /* HI_TASKLET CE */

/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags:         CE_ATTR_* values
 * @priority:      TBD
 * @src_nentries:  #entries in source ring - Must be a power of 2
 * @src_sz_max:    Max source send size for this CE. This is also the minimum
 *                 size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved:      Future Use
 */
struct CE_attr {
	unsigned int flags;
	unsigned int priority;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
	void *reserved;
};

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send_next will return success
 * for each fragment done with send, and the transfer context would be
 * CE_SENDLIST_ITEM_CTXT. Upper layers can use this to identify the
 * status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT   ((void *)0xcecebeef)

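/*
 * Example (sketch): filtering gather fragments in a send-completion
 * handler so only the final fragment completes the caller's context;
 * complete_tx() is hypothetical.
 *
 *   if (xfer_ctx == CE_SENDLIST_ITEM_CTXT)
 *           return;
 *   complete_tx(xfer_ctx);
 */
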
/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack.  TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED  0x0000  /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED    0x0001  /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE  0x0002  /* for shared IRQ's */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resource to micro controller
 * Micro controller needs
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);
/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
		uint32_t ce_id, struct CE_ring_state *ring,
		struct CE_attr *attr);
	void (*ce_srng_cleanup)(struct hif_softc *scn,
				struct CE_state *CE_state, uint8_t ring_type);
	QDF_STATUS (*ce_send_nolock)(struct CE_handle *copyeng,
				     void *per_transfer_context,
				     qdf_dma_addr_t buffer,
				     uint32_t nbytes,
				     uint32_t transfer_id,
				     uint32_t flags,
				     uint32_t user_flags);
	QDF_STATUS (*ce_sendlist_send)(struct CE_handle *copyeng,
				       void *per_transfer_context,
				       struct ce_sendlist *sendlist,
				       unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
			void **per_CE_contextp, void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp, unsigned int *nbytesp,
			unsigned int *transfer_idp,
			uint32_t *toeplitz_hash_result);
	QDF_STATUS (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
					  void *per_recv_context,
					  qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	QDF_STATUS (*ce_completed_recv_next_nolock)(
			struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *flagsp);
	QDF_STATUS (*ce_completed_send_next_nolock)(
			struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *sw_idx,
			unsigned int *hw_idx,
			uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
			struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
			    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
			     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			    struct pld_shadow_reg_v2_cfg **shadow_config,
			    int *num_shadow_registers_configured);
	int (*ce_get_index_info)(struct hif_softc *scn, void *ce_state,
				 struct ce_index *info);
#ifdef CONFIG_SHADOW_V3
	void (*ce_prepare_shadow_register_v3_cfg)(struct hif_softc *scn,
			    struct pld_shadow_reg_v3_cfg **shadow_config,
			    int *num_shadow_registers_configured);
#endif
#ifdef FEATURE_DIRECT_LINK
	QDF_STATUS (*ce_set_irq_config_by_ceid)(struct hif_softc *scn,
						uint8_t ce_id, uint64_t addr,
						uint32_t data);
	uint16_t (*ce_get_direct_link_dest_buffers)(struct hif_softc *scn,
						    uint64_t **dma_addr,
						    uint32_t *buf_size);
	QDF_STATUS (*ce_get_direct_link_ring_info)(struct hif_softc *scn,
					   struct hif_direct_link_ce_info *info,
					   uint8_t max_ce_info_len);
#endif
};

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);

/**
 * ce_engine_service_reg() - service a copy engine through the regular path
 * @scn: HIF context
 * @CE_id: Copy engine ID
 *
 * Called from ce_per_engine_service and goes through the regular interrupt
 * handling that does not involve the WLAN fast path feature.
 *
 * Return: void
 */
void ce_engine_service_reg(struct hif_softc *scn, int CE_id);

/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
 * @scn: hif_context
 * @ce_id: Copy engine ID
 *
 * Return: void
 */
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id);

void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
					int coalesce);

/**
 * ce_flush_tx_ring_write_idx() - CE handler to flush the TX ring write index
 * @ce_tx_hdl: CE handle
 * @force_flush: when true, force-flush the write index
 *
 * Return: void
 */
void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush);
#endif /* __COPY_ENGINE_API_H__ */