xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_api.h (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef __COPY_ENGINE_API_H__
20 #define __COPY_ENGINE_API_H__
21 
22 #include "pld_common.h"
23 #include "ce_main.h"
24 #include "hif_main.h"
25 
26 /* TBDXXX: Use int return values for consistency with Target */
27 
28 /* TBDXXX: Perhaps merge Host/Target-->common */
29 
30 /*
31  * Copy Engine support: low-level Target-side Copy Engine API.
32  * This is a hardware access layer used by code that understands
33  * how to use copy engines.
34  */
35 
36 /*
37  * A "struct CE_handle *" serves as an opaque pointer-sized
38  * handle to a specific copy engine.
39  */
40 struct CE_handle;
41 
42 /*
43  * "Send Completion" callback type for Send Completion Notification.
44  *
45  * If a Send Completion callback is registered and one or more sends
46  * have completed, the callback is invoked.
47  *
48  * per_ce_send_context is a context supplied by the calling layer
49  * (via ce_send_cb_register). It is associated with a copy engine.
50  *
51  * per_transfer_send_context is context supplied by the calling layer
52  * (via the "send" call).  It may be different for each invocation
53  * of send.
54  *
55  * The buffer parameter is the first byte sent of the first buffer
56  * sent (if more than one buffer).
57  *
58  * nbytes is the number of bytes of that buffer that were sent.
59  *
60  * transfer_id matches the value used when the buffer or
61  * buf_list was sent.
62  *
63  * Implementation note: Pops 1 completed send buffer from Source ring
64  */
65 typedef void (*ce_send_cb)(struct CE_handle *copyeng,
66 			   void *per_ce_send_context,
67 			   void *per_transfer_send_context,
68 			   qdf_dma_addr_t buffer,
69 			   unsigned int nbytes,
70 			   unsigned int transfer_id,
71 			   unsigned int sw_index,
72 			   unsigned int hw_index,
73 			   uint32_t toeplitz_hash_result);
74 
75 /*
76  * "Buffer Received" callback type for Buffer Received Notification.
77  *
78  * Implementation note: Pops 1 completed recv buffer from Dest ring
79  */
80 typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
81 		   void *per_CE_recv_context,
82 		   void *per_transfer_recv_context,
83 		   qdf_dma_addr_t buffer,
84 		   unsigned int nbytes,
85 		   unsigned int transfer_id,
86 		   unsigned int flags);
87 
88 /*
89  * Copy Engine Watermark callback type.
90  *
91  * Allows upper layers to be notified when watermarks are reached:
92  *   space is available and/or running short in a source ring
93  *   buffers are exhausted and/or abundant in a destination ring
94  *
95  * The flags parameter indicates which condition triggered this
96  * callback.  See CE_WM_FLAG_*.
97  *
98  * Watermark APIs are provided to allow upper layers "batch"
99  * descriptor processing and to allow upper layers to
100  * throttle/unthrottle.
101  */
102 typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
103 				void *per_CE_wm_context, unsigned int flags);
104 
105 
106 #define CE_WM_FLAG_SEND_HIGH   1
107 #define CE_WM_FLAG_SEND_LOW    2
108 #define CE_WM_FLAG_RECV_HIGH   4
109 #define CE_WM_FLAG_RECV_LOW    8
110 #define CE_HTT_TX_CE           4
111 
112 
113 /**
114  * ce_service_srng_init() - Initialization routine for CE services
115  *                          in SRNG based targets
 * Return: None
117  */
118 void ce_service_srng_init(void);
119 
120 /**
121  * ce_service_legacy_init() - Initialization routine for CE services
122  *                            in legacy targets
 * Return: None
124  */
125 void ce_service_legacy_init(void);
126 
127 /* A list of buffers to be gathered and sent */
128 struct ce_sendlist;
129 
130 /* Copy Engine settable attributes */
131 struct CE_attr;
132 
133 /*==================Send=====================================================*/
134 
135 /* ce_send flags */
136 /* disable ring's byte swap, even if the default policy is to swap */
137 #define CE_SEND_FLAG_SWAP_DISABLE        1
138 
139 /*
140  * Queue a source buffer to be sent to an anonymous destination buffer.
141  *   copyeng         - which copy engine to use
142  *   buffer          - address of buffer
143  *   nbytes          - number of bytes to send
144  *   transfer_id     - arbitrary ID; reflected to destination
145  *   flags           - CE_SEND_FLAG_* values
146  * Returns QDF_STATUS.
147  *
148  * Note: If no flags are specified, use CE's default data swap mode.
149  *
150  * Implementation note: pushes 1 buffer to Source ring
151  */
152 QDF_STATUS ce_send(struct CE_handle *copyeng,
153 		   void *per_transfer_send_context,
154 		   qdf_dma_addr_t buffer,
155 		   unsigned int nbytes,
156 		   unsigned int transfer_id,
157 		   unsigned int flags,
158 		   unsigned int user_flags);
159 
160 #ifdef WLAN_FEATURE_FASTPATH
161 int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
162 	unsigned int transfer_id, uint32_t download_len);
163 
164 #endif
165 
166 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
167 extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
168 		qdf_nbuf_t msdu,
169 		uint32_t transfer_id,
170 		uint32_t len,
171 		uint32_t sendhead);
172 
173 QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl,
174 			  qdf_nbuf_t msdu,
175 			  uint32_t transfer_id,
176 			  uint32_t len);
177 /*
178  * Register a Send Callback function.
179  * This function is called as soon as the contents of a Send
180  * have reached the destination, unless disable_interrupts is
181  * requested.  In this case, the callback is invoked when the
182  * send status is polled, shortly after the send completes.
183  */
184 void ce_send_cb_register(struct CE_handle *copyeng,
185 			 ce_send_cb fn_ptr,
186 			 void *per_ce_send_context, int disable_interrupts);
187 
188 /*
189  * Return the size of a SendList. This allows the caller to allocate
190  * a SendList while the SendList structure remains opaque.
191  */
192 unsigned int ce_sendlist_sizeof(void);
193 
194 /* Initialize a sendlist */
195 void ce_sendlist_init(struct ce_sendlist *sendlist);
196 
197 /**
198  * ce_sendlist_buf_add() - Append a simple buffer (address/length) to a sendlist
199  * @sendlist: Sendlist
200  * @buffer: buffer
 * @nbytes: number of bytes to append
202  * @flags: flags
203  * @user_flags: user flags
204  *
205  * Return: QDF_STATUS
206  */
207 QDF_STATUS ce_sendlist_buf_add(struct ce_sendlist *sendlist,
208 			       qdf_dma_addr_t buffer,
209 			       unsigned int nbytes,
210 			       /* OR-ed with internal flags */
211 			       uint32_t flags,
212 			       uint32_t user_flags);
213 
214 /*
215  * ce_sendlist_send() - Queue a "sendlist" of buffers to be sent using gather to
216  * a single anonymous destination buffer
217  * @copyeng: which copy engine to use
218  * @per_transfer_send_context: Per transfer send context
219  * @sendlist: list of simple buffers to send using gather
220  * @transfer_id: arbitrary ID; reflected to destination
221  *
222  * Implementation note: Pushes multiple buffers with Gather to Source ring.
223  *
224  * Return: QDF_STATUS
225  */
226 QDF_STATUS ce_sendlist_send(struct CE_handle *copyeng,
227 			    void *per_transfer_send_context,
228 			    struct ce_sendlist *sendlist,
229 			    unsigned int transfer_id);
230 
231 /*==================Recv=====================================================*/
232 
233 /**
234  * ce_recv_buf_enqueue() -  Make a buffer available to receive. The buffer must
235  * be at least of a minimal size appropriate for this copy engine (src_sz_max
236  * attribute).
237  * @copyeng: which copy engine to use
238  * @per_transfer_recv_context: context passed back to caller's recv_cb
239  * @buffer: address of buffer in CE space
240  *
241  * Implementation note: Pushes a buffer to Dest ring.
242  *
243  * Return: QDF_STATUS.
244  */
245 QDF_STATUS ce_recv_buf_enqueue(struct CE_handle *copyeng,
246 			       void *per_transfer_recv_context,
247 			       qdf_dma_addr_t buffer);
248 
249 /*
250  * Register a Receive Callback function.
251  * This function is called as soon as data is received
252  * from the source.
253  */
254 void ce_recv_cb_register(struct CE_handle *copyeng,
255 			 CE_recv_cb fn_ptr,
256 			 void *per_CE_recv_context,
257 			 int disable_interrupts);
258 
259 /*==================CE Watermark=============================================*/
260 
261 /*
262  * Register a Watermark Callback function.
263  * This function is called as soon as a watermark level
264  * is crossed.  A Watermark Callback function is free to
265  * handle received data "en masse"; but then some coordination
266  * is required with a registered Receive Callback function.
267  * [Suggestion: Either handle Receives in a Receive Callback
268  * or en masse in a Watermark Callback; but not both.]
269  */
270 void ce_watermark_cb_register(struct CE_handle *copyeng,
271 			  CE_watermark_cb fn_ptr,
272 			  void *per_CE_wm_context);
273 
274 /*
275  * Set low/high watermarks for the send/source side of a copy engine.
276  *
277  * Typically, the destination side CPU manages watermarks for
278  * the receive side and the source side CPU manages watermarks
279  * for the send side.
280  *
281  * A low watermark of 0 is never hit (so the watermark function
282  * will never be called for a Low Watermark condition).
283  *
284  * A high watermark equal to nentries is never hit (so the
285  * watermark function will never be called for a High Watermark
286  * condition).
287  */
288 void ce_send_watermarks_set(struct CE_handle *copyeng,
289 			    unsigned int low_alert_nentries,
290 			    unsigned int high_alert_nentries);
291 
292 /* Set low/high watermarks for the receive/destination side of copy engine. */
293 void ce_recv_watermarks_set(struct CE_handle *copyeng,
294 			    unsigned int low_alert_nentries,
295 			    unsigned int high_alert_nentries);
296 
297 /*
298  * Return the number of entries that can be queued
299  * to a ring at an instant in time.
300  *
301  * For source ring, does not imply that destination-side
302  * buffers are available; merely indicates descriptor space
303  * in the source ring.
304  *
305  * For destination ring, does not imply that previously
306  * received buffers have been processed; merely indicates
307  * descriptor space in destination ring.
308  *
309  * Mainly for use with CE Watermark callback.
310  */
311 unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
312 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);
313 
314 /* recv flags */
315 /* Data is byte-swapped */
316 #define CE_RECV_FLAG_SWAPPED            1
317 
318 /**
319  * ce_completed_recv_next() - Supply data for the next completed unprocessed
320  * receive descriptor.
321  * @copyeng: which copy engine to use
322  * @per_CE_contextp: CE context
323  * @per_transfer_contextp: Transfer context
324  * @bufferp: buffer pointer
325  * @nbytesp: number of bytes
326  * @transfer_idp: Transfer idp
327  * @flagsp: flags
328  *
329  * For use
330  *    with CE Watermark callback,
331  *    in a recv_cb function when processing buf_lists
332  *    in a recv_cb function in order to mitigate recv_cb's.
333  *
334  * Implementation note: Pops buffer from Dest ring.
335  *
336  * Return: QDF_STATUS
337  */
338 QDF_STATUS ce_completed_recv_next(struct CE_handle *copyeng,
339 				  void **per_CE_contextp,
340 				  void **per_transfer_contextp,
341 				  qdf_dma_addr_t *bufferp,
342 				  unsigned int *nbytesp,
343 				  unsigned int *transfer_idp,
344 				  unsigned int *flagsp);
345 
346 /**
347  * ce_completed_send_next() - Supply data for the next completed unprocessed
348  * send descriptor.
349  * @copyeng: which copy engine to use
350  * @per_CE_contextp: CE context
351  * @per_transfer_contextp: Transfer context
352  * @bufferp: buffer pointer
353  * @nbytesp: number of bytes
354  * @transfer_idp: Transfer idp
355  * @sw_idx: SW index
356  * @hw_idx: HW index
357  * @toeplitz_hash_result: toeplitz hash result
358  *
359  * For use
360  *    with CE Watermark callback
361  *    in a send_cb function in order to mitigate send_cb's.
362  *
363  * Implementation note: Pops 1 completed send buffer from Source ring
364  *
365  * Return: QDF_STATUS
366  */
367 QDF_STATUS ce_completed_send_next(struct CE_handle *copyeng,
368 				  void **per_CE_contextp,
369 				  void **per_transfer_contextp,
370 				  qdf_dma_addr_t *bufferp,
371 				  unsigned int *nbytesp,
372 				  unsigned int *transfer_idp,
373 				  unsigned int *sw_idx,
374 				  unsigned int *hw_idx,
375 				  uint32_t *toeplitz_hash_result);
376 
377 /*==================CE Engine Initialization=================================*/
378 
379 /* Initialize an instance of a CE */
380 struct CE_handle *ce_init(struct hif_softc *scn,
381 			  unsigned int CE_id, struct CE_attr *attr);
382 
383 /*==================CE Engine Shutdown=======================================*/
384 /*
385  * Support clean shutdown by allowing the caller to revoke
386  * receive buffers.  Target DMA must be stopped before using
387  * this API.
388  */
389 QDF_STATUS
390 ce_revoke_recv_next(struct CE_handle *copyeng,
391 		    void **per_CE_contextp,
392 		    void **per_transfer_contextp,
393 		    qdf_dma_addr_t *bufferp);
394 
395 /*
396  * Support clean shutdown by allowing the caller to cancel
397  * pending sends.  Target DMA must be stopped before using
398  * this API.
399  */
400 QDF_STATUS
401 ce_cancel_send_next(struct CE_handle *copyeng,
402 		    void **per_CE_contextp,
403 		    void **per_transfer_contextp,
404 		    qdf_dma_addr_t *bufferp,
405 		    unsigned int *nbytesp,
406 		    unsigned int *transfer_idp,
407 		    uint32_t *toeplitz_hash_result);
408 
409 void ce_fini(struct CE_handle *copyeng);
410 
411 /*==================CE Interrupt Handlers====================================*/
412 void ce_per_engine_service_any(int irq, struct hif_softc *scn);
413 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
414 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);
415 
416 /*===================CE cmpl interrupt Enable/Disable =======================*/
417 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
418 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);
419 
/*
 * API to check if any of the copy engine pipes has
 * pending frames for processing
 */
423 bool ce_get_rx_pending(struct hif_softc *scn);
424 
425 /**
426  * war_ce_src_ring_write_idx_set() - Set write index for CE source ring
427  *
428  * Return: None
429  */
430 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
431 				   u32 ctrl_addr, unsigned int write_index);
432 
433 /* CE_attr.flags values */
434 #define CE_ATTR_NO_SNOOP             0x01 /* Use NonSnooping PCIe accesses? */
435 #define CE_ATTR_BYTE_SWAP_DATA       0x02 /* Byte swap data words */
436 #define CE_ATTR_SWIZZLE_DESCRIPTORS  0x04 /* Swizzle descriptors? */
437 #define CE_ATTR_DISABLE_INTR         0x08 /* no interrupt on copy completion */
438 #define CE_ATTR_ENABLE_POLL          0x10 /* poll for residue descriptors */
439 #define CE_ATTR_DIAG                 0x20 /* Diag CE */
440 #define CE_ATTR_INIT_ON_DEMAND       0x40 /* Initialized on demand */
441 
442 /**
443  * struct CE_attr - Attributes of an instance of a Copy Engine
444  * @flags:         CE_ATTR_* values
445  * @priority:      TBD
446  * @src_nentries:  #entries in source ring - Must be a power of 2
447  * @src_sz_max:    Max source send size for this CE. This is also the minimum
448  *                 size of a destination buffer
449  * @dest_nentries: #entries in destination ring - Must be a power of 2
450  * @reserved:      Future Use
451  */
452 struct CE_attr {
453 	unsigned int flags;
454 	unsigned int priority;
455 	unsigned int src_nentries;
456 	unsigned int src_sz_max;
457 	unsigned int dest_nentries;
458 	void *reserved;
459 };
460 
461 /*
462  * When using sendlist_send to transfer multiple buffer fragments, the
463  * transfer context of each fragment, except last one, will be filled
464  * with CE_SENDLIST_ITEM_CTXT. CE_completed_send will return success for
465  * each fragment done with send and the transfer context would be
466  * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
467  * status of a send completion.
468  */
469 #define CE_SENDLIST_ITEM_CTXT   ((void *)0xcecebeef)
470 
471 /*
472  * This is an opaque type that is at least large enough to hold
473  * a sendlist. A sendlist can only be accessed through CE APIs,
474  * but this allows a sendlist to be allocated on the run-time
475  * stack.  TBDXXX: un-opaque would be simpler...
476  */
477 struct ce_sendlist {
478 	unsigned int word[62];
479 };
480 
481 #define ATH_ISR_NOSCHED  0x0000  /* Do not schedule bottom half/DPC */
482 #define ATH_ISR_SCHED    0x0001  /* Schedule the bottom half for execution */
483 #define ATH_ISR_NOTMINE  0x0002  /* for shared IRQ's */
484 
485 #ifdef IPA_OFFLOAD
486 void ce_ipa_get_resource(struct CE_handle *ce,
487 			 qdf_shared_mem_t **ce_sr,
488 			 uint32_t *ce_sr_ring_size,
489 			 qdf_dma_addr_t *ce_reg_paddr);
490 #else
491 /**
492  * ce_ipa_get_resource() - get uc resource on copyengine
493  * @ce: copyengine context
494  * @ce_sr: copyengine source ring resource info
495  * @ce_sr_ring_size: copyengine source ring size
496  * @ce_reg_paddr: copyengine register physical address
497  *
498  * Copy engine should release resource to micro controller
499  * Micro controller needs
500  *  - Copy engine source descriptor base address
501  *  - Copy engine source descriptor size
502  *  - PCI BAR address to access copy engine regiser
503  *
504  * Return: None
505  */
506 static inline void ce_ipa_get_resource(struct CE_handle *ce,
507 			 qdf_shared_mem_t **ce_sr,
508 			 uint32_t *ce_sr_ring_size,
509 			 qdf_dma_addr_t *ce_reg_paddr)
510 {
511 }
512 #endif /* IPA_OFFLOAD */
513 
514 static inline void ce_pkt_error_count_incr(
515 	struct HIF_CE_state *_hif_state,
516 	enum ol_ath_hif_pkt_ecodes _hif_ecode)
517 {
518 	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);
519 
520 	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
521 		(scn->pkt_stats.hif_pipe_no_resrc_count)
522 		+= 1;
523 }
524 
525 bool ce_check_rx_pending(struct CE_state *CE_state);
526 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
527 struct ce_ops *ce_services_srng(void);
528 struct ce_ops *ce_services_legacy(void);
529 bool ce_srng_based(struct hif_softc *scn);
530 /* Forward declaration */
531 struct CE_ring_state;
532 
/*
 * struct ce_ops - per-target dispatch table for CE services.
 *
 * Populated by ce_services_srng() (SRNG based targets) or
 * ce_services_legacy() (legacy targets) so common CE code can drive
 * either hardware generation through one indirection layer.  The
 * *_nolock hooks mirror the public APIs above but leave locking to
 * the caller.
 */
struct ce_ops {
	/* size of one HW descriptor for the given ring type */
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	/* target-specific setup of a CE ring */
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
		uint32_t ce_id, struct CE_ring_state *ring,
		struct CE_attr *attr);
	/* backend for ce_send(); caller holds the CE lock */
	QDF_STATUS (*ce_send_nolock)(struct CE_handle *copyeng,
				     void *per_transfer_context,
				     qdf_dma_addr_t buffer,
				     uint32_t nbytes,
				     uint32_t transfer_id,
				     uint32_t flags,
				     uint32_t user_flags);
	/* backend for ce_sendlist_send(): gather-send of multiple buffers */
	QDF_STATUS (*ce_sendlist_send)(struct CE_handle *copyeng,
				       void *per_transfer_context,
				       struct ce_sendlist *sendlist,
				       unsigned int transfer_id);
	/* backend for ce_revoke_recv_next(): reclaim recv buf at shutdown */
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp);
	/* backend for ce_cancel_send_next(): cancel pending send at shutdown */
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
			void **per_CE_contextp, void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp, unsigned int *nbytesp,
			unsigned int *transfer_idp,
			uint32_t *toeplitz_hash_result);
	/* backend for ce_recv_buf_enqueue(): post a buffer to Dest ring */
	QDF_STATUS (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
					  void *per_recv_context,
					  qdf_dma_addr_t buffer);
	/* watermark interrupt check; *flags carries CE_WM_FLAG_* bits */
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	/* pop next completed recv descriptor (cf. ce_completed_recv_next) */
	QDF_STATUS (*ce_completed_recv_next_nolock)(
			struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *flagsp);
	/* pop next completed send descriptor (cf. ce_completed_send_next) */
	QDF_STATUS (*ce_completed_send_next_nolock)(
			struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *sw_idx,
			unsigned int *hw_idx,
			uint32_t *toeplitz_hash_result);
	/* count of completed-but-unprocessed recv entries */
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
			struct CE_state *CE_state);
	/* count of completed-but-unprocessed send entries */
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
			    struct CE_state *CE_state);
	/* adjust per-engine copy-complete interrupt handling */
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
			     int disable_copy_compl_intr);
	/* build shadow register v2 config for this target */
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			    struct pld_shadow_reg_v2_cfg **shadow_config,
			    int *num_shadow_registers_configured);
	/* fetch ring index info for a CE */
	int (*ce_get_index_info)(struct hif_softc *scn, void *ce_state,
				 struct ce_index *info);
};
592 
593 int hif_ce_bus_early_suspend(struct hif_softc *scn);
594 int hif_ce_bus_late_resume(struct hif_softc *scn);
595 
596 /*
597  * ce_engine_service_reg:
598  * @scn: hif_context
599  * @CE_id: Copy engine ID
600  *
601  * Called from ce_per_engine_service and goes through the regular interrupt
602  * handling that does not involve the WLAN fast path feature.
603  *
604  * Returns void
605  */
606 void ce_engine_service_reg(struct hif_softc *scn, int CE_id);
607 
608 /**
609  * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
610  * @scn: hif_context
611  * @ce_id: Copy engine ID
612  *
613  * Return: void
614  */
615 void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id);
616 
617 #endif /* __COPY_ENGINE_API_H__ */
618