/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call).  It may be different for each invocation
 * of send.
 *
 * The buffer parameter is the address of the first byte sent, i.e. of
 * the first buffer when more than one buffer was sent.
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);
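
/*
 * A minimal sketch of a conforming send-completion callback
 * (my_send_done, my_layer_ctxt and my_reclaim_tx_buf are illustrative
 * only, not part of this API):
 *
 *	static void my_send_done(struct CE_handle *copyeng,
 *				 void *per_ce_send_context,
 *				 void *per_transfer_send_context,
 *				 qdf_dma_addr_t buffer,
 *				 unsigned int nbytes,
 *				 unsigned int transfer_id,
 *				 unsigned int sw_index,
 *				 unsigned int hw_index,
 *				 uint32_t toeplitz_hash_result)
 *	{
 *		struct my_layer_ctxt *ctxt = per_ce_send_context;
 *
 *		my_reclaim_tx_buf(ctxt, per_transfer_send_context,
 *				  buffer, nbytes);
 *	}
 */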

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
		   void *per_CE_recv_context,
		   void *per_transfer_recv_context,
		   qdf_dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   space is available and/or running short in a source ring
 *   buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback.  See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH   1
#define CE_WM_FLAG_SEND_LOW    2
#define CE_WM_FLAG_RECV_HIGH   4
#define CE_WM_FLAG_RECV_LOW    8
#define CE_HTT_TX_CE           4
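
/*
 * A minimal watermark-callback sketch keyed off the flags above,
 * assuming SEND_HIGH/SEND_LOW are used for throttling and RECV_LOW
 * signals the destination ring running short of buffers (the my_*
 * helpers are illustrative only):
 *
 *	static void my_wm_cb(struct CE_handle *copyeng,
 *			     void *per_CE_wm_context, unsigned int flags)
 *	{
 *		if (flags & CE_WM_FLAG_SEND_HIGH)
 *			my_throttle_tx(per_CE_wm_context);
 *		if (flags & CE_WM_FLAG_SEND_LOW)
 *			my_unthrottle_tx(per_CE_wm_context);
 *		if (flags & CE_WM_FLAG_RECV_LOW)
 *			my_replenish_rx_bufs(per_CE_wm_context);
 *	}
 */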

/**
 * ce_service_srng_init() - Initialization routine for CE services
 *                          in SRNG based targets
 * Return: None
 */
void ce_service_srng_init(void);

/**
 * ce_service_legacy_init() - Initialization routine for CE services
 *                            in legacy targets
 * Return: None
 */
void ce_service_legacy_init(void);

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE        1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   copyeng         - which copy engine to use
 *   per_transfer_send_context - context passed back to the caller's
 *                     send completion callback
 *   buffer          - address of buffer
 *   nbytes          - number of bytes to send
 *   transfer_id     - arbitrary ID; reflected to destination
 *   flags           - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
		void *per_transfer_send_context,
		qdf_dma_addr_t buffer,
		unsigned int nbytes,
		unsigned int transfer_id,
		unsigned int flags,
		unsigned int user_flags);
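
/*
 * A minimal ce_send() sketch, assuming frag_paddr/frag_len come from
 * the caller's DMA mapping and MY_XFER_ID is a caller-chosen transfer
 * ID (default swap policy, no user flags):
 *
 *	if (ce_send(ce_hdl, my_pkt_ctxt, frag_paddr, frag_len,
 *		    MY_XFER_ID, 0, 0) != 0)
 *		my_handle_ring_full();
 */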

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
	unsigned int transfer_id, uint32_t download_len);
#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
		qdf_nbuf_t msdu,
		uint32_t transfer_id,
		uint32_t len,
		uint32_t sendhead);

QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl,
			  qdf_nbuf_t msdu,
			  uint32_t transfer_id,
			  uint32_t len);

/*
 * Register a Send Callback function.
 * The registered callback is invoked as soon as the contents of a send
 * have reached the destination, unless disable_interrupts is
 * requested.  In that case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);
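
/*
 * For example, pairing the send-completion callback sketched earlier
 * with a copy engine, using interrupt-driven completion:
 *
 *	ce_send_cb_register(ce_hdl, my_send_done, my_layer_ctxt, 0);
 */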

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		qdf_dma_addr_t buffer,
		unsigned int nbytes,
		/* OR-ed with internal flags */
		uint32_t flags,
		uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer.
 *   copyeng         - which copy engine to use
 *   sendlist        - list of simple buffers to send using gather
 *   transfer_id     - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
		void *per_transfer_send_context,
		struct ce_sendlist *sendlist,
		unsigned int transfer_id);
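
/*
 * A minimal gather-send sketch; the sendlist can live on the stack
 * because struct ce_sendlist is sized for that. hdr_paddr/payload_paddr
 * and the lengths are assumed to be caller-supplied DMA mappings:
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	if (ce_sendlist_send(ce_hdl, my_pkt_ctxt, &sl, MY_XFER_ID) != 0)
 *		my_handle_ring_full();
 */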

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 *   copyeng                    - which copy engine to use
 *   per_transfer_recv_context  - context passed back to caller's recv_cb
 *   buffer                     - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
			void *per_transfer_recv_context,
			qdf_dma_addr_t buffer);
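
/*
 * Typical replenish sketch: keep posting DMA-mapped receive buffers,
 * each at least src_sz_max bytes, until the destination ring fills.
 * rx_buf_alloc() and ctxt are hypothetical caller-side pieces:
 *
 *	while ((paddr = rx_buf_alloc(&ctxt)) != 0) {
 *		if (ce_recv_buf_enqueue(ce_hdl, ctxt, paddr) != 0)
 *			break;
 *	}
 */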

/*
 * Register a Receive Callback function.
 * The registered callback is invoked as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * The registered callback is invoked as soon as a watermark level
 * is crossed.  A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			  CE_watermark_cb fn_ptr,
			  void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);
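
/*
 * For example, on a 512-entry source ring, a request to be alerted
 * when occupancy drains below 64 entries or fills beyond 448 might
 * look like this (the values are illustrative only):
 *
 *	ce_send_watermarks_set(ce_hdl, 64, 448);
 */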

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For source ring, does not imply that destination-side
 * buffers are available; merely indicates descriptor space
 * in the source ring.
 *
 * For destination ring, does not imply that previously
 * received buffers have been processed; merely indicates
 * descriptor space in destination ring.
 *
 * Mainly for use with CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED            1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *    with CE Watermark callback,
 *    in a recv_cb function when processing buf_lists, or
 *    in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *flagsp);
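
/*
 * En-masse processing sketch, e.g. from a watermark callback, assuming
 * the 0-on-success convention used elsewhere in this file (my_deliver
 * is illustrative):
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int nbytes, id, flags;
 *
 *	while (ce_completed_recv_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *				      &nbytes, &id, &flags) == 0)
 *		my_deliver(xfer_ctx, paddr, nbytes, flags);
 */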

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *    with CE Watermark callback, or
 *    in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *sw_idx,
			   unsigned int *hw_idx,
			   uint32_t *toeplitz_hash_result);

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);
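
/*
 * Shutdown sketch: once Target DMA is stopped, reclaim everything still
 * queued on both rings so the buffers can be unmapped and freed
 * (my_free_rx_buf/my_free_tx_buf are illustrative):
 *
 *	while (ce_revoke_recv_next(ce_hdl, &ce_ctx, &xfer_ctx,
 *				   &paddr) == QDF_STATUS_SUCCESS)
 *		my_free_rx_buf(xfer_ctx, paddr);
 *
 *	while (ce_cancel_send_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *				   &nbytes, &id, &hash) == QDF_STATUS_SUCCESS)
 *		my_free_tx_buf(xfer_ctx, paddr, nbytes);
 */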

void ce_fini(struct CE_handle *copyeng);

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes have
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/**
 * war_ce_src_ring_write_idx_set() - Set write index for CE source ring
 * @scn: HIF context
 * @ctrl_addr: CE control register address
 * @write_index: write index to set
 *
 * Return: None
 */
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP             0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA       0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS  0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR         0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL          0x10 /* poll for residue descriptors */
#define CE_ATTR_DIAG                 0x20 /* Diag CE */

/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags:         CE_ATTR_* values
 * @priority:      TBD
 * @src_nentries:  #entries in source ring - Must be a power of 2
 * @src_sz_max:    Max source send size for this CE. This is also the minimum
 *                 size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved:      Future Use
 */
struct CE_attr {
	unsigned int flags;
	unsigned int priority;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
	void *reserved;
};
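
/*
 * Bringing up one engine might look like the following; the sizes are
 * illustrative, but both ring sizes must be powers of 2:
 *
 *	struct CE_attr attr = {
 *		.flags = 0,
 *		.src_nentries = 512,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 512,
 *	};
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, &attr);
 */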

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, is filled
 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send_next will report
 * success for every fragment, with the transfer context set to
 * CE_SENDLIST_ITEM_CTXT for all but the last. The upper layer can use
 * this to identify the status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT   ((void *)0xcecebeef)
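
/*
 * So a send-completion handler can skip intermediate gather fragments
 * with a check such as:
 *
 *	if (per_transfer_send_context == CE_SENDLIST_ITEM_CTXT)
 *		return;
 */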

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack.  TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED  0x0000  /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED    0x0001  /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE  0x0002  /* for shared IRQ's */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * The copy engine releases these resources to the micro controller,
 * which needs:
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access the copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

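/**
 * ce_pkt_error_count_incr() - increment a HIF packet error counter
 * @_hif_state: HIF CE state
 * @_hif_ecode: packet error code identifying the counter to bump
 *
 * Currently only HIF_PIPE_NO_RESOURCE is counted.
 */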
static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);

/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
		uint32_t ce_id, struct CE_ring_state *ring,
		struct CE_attr *attr);
	int (*ce_send_nolock)(struct CE_handle *copyeng,
			   void *per_transfer_context,
			   qdf_dma_addr_t buffer,
			   uint32_t nbytes,
			   uint32_t transfer_id,
			   uint32_t flags,
			   uint32_t user_flags);
	int (*ce_sendlist_send)(struct CE_handle *copyeng,
			void *per_transfer_context,
			struct ce_sendlist *sendlist, unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
			void **per_CE_contextp, void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp, unsigned int *nbytesp,
			unsigned int *transfer_idp,
			uint32_t *toeplitz_hash_result);
	int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
			void *per_recv_context, qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *flagsp);
	int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *sw_idx,
			unsigned int *hw_idx,
			uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
			struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
			    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
			     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			    struct pld_shadow_reg_v2_cfg **shadow_config,
			    int *num_shadow_registers_configured);
};
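
/*
 * A dispatch sketch: the CE service layer selects one of these ops
 * tables per target generation, conceptually along the lines of the
 * following (the actual wiring lives in the CE service code):
 *
 *	struct ce_ops *ops = ce_srng_based(scn) ?
 *		ce_services_srng() : ce_services_legacy();
 */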

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);

/**
 * ce_engine_service_reg() - CE service routine for the regular interrupt path
 * @scn: hif_context
 * @CE_id: Copy engine ID
 *
 * Called from ce_per_engine_service and goes through the regular interrupt
 * handling that does not involve the WLAN fast path feature.
 *
 * Return: void
 */
void ce_engine_service_reg(struct hif_softc *scn, int CE_id);

/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
 * @scn: hif_context
 * @ce_id: Copy engine ID
 *
 * Return: void
 */
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id);

#endif /* __COPY_ENGINE_API_H__ */