xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_api.h (revision 1397a33f48ea6455be40871470b286e535820eb8)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef __COPY_ENGINE_API_H__
20 #define __COPY_ENGINE_API_H__
21 
22 #include "pld_common.h"
23 #include "ce_main.h"
24 #include "hif_main.h"
25 
26 /* TBDXXX: Use int return values for consistency with Target */
27 
28 /* TBDXXX: Perhaps merge Host/Target-->common */
29 
30 /*
31  * Copy Engine support: low-level Target-side Copy Engine API.
32  * This is a hardware access layer used by code that understands
33  * how to use copy engines.
34  */
35 
36 /*
37  * A "struct CE_handle *" serves as an opaque pointer-sized
38  * handle to a specific copy engine.
39  */
40 struct CE_handle;
41 
/**
 * typedef ce_send_cb - "Send Completed" callback type
 * @copyeng: copy engine on which the send(s) completed
 * @per_ce_send_context: context supplied by the calling layer via
 *                       ce_send_cb_register(); associated with the
 *                       copy engine as a whole
 * @per_transfer_send_context: context supplied by the calling layer
 *                             via the "send" call; may be different
 *                             for each invocation of send
 * @buffer: first byte sent of the first buffer sent (if more than
 *          one buffer)
 * @nbytes: number of bytes of that buffer that were sent
 * @transfer_id: matches the value used when the buffer or buf_list
 *               was sent
 * @sw_index: source ring software index at completion
 *            (NOTE(review): exact semantics assumed — confirm in the
 *            CE service implementation)
 * @hw_index: source ring hardware index at completion
 *            (NOTE(review): exact semantics assumed — confirm in the
 *            CE service implementation)
 * @toeplitz_hash_result: Toeplitz hash value reported for the transfer
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * Implementation note: pops 1 completed send buffer from Source ring.
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);
74 
/**
 * typedef CE_recv_cb - "Buffer Received" callback type
 * @copyeng: copy engine on which the buffer was received
 * @per_CE_recv_context: context supplied by the calling layer via
 *                       ce_recv_cb_register(); associated with the
 *                       copy engine as a whole
 * @per_transfer_recv_context: context supplied when the receive
 *                             buffer was enqueued
 *                             (ce_recv_buf_enqueue())
 * @buffer: DMA address of the received buffer
 * @nbytes: number of bytes received into @buffer
 * @transfer_id: transfer ID reflected from the sender
 * @flags: CE_RECV_FLAG_* values (e.g. CE_RECV_FLAG_SWAPPED)
 *
 * Implementation note: pops 1 completed recv buffer from Dest ring.
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
		   void *per_CE_recv_context,
		   void *per_transfer_recv_context,
		   qdf_dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags);
87 
/**
 * typedef CE_watermark_cb - Copy Engine Watermark callback type
 * @copyeng: copy engine whose watermark level was crossed
 * @per_CE_wm_context: context supplied by the calling layer via
 *                     ce_watermark_cb_register()
 * @flags: CE_WM_FLAG_* values indicating which condition triggered
 *         this callback
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   - space is available and/or running short in a source ring
 *   - buffers are exhausted and/or abundant in a destination ring
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);
104 
105 
106 #define CE_WM_FLAG_SEND_HIGH   1
107 #define CE_WM_FLAG_SEND_LOW    2
108 #define CE_WM_FLAG_RECV_HIGH   4
109 #define CE_WM_FLAG_RECV_LOW    8
110 #define CE_HTT_TX_CE           4
111 
112 
/**
 * ce_service_srng_init() - Initialization routine for CE services
 *                          in SRNG based targets
 *
 * Return: None
 */
void ce_service_srng_init(void);

/**
 * ce_service_legacy_init() - Initialization routine for CE services
 *                            in legacy targets
 *
 * Return: None
 */
void ce_service_legacy_init(void);
126 
127 /* A list of buffers to be gathered and sent */
128 struct ce_sendlist;
129 
130 /* Copy Engine settable attributes */
131 struct CE_attr;
132 
133 /*==================Send=====================================================*/
134 
135 /* ce_send flags */
136 /* disable ring's byte swap, even if the default policy is to swap */
137 #define CE_SEND_FLAG_SWAP_DISABLE        1
138 
/**
 * ce_send() - Queue a source buffer to be sent to an anonymous
 *             destination buffer
 * @copyeng: which copy engine to use
 * @per_transfer_send_context: context returned to the registered
 *                             send-completion callback for this
 *                             transfer
 * @buffer: DMA address of the buffer to send
 * @nbytes: number of bytes to send
 * @transfer_id: arbitrary ID; reflected to destination
 * @flags: CE_SEND_FLAG_* values
 * @user_flags: per-transfer user flags (semantics defined by the CE
 *              service implementation — confirm there)
 *
 * Note: if no flags are specified, the CE's default data swap mode
 * is used.
 *
 * Implementation note: pushes 1 buffer to Source ring.
 *
 * Return: 0 on success; otherwise an error status
 */
int ce_send(struct CE_handle *copyeng,
		void *per_transfer_send_context,
		qdf_dma_addr_t buffer,
		unsigned int nbytes,
		unsigned int transfer_id,
		unsigned int flags,
		unsigned int user_flags);
159 
#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_send_fast() - fastpath send of a single network buffer
 * @copyeng: which copy engine to use
 * @msdu: network buffer to transmit
 * @transfer_id: arbitrary ID; reflected to destination
 * @download_len: number of bytes of @msdu to download to the target
 *                (assumption based on name — confirm in the fastpath
 *                implementation)
 *
 * Return: status code (NOTE(review): success/failure convention not
 *         visible here — confirm in ce_service)
 */
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
	unsigned int transfer_id, uint32_t download_len);

#endif

/*
 * NOTE(review): presumably processes @num_htt_cmpls completed HTT
 * transfers on the given TX copy engine — confirm in implementation.
 */
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);

/*
 * Batch-send a chain of network buffers on the given TX copy engine.
 * NOTE(review): return value presumably is the list of buffers that
 * could not be queued — confirm in implementation.
 */
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
		qdf_nbuf_t msdu,
		uint32_t transfer_id,
		uint32_t len,
		uint32_t sendhead);

/* Send a single network buffer of @len bytes on the given TX CE */
extern int ce_send_single(struct CE_handle *ce_tx_hdl,
		qdf_nbuf_t msdu,
		uint32_t transfer_id,
		uint32_t len);
/**
 * ce_send_cb_register() - Register a Send Callback function
 * @copyeng: copy engine to attach the callback to
 * @fn_ptr: callback of type ce_send_cb
 * @per_ce_send_context: opaque context passed back to @fn_ptr
 * @disable_interrupts: if set, completions are not interrupt driven;
 *                      the callback is invoked when the send status
 *                      is polled, shortly after the send completes
 *
 * The callback is called as soon as the contents of a Send have
 * reached the destination, unless @disable_interrupts is requested.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);
187 
/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/**
 * ce_sendlist_buf_add() - Append a simple buffer (address/length)
 *                         to a sendlist
 * @sendlist: sendlist being built
 * @buffer: DMA address of the buffer fragment
 * @nbytes: length of the fragment in bytes
 * @flags: CE_SEND_FLAG_* values; OR-ed with internal flags
 * @user_flags: per-fragment user flags
 *
 * Return: status; 0 presumably indicates success, consistent with the
 *         other ce_* APIs — confirm in implementation
 */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		qdf_dma_addr_t buffer,
		unsigned int nbytes,
		uint32_t flags,
		uint32_t user_flags);
204 
/**
 * ce_sendlist_send() - Queue a "sendlist" of buffers to be sent using
 *                      gather to a single anonymous destination buffer
 * @copyeng: which copy engine to use
 * @per_transfer_send_context: context returned to the registered
 *                             send-completion callback; see also
 *                             CE_SENDLIST_ITEM_CTXT
 * @sendlist: list of simple buffers to send using gather
 * @transfer_id: arbitrary ID; reflected to destination
 *
 * Implementation note: pushes multiple buffers with Gather to
 * Source ring.
 *
 * Return: 0 on success; otherwise an error status
 */
int ce_sendlist_send(struct CE_handle *copyeng,
		void *per_transfer_send_context,
		struct ce_sendlist *sendlist,
		unsigned int transfer_id);
219 
220 /*==================Recv=====================================================*/
221 
/**
 * ce_recv_buf_enqueue() - Make a buffer available to receive
 * @copyeng: which copy engine to use
 * @per_transfer_recv_context: context passed back to caller's recv_cb
 * @buffer: DMA address of buffer in CE space
 *
 * The buffer must be at least of a minimal size appropriate for this
 * copy engine (src_sz_max attribute).
 *
 * Implementation note: pushes a buffer to Dest ring.
 *
 * Return: 0 on success; otherwise an error status
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
			void *per_transfer_recv_context,
			qdf_dma_addr_t buffer);

/**
 * ce_recv_cb_register() - Register a Receive Callback function
 * @copyeng: copy engine to attach the callback to
 * @fn_ptr: callback of type CE_recv_cb, called as soon as data is
 *          received from the source
 * @per_CE_recv_context: opaque context passed back to @fn_ptr
 * @disable_interrupts: if set, receive completion is polled rather
 *                      than interrupt driven (assumed to mirror
 *                      ce_send_cb_register() — confirm)
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);
245 
246 /*==================CE Watermark=============================================*/
247 
/**
 * ce_watermark_cb_register() - Register a Watermark Callback function
 * @copyeng: copy engine to attach the callback to
 * @fn_ptr: callback of type CE_watermark_cb, called as soon as a
 *          watermark level is crossed
 * @per_CE_wm_context: opaque context passed back to @fn_ptr
 *
 * A Watermark Callback function is free to handle received data
 * "en masse"; but then some coordination is required with a
 * registered Receive Callback function.
 * [Suggestion: either handle Receives in a Receive Callback or
 * en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			  CE_watermark_cb fn_ptr,
			  void *per_CE_wm_context);

/**
 * ce_send_watermarks_set() - Set low/high watermarks for the
 *                            send/source side of a copy engine
 * @copyeng: copy engine to configure
 * @low_alert_nentries: low watermark in ring entries; 0 is never hit
 *                      (so the watermark function will never be
 *                      called for a Low Watermark condition)
 * @high_alert_nentries: high watermark in ring entries; a value equal
 *                       to nentries is never hit (so the watermark
 *                       function will never be called for a High
 *                       Watermark condition)
 *
 * Typically, the destination side CPU manages watermarks for the
 * receive side and the source side CPU manages watermarks for the
 * send side.
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/**
 * ce_recv_watermarks_set() - Set low/high watermarks for the
 *                            receive/destination side of a copy engine
 * @copyeng: copy engine to configure
 * @low_alert_nentries: low watermark in ring entries
 * @high_alert_nentries: high watermark in ring entries
 */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);
283 
/**
 * ce_send_entries_avail() - Number of entries that can be queued to
 *                           the source ring at this instant in time
 * @copyeng: copy engine to query
 *
 * Does not imply that destination-side buffers are available; merely
 * indicates descriptor space in the source ring. Mainly for use with
 * the CE Watermark callback.
 *
 * Return: number of free source-ring descriptor slots
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);

/**
 * ce_recv_entries_avail() - Number of entries that can be queued to
 *                           the destination ring at this instant
 * @copyeng: copy engine to query
 *
 * Does not imply that previously received buffers have been
 * processed; merely indicates descriptor space in the destination
 * ring. Mainly for use with the CE Watermark callback.
 *
 * Return: number of free destination-ring descriptor slots
 */
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/**
 * ce_send_entries_done() - Number of source-ring entries ready to be
 *                          processed by software
 * @copyeng: copy engine to query
 *
 * Return: number of descriptors that have been completed and can now
 *         be overwritten with new send descriptors
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);

/**
 * ce_recv_entries_done() - Number of destination-ring entries ready
 *                          to be processed by software
 * @copyeng: copy engine to query
 *
 * Return: number of descriptors available to be processed (newly
 *         received buffers)
 */
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);
314 
/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED            1

/**
 * ce_completed_recv_next() - Supply data for the next completed
 *                            unprocessed receive descriptor
 * @copyeng: copy engine to pop from
 * @per_CE_contextp: [out] per-CE recv context
 * @per_transfer_contextp: [out] context supplied when the buffer was
 *                         enqueued
 * @bufferp: [out] DMA address of the received buffer
 * @nbytesp: [out] number of bytes received
 * @transfer_idp: [out] transfer ID reflected from the sender
 * @flagsp: [out] CE_RECV_FLAG_* values
 *
 * For use
 *    with CE Watermark callback,
 *    in a recv_cb function when processing buf_lists
 *    in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: pops buffer from Dest ring.
 *
 * Return: 0 when a completed descriptor was supplied; otherwise an
 *         error status (exact convention — confirm in ce_service)
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *flagsp);

/**
 * ce_completed_send_next() - Supply data for the next completed
 *                            unprocessed send descriptor
 * @copyeng: copy engine to pop from
 * @per_CE_contextp: [out] per-CE send context
 * @per_transfer_contextp: [out] context supplied via the "send" call
 * @bufferp: [out] DMA address of the sent buffer
 * @nbytesp: [out] number of bytes sent
 * @transfer_idp: [out] ID used when the buffer was sent
 * @sw_idx: [out] source ring software index (semantics assumed — see
 *          ce_send_cb)
 * @hw_idx: [out] source ring hardware index (semantics assumed)
 * @toeplitz_hash_result: [out] Toeplitz hash for the transfer
 *
 * For use
 *    with CE Watermark callback
 *    in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: pops 1 completed send buffer from Source ring.
 *
 * Return: 0 when a completed descriptor was supplied; otherwise an
 *         error status (exact convention — confirm in ce_service)
 */
int ce_completed_send_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *sw_idx,
			   unsigned int *hw_idx,
			   uint32_t *toeplitz_hash_result);
355 
/*==================CE Engine Initialization=================================*/

/**
 * ce_init() - Initialize an instance of a CE
 * @scn: HIF context
 * @CE_id: which copy engine to initialize
 * @attr: settable attributes for this CE (see struct CE_attr)
 *
 * Return: opaque handle used with all other ce_* calls
 *         (NOTE(review): NULL-on-failure assumed — confirm in
 *         ce_main.c)
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);
361 
/*==================CE Engine Shutdown=======================================*/
/**
 * ce_revoke_recv_next() - Revoke the next queued receive buffer
 * @copyeng: copy engine being shut down
 * @per_CE_contextp: [out] per-CE recv context
 * @per_transfer_contextp: [out] context supplied when the buffer was
 *                         enqueued
 * @bufferp: [out] DMA address of the revoked buffer
 *
 * Supports clean shutdown by allowing the caller to revoke receive
 * buffers. Target DMA must be stopped before using this API.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/**
 * ce_cancel_send_next() - Cancel the next pending send
 * @copyeng: copy engine being shut down
 * @per_CE_contextp: [out] per-CE send context
 * @per_transfer_contextp: [out] context supplied via the "send" call
 * @bufferp: [out] DMA address of the cancelled buffer
 * @nbytesp: [out] number of bytes that were to be sent
 * @transfer_idp: [out] ID used when the buffer was queued
 * @toeplitz_hash_result: [out] Toeplitz hash for the transfer
 *
 * Supports clean shutdown by allowing the caller to cancel pending
 * sends. Target DMA must be stopped before using this API.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);

/* Tear down a copy engine previously created with ce_init() */
void ce_fini(struct CE_handle *copyeng);
389 
/*==================CE Interrupt Handlers====================================*/
/* Service any copy engine with pending work; @irq is the triggering
 * IRQ line
 */
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
/* Service one copy engine (NOTE(review): return-value meaning not
 * visible here — confirm in ce_service)
 */
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
/* Reap completed descriptors for one copy engine (assumed from name —
 * confirm in ce_service)
 */
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
/* Disable/enable copy-completion interrupts on all copy engines.
 * _nolock: caller is presumably responsible for serialization —
 * confirm against callers.
 */
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);
398 
/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/**
 * war_ce_src_ring_write_idx_set() - Set write index for CE source ring
 * @scn: HIF context
 * @ctrl_addr: CE control register address
 * @write_index: new source ring write index value
 *
 * NOTE(review): "war" presumably = workaround wrapper around the
 * plain write-index update — confirm the specific hardware issue in
 * the implementation.
 *
 * Return: None
 */
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index);
411 
412 /* CE_attr.flags values */
413 #define CE_ATTR_NO_SNOOP             0x01 /* Use NonSnooping PCIe accesses? */
414 #define CE_ATTR_BYTE_SWAP_DATA       0x02 /* Byte swap data words */
415 #define CE_ATTR_SWIZZLE_DESCRIPTORS  0x04 /* Swizzle descriptors? */
416 #define CE_ATTR_DISABLE_INTR         0x08 /* no interrupt on copy completion */
417 #define CE_ATTR_ENABLE_POLL          0x10 /* poll for residue descriptors */
418 #define CE_ATTR_DIAG                 0x20 /* Diag CE */
419 
420 /**
421  * struct CE_attr - Attributes of an instance of a Copy Engine
422  * @flags:         CE_ATTR_* values
423  * @priority:      TBD
424  * @src_nentries:  #entries in source ring - Must be a power of 2
425  * @src_sz_max:    Max source send size for this CE. This is also the minimum
426  *                 size of a destination buffer
427  * @dest_nentries: #entries in destination ring - Must be a power of 2
428  * @reserved:      Future Use
429  */
430 struct CE_attr {
431 	unsigned int flags;
432 	unsigned int priority;
433 	unsigned int src_nentries;
434 	unsigned int src_sz_max;
435 	unsigned int dest_nentries;
436 	void *reserved;
437 };
438 
439 /*
440  * When using sendlist_send to transfer multiple buffer fragments, the
441  * transfer context of each fragment, except last one, will be filled
442  * with CE_SENDLIST_ITEM_CTXT. CE_completed_send will return success for
443  * each fragment done with send and the transfer context would be
444  * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
445  * status of a send completion.
446  */
447 #define CE_SENDLIST_ITEM_CTXT   ((void *)0xcecebeef)
448 
/*
 * An opaque type that is at least large enough to hold a sendlist.
 * A sendlist can only be accessed through CE APIs, but this allows
 * a sendlist to be allocated on the run-time stack.
 * TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];	/* opaque storage; size must stay in
				 * sync with ce_sendlist_sizeof()
				 */
};
458 
459 #define ATH_ISR_NOSCHED  0x0000  /* Do not schedule bottom half/DPC */
460 #define ATH_ISR_SCHED    0x0001  /* Schedule the bottom half for execution */
461 #define ATH_ISR_NOTMINE  0x0002  /* for shared IRQ's */
462 
#ifdef IPA_OFFLOAD
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resource to micro controller
 * Micro controller needs
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access copy engine register
 *
 * Return: None
 */
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - no-op stub when IPA offload is disabled
 * @ce: copyengine context (unused)
 * @ce_sr: copyengine source ring resource info (unused)
 * @ce_sr_ring_size: copyengine source ring size (unused)
 * @ce_reg_paddr: copyengine register physical address (unused)
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */
491 
492 static inline void ce_pkt_error_count_incr(
493 	struct HIF_CE_state *_hif_state,
494 	enum ol_ath_hif_pkt_ecodes _hif_ecode)
495 {
496 	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);
497 
498 	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
499 		(scn->pkt_stats.hif_pipe_no_resrc_count)
500 		+= 1;
501 }
502 
/* Check whether the given CE has unprocessed receive entries pending */
bool ce_check_rx_pending(struct CE_state *CE_state);
/* Fetch the LRO context associated with pipe @ctx_id */
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
/* CE service vtables (struct ce_ops) for SRNG-based and legacy targets */
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
/* True if this target uses SRNG-based copy engines */
bool ce_srng_based(struct hif_softc *scn);
/* Forward declaration */
struct CE_ring_state;

/**
 * struct ce_ops - Copy engine service dispatch table
 * @ce_get_desc_size: size in bytes of one descriptor for @ring_type
 * @ce_ring_setup: set up one CE ring of @ring_type for CE @ce_id
 * @ce_send_nolock: queue a single send buffer
 * @ce_sendlist_send: queue a gather list as one transfer
 * @ce_revoke_recv_next: reclaim the next queued recv buffer
 *                       (shutdown path; see ce_revoke_recv_next())
 * @ce_cancel_send_next: reclaim the next pending send
 *                       (shutdown path; see ce_cancel_send_next())
 * @ce_recv_buf_enqueue: post a receive buffer to the destination ring
 * @watermark_int: report whether a watermark condition is present and
 *                 fill @flags with CE_WM_FLAG_* values
 * @ce_completed_recv_next_nolock: pop next completed recv descriptor
 * @ce_completed_send_next_nolock: pop next completed send descriptor
 * @ce_recv_entries_done_nolock: count of recv descriptors ready for SW
 * @ce_send_entries_done_nolock: count of completed send descriptors
 * @ce_per_engine_handler_adjust: enable/disable per-engine copy
 *                                completion interrupt handling
 * @ce_prepare_shadow_register_v2_cfg: build the PLD shadow register
 *                                     v2 configuration
 *
 * Allows the HIF layer to drive either legacy CE hardware or SRNG
 * based targets through one interface; instances are obtained via
 * ce_services_legacy()/ce_services_srng().
 *
 * NOTE(review): the _nolock suffix presumably means the caller holds
 * the relevant CE lock — confirm against the implementations.
 */
struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
		uint32_t ce_id, struct CE_ring_state *ring,
		struct CE_attr *attr);
	int (*ce_send_nolock)(struct CE_handle *copyeng,
			   void *per_transfer_context,
			   qdf_dma_addr_t buffer,
			   uint32_t nbytes,
			   uint32_t transfer_id,
			   uint32_t flags,
			   uint32_t user_flags);
	int (*ce_sendlist_send)(struct CE_handle *copyeng,
			void *per_transfer_context,
			struct ce_sendlist *sendlist, unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
			void **per_CE_contextp, void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp, unsigned int *nbytesp,
			unsigned int *transfer_idp,
			uint32_t *toeplitz_hash_result);
	int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
			void *per_recv_context, qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *flagsp);
	int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *sw_idx,
			unsigned int *hw_idx,
			uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
			struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
			    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
			     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			    struct pld_shadow_reg_v2_cfg **shadow_config,
			    int *num_shadow_registers_configured);
};
564 
/* Bus power-management hooks for CE (NOTE(review): return convention
 * presumably 0 on success — confirm in hif ce main)
 */
int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);
567 
/**
 * ce_engine_service_reg() - Regular interrupt servicing for one CE
 * @scn: hif_context
 * @CE_id: Copy engine ID
 *
 * Called from ce_per_engine_service and goes through the regular
 * interrupt handling that does not involve the WLAN fast path
 * feature.
 *
 * Return: void
 */
void ce_engine_service_reg(struct hif_softc *scn, int CE_id);
579 
580 /**
581  * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
582  * @scn: hif_context
583  * @ce_id: Copy engine ID
584  *
585  * Return: void
586  */
587 void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id);
588 
589 #endif /* __COPY_ENGINE_API_H__ */
590