/*
 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may be different for each invocation
 * of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (if more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);
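
/*
 * Example: a minimal send-completion handler matching the ce_send_cb
 * signature above. This is an illustrative sketch only; my_tx_done()
 * and my_free_tx_buffer() are hypothetical caller-side helpers.
 *
 *   static void my_tx_done(struct CE_handle *copyeng,
 *                          void *per_ce_send_context,
 *                          void *per_transfer_send_context,
 *                          qdf_dma_addr_t buffer,
 *                          unsigned int nbytes,
 *                          unsigned int transfer_id,
 *                          unsigned int sw_index,
 *                          unsigned int hw_index,
 *                          uint32_t toeplitz_hash_result)
 *   {
 *           my_free_tx_buffer(per_transfer_send_context, buffer, nbytes);
 *   }
 *
 * Such a handler is registered with ce_send_cb_register(), declared
 * further below.
 */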

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
		   void *per_CE_recv_context,
		   void *per_transfer_recv_context,
		   qdf_dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   - space is available and/or running short in a source ring
 *   - buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback.  See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to batch
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH   1
#define CE_WM_FLAG_SEND_LOW    2
#define CE_WM_FLAG_RECV_HIGH   4
#define CE_WM_FLAG_RECV_LOW    8
#define CE_HTT_TX_CE           4

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE        1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   copyeng         - which copy engine to use
 *   buffer          - address of buffer
 *   nbytes          - number of bytes to send
 *   transfer_id     - arbitrary ID; reflected to destination
 *   flags           - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
		void *per_transfer_send_context,
		qdf_dma_addr_t buffer,
		unsigned int nbytes,
		unsigned int transfer_id,
		unsigned int flags,
		unsigned int user_flags);
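
/*
 * Example: queueing a single pre-mapped buffer. A minimal sketch,
 * assuming the caller already holds a DMA-mapped buffer address;
 * my_queue_tx() and its arguments are hypothetical.
 *
 *   static int my_queue_tx(struct CE_handle *copyeng, void *xfer_ctx,
 *                          qdf_dma_addr_t paddr, unsigned int nbytes,
 *                          unsigned int transfer_id)
 *   {
 *           return ce_send(copyeng, xfer_ctx, paddr, nbytes,
 *                          transfer_id, 0, 0);
 *   }
 *
 * Passing 0 for flags selects the CE's default data swap mode, as
 * noted above; a non-zero return indicates the send was not queued
 * (e.g. no space in the source ring).
 */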

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
	unsigned int transfer_id, uint32_t download_len);

#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
		qdf_nbuf_t msdu,
		uint32_t transfer_id,
		uint32_t len,
		uint32_t sendhead);

extern int ce_send_single(struct CE_handle *ce_tx_hdl,
		qdf_nbuf_t msdu,
		uint32_t transfer_id,
		uint32_t len);
/*
 * Register a Send Callback function.
 * This function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested.  In that case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		qdf_dma_addr_t buffer,
		unsigned int nbytes,
		uint32_t flags, /* OR-ed with internal flags */
		uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer.
 *   copyeng         - which copy engine to use
 *   sendlist        - list of simple buffers to send using gather
 *   transfer_id     - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
		void *per_transfer_send_context,
		struct ce_sendlist *sendlist,
		unsigned int transfer_id);
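
/*
 * Example: gather-sending a two-fragment frame via a stack-allocated
 * sendlist. A hedged sketch; hdr_paddr and payload_paddr are assumed
 * to be DMA addresses already obtained by the caller, and the helper
 * name is hypothetical.
 *
 *   static int my_send_two_frags(struct CE_handle *copyeng, void *ctx,
 *                                qdf_dma_addr_t hdr_paddr,
 *                                unsigned int hdr_len,
 *                                qdf_dma_addr_t payload_paddr,
 *                                unsigned int payload_len,
 *                                unsigned int transfer_id)
 *   {
 *           struct ce_sendlist sl;
 *
 *           ce_sendlist_init(&sl);
 *           if (ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0) ||
 *               ce_sendlist_buf_add(&sl, payload_paddr, payload_len,
 *                                   0, 0))
 *                   return -1;
 *           return ce_sendlist_send(copyeng, ctx, &sl, transfer_id);
 *   }
 */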

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 *   copyeng                    - which copy engine to use
 *   per_transfer_recv_context  - context passed back to caller's recv_cb
 *   buffer                     - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
			void *per_transfer_recv_context,
			qdf_dma_addr_t buffer);
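
/*
 * Example: replenishing the destination ring with receive buffers.
 * A minimal sketch; the rx_paddr[] and rx_ctx[] arrays are
 * hypothetical caller-side bookkeeping, and ce_recv_entries_avail()
 * is declared further below.
 *
 *   static void my_replenish_rx(struct CE_handle *copyeng,
 *                               qdf_dma_addr_t *rx_paddr, void **rx_ctx)
 *   {
 *           unsigned int i, avail = ce_recv_entries_avail(copyeng);
 *
 *           for (i = 0; i < avail; i++)
 *                   if (ce_recv_buf_enqueue(copyeng, rx_ctx[i],
 *                                           rx_paddr[i]))
 *                           break;
 *   }
 */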

/*
 * Register a Receive Callback function.
 * This function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * This function is called as soon as a watermark level
 * is crossed.  A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			  CE_watermark_cb fn_ptr,
			  void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of a copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);
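
/*
 * Example: registering a watermark callback and setting thresholds.
 * A hedged sketch; the threshold values shown are arbitrary, and
 * my_wm_cb()/my_resume_tx()/my_replenish_rx() are hypothetical. attr
 * refers to the CE_attr used at ce_init() time.
 *
 *   static void my_wm_cb(struct CE_handle *copyeng, void *wm_ctx,
 *                        unsigned int flags)
 *   {
 *           if (flags & CE_WM_FLAG_SEND_LOW)
 *                   my_resume_tx(wm_ctx);
 *           if (flags & CE_WM_FLAG_RECV_LOW)
 *                   my_replenish_rx(copyeng, wm_ctx);
 *   }
 *
 *   static void my_enable_watermarks(struct CE_handle *copyeng,
 *                                    struct CE_attr *attr, void *wm_ctx)
 *   {
 *           ce_watermark_cb_register(copyeng, my_wm_cb, wm_ctx);
 *           ce_send_watermarks_set(copyeng, 8,
 *                                  attr->src_nentries - 8);
 *           ce_recv_watermarks_set(copyeng, 4,
 *                                  attr->dest_nentries - 4);
 *   }
 *
 * Per the notes above, a low watermark of 0 or a high watermark equal
 * to nentries would never trigger the callback.
 */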

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For a source ring, this does not imply that destination-side
 * buffers are available; it merely indicates descriptor space
 * in the source ring.
 *
 * For a destination ring, this does not imply that previously
 * received buffers have been processed; it merely indicates
 * descriptor space in the destination ring.
 *
 * Mainly for use with the CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/*
 * Return the number of entries in the ring that are ready
 * to be processed by software.
 *
 * For a source ring, this is the number of descriptors that have
 * been completed and can now be overwritten with new send
 * descriptors.
 *
 * For a destination ring, this is the number of descriptors that
 * are available to be processed (newly received buffers).
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED            1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *    with the CE Watermark callback,
 *    in a recv_cb function when processing buf_lists,
 *    in a recv_cb function in order to mitigate recv_cb invocations.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *flagsp);
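
/*
 * Example: draining completed receives "en masse", e.g. from a
 * watermark callback. A minimal sketch assuming, as with the other
 * calls in this file, that a return of 0 means success;
 * my_process_rx() is hypothetical. The returned flags should be
 * checked against CE_RECV_FLAG_SWAPPED before interpreting the data.
 *
 *   static void my_drain_rx(struct CE_handle *copyeng)
 *   {
 *           void *ce_ctx, *xfer_ctx;
 *           qdf_dma_addr_t paddr;
 *           unsigned int nbytes, id, flags;
 *
 *           while (ce_completed_recv_next(copyeng, &ce_ctx, &xfer_ctx,
 *                                         &paddr, &nbytes, &id,
 *                                         &flags) == 0)
 *                   my_process_rx(xfer_ctx, paddr, nbytes, flags);
 *   }
 */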

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *    with the CE Watermark callback,
 *    in a send_cb function in order to mitigate send_cb invocations.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *sw_idx,
			   unsigned int *hw_idx,
			   uint32_t *toeplitz_hash_result);

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);
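
/*
 * Example: a clean-shutdown sequence using the two calls above. A
 * hedged sketch assuming Target DMA has already been stopped and that
 * both calls keep returning QDF_STATUS_SUCCESS while entries remain;
 * my_free_rx_buffer() and my_free_tx_buffer() are hypothetical.
 * ce_fini() is declared just below.
 *
 *   static void my_ce_shutdown(struct CE_handle *copyeng)
 *   {
 *           void *ce_ctx, *xfer_ctx;
 *           qdf_dma_addr_t paddr;
 *           unsigned int nbytes, id;
 *           uint32_t hash;
 *
 *           while (ce_revoke_recv_next(copyeng, &ce_ctx, &xfer_ctx,
 *                                      &paddr) == QDF_STATUS_SUCCESS)
 *                   my_free_rx_buffer(xfer_ctx, paddr);
 *
 *           while (ce_cancel_send_next(copyeng, &ce_ctx, &xfer_ctx,
 *                                      &paddr, &nbytes, &id,
 *                                      &hash) == QDF_STATUS_SUCCESS)
 *                   my_free_tx_buffer(xfer_ctx, paddr, nbytes);
 *
 *           ce_fini(copyeng);
 *   }
 */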

void ce_fini(struct CE_handle *copyeng);

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP             0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA       0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS  0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR         0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL          0x10 /* poll for residue descriptors */
#define CE_ATTR_DIAG                 0x20 /* Diag CE */

/* Attributes of an instance of a Copy Engine */
struct CE_attr {
	unsigned int flags;         /* CE_ATTR_* values */
	unsigned int priority;      /* TBD */
	unsigned int src_nentries;  /* #entries in source ring -
				     * Must be a power of 2 */
	unsigned int src_sz_max;    /* Max source send size for this CE.
				     * This is also the minimum size of
				     * a destination buffer. */
	unsigned int dest_nentries; /* #entries in destination ring -
				     * Must be a power of 2 */
	void *reserved;             /* Future use */
};
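
/*
 * Example: filling in CE_attr and creating an engine with ce_init()
 * (declared above). A hedged sketch; the ring sizes and maximum send
 * size are arbitrary illustrative values, not recommendations. Note
 * that src_nentries and dest_nentries must be powers of 2, per the
 * structure comments.
 *
 *   static struct CE_handle *my_ce_create(struct hif_softc *scn,
 *                                         unsigned int CE_id)
 *   {
 *           struct CE_attr attr = {
 *                   .flags         = 0,
 *                   .src_nentries  = 512,
 *                   .src_sz_max    = 2048,
 *                   .dest_nentries = 512,
 *           };
 *
 *           return ce_init(scn, CE_id, &attr);
 *   }
 */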

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, is filled
 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send_next reports success
 * for each fragment as it completes, returning CE_SENDLIST_ITEM_CTXT
 * as the transfer context for all but the last. Upper layers can use
 * this to distinguish intermediate fragments from the final completion
 * of a send.
 */
#define CE_SENDLIST_ITEM_CTXT   ((void *)0xcecebeef)
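
/*
 * Example: distinguishing sendlist fragments in a completion path,
 * where xfer_ctx is the transfer context returned by
 * ce_completed_send_next(). A minimal fragment of a sketch;
 * my_complete_transfer() is hypothetical.
 *
 *   if (xfer_ctx != CE_SENDLIST_ITEM_CTXT)
 *           my_complete_transfer(xfer_ctx);
 *
 * Intermediate fragments (xfer_ctx == CE_SENDLIST_ITEM_CTXT) carry no
 * caller context and need no per-transfer completion work.
 */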

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack.  TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED  0x0000  /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED    0x0001  /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE  0x0002  /* For shared IRQs */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_dma_addr_t *ce_sr_base_paddr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * The copy engine releases these resources to the microcontroller,
 * which needs:
 *  - the copy engine source descriptor base address
 *  - the copy engine source descriptor size
 *  - the PCI BAR address used to access copy engine registers
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_dma_addr_t *ce_sr_base_paddr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
#if defined(FEATURE_LRO)
int ce_lro_flush_cb_register(struct hif_opaque_softc *scn,
			     void (handler)(void *),
			     void *(lro_init_handler)(void));
int ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
			       void (lro_deinit_cb)(void *));
#endif
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);
/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	void (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
		uint32_t ce_id, struct CE_ring_state *ring,
		struct CE_attr *attr);
	int (*ce_send_nolock)(struct CE_handle *copyeng,
			   void *per_transfer_context,
			   qdf_dma_addr_t buffer,
			   uint32_t nbytes,
			   uint32_t transfer_id,
			   uint32_t flags,
			   uint32_t user_flags);
	int (*ce_sendlist_send)(struct CE_handle *copyeng,
			void *per_transfer_context,
			struct ce_sendlist *sendlist, unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
			void **per_CE_contextp, void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp, unsigned int *nbytesp,
			unsigned int *transfer_idp,
			uint32_t *toeplitz_hash_result);
	int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
			void *per_recv_context, qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *flagsp);
	int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *sw_idx,
			unsigned int *hw_idx,
			uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
			struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
			    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
			     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			    struct pld_shadow_reg_v2_cfg **shadow_config,
			    int *num_shadow_registers_configured);
};

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);
#endif /* __COPY_ENGINE_API_H__ */