/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call).  It may differ for each invocation of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (when more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);
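
/*
 * Example: a minimal send-completion callback matching the ce_send_cb
 * signature above. This is an illustrative sketch, not code from this
 * driver; "my_tx_done" and the use of per_transfer_send_context as a
 * network buffer are assumptions of the example.
 *
 *   static void my_tx_done(struct CE_handle *copyeng,
 *                          void *per_ce_send_context,
 *                          void *per_transfer_send_context,
 *                          qdf_dma_addr_t buffer,
 *                          unsigned int nbytes,
 *                          unsigned int transfer_id,
 *                          unsigned int sw_index,
 *                          unsigned int hw_index,
 *                          uint32_t toeplitz_hash_result)
 *   {
 *           // The transfer context is whatever was passed to ce_send();
 *           // here it is assumed to be the network buffer to free.
 *           qdf_nbuf_t nbuf = (qdf_nbuf_t)per_transfer_send_context;
 *
 *           qdf_nbuf_free(nbuf);
 *   }
 */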

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
		   void *per_CE_recv_context,
		   void *per_transfer_recv_context,
		   qdf_dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   space is available and/or running short in a source ring,
 *   buffers are exhausted and/or abundant in a destination ring.
 *
 * The flags parameter indicates which condition triggered this
 * callback.  See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to batch
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH   1
#define CE_WM_FLAG_SEND_LOW    2
#define CE_WM_FLAG_RECV_HIGH   4
#define CE_WM_FLAG_RECV_LOW    8
#define CE_HTT_TX_CE           4

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE        1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   copyeng         - which copy engine to use
 *   buffer          - address of buffer
 *   nbytes          - number of bytes to send
 *   transfer_id     - arbitrary ID; reflected to destination
 *   flags           - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
		void *per_transfer_send_context,
		qdf_dma_addr_t buffer,
		unsigned int nbytes,
		unsigned int transfer_id,
		unsigned int flags,
		unsigned int user_flags);
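
/*
 * Example: queueing one mapped buffer on a copy engine. This is an
 * illustrative sketch, not code from this driver; "ce_hdl", "nbuf",
 * "paddr" and the transfer_id value are assumptions of the example.
 *
 *   // nbuf has already been DMA-mapped; paddr is its device address.
 *   int status = ce_send(ce_hdl,
 *                        nbuf,               // per_transfer_send_context
 *                        paddr,              // qdf_dma_addr_t
 *                        qdf_nbuf_len(nbuf), // nbytes
 *                        42,                 // transfer_id (arbitrary)
 *                        0,                  // flags: default swap mode
 *                        0);                 // user_flags
 *   if (status != 0) {
 *           // Source ring full or CE error; the caller keeps ownership
 *           // of nbuf and may retry later.
 *   }
 */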

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
	unsigned int transfer_id, uint32_t download_len);

#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
		qdf_nbuf_t msdu,
		uint32_t transfer_id,
		uint32_t len,
		uint32_t sendhead);

extern int ce_send_single(struct CE_handle *ce_tx_hdl,
		qdf_nbuf_t msdu,
		uint32_t transfer_id,
		uint32_t len);
/*
 * Register a Send Callback function.
 * The registered callback is invoked as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested.  In that case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		qdf_dma_addr_t buffer,
		unsigned int nbytes,
		/* OR-ed with internal flags */
		uint32_t flags,
		uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer.
 *   copyeng         - which copy engine to use
 *   sendlist        - list of simple buffers to send using gather
 *   transfer_id     - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
		void *per_transfer_send_context,
		struct ce_sendlist *sendlist,
		unsigned int transfer_id);
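
/*
 * Example: gathering two fragments into one transfer. This is an
 * illustrative sketch, not code from this driver; the handle, buffer
 * addresses and lengths are assumptions of the example.
 *
 *   struct ce_sendlist sl;
 *
 *   ce_sendlist_init(&sl);
 *   ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *   ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *   if (ce_sendlist_send(ce_hdl, tx_ctx, &sl, 42) != 0) {
 *           // Error path: the gather could not be queued; the caller
 *           // retains ownership of the buffers and may retry.
 *   }
 */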

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 *   copyeng                    - which copy engine to use
 *   per_transfer_recv_context  - context passed back to caller's recv_cb
 *   buffer                     - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
			void *per_transfer_recv_context,
			qdf_dma_addr_t buffer);

/*
 * Register a Receive Callback function.
 * The registered callback is invoked as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);
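
/*
 * Example: setting up the receive path on one copy engine. This is an
 * illustrative sketch, not code from this driver; "my_rx_done",
 * "pipe_ctx" and the buffer-posting loop variables are assumptions.
 *
 *   ce_recv_cb_register(ce_hdl, my_rx_done, pipe_ctx, 0);
 *
 *   // Pre-post receive buffers; each must be at least src_sz_max bytes.
 *   for (i = 0; i < num_rx_bufs; i++) {
 *           qdf_nbuf_t nbuf = rx_bufs[i];        // already DMA-mapped
 *           qdf_dma_addr_t paddr = rx_paddrs[i];
 *
 *           if (ce_recv_buf_enqueue(ce_hdl, nbuf, paddr) != 0)
 *                   break;                       // Dest ring is full
 *   }
 */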

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * The registered callback is invoked as soon as a watermark level
 * is crossed.  A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			  CE_watermark_cb fn_ptr,
			  void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of a copy engine */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);
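
/*
 * Example: wiring up watermark-based flow control. This is an
 * illustrative sketch, not code from this driver; "my_wm_cb",
 * "pipe_ctx" and the thresholds (for an assumed 512-entry source
 * ring) are assumptions of the example.
 *
 *   static void my_wm_cb(struct CE_handle *copyeng,
 *                        void *per_CE_wm_context, unsigned int flags)
 *   {
 *           if (flags & CE_WM_FLAG_SEND_LOW)
 *                   ;  // low watermark crossed on the source ring;
 *                      // the upper layer may throttle here
 *           if (flags & CE_WM_FLAG_SEND_HIGH)
 *                   ;  // high watermark crossed; may unthrottle
 *   }
 *
 *   ce_watermark_cb_register(ce_hdl, my_wm_cb, pipe_ctx);
 *   ce_send_watermarks_set(ce_hdl, 2, 510);  // 0 and nentries disable
 */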

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For source ring, does not imply that destination-side
 * buffers are available; merely indicates descriptor space
 * in the source ring.
 *
 * For destination ring, does not imply that previously
 * received buffers have been processed; merely indicates
 * descriptor space in destination ring.
 *
 * Mainly for use with CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/*
 * Return the number of entries in the ring that are ready
 * to be processed by software.
 *
 * For source ring, the number of descriptors that have
 * been completed and can now be overwritten with new send
 * descriptors.
 *
 * For destination ring, the number of descriptors that
 * are available to be processed (newly received buffers).
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED            1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *    with a CE Watermark callback,
 *    in a recv_cb function when processing buf_lists,
 *    in a recv_cb function in order to mitigate recv_cb invocations.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *flagsp);
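
/*
 * Example: draining all completed receives from a watermark callback.
 * This is an illustrative sketch, not code from this driver; the
 * processing step is an assumption of the example, as is the
 * zero-return-on-success convention (per "Returns 0 on success" above).
 *
 *   void *ce_ctx, *xfer_ctx;
 *   qdf_dma_addr_t paddr;
 *   unsigned int nbytes, xfer_id, flags;
 *
 *   while (ce_completed_recv_next(ce_hdl, &ce_ctx, &xfer_ctx,
 *                                 &paddr, &nbytes, &xfer_id,
 *                                 &flags) == 0) {
 *           // xfer_ctx is the per_transfer_recv_context passed to
 *           // ce_recv_buf_enqueue(); hand the buffer up, then re-post
 *           // a fresh buffer to keep the Dest ring populated.
 *   }
 */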

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *    with a CE Watermark callback,
 *    in a send_cb function in order to mitigate send_cb invocations.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *sw_idx,
			   unsigned int *hw_idx,
			   uint32_t *toeplitz_hash_result);
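
/*
 * Example: reaping completed sends in a batch rather than one per
 * callback. This is an illustrative sketch, not code from this driver;
 * the zero-return-on-success assumption follows the conventions above.
 *
 *   void *ce_ctx, *xfer_ctx;
 *   qdf_dma_addr_t paddr;
 *   unsigned int nbytes, xfer_id, sw_idx, hw_idx;
 *   uint32_t hash;
 *
 *   while (ce_completed_send_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *                                 &nbytes, &xfer_id, &sw_idx, &hw_idx,
 *                                 &hash) == 0) {
 *           // xfer_ctx is the per_transfer_send_context from ce_send();
 *           // unmap and free the associated buffer here.
 *   }
 */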

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);
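
/*
 * Example: reclaiming buffers during shutdown, after Target DMA has
 * been stopped. This is an illustrative sketch, not code from this
 * driver; the unmap/free steps are assumptions of the example.
 *
 *   void *ce_ctx, *xfer_ctx;
 *   qdf_dma_addr_t paddr;
 *   unsigned int nbytes, xfer_id;
 *   uint32_t hash;
 *
 *   // Take back every receive buffer still posted to the Dest ring.
 *   while (ce_revoke_recv_next(ce_hdl, &ce_ctx, &xfer_ctx,
 *                              &paddr) == QDF_STATUS_SUCCESS)
 *           ;  // unmap and free the buffer held in xfer_ctx
 *
 *   // Take back every send still pending on the Source ring.
 *   while (ce_cancel_send_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *                              &nbytes, &xfer_id,
 *                              &hash) == QDF_STATUS_SUCCESS)
 *           ;  // unmap and free the buffer held in xfer_ctx
 *
 *   ce_fini(ce_hdl);
 */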

void ce_fini(struct CE_handle *copyeng);

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP             0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA       0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS  0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR         0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL          0x10 /* poll for residue descriptors */
#define CE_ATTR_DIAG                 0x20 /* Diag CE */

/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags:         CE_ATTR_* values
 * @priority:      TBD
 * @src_nentries:  #entries in source ring - Must be a power of 2
 * @src_sz_max:    Max source send size for this CE. This is also the minimum
 *                 size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved:      Future Use
 */
struct CE_attr {
	unsigned int flags;
	unsigned int priority;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
	void *reserved;
};
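
/*
 * Example: describing and bringing up one copy engine. This is an
 * illustrative sketch, not code from this driver; the ring sizes,
 * buffer size, CE id and "scn" pointer are assumptions of the example.
 *
 *   struct CE_attr attr = {
 *           .flags         = 0,
 *           .src_nentries  = 512,   // must be a power of 2
 *           .src_sz_max    = 2048,  // also the minimum recv buffer size
 *           .dest_nentries = 512,   // must be a power of 2
 *   };
 *
 *   struct CE_handle *ce_hdl = ce_init(scn, 1, &attr);
 *   if (!ce_hdl)
 *           ;  // handle allocation/initialization failure
 */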

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT. CE_completed_send will return success for
 * each fragment done with send, and the transfer context will be
 * CE_SENDLIST_ITEM_CTXT. The upper layer can use this to identify the
 * status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT   ((void *)0xcecebeef)
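
/*
 * Example: distinguishing intermediate gather fragments from the final
 * one in a send-completion path. This is an illustrative sketch, not
 * code from this driver.
 *
 *   if (xfer_ctx == CE_SENDLIST_ITEM_CTXT) {
 *           // Intermediate fragment of a sendlist; nothing to free yet.
 *   } else {
 *           // Last fragment: xfer_ctx is the caller's own context
 *           // passed to ce_sendlist_send(); complete the transfer.
 *   }
 */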

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack.  TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED  0x0000  /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED    0x0001  /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE  0x0002  /* for shared IRQs */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * The copy engine should release its resources to the micro controller.
 * The micro controller needs
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);
/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
		uint32_t ce_id, struct CE_ring_state *ring,
		struct CE_attr *attr);
	int (*ce_send_nolock)(struct CE_handle *copyeng,
			   void *per_transfer_context,
			   qdf_dma_addr_t buffer,
			   uint32_t nbytes,
			   uint32_t transfer_id,
			   uint32_t flags,
			   uint32_t user_flags);
	int (*ce_sendlist_send)(struct CE_handle *copyeng,
			void *per_transfer_context,
			struct ce_sendlist *sendlist, unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
			void **per_CE_contextp, void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp, unsigned int *nbytesp,
			unsigned int *transfer_idp,
			uint32_t *toeplitz_hash_result);
	int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
			void *per_recv_context, qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *flagsp);
	int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
			void **per_CE_contextp,
			void **per_transfer_contextp,
			qdf_dma_addr_t *bufferp,
			unsigned int *nbytesp,
			unsigned int *transfer_idp,
			unsigned int *sw_idx,
			unsigned int *hw_idx,
			uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
			struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
			struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
			int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured);
};

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);
#endif /* __COPY_ENGINE_API_H__ */