/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_TX_H
#define __DP_TX_H

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include "dp_types.h"


#define DP_INVALID_VDEV_ID 0xFF

#define DP_TX_MAX_NUM_FRAGS 6

#define DP_TX_DESC_FLAG_SIMPLE		0x1
#define DP_TX_DESC_FLAG_TO_FW		0x2
#define DP_TX_DESC_FLAG_FRAG		0x4
#define DP_TX_DESC_FLAG_RAW		0x8
#define DP_TX_DESC_FLAG_MESH		0x10
#define DP_TX_DESC_FLAG_QUEUED_TX	0x20
#define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
#define DP_TX_DESC_FLAG_ME		0x80
#define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
#define DP_TX_DESC_FLAG_ALLOCATED	0x200
#define DP_TX_DESC_FLAG_MESH_MODE	0x400

#define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1

#define DP_TX_FREE_SINGLE_BUF(soc, buf)                        \
do {                                                           \
	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);    \
	qdf_nbuf_free(buf);                                    \
} while (0)
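
/*
 * Illustrative use of DP_TX_FREE_SINGLE_BUF (a sketch, not a verbatim
 * driver path): unmap and free a single nbuf when a hardware enqueue
 * fails. 'soc' and 'nbuf' are assumed to be valid here.
 *
 *	if (status != QDF_STATUS_SUCCESS)
 *		DP_TX_FREE_SINGLE_BUF(soc, nbuf);
 */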

#define OCB_HEADER_VERSION	 1

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
	#ifdef TX_PER_VDEV_DESC_POOL
		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
	#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */
#define DP_TX_QUEUE_MASK 0x3

/* number of dwords for htt_tx_msdu_desc_ext2_t */
#define DP_TX_MSDU_INFO_META_DATA_DWORDS 7

/**
 * struct dp_tx_frag_info_s - TX fragment information
 * @vaddr: HLOS virtual address for buffer
 * @paddr_lo: physical address lower 32 bits
 * @paddr_hi: physical address higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};

/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-fragment information
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};

/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to the current segment descriptor to be processed.
 *	      Chain of descriptors for SG frames/multicast-unicast
 *	      converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames
 * to carry fragmentation information.
 * Raw frames are handed over to the driver as an SKB chain with MPDU
 * boundaries indicated through flags in the SKB CB (first_msdu and
 * last_msdu). This is converted into a set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
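
/*
 * Illustrative sketch (not driver code): walking the segment chain of a
 * populated dp_tx_sg_info_s. Each segment carries up to
 * DP_TX_MAX_NUM_FRAGS fragments; 'sg_info' is assumed to have been
 * filled by the SG/ME prepare path, with per-segment fragment lengths
 * expected to sum to the segment's total_len.
 *
 *	struct dp_tx_seg_info_s *seg;
 *	int i;
 *
 *	for (seg = sg_info->curr_seg; seg; seg = seg->next) {
 *		uint16_t seg_len = 0;
 *
 *		for (i = 0; i < seg->frag_cnt; i++)
 *			seg_len += seg->frags[i].len;
 *		qdf_assert(seg_len == seg->total_len);
 *	}
 */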

/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in the case of Linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};

/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @exception_fw: Duplicate frame to be sent to firmware
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @search_type: Address search type to be set in the TX descriptor
 * @u.tso_info: TSO information for TSO frame types
 *	     (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @ast_idx: AST index used for address search in the TX descriptor
 * @ast_hash: AST hash value corresponding to @ast_idx
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 *
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	uint8_t exception_fw;
	uint8_t is_tx_sniffer;
	uint8_t search_type;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;
	uint16_t ast_idx;
	uint16_t ast_hash;
};
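
/*
 * Illustrative sketch (assumptions: 'vdev' and 'nbuf' are valid, and the
 * frame is a regular non-TSO/non-SG MSDU): minimal dp_tx_msdu_info_s
 * setup before calling dp_tx_send_msdu_single().
 *
 *	struct dp_tx_msdu_info_s msdu_info;
 *
 *	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
 *	msdu_info.frm_type = dp_tx_frm_std;
 *	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
 *	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 *	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
 *				      HTT_INVALID_PEER, NULL);
 */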

/**
 * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
 * @soc: core txrx context
 * @index: index of ring to deinit
 *
 * Deinit 1 TCL and 1 WBM2SW release ring on an as-needed basis using
 * the index of the respective TCL/WBM2SW release ring in the soc structure.
 * For example, if the index is 2 then &soc->tcl_data_ring[2]
 * and &soc->tx_comp_ring[2] will be deinitialized.
 *
 * Return: none
 */
void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);

QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint16_t num_desc);
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint16_t num_desc);

void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);

/**
 * dp_tso_soc_attach() - TSO Attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 * QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);

/**
 * dp_tso_soc_detach() - TSO Detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 * QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);

QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev);

qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
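
/*
 * Usage note (a sketch, assuming 'soc_hdl' and 'vdev_id' identify an
 * attached vdev): dp_tx_send() returns NULL once it has consumed the
 * nbuf; on failure it returns the nbuf, which the caller must free.
 *
 *	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);
 */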

qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
				    qdf_nbuf_t nbuf);

qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
				qdf_nbuf_t nbuf,
				struct cdp_tx_exception_metadata *tx_exc);

qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
					      uint8_t vdev_id,
					      qdf_nbuf_t nbuf,
				struct cdp_tx_exception_metadata *tx_exc);

qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf);
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata);

#if QDF_LOCK_STATS
noinline qdf_nbuf_t
dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			 struct dp_tx_msdu_info_s *msdu_info);
#else
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info);
#endif
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
#endif
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);

/**
 * dp_tx_comp_handler() - Tx completion handler
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_srng: Opaque HAL SRNG pointer
 * @ring_id: completion ring id
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on contents, free packet or handle error
 * conditions
 *
 * Return: Number of TX completions processed
 */
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_srng, uint8_t ring_id,
			    uint32_t quota);
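
/*
 * Illustrative invocation (a sketch of the interrupt-service path;
 * 'int_ctx', 'soc' and 'budget' are assumed to come from the caller):
 * drain up to 'budget' completions from WBM2SW ring 0.
 *
 *	uint32_t done;
 *
 *	done = dp_tx_comp_handler(int_ctx, soc,
 *				  soc->tx_comp_ring[0].hal_srng, 0, budget);
 */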

QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

QDF_STATUS
dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifndef FEATURE_WDS
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
}
#endif

#ifndef ATH_SUPPORT_IQUE
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}

static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}

#else
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info);

void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
#endif

/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * TX packet queue has 2 instances, software descriptor pool id and dma
 * ring id. Based on tx feature and hardware configuration the queue id
 * combination could differ.
 * For example -
 * With XPS enabled, all TX descriptor pools and dma rings are assigned
 * per cpu id.
 * With no XPS and lock-based resource protection, descriptor pool ids are
 * different for each vdev, and the dma ring id is the same as the single
 * pdev id.
 *
 * Return: None
 */
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) &
				DP_TX_QUEUE_MASK;

	queue->desc_pool_id = queue_offset;
	queue->ring_id = qdf_get_cpu();

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s, pool_id:%d ring_id: %d",
		  __func__, queue->desc_pool_id, queue->ring_id);
}
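
/*
 * Worked example for the XPS path above: an nbuf with queue_mapping 5
 * yields desc_pool_id = 5 & DP_TX_QUEUE_MASK = 1, while ring_id follows
 * the CPU executing the send call (e.g. CPU 3 -> TCL data ring 3).
 */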

/**
 * dp_tx_get_hal_ring_hdl() - Get the hal_tx_ring_hdl for data transmission
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Return: HAL ring handle
 */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	if (ring_id == soc->num_tcl_data_rings)
		return soc->tcl_cmd_credit_ring.hal_srng;

	return soc->tcl_data_ring[ring_id].hal_srng;
}

/**
 * dp_tx_get_rbm_id() - Get the RBM ID for data transmission completion.
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Return: RBM ID corresponding to the WBM2SW release ring
 */
static inline uint8_t dp_tx_get_rbm_id(struct dp_soc *soc,
				       uint8_t ring_id)
{
	return (ring_id ? HAL_WBM_SW0_BM_ID + (ring_id - 1) :
			  HAL_WBM_SW2_BM_ID);
}
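
/*
 * Worked example for the mapping above: ring_id 0 returns
 * HAL_WBM_SW2_BM_ID, while ring_id 1, 2, ... return HAL_WBM_SW0_BM_ID,
 * HAL_WBM_SW0_BM_ID + 1, and so on.
 */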

#else /* QCA_OL_TX_MULTIQ_SUPPORT */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s, pool_id:%d ring_id: %d",
		  __func__, queue->desc_pool_id, queue->ring_id);
}

static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}

static inline uint8_t dp_tx_get_rbm_id(struct dp_soc *soc,
				       uint8_t ring_id)
{
	return (ring_id + HAL_WBM_SW0_BM_ID);
}
#endif /* QCA_OL_TX_MULTIQ_SUPPORT */

#ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
}

#else
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t
						  hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
#endif
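
/*
 * Illustrative enqueue pattern these wrappers support (a sketch with
 * error handling and descriptor programming elided):
 *
 *	hal_ring_handle_t hal_ring_hdl =
 *			dp_tx_get_hal_ring_hdl(soc, ring_id);
 *	void *hal_tx_desc;
 *
 *	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl))
 *		return QDF_STATUS_E_FAILURE;
 *
 *	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
 *	if (hal_tx_desc) {
 *		... program the TCL descriptor ...
 *	}
 *
 *	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
 */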

#ifdef FEATURE_PERPKT_INFO
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency);

void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf);
#endif

void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl);

#ifdef ATH_TX_PRI_OVERRIDE
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
#else
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
#endif
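
/*
 * Illustrative use (a sketch): with ATH_TX_PRI_OVERRIDE enabled this
 * copies the nbuf priority into the MSDU TID; otherwise it compiles to
 * nothing.
 *
 *	DP_TX_TID_OVERRIDE(&msdu_info, nbuf);
 */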

void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type);

/* TODO TX_FEATURE_NOT_YET */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
}
/* TODO TX_FEATURE_NOT_YET */

#ifndef WLAN_TX_PKT_CAPTURE_ENH
static inline
QDF_STATUS dp_peer_set_tx_capture_enabled(struct dp_pdev *pdev,
					  struct dp_peer *peer_handle,
					  uint8_t value, uint8_t *peer_mac)
{
	return QDF_STATUS_SUCCESS;
}
#endif
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free);
#endif /* __DP_TX_H */