xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 #ifndef __DP_TX_H
19 #define __DP_TX_H
20 
21 #include <qdf_types.h>
22 #include <qdf_nbuf.h>
23 #include "dp_types.h"
24 
25 
/* Maximum number of fragments per MSDU segment (sizes frags[] below) */
#define DP_TX_MAX_NUM_FRAGS 6

/*
 * Tx descriptor state/ownership flags.
 * Power-of-two values: multiple flags may be OR'ed into a single field.
 * (Individual flag semantics are set/consumed in dp_tx.c — names below
 * indicate intended use.)
 */
#define DP_TX_DESC_FLAG_ALLOCATED	0x1
#define DP_TX_DESC_FLAG_TO_FW		0x2
#define DP_TX_DESC_FLAG_FRAG		0x4
#define DP_TX_DESC_FLAG_RAW		0x8
#define DP_TX_DESC_FLAG_MESH		0x10
#define DP_TX_DESC_FLAG_QUEUED_TX	0x20
#define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
#define DP_TX_DESC_FLAG_ME		0x80
#define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
37 
/**
 * DP_TX_FREE_SINGLE_BUF() - DMA-unmap and free one network buffer
 * @soc: dp_soc handle; soc->osdev is the device the buffer was mapped on
 * @buf: qdf_nbuf to unmap (QDF_DMA_TO_DEVICE direction) and free
 */
#define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
do {                                                           \
	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);  \
	qdf_nbuf_free(buf);                                    \
} while (0)
43 
44 #define OCB_HEADER_VERSION	 1
45 
/*
 * Compile-time mapping from a vdev to its Tx descriptor pool id and TCL
 * ring id:
 *  - TX_PER_PDEV_DESC_POOL + QCA_LL_TX_FLOW_CONTROL_V2: pool per vdev
 *  - TX_PER_PDEV_DESC_POOL (no flow control v2):        pool per pdev
 *  - TX_PER_VDEV_DESC_POOL:                             pool per vdev
 * Ring id is always derived from the pdev id.
 *
 * NOTE(review): if neither TX_PER_PDEV_DESC_POOL nor TX_PER_VDEV_DESC_POOL
 * is defined, DP_TX_GET_DESC_POOL_ID/DP_TX_GET_RING_ID are left undefined
 * and the non-multiqueue dp_tx_get_queue() below will not compile — verify
 * build configs always define one of them.
 */
#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
	#ifdef TX_PER_VDEV_DESC_POOL
		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
	#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */
/* Mask applied to the nbuf queue mapping in the multiqueue path below */
#define DP_TX_QUEUE_MASK 0x3
/**
 * struct dp_tx_frag_info_s - per-fragment buffer info for a Tx segment
 * @vaddr: HLOS virtual address for buffer
 * @paddr_lo: physical address lower 32bits
 * @paddr_hi: physical address higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};
73 
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment (at most DP_TX_MAX_NUM_FRAGS)
 * @total_len: Total length of segment
 * @frags: per-Fragment information
 * @next: pointer to next MSDU segment (singly linked list)
 */
struct dp_tx_seg_info_s  {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
89 
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 * 	      descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information.
 * Raw Frames will be handed over to driver as an SKB chain with MPDU
 * boundaries indicated through flags in SKB CB (first_msdu and last_msdu).
 * This will be converted into set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
108 
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};
122 
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @u.tso_info: TSO information for TSO frame types
 * 	     (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @exception_fw: Duplicate frame to be sent to firmware
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[7];
	uint8_t exception_fw;
	uint16_t ppdu_cookie;
	uint8_t is_tx_sniffer;
};
155 
156 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
157 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
158 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
159 
160 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc);
161 QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc);
162 
163 /**
 * dp_tso_soc_attach() - TSO Attach handler
165  * @txrx_soc: Opaque Dp handle
166  *
167  * Reserve TSO descriptor buffers
168  *
169  * Return: QDF_STATUS_E_FAILURE on failure or
170  * QDF_STATUS_SUCCESS on success
171  */
172 QDF_STATUS dp_tso_soc_attach(void *txrx_soc);
173 
174 /**
 * dp_tso_soc_detach() - TSO Detach handler
176  * @txrx_soc: Opaque Dp handle
177  *
178  * Deallocate TSO descriptor buffers
179  *
180  * Return: QDF_STATUS_E_FAILURE on failure or
181  * QDF_STATUS_SUCCESS on success
182  */
183 QDF_STATUS dp_tso_soc_detach(void *txrx_soc);
184 
185 QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev);
186 QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev);
187 
188 qdf_nbuf_t dp_tx_send(struct cdp_vdev *data_vdev, qdf_nbuf_t nbuf);
189 qdf_nbuf_t dp_tx_send_exception(struct cdp_vdev *data_vdev, qdf_nbuf_t nbuf,
190 				struct cdp_tx_exception_metadata *tx_exc);
191 qdf_nbuf_t dp_tx_send_mesh(struct cdp_vdev *data_vdev, qdf_nbuf_t nbuf);
192 qdf_nbuf_t
193 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
194 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
195 		       struct cdp_tx_exception_metadata *tx_exc_metadata);
196 
197 #if QDF_LOCK_STATS
198 noinline qdf_nbuf_t
199 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
200 			 struct dp_tx_msdu_info_s *msdu_info);
201 #else
202 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
203 				    struct dp_tx_msdu_info_s *msdu_info);
204 #endif
205 #ifdef FEATURE_WLAN_TDLS
206 qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
207 		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
208 #endif
209 
210 /**
211  * dp_tx_comp_handler() - Tx completion handler
212  * @int_ctx: pointer to DP interrupt context
213  * @soc: core txrx main context
214  * @hal_srng: Opaque HAL SRNG pointer
215  * @ring_id: completion ring id
216  * @quota: No. of packets/descriptors that can be serviced in one loop
217  *
218  * This function will collect hardware release ring element contents and
219  * handle descriptor contents. Based on contents, free packet or handle error
220  * conditions
221  *
222  * Return: Number of TX completions processed
223  */
224 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
225 			    hal_ring_handle_t hal_srng, uint8_t ring_id,
226 			    uint32_t quota);
227 
228 QDF_STATUS
229 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
230 
231 #ifndef FEATURE_WDS
/**
 * dp_tx_mec_handler() - no-op MEC handler stub when FEATURE_WDS is disabled
 * @vdev: DP virtual device handle (unused)
 * @status: completion status words from the Tx completion (unused)
 */
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	/* intentionally empty: MEC handling requires FEATURE_WDS */
}
236 #endif
237 
238 #ifndef ATH_SUPPORT_IQUE
/**
 * dp_tx_me_exit() - no-op ME teardown stub when ATH_SUPPORT_IQUE is disabled
 * @pdev: DP physical device handle (unused)
 */
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
	/* intentionally empty: ME teardown requires ATH_SUPPORT_IQUE */
}
243 #endif
244 
245 #ifndef QCA_MULTIPASS_SUPPORT
246 static inline
247 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
248 			     qdf_nbuf_t nbuf,
249 			     struct dp_tx_msdu_info_s *msdu_info)
250 {
251 	return true;
252 }
253 
/**
 * dp_tx_vdev_multipass_deinit() - no-op multipass teardown stub when
 * QCA_MULTIPASS_SUPPORT is off
 * @vdev: DP virtual device handle (unused)
 */
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
	/* intentionally empty */
}
258 
259 #else
260 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
261 			     qdf_nbuf_t nbuf,
262 			     struct dp_tx_msdu_info_s *msdu_info);
263 
264 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
265 #endif
266 
267 /**
268  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
269  * @vdev: DP Virtual device handle
270  * @nbuf: Buffer pointer
271  * @queue: queue ids container for nbuf
272  *
273  * TX packet queue has 2 instances, software descriptors id and dma ring id
274  * Based on tx feature and hardware configuration queue id combination could be
275  * different.
276  * For example -
277  * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
278  * With no XPS,lock based resource protection, Descriptor pool ids are different
279  * for each vdev, dma ring id will be same as single pdev id
280  *
281  * Return: None
282  */
283 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
284 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
285 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
286 {
287 	uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) &
288 				DP_TX_QUEUE_MASK;
289 
290 	queue->desc_pool_id = queue_offset;
291 	queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
292 
293 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
294 		  "%s, pool_id:%d ring_id: %d",
295 		  __func__, queue->desc_pool_id, queue->ring_id);
296 }
297 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
298 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
299 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
300 {
301 	/* get flow id */
302 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
303 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
304 
305 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
306 		  "%s, pool_id:%d ring_id: %d",
307 		  __func__, queue->desc_pool_id, queue->ring_id);
308 }
309 #endif
310 #ifdef FEATURE_PERPKT_INFO
311 QDF_STATUS
312 dp_get_completion_indication_for_stack(struct dp_soc *soc,
313 				       struct dp_pdev *pdev,
314 				       struct dp_peer *peer,
315 				       struct hal_tx_completion_status *ts,
316 				       qdf_nbuf_t netbuf,
317 				       uint64_t time_latency);
318 
319 void  dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
320 		uint16_t peer_id, uint32_t ppdu_id,
321 		qdf_nbuf_t netbuf);
322 #endif
323 
324 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl);
325 
326 #ifdef ATH_TX_PRI_OVERRIDE
327 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
328 	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
329 #else
330 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
331 #endif
332 
333 void
334 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
335 			     uint32_t buf_type);
336 
337 /* TODO TX_FEATURE_NOT_YET */
/**
 * dp_tx_comp_process_exception() - placeholder exception-path completion
 * @tx_desc: Tx descriptor from the completion ring (unused)
 *
 * Stub until the feature is implemented (see TX_FEATURE_NOT_YET markers).
 */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
	/* intentionally empty */
}
342 /* TODO TX_FEATURE_NOT_YET */
343 #endif
344