/*
 * Copyright (c) 2011, 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file ol_tx_desc.h
 * @brief API definitions for the tx descriptor module within the data SW.
 */
#ifndef _OL_TX_DESC__H_
#define _OL_TX_DESC__H_

#include "queue.h"          /* TAILQ_HEAD */
#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <cdp_txrx_cmn.h>       /* ol_txrx_vdev_t, etc. */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT2 */
#include <ol_htt_tx_api.h>

#define DIV_BY_8	3
#define DIV_BY_32	5
#define MOD_BY_8	0x7
#define MOD_BY_32	0x1F
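
/*
 * Note: these are shift amounts and bit masks rather than divisors:
 * (x >> DIV_BY_8) computes x / 8 and (x & MOD_BY_8) computes x % 8,
 * e.g. for x = 29: 29 >> 3 == 3 and 29 & 0x7 == 5.  DIV_BY_8 and
 * MOD_BY_8 are used by the descriptor duplicate-detection helpers
 * further below to size and index the free-list bitmap.
 */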

struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info);


/**
 * @brief Allocate and initialize a tx descriptor for a LL system.
 * @details
 *  Allocate a tx descriptor pair for a new tx frame - a SW tx descriptor
 *  for private use within the host data SW, and a HTT tx descriptor for
 *  downloading tx meta-data to the target FW/HW.
 *  Fill in the fields of this pair of tx descriptors based on the
 *  information in the netbuf.
 *  For LL, this includes filling in a fragmentation descriptor to
 *  specify to the MAC HW where to find the tx frame's fragments.
 *
 * @param pdev - the data physical device sending the data
 *      (for accessing the tx desc pool)
 * @param vdev - the virtual device sending the data
 *      (for specifying the transmitter address for multicast / broadcast data)
 * @param netbuf - the tx frame
 * @param msdu_info - tx meta-data
 */
struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
				   struct ol_txrx_vdev_t *vdev,
				   qdf_nbuf_t netbuf,
				   struct ol_txrx_msdu_info_t *msdu_info);


/**
 * @brief Allocate and initialize a tx descriptor for a HL system.
 * @details
 *  Allocate a tx descriptor pair for a new tx frame - a SW tx descriptor
 *  for private use within the host data SW, and a HTT tx descriptor for
 *  downloading tx meta-data to the target FW/HW.
 *  Fill in the fields of this pair of tx descriptors based on the
 *  information in the netbuf.
 *
 * @param pdev - the data physical device sending the data
 *      (for accessing the tx desc pool)
 * @param vdev - the virtual device sending the data
 *      (for specifying the transmitter address for multicast / broadcast data)
 * @param netbuf - the tx frame
 * @param msdu_info - tx meta-data
 */
struct ol_tx_desc_t *
ol_tx_desc_hl(
		struct ol_txrx_pdev_t *pdev,
		struct ol_txrx_vdev_t *vdev,
		qdf_nbuf_t netbuf,
		struct ol_txrx_msdu_info_t *msdu_info);


/**
 * @brief Use a tx descriptor ID to find the corresponding descriptor object.
 *
 * @param pdev - the data physical device sending the data
 * @param tx_desc_id - the ID of the descriptor in question
 * @return the descriptor object that has the specified ID
 */
static inline struct ol_tx_desc_t *ol_tx_desc_find(
			struct ol_txrx_pdev_t *pdev, uint16_t tx_desc_id)
{
	void **td_base = (void **)pdev->tx_desc.desc_pages.cacheable_pages;

	return &((union ol_tx_desc_list_elem_t *)
		(td_base[tx_desc_id >> pdev->tx_desc.page_divider] +
		(pdev->tx_desc.desc_reserved_size *
		(tx_desc_id & pdev->tx_desc.offset_filter))))->tx_desc;
}
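
/*
 * Indexing sketch (illustrative values, not taken from any particular
 * target configuration): the descriptor pool is an array of pages, so
 * the ID is split into a page index and an offset within the page.
 * If page_divider were 10 and offset_filter were 0x3FF (1024
 * descriptors per page), ID 2500 would map to page 2500 >> 10 == 2 and
 * element 2500 & 0x3FF == 452 within that page, located
 * 452 * desc_reserved_size bytes from the start of the page.
 */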

/**
 * @brief Use a tx descriptor ID to find the corresponding descriptor object
 *    and add sanity check.
 *
 * @param pdev - the data physical device sending the data
 * @param tx_desc_id - the ID of the descriptor in question
 * @return the descriptor object that has the specified ID,
 *    or NULL on failure
 */

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
static inline struct ol_tx_desc_t *
ol_tx_desc_find_check(struct ol_txrx_pdev_t *pdev, u_int16_t tx_desc_id)
{
	struct ol_tx_desc_t *tx_desc;

	if (tx_desc_id >= pdev->tx_desc.pool_size)
		return NULL;

	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);

	if (tx_desc->pkt_type == ol_tx_frm_freed)
		return NULL;

	return tx_desc;
}

#else

static inline struct ol_tx_desc_t *
ol_tx_desc_find_check(struct ol_txrx_pdev_t *pdev, u_int16_t tx_desc_id)
{
	struct ol_tx_desc_t *tx_desc;

	if (tx_desc_id >= pdev->tx_desc.pool_size)
		return NULL;

	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);

	/* check against invalid tx_desc_id */
	if (ol_cfg_is_high_latency(pdev->ctrl_pdev) && !tx_desc->vdev)
		return NULL;

	return tx_desc;
}
#endif
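
/*
 * Usage sketch (hypothetical completion path, for illustration only):
 * a descriptor ID reported by the target should be validated with
 * ol_tx_desc_find_check() before use, since the lookup can fail.
 *
 *	struct ol_tx_desc_t *tx_desc;
 *
 *	tx_desc = ol_tx_desc_find_check(pdev, msdu_id);
 *	if (!tx_desc) {
 *		// stale or out-of-range ID reported by the target
 *		return;
 *	}
 *	// ... complete the frame referenced by tx_desc ...
 */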

/**
 * @brief Free a list of tx descriptors and the tx frames they refer to.
 * @details
 *  Free a batch of "standard" tx descriptors and their tx frames.
 *  Free each tx descriptor, by returning it to the freelist.
 *  Unmap each netbuf, and free the netbufs as a batch.
 *  Irregular tx frames like TSO or management frames that require
 *  special handling are processed by the ol_tx_desc_frame_free_nonstd
 *  function rather than this function.
 *
 * @param pdev - the data physical device that sent the data
 * @param tx_descs - a list of SW tx descriptors for the tx frames
 * @param had_error - bool indication of whether the transmission failed.
 *            This is provided to callback functions that get notified of
 *            the tx frame completion.
 */
void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
				ol_tx_desc_list *tx_descs, int had_error);

/**
 * @brief Free a non-standard tx frame and its tx descriptor.
 * @details
 *  Check the tx frame type (e.g. TSO vs. management) to determine what
 *  special steps, if any, need to be performed prior to freeing the
 *  tx frame and its tx descriptor.
 *  This function can also be used to free single standard tx frames.
 *  After performing any special steps based on tx frame type, free the
 *  tx descriptor, i.e. return it to the freelist, and unmap and
 *  free the netbuf referenced by the tx descriptor.
 *
 * @param pdev - the data physical device that sent the data
 * @param tx_desc - the SW tx descriptor for the tx frame that was sent
 * @param had_error - bool indication of whether the transmission failed.
 *            This is provided to callback functions that get notified of
 *            the tx frame completion.
 */
void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
				  struct ol_tx_desc_t *tx_desc, int had_error);

/*
 * @brief Determine the ID of a tx descriptor.
 *
 * @param pdev - the physical device that is sending the data
 * @param tx_desc - the descriptor whose ID is being determined
 * @return numeric ID that uniquely identifies the tx descriptor
 */
static inline uint16_t
ol_tx_desc_id(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	TXRX_ASSERT2(tx_desc->id < pdev->tx_desc.pool_size);
	return tx_desc->id;
}

/*
 * @brief Retrieve the beacon header for the vdev.
 * @param cfg_pdev - opaque pointer to scn
 * @param vdev_id - vdev id
 * @return void pointer to the beacon header for the given vdev
 */

void *ol_ath_get_bcn_header(struct cdp_cfg *cfg_pdev, A_UINT32 vdev_id);

/*
 * @brief Free a tx descriptor, without freeing the matching frame.
 * @details
 *  This function is used during the function call that submits tx frames
 *  into the txrx layer, for cases where a tx descriptor is successfully
 *  allocated, but for other reasons the frame could not be accepted.
 *
 * @param pdev - the data physical device that is sending the data
 * @param tx_desc - the descriptor being freed
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc);
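
/*
 * Usage sketch (hypothetical LL send path, for illustration only): if a
 * descriptor is allocated but the frame is then rejected before being
 * handed to the target, only the descriptor is returned to the pool and
 * the caller keeps ownership of the netbuf.
 *
 *	tx_desc = ol_tx_desc_ll(pdev, vdev, netbuf, &msdu_info);
 *	if (!tx_desc)
 *		return netbuf;	// no descriptor available, caller retries
 *	if (frame_cannot_be_accepted) {	// hypothetical reject condition
 *		ol_tx_desc_free(pdev, tx_desc);
 *		return netbuf;	// frame not consumed
 *	}
 */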

#if defined(FEATURE_TSO)
struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev);

void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
	 struct qdf_tso_seg_elem_t *tso_seg);
struct qdf_tso_num_seg_elem_t *ol_tso_num_seg_alloc(
				struct ol_txrx_pdev_t *pdev);
void ol_tso_num_seg_free(struct ol_txrx_pdev_t *pdev,
	 struct qdf_tso_num_seg_elem_t *tso_num_seg);
void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev,
				struct ol_txrx_msdu_info_t *msdu_info,
				bool is_tso_seg_mapping_done);

#else
#define ol_tso_alloc_segment(pdev) /*no-op*/
#define ol_tso_free_segment(pdev, tso_seg) /*no-op*/
#define ol_tso_num_seg_alloc(pdev) /*no-op*/
#define ol_tso_num_seg_free(pdev, tso_num_seg) /*no-op*/
/*no-op*/
#define ol_free_remaining_tso_segs(vdev, msdu_info, is_tso_seg_mapping_done)
#endif

/**
 * ol_tx_get_desc_global_pool() - get descriptor from global pool
 * @pdev: pdev handle
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct ol_tx_desc_t *ol_tx_get_desc_global_pool(struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_desc_t *tx_desc = &pdev->tx_desc.freelist->tx_desc;

	pdev->tx_desc.freelist = pdev->tx_desc.freelist->next;
	pdev->tx_desc.num_free--;
	return tx_desc;
}

/**
 * ol_tx_put_desc_global_pool() - put descriptor to global pool freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void ol_tx_put_desc_global_pool(struct ol_txrx_pdev_t *pdev,
			struct ol_tx_desc_t *tx_desc)
{
	((union ol_tx_desc_list_elem_t *)tx_desc)->next =
					pdev->tx_desc.freelist;
	pdev->tx_desc.freelist =
			 (union ol_tx_desc_list_elem_t *)tx_desc;
	pdev->tx_desc.num_free++;
}
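
/*
 * Usage sketch: both helpers assume the caller already holds the tx
 * descriptor pool lock and has checked the freelist state (the lock
 * name below is an assumption for illustration, not part of this API).
 *
 *	qdf_spin_lock_bh(&pdev->tx_mutex);	// assumed pool lock
 *	if (pdev->tx_desc.num_free > 0)
 *		tx_desc = ol_tx_get_desc_global_pool(pdev);
 *	qdf_spin_unlock_bh(&pdev->tx_mutex);
 *	...
 *	qdf_spin_lock_bh(&pdev->tx_mutex);
 *	ol_tx_put_desc_global_pool(pdev, tx_desc);
 *	qdf_spin_unlock_bh(&pdev->tx_mutex);
 */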


#ifdef QCA_LL_TX_FLOW_CONTROL_V2

#ifdef QCA_LL_TX_FLOW_CONTROL_RESIZE
int ol_tx_distribute_descs_to_deficient_pools_from_global_pool(void);
#else
static inline
int ol_tx_distribute_descs_to_deficient_pools_from_global_pool(void)
{
	return 0;
}
#endif

int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool);
/**
 * ol_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct ol_tx_desc_t *ol_tx_get_desc_flow_pool(struct ol_tx_flow_pool_t *pool)
{
	struct ol_tx_desc_t *tx_desc = &pool->freelist->tx_desc;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * ol_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void ol_tx_put_desc_flow_pool(struct ol_tx_flow_pool_t *pool,
			struct ol_tx_desc_t *tx_desc)
{
	tx_desc->pool = pool;
	((union ol_tx_desc_list_elem_t *)tx_desc)->next = pool->freelist;
	pool->freelist = (union ol_tx_desc_list_elem_t *)tx_desc;
	pool->avail_desc++;
}
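
/*
 * Usage sketch: like the global-pool helpers above, these assume the
 * caller holds the per-pool lock and has checked avail_desc (the
 * flow_pool_lock field name is an assumption for illustration).
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc > 0)
 *		tx_desc = ol_tx_get_desc_flow_pool(pool);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */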

#else
static inline int ol_tx_free_invalid_flow_pool(void *pool)
{
	return 0;
}
#endif

#ifdef DESC_DUP_DETECT_DEBUG
/**
 * ol_tx_desc_dup_detect_init() - initialize descriptor duplication logic
 * @pdev: pdev handle
 * @pool_size: global pool size
 *
 * Return: none
 */
static inline
void ol_tx_desc_dup_detect_init(struct ol_txrx_pdev_t *pdev, uint16_t pool_size)
{
	uint16_t size = (pool_size >> DIV_BY_8) +
		sizeof(*pdev->tx_desc.free_list_bitmap);
	pdev->tx_desc.free_list_bitmap = qdf_mem_malloc(size);
}
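
/*
 * Sizing example: the bitmap holds one bit per descriptor, so a pool of
 * 1024 descriptors needs 1024 >> DIV_BY_8 == 128 bytes, plus
 * sizeof(*free_list_bitmap) bytes of slack so a pool size that is not a
 * multiple of the bitmap word size still fits in whole words.
 */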

/**
 * ol_tx_desc_dup_detect_deinit() - deinit descriptor duplication logic
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_tx_desc_dup_detect_deinit(struct ol_txrx_pdev_t *pdev)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: pool_size %d num_free %d\n", __func__,
		pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
	if (pdev->tx_desc.free_list_bitmap)
		qdf_mem_free(pdev->tx_desc.free_list_bitmap);
}

/**
 * ol_tx_desc_dup_detect_set() - set bit for msdu_id
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: none
 */
static inline
void ol_tx_desc_dup_detect_set(struct ol_txrx_pdev_t *pdev,
				struct ol_tx_desc_t *tx_desc)
{
	uint16_t msdu_id = ol_tx_desc_id(pdev, tx_desc);
	bool test;

	if (!pdev->tx_desc.free_list_bitmap)
		return;

	if (qdf_unlikely(msdu_id > pdev->tx_desc.pool_size)) {
		qdf_print("msdu_id %d > pool_size %d",
			  msdu_id, pdev->tx_desc.pool_size);
		QDF_BUG(0);
	}

	test = test_and_set_bit(msdu_id, pdev->tx_desc.free_list_bitmap);
	if (qdf_unlikely(test)) {
		uint16_t size = (pdev->tx_desc.pool_size >> DIV_BY_8) +
			((pdev->tx_desc.pool_size & MOD_BY_8) ? 1 : 0);
		qdf_print("duplicate msdu_id %d detected!!", msdu_id);
		qdf_trace_hex_dump(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		(void *)pdev->tx_desc.free_list_bitmap, size);
		QDF_BUG(0);
	}
}

/**
 * ol_tx_desc_dup_detect_reset() - reset bit for msdu_id
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: none
 */
static inline
void ol_tx_desc_dup_detect_reset(struct ol_txrx_pdev_t *pdev,
				 struct ol_tx_desc_t *tx_desc)
{
	uint16_t msdu_id = ol_tx_desc_id(pdev, tx_desc);
	bool test;

	if (!pdev->tx_desc.free_list_bitmap)
		return;

	if (qdf_unlikely(msdu_id > pdev->tx_desc.pool_size)) {
		qdf_print("msdu_id %d > pool_size %d",
			  msdu_id, pdev->tx_desc.pool_size);
		QDF_BUG(0);
	}

	test = !test_and_clear_bit(msdu_id, pdev->tx_desc.free_list_bitmap);
	if (qdf_unlikely(test)) {
		uint16_t size = (pdev->tx_desc.pool_size >> DIV_BY_8) +
			((pdev->tx_desc.pool_size & MOD_BY_8) ? 1 : 0);
		qdf_print("duplicate free msg received for msdu_id %d!!\n",
								 msdu_id);
		qdf_trace_hex_dump(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		(void *)pdev->tx_desc.free_list_bitmap, size);
		QDF_BUG(0);
	}
}
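
/*
 * Pairing note: the set helper is expected to be called when a
 * descriptor is taken from the freelist and the reset helper when it is
 * returned, so a bit that is already in the corresponding state flags
 * either a duplicate allocation or a duplicate free of the same
 * msdu_id.
 */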
#else
static inline
void ol_tx_desc_dup_detect_init(struct ol_txrx_pdev_t *pdev, uint16_t size)
{
}

static inline
void ol_tx_desc_dup_detect_deinit(struct ol_txrx_pdev_t *pdev)
{
}

static inline
void ol_tx_desc_dup_detect_set(struct ol_txrx_pdev_t *pdev,
				struct ol_tx_desc_t *tx_desc)
{
}

static inline
void ol_tx_desc_dup_detect_reset(struct ol_txrx_pdev_t *pdev,
				 struct ol_tx_desc_t *tx_desc)
{
}
#endif

enum extension_header_type
ol_tx_get_ext_header_type(struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t netbuf);
enum extension_header_type
ol_tx_get_wisa_ext_type(qdf_nbuf_t netbuf);


#endif /* _OL_TX_DESC__H_ */