xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef DP_TX_DESC_H
20 #define DP_TX_DESC_H
21 
22 #include "dp_types.h"
23 #include "dp_tx.h"
24 #include "dp_internal.h"
25 
26 #ifdef TX_PER_PDEV_DESC_POOL
27 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
28 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
29 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
30 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
31 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
32 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
33 #else
34 	#ifdef TX_PER_VDEV_DESC_POOL
35 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
36 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
37 	#endif /* TX_PER_VDEV_DESC_POOL */
38 #endif /* TX_PER_PDEV_DESC_POOL */
39 
40 /**
41  * 21 bits cookie
42  * 2 bits pool id 0 ~ 3,
43  * 10 bits page id 0 ~ 1023
44  * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
45  */
46 /* ???Ring ID needed??? */
47 #define DP_TX_DESC_ID_POOL_MASK    0x018000
48 #define DP_TX_DESC_ID_POOL_OS      15
49 #define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
50 #define DP_TX_DESC_ID_PAGE_OS      5
51 #define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
52 #define DP_TX_DESC_ID_OFFSET_OS    0
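
/*
 * Illustrative decode (hypothetical cookie value, not taken from this file):
 * a descriptor ID of 0x8065 decomposes with the masks above into
 *	pool id = (0x8065 & DP_TX_DESC_ID_POOL_MASK) >> DP_TX_DESC_ID_POOL_OS = 1
 *	page id = (0x8065 & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS = 3
 *	offset  = (0x8065 & DP_TX_DESC_ID_OFFSET_MASK) >> DP_TX_DESC_ID_OFFSET_OS = 5
 */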
53 
54 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
55 #define TX_DESC_LOCK_CREATE(lock)
56 #define TX_DESC_LOCK_DESTROY(lock)
57 #define TX_DESC_LOCK_LOCK(lock)
58 #define TX_DESC_LOCK_UNLOCK(lock)
59 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
60 	((pool)->status == FLOW_POOL_INACTIVE)
61 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
62 do {                                                   \
63 	(_tx_desc_pool)->elem_size = 0;                \
64 	(_tx_desc_pool)->freelist = NULL;              \
65 	(_tx_desc_pool)->pool_size = 0;                \
66 	(_tx_desc_pool)->avail_desc = 0;               \
67 	(_tx_desc_pool)->start_th = 0;                 \
68 	(_tx_desc_pool)->stop_th = 0;                  \
69 	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
70 } while (0)
71 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
72 #define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
73 #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
74 #define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
75 #define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
76 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
77 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
78 do {                                                   \
79 	(_tx_desc_pool)->elem_size = 0;                \
80 	(_tx_desc_pool)->num_allocated = 0;            \
81 	(_tx_desc_pool)->freelist = NULL;              \
82 	(_tx_desc_pool)->elem_count = 0;               \
83 	(_tx_desc_pool)->num_free = 0;                 \
84 } while (0)
85 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
86 #define MAX_POOL_BUFF_COUNT 10000
87 
88 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
89 		uint16_t num_elem);
90 QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
91 QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
92 		uint16_t num_elem);
93 QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
94 QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
95 		uint16_t num_elem);
96 void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
97 QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
98 		uint16_t num_elem);
99 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
100 
101 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
102 void dp_tx_flow_control_init(struct dp_soc *);
103 void dp_tx_flow_control_deinit(struct dp_soc *);
104 
105 QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
106 	tx_pause_callback pause_cb);
107 QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
108 				uint8_t vdev_id);
109 void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
110 			   uint8_t vdev_id);
111 void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
112 struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
113 	uint8_t flow_pool_id, uint16_t flow_pool_size);
114 
115 QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
116 	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
117 void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
118 	uint8_t flow_type, uint8_t flow_pool_id);
119 
120 /**
121  * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
122  * @pool: flow pool
123  *
124  * Caller must hold the flow pool lock and perform sanity checks.
125  *
126  * Return: tx descriptor
127  */
128 static inline
129 struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
130 {
131 	struct dp_tx_desc_s *tx_desc = pool->freelist;
132 
133 	pool->freelist = pool->freelist->next;
134 	pool->avail_desc--;
135 	return tx_desc;
136 }
137 
138 /**
139  * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
140  * @pool: flow pool
141  * @tx_desc: tx descriptor
142  *
143  * Caller must hold the flow pool lock and perform sanity checks.
144  *
145  * Return: none
146  */
147 static inline
148 void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
149 			struct dp_tx_desc_s *tx_desc)
150 {
151 	tx_desc->next = pool->freelist;
152 	pool->freelist = tx_desc;
153 	pool->avail_desc++;
154 }
155 
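/*
 * Usage sketch (illustrative only): both helpers above assume the caller
 * already holds flow_pool_lock and has validated the pool, e.g.
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc) {
 *		struct dp_tx_desc_s *desc = dp_tx_get_desc_flow_pool(pool);
 *		// ... use desc, or hand it back ...
 *		dp_tx_put_desc_flow_pool(pool, desc);
 *	}
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */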
156 
157 /**
158  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
159  *
160  * @soc: Handle to DP SoC structure
161  * @desc_pool_id: ID of the descriptor pool to allocate from
162  *
163  * Return: Tx descriptor on success, NULL if no descriptor is available
164  */
165 static inline struct dp_tx_desc_s *
166 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
167 {
168 	struct dp_tx_desc_s *tx_desc = NULL;
169 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
170 
171 	if (pool) {
172 		qdf_spin_lock_bh(&pool->flow_pool_lock);
173 		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
174 		    pool->avail_desc) {
175 			tx_desc = dp_tx_get_desc_flow_pool(pool);
176 			tx_desc->pool_id = desc_pool_id;
177 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
178 			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
179 				pool->status = FLOW_POOL_ACTIVE_PAUSED;
180 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
181 				/* pause network queues */
182 				soc->pause_cb(desc_pool_id,
183 					       WLAN_STOP_ALL_NETIF_QUEUE,
184 					       WLAN_DATA_FLOW_CONTROL);
185 			} else {
186 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
187 			}
188 		} else {
189 			pool->pkt_drop_no_desc++;
190 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
191 		}
192 	} else {
193 		soc->pool_stats.pkt_drop_no_pool++;
194 	}
195 
196 
197 	return tx_desc;
198 }
199 
200 /**
201  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
202  *
203  * @soc: Handle to DP SoC structure
204  * @tx_desc: descriptor to free
205  * @desc_pool_id: ID of the descriptor pool the descriptor belongs to
206  *
207  * Return: None
208  */
209 static inline void
210 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
211 		uint8_t desc_pool_id)
212 {
213 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
214 
215 	qdf_spin_lock_bh(&pool->flow_pool_lock);
216 	tx_desc->flags = 0;
217 	dp_tx_put_desc_flow_pool(pool, tx_desc);
218 	switch (pool->status) {
219 	case FLOW_POOL_ACTIVE_PAUSED:
220 		if (pool->avail_desc > pool->start_th) {
221 			soc->pause_cb(pool->flow_pool_id,
222 				       WLAN_WAKE_ALL_NETIF_QUEUE,
223 				       WLAN_DATA_FLOW_CONTROL);
224 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
225 		}
226 		break;
227 	case FLOW_POOL_INVALID:
228 		if (pool->avail_desc == pool->pool_size) {
229 			dp_tx_desc_pool_free(soc, desc_pool_id);
230 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
231 			qdf_print("%s %d pool is freed!!\n",
232 				 __func__, __LINE__);
233 			return;
234 		}
235 		break;
236 
237 	case FLOW_POOL_ACTIVE_UNPAUSED:
238 		break;
239 	default:
240 		qdf_print("%s %d pool is INACTIVE State!!\n",
241 				 __func__, __LINE__);
242 		break;
243 	}
244 
245 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
246 
247 }
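
/*
 * Usage sketch (illustrative, hypothetical caller): a transmit path would
 * typically pair dp_tx_desc_alloc() with dp_tx_desc_free() once the frame
 * has been consumed or rejected, e.g.
 *
 *	struct dp_tx_desc_s *desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *
 *	if (!desc)
 *		return QDF_STATUS_E_RESOURCES;	// no descriptor available
 *	// ... fill the descriptor and hand the frame to the hardware ...
 *	// on completion (or on failure to enqueue):
 *	dp_tx_desc_free(soc, desc, desc_pool_id);
 */
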
248 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
249 
250 static inline void dp_tx_flow_control_init(struct dp_soc *handle)
251 {
252 }
253 
254 static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
255 {
256 }
257 
258 static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
259 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
260 	uint16_t flow_pool_size)
261 {
262 	return QDF_STATUS_SUCCESS;
263 }
264 
265 static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
266 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
267 {
268 }
269 
270 /**
271  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
272  *
273  * @soc: Handle to DP SoC structure
274  * @desc_pool_id: ID of the descriptor pool to allocate from
275  *
276  * Return: Tx descriptor on success, NULL if the pool is exhausted
277  */
278 static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
279 						uint8_t desc_pool_id)
280 {
281 	struct dp_tx_desc_s *tx_desc = NULL;
282 
283 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
284 
285 	tx_desc = soc->tx_desc[desc_pool_id].freelist;
286 
287 	/* Pool is exhausted */
288 	if (!tx_desc) {
289 		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
290 		return NULL;
291 	}
292 
293 	soc->tx_desc[desc_pool_id].freelist =
294 		soc->tx_desc[desc_pool_id].freelist->next;
295 	soc->tx_desc[desc_pool_id].num_allocated++;
296 	soc->tx_desc[desc_pool_id].num_free--;
297 
298 	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
299 
300 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
301 
302 	return tx_desc;
303 }
304 
305 /**
306  * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx Descriptors
307  *                            from given pool
308  * @soc: Handle to DP SoC structure
309  * @desc_pool_id: ID of the descriptor pool to allocate from
310  * @num_requested: number of descriptors required
311  *
312  * Allocate multiple tx descriptors and link them into a list.
313  *
314  * Return: h_desc, pointer to the first descriptor in the linked batch
315  */
316 static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
317 		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
318 {
319 	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
320 	uint8_t count;
321 
322 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
323 
324 	if ((num_requested == 0) ||
325 			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
326 		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
327 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
328 			"%s, No Free Desc: Available(%d) num_requested(%d)",
329 			__func__, soc->tx_desc[desc_pool_id].num_free,
330 			num_requested);
331 		return NULL;
332 	}
333 
334 	h_desc = soc->tx_desc[desc_pool_id].freelist;
335 
336 	/* h_desc should never be NULL since num_free > requested */
337 	qdf_assert_always(h_desc);
338 
339 	c_desc = h_desc;
340 	for (count = 0; count < (num_requested - 1); count++) {
341 		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
342 		c_desc = c_desc->next;
343 	}
344 	/* c_desc now points at the last descriptor of the batch */
345 	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
346 	soc->tx_desc[desc_pool_id].num_free -= num_requested;
347 	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
348 	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
349 	c_desc->next = NULL;
348 
349 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
350 	return h_desc;
351 }
352 
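/*
 * Usage sketch (illustrative): dp_tx_desc_alloc_multiple() above returns a
 * NULL-terminated singly linked list headed by h_desc; a hypothetical caller
 * could walk and consume it as
 *
 *	struct dp_tx_desc_s *desc = dp_tx_desc_alloc_multiple(soc, pool_id, n);
 *
 *	while (desc) {
 *		struct dp_tx_desc_s *next = desc->next;
 *		// ... use desc ...
 *		desc = next;
 *	}
 */
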
353 /**
354  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
355  * @soc: Handle to DP SoC structure
356  * @tx_desc: descriptor to free
357  * @desc_pool_id: ID of the pool the descriptor belongs to
358  * Return: None
359  */
360 static inline void
361 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
362 		uint8_t desc_pool_id)
363 {
364 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
365 
366 	tx_desc->flags = 0;
367 	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
368 	soc->tx_desc[desc_pool_id].freelist = tx_desc;
369 	soc->tx_desc[desc_pool_id].num_allocated--;
370 	soc->tx_desc[desc_pool_id].num_free++;
371 
372 
373 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
374 }
375 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
376 
377 #ifdef QCA_DP_TX_DESC_ID_CHECK
378 /**
379  * dp_tx_is_desc_id_valid() - check whether the given tx descriptor id is valid
380  *
381  * @soc: Handle to DP SoC structure
382  * @tx_desc_id: descriptor id (cookie) to validate
383  *
384  * Return: true if the id is valid, false otherwise
385  */
386 static inline bool
387 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
388 {
389 	uint8_t pool_id;
390 	uint16_t page_id, offset;
391 	struct dp_tx_desc_pool_s *pool;
392 
393 	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
394 			DP_TX_DESC_ID_POOL_OS;
395 	/* Pool ID is out of limit */
396 	if (pool_id > wlan_cfg_get_num_tx_desc_pool(
397 				soc->wlan_cfg_ctx)) {
398 		QDF_TRACE(QDF_MODULE_ID_DP,
399 			  QDF_TRACE_LEVEL_FATAL,
400 			  "%s:Tx Comp pool id %d not valid",
401 			  __func__,
402 			  pool_id);
403 		goto warn_exit;
404 	}
405 
406 	pool = &soc->tx_desc[pool_id];
407 	/* the pool is freed */
408 	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
409 		QDF_TRACE(QDF_MODULE_ID_DP,
410 			  QDF_TRACE_LEVEL_FATAL,
411 			  "%s:the pool %d has been freed",
412 			  __func__,
413 			  pool_id);
414 		goto warn_exit;
415 	}
416 
417 	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
418 				DP_TX_DESC_ID_PAGE_OS;
419 	/* the page id is out of limit */
420 	if (page_id >= pool->desc_pages.num_pages) {
421 		QDF_TRACE(QDF_MODULE_ID_DP,
422 			  QDF_TRACE_LEVEL_FATAL,
423 			  "%s:the page id %d invalid, pool id %d, num_page %d",
424 			  __func__,
425 			  page_id,
426 			  pool_id,
427 			  pool->desc_pages.num_pages);
428 		goto warn_exit;
429 	}
430 
431 	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
432 				DP_TX_DESC_ID_OFFSET_OS;
433 	/* the offset is out of limit */
434 	if (offset >= pool->desc_pages.num_element_per_page) {
435 		QDF_TRACE(QDF_MODULE_ID_DP,
436 			  QDF_TRACE_LEVEL_FATAL,
437 			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
438 			  __func__,
439 			  offset,
440 			  pool_id,
441 			  pool->desc_pages.num_element_per_page);
442 		goto warn_exit;
443 	}
444 
445 	return true;
446 
447 warn_exit:
448 	QDF_TRACE(QDF_MODULE_ID_DP,
449 		  QDF_TRACE_LEVEL_FATAL,
450 		  "%s:Tx desc id 0x%x not valid",
451 		  __func__,
452 		  tx_desc_id);
453 	qdf_assert_always(0);
454 	return false;
455 }
456 
457 #else
458 static inline bool
459 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
460 {
461 	return true;
462 }
463 #endif /* QCA_DP_TX_DESC_ID_CHECK */
464 
465 /**
466  * dp_tx_desc_find() - find dp tx descriptor from cookie components
467  * @soc: handle for the device sending the data
468  * @pool_id: descriptor pool id extracted from the descriptor ID
469  * @page_id: page id extracted from the descriptor ID
470  * @offset: offset within the page, extracted from the descriptor ID
471  *
472  * Return: the descriptor object identified by the given components
473  */
474 static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
475 		uint8_t pool_id, uint16_t page_id, uint16_t offset)
476 {
477 	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
478 
479 	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
480 		tx_desc_pool->elem_size * offset;
481 }
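
/*
 * Illustrative lookup (hypothetical completion-path snippet): the cookie
 * fields defined at the top of this file are the inputs to dp_tx_desc_find(),
 * e.g.
 *
 *	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *				DP_TX_DESC_ID_POOL_OS;
 *	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *				DP_TX_DESC_ID_PAGE_OS;
 *	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *				DP_TX_DESC_ID_OFFSET_OS;
 *	struct dp_tx_desc_s *desc = dp_tx_desc_find(soc, pool_id, page_id,
 *						     offset);
 */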
482 
483 /**
484  * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
485  * @soc: handle for the device sending the data
486  * @desc_pool_id: target pool id
487  *
488  * Return: ext descriptor element, or NULL if the pool is exhausted
489  */
490 static inline
491 struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
492 		uint8_t desc_pool_id)
493 {
494 	struct dp_tx_ext_desc_elem_s *c_elem;
495 
496 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
497 	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
498 		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
499 		return NULL;
500 	}
501 	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
502 	soc->tx_ext_desc[desc_pool_id].freelist =
503 		soc->tx_ext_desc[desc_pool_id].freelist->next;
504 	soc->tx_ext_desc[desc_pool_id].num_free--;
505 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
506 	return c_elem;
507 }
508 
509 /**
510  * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
511  * @soc: handle for the device sending the data
512  * @elem: ext descriptor to be released
513  * @desc_pool_id: target pool id
514  *
515  * Return: None
516  */
517 static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
518 	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
519 {
520 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
521 	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
522 	soc->tx_ext_desc[desc_pool_id].freelist = elem;
523 	soc->tx_ext_desc[desc_pool_id].num_free++;
524 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
525 	return;
526 }
527 
528 /**
529  * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
530  *                           attach them to the free list
531  * @soc: Handle to DP SoC structure
532  * @elem: head of the descriptor list to be freed
533  * @desc_pool_id: ID of the pool the descriptors belong to
534  * @num_free: number of descriptors to be freed
535  *
536  * Return: none
537  */
538 static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
539 		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
540 		uint8_t num_free)
541 {
542 	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
543 	uint8_t freed = num_free;
544 
545 	/* caller should always guarantee a list of at least num_free nodes */
546 	qdf_assert_always(elem);
547 
548 	head = elem;
549 	c_elem = head;
550 	tail = head;
551 	while (c_elem && freed) {
552 		tail = c_elem;
553 		c_elem = c_elem->next;
554 		freed--;
555 	}
556 
557 	/* caller should always guarantee a list of at least num_free nodes */
558 	qdf_assert_always(tail);
559 
560 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
561 	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
562 	soc->tx_ext_desc[desc_pool_id].freelist = head;
563 	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
564 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
565 
566 	return;
567 }
568 
569 #if defined(FEATURE_TSO)
570 /**
571  * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
572  * @soc: device soc instance
573  * @pool_id: pool id from which the TSO descriptor should be allocated
574  *
575  * Allocates a TSO segment element from the free list held in
576  * the soc
577  *
578  * Return: tso_seg, pointer to the allocated TSO segment, or NULL if none free
579  */
580 static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
581 		struct dp_soc *soc, uint8_t pool_id)
582 {
583 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
584 
585 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
586 	if (soc->tx_tso_desc[pool_id].freelist) {
587 		soc->tx_tso_desc[pool_id].num_free--;
588 		tso_seg = soc->tx_tso_desc[pool_id].freelist;
589 		soc->tx_tso_desc[pool_id].freelist =
590 			soc->tx_tso_desc[pool_id].freelist->next;
591 	}
592 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
593 
594 	return tso_seg;
595 }
596 
597 /**
598  * dp_tx_tso_desc_free() - function to free a TSO segment
599  * @soc: device soc instance
600  * @pool_id: pool id to which the TSO descriptor is returned
601  * @tso_seg: tso segment memory pointer
602  *
603  * Returns a TSO segment element to the free list held in the
604  * soc
605  *
606  * Return: none
607  */
608 static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
609 		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
610 {
611 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
612 	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
613 	soc->tx_tso_desc[pool_id].freelist = tso_seg;
614 	soc->tx_tso_desc[pool_id].num_free++;
615 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
616 }
617 
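/**
 * dp_tso_num_seg_alloc() - allocate a TSO num-segments element from the pool
 * @soc: device soc instance
 * @pool_id: pool id from which the element should be allocated
 *
 * Return: pointer to the allocated element, or NULL if the freelist is empty
 */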
618 static inline
619 struct qdf_tso_num_seg_elem_t  *dp_tso_num_seg_alloc(struct dp_soc *soc,
620 		uint8_t pool_id)
621 {
622 	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
623 
624 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
625 	if (soc->tx_tso_num_seg[pool_id].freelist) {
626 		soc->tx_tso_num_seg[pool_id].num_free--;
627 		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
628 		soc->tx_tso_num_seg[pool_id].freelist =
629 			soc->tx_tso_num_seg[pool_id].freelist->next;
630 	}
631 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
632 
633 	return tso_num_seg;
634 }
635 
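/**
 * dp_tso_num_seg_free() - return a TSO num-segments element to the pool
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */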
636 static inline
637 void dp_tso_num_seg_free(struct dp_soc *soc,
638 		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
639 {
640 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
641 	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
642 	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
643 	soc->tx_tso_num_seg[pool_id].num_free++;
644 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
645 }
646 #endif
647 
648 /*
649  * dp_tx_me_alloc_buf() - Allocate a buffer from the ME buffer pool
650  * @pdev: DP_PDEV handle for datapath
651  *
652  * Return: dp_tx_me_buf_t pointer on success, NULL if the pool is empty
653  */
654 static inline struct dp_tx_me_buf_t*
655 dp_tx_me_alloc_buf(struct dp_pdev *pdev)
656 {
657 	struct dp_tx_me_buf_t *buf = NULL;
658 	qdf_spin_lock_bh(&pdev->tx_mutex);
659 	if (pdev->me_buf.freelist) {
660 		buf = pdev->me_buf.freelist;
661 		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
662 		pdev->me_buf.buf_in_use++;
663 	} else {
664 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
665 				"Error allocating memory in pool");
666 		qdf_spin_unlock_bh(&pdev->tx_mutex);
667 		return NULL;
668 	}
669 	qdf_spin_unlock_bh(&pdev->tx_mutex);
670 	return buf;
671 }
672 
673 /*
674  * dp_tx_me_free_buf() - Free an ME buffer and return it to the pool
675  * @pdev: DP_PDEV handle for datapath
676  * @buf: allocated ME buffer to be freed
677  *
678  * Return: void
679  */
680 static inline void
681 dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
682 {
683 	qdf_spin_lock_bh(&pdev->tx_mutex);
684 	buf->next = pdev->me_buf.freelist;
685 	pdev->me_buf.freelist = buf;
686 	pdev->me_buf.buf_in_use--;
687 	qdf_spin_unlock_bh(&pdev->tx_mutex);
688 }
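
/*
 * Usage sketch (illustrative, hypothetical caller): ME buffers are taken from
 * and returned to pdev->me_buf under pdev->tx_mutex by the helpers above, e.g.
 *
 *	struct dp_tx_me_buf_t *mc_uc_buf = dp_tx_me_alloc_buf(pdev);
 *
 *	if (!mc_uc_buf)
 *		return;			// pool exhausted
 *	// ... populate the buffer ...
 *	dp_tx_me_free_buf(pdev, mc_uc_buf);
 */
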
689 #endif /* DP_TX_DESC_H */
690