/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/*
 * The Tx descriptor ID ("cookie") is a 21-bit field, of which the low
 * 17 bits are used by the masks below:
 *  2 bits pool id    0 ~ 3
 * 10 bits page id    0 ~ 1023
 *  5 bits offset id  0 ~ 31 (desc size = 128, descs per page = 4096/128 = 32)
 */
/* TODO: is a Ring ID needed in the cookie? */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
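
/*
 * Worked example (illustrative only): an ID is composed as
 * (pool_id << 15) | (page_id << 5) | offset, so the hypothetical
 * ID 0x0132A5 decodes as:
 *   pool_id = (0x0132A5 & DP_TX_DESC_ID_POOL_MASK) >> DP_TX_DESC_ID_POOL_OS
 *           = 2
 *   page_id = (0x0132A5 & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS
 *           = 0x195 (405)
 *   offset  = (0x0132A5 & DP_TX_DESC_ID_OFFSET_MASK) >> DP_TX_DESC_ID_OFFSET_OS
 *           = 5
 */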

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *soc);
void dp_tx_flow_control_deinit(struct dp_soc *soc);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
				uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}
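
/*
 * Usage sketch (illustrative): both helpers assume the caller already
 * holds pool->flow_pool_lock and has verified the pool state, e.g.:
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc)
 *		tx_desc = dp_tx_get_desc_flow_pool(pool);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */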

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: id of the flow pool to allocate from
 *
 * Return: pointer to the allocated tx descriptor, or NULL if the pool is
 *	   paused, invalid or out of descriptors
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: id of the flow pool the descriptor belongs to
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!\n",
				 __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!\n",
				 __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
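
/*
 * Caller sketch (illustrative, not part of this header): the tx path is
 * expected to treat a NULL return from dp_tx_desc_alloc() as "pool paused
 * or exhausted" and drop the frame, and to return the descriptor once the
 * hardware reports completion:
 *
 *	struct dp_tx_desc_s *tx_desc = dp_tx_desc_alloc(soc, pool_id);
 *
 *	if (!tx_desc)
 *		return QDF_STATUS_E_RESOURCES; // netif queues already paused
 *	// ... program the descriptor and enqueue the frame ...
 *	// later, on tx completion:
 *	dp_tx_desc_free(soc, tx_desc, tx_desc->pool_id);
 */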
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: id of the descriptor pool to allocate from
 *
 * Return: pointer to the allocated tx descriptor, or NULL if the pool is
 *	   exhausted
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	soc->tx_desc[desc_pool_id].freelist =
		soc->tx_desc[desc_pool_id].freelist->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *                               from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: id of the descriptor pool to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocate multiple tx descriptors and link them into a NULL-terminated
 * chain.
 *
 * Return: h_desc, pointer to the first descriptor in the chain
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, soc->tx_desc[desc_pool_id].num_free,
			num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	/* walk the chain, marking each descriptor allocated; c_desc ends up
	 * pointing at the last descriptor handed out
	 */
	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}
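
/*
 * Usage sketch (illustrative): the returned chain is linked through
 * tx_desc->next and is NULL-terminated, so a caller can consume exactly
 * num_requested descriptors as:
 *
 *	struct dp_tx_desc_s *desc;
 *
 *	desc = dp_tx_desc_alloc_multiple(soc, desc_pool_id, num_requested);
 *	if (!desc)
 *		return QDF_STATUS_E_RESOURCES;
 *	while (desc) {
 *		// setup() is a placeholder for caller-specific programming
 *		setup(desc);
 *		desc = desc->next;
 *	}
 */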

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: id of the descriptor pool the descriptor belongs to
 *
 * Return: none
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
 * @soc: handle for the device sending the data
 * @pool_id: id of the pool the descriptor was allocated from
 * @page_id: index of the descriptor page within the pool
 * @offset: index of the descriptor within its page
 *
 * Use the fields decoded from a tx descriptor ID (cookie) to find the
 * corresponding descriptor object.
 *
 * Return: the descriptor object with the specified indices
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
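
/*
 * Illustrative lookup from a completion cookie, combining the ID masks
 * defined above (dp_tx_desc_find_by_id is a hypothetical helper, not part
 * of the driver API):
 *
 *	static inline struct dp_tx_desc_s *
 *	dp_tx_desc_find_by_id(struct dp_soc *soc, uint32_t tx_desc_id)
 *	{
 *		uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *				  DP_TX_DESC_ID_POOL_OS;
 *		uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *				   DP_TX_DESC_ID_PAGE_OS;
 *		uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *				  DP_TX_DESC_ID_OFFSET_OS;
 *
 *		return dp_tx_desc_find(soc, pool_id, page_id, offset);
 *	}
 */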

/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: ext descriptor pointer, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                                  attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the ext descriptor list to be freed
 * @desc_pool_id: id of the pool the descriptors are returned to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which the TSO segment should be allocated
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer, or NULL if the pool is empty
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to which the TSO segment is returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

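/**
 * dp_tso_num_seg_alloc() - function to allocate a TSO num seg element
 * @soc: device soc instance
 * @pool_id: pool id from which the element should be allocated
 *
 * Return: tso num seg element pointer, or NULL if the pool is empty
 */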
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

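/**
 * dp_tso_num_seg_free() - function to free a TSO num seg element
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: tso num seg element pointer
 *
 * Return: none
 */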
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif

/**
 * dp_tx_me_alloc_buf() - Allocate a descriptor from the ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: pointer to the allocated ME buffer (dp_tx_me_buf_t), or NULL if
 *	   the pool is exhausted
 */
static inline struct dp_tx_me_buf_t *
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/**
 * dp_tx_me_free_buf() - Free an ME descriptor and return it to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: allocated ME buffer
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
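
/*
 * Usage sketch (illustrative): multicast-enhancement paths pair these
 * helpers around each per-destination copy of a frame; the exact setup
 * between the calls is caller-specific:
 *
 *	struct dp_tx_me_buf_t *mc_uc_buf = dp_tx_me_alloc_buf(pdev);
 *
 *	if (!mc_uc_buf)
 *		return; // pool exhausted, drop this copy
 *	// ... use the buffer while the copy is in flight ...
 *	dp_tx_me_free_buf(pdev, mc_uc_buf);
 */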
#endif /* DP_TX_DESC_H */