/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/*
 * Tx descriptor ID (cookie) layout, carried in the 21-bit hardware
 * cookie field; the fields below use 17 of those bits:
 *  2 bits pool id    0 ~ 3
 * 10 bits page id    0 ~ 1023
 *  5 bits offset     0 ~ 31 (desc size = 128, descs per page = 4096/128 = 32)
 */
/* TODO: is a ring ID field also needed in the cookie? */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
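
/*
 * Illustrative only: a minimal sketch (not part of the driver API) of
 * how the fields above would be packed into a descriptor ID using the
 * masks and offsets; the helper name is hypothetical.
 */
static inline uint32_t dp_tx_desc_id_pack_sketch(uint8_t pool_id,
		uint16_t page_id, uint16_t offset)
{
	return ((pool_id << DP_TX_DESC_ID_POOL_OS) &
			DP_TX_DESC_ID_POOL_MASK) |
	       ((page_id << DP_TX_DESC_ID_PAGE_OS) &
			DP_TX_DESC_ID_PAGE_MASK) |
	       ((offset << DP_TX_DESC_ID_OFFSET_OS) &
			DP_TX_DESC_ID_OFFSET_MASK);
}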

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *soc);
void dp_tx_flow_control_deinit(struct dp_soc *soc);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
				uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 *
 * Return: tx descriptor on success, NULL if the pool is exhausted
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: pool id to return the descriptor to
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			pool->status = FLOW_POOL_INACTIVE;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!\n",
				 __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!\n",
				 __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 *
 * Return: tx descriptor on success, NULL if the pool is exhausted
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	soc->tx_desc[desc_pool_id].freelist =
		soc->tx_desc[desc_pool_id].freelist->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *                               from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocates multiple tx descriptors and links them into a list.
 *
 * Return: pointer to the first descriptor of the linked batch,
 *	   or NULL if the pool cannot satisfy the request
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, soc->tx_desc[desc_pool_id].num_free,
			num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* mark the last descriptor of the batch as well */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: pool id to return the descriptor to
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
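
/*
 * Illustrative only: a minimal sketch of the descriptor lifecycle shared
 * by both flow-control variants above. The function name is hypothetical
 * and the "hand to hardware" step is elided.
 */
static inline QDF_STATUS dp_tx_desc_usage_sketch(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (!tx_desc)
		return QDF_STATUS_E_RESOURCES; /* pool exhausted (or paused) */

	/* ... populate tx_desc and enqueue it to the hardware ring ... */

	/* on tx completion the descriptor goes back to the freelist */
	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return QDF_STATUS_SUCCESS;
}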

/**
 * dp_tx_desc_find() - find dp tx descriptor from descriptor ID (cookie)
 * @soc: handle for the device sending the data
 * @pool_id: pool id extracted from the descriptor ID
 * @page_id: page id extracted from the descriptor ID
 * @offset: offset within the page, extracted from the descriptor ID
 *
 * Use the fields of a tx descriptor ID to find the corresponding
 * descriptor object.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
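
/*
 * Illustrative only: a hedged sketch (not part of the driver API) of
 * unpacking a completion cookie with the ID masks defined above and
 * resolving it through dp_tx_desc_find(); the helper name is hypothetical.
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find_by_id_sketch(
		struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}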

/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: ext descriptor pointer on success, NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to release
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors
 *                                  and attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor list to be freed
 * @desc_pool_id: pool id to return the descriptors to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which to allocate the tso descriptor
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer, or NULL if the pool is empty
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to return the tso descriptor to
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tso_num_seg_alloc() - function to allocate a TSO num_seg element
 * @soc: device soc instance
 * @pool_id: pool id from which to allocate the element
 *
 * Return: tso_num_seg, num_seg element memory pointer, or NULL if the
 *	   pool is exhausted
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - function to free a TSO num_seg element
 * @soc: device soc instance
 * @pool_id: pool id to return the element to
 * @tso_num_seg: num_seg element memory pointer
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
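
/*
 * Illustrative only: a hedged sketch of how a TSO frame might reserve
 * its per-packet num_seg element plus one segment element, then return
 * both on completion. The function name is hypothetical and the
 * segmentation step is elided.
 */
static inline QDF_STATUS dp_tx_tso_pool_usage_sketch(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *num_seg;
	struct qdf_tso_seg_elem_t *seg;

	num_seg = dp_tso_num_seg_alloc(soc, pool_id);
	if (!num_seg)
		return QDF_STATUS_E_RESOURCES;

	seg = dp_tx_tso_desc_alloc(soc, pool_id);
	if (!seg) {
		dp_tso_num_seg_free(soc, pool_id, num_seg);
		return QDF_STATUS_E_RESOURCES;
	}

	/* ... segmentation would populate the segment element here ... */

	dp_tx_tso_desc_free(soc, pool_id, seg);
	dp_tso_num_seg_free(soc, pool_id, num_seg);
	return QDF_STATUS_SUCCESS;
}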
#endif /* FEATURE_TSO */

/**
 * dp_tx_me_alloc_buf() - Allocate a descriptor from the ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: dp_tx_me_buf_t (buf) on success, NULL if the pool is empty
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/**
 * dp_tx_me_free_buf() - Free ME descriptor and add it to pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME BUF
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
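
/*
 * Illustrative only: a hedged sketch of the dp_tx_me_alloc_buf() /
 * dp_tx_me_free_buf() pairing used when cloning a frame per multicast
 * receiver. The function name is hypothetical and the clone step is
 * elided.
 */
static inline QDF_STATUS dp_tx_me_buf_usage_sketch(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *me_buf = dp_tx_me_alloc_buf(pdev);

	if (!me_buf)
		return QDF_STATUS_E_RESOURCES;

	/* ... use the buffer for the clone's destination address ... */

	dp_tx_me_free_buf(pdev, me_buf);
	return QDF_STATUS_SUCCESS;
}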
#endif /* DP_TX_DESC_H */