/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/*
 * Tx descriptor ID (cookie) layout: the cookie field is 21 bits wide,
 * of which the masks below use the low 17 bits:
 *  2 bits  [16:15] pool id  0 ~ 3
 * 10 bits  [14:5]  page id  0 ~ 1023
 *  5 bits  [4:0]   offset   0 ~ 31 (desc size = 128,
 *                           num descs per 4K page = 4096/128 = 32)
 */
/* TODO: does the ring ID also need to be encoded in the cookie? */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
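
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * using the mask/shift pairs above, a descriptor ID is composed from its
 * pool/page/offset fields as below. The helper exists only to document
 * the encoding.
 */
static inline uint32_t dp_tx_desc_id_example(uint8_t pool_id,
		uint16_t page_id, uint16_t offset)
{
	/* each field is shifted into place and bounded by its mask */
	return (((uint32_t)pool_id << DP_TX_DESC_ID_POOL_OS) &
			DP_TX_DESC_ID_POOL_MASK) |
	       (((uint32_t)page_id << DP_TX_DESC_ID_PAGE_OS) &
			DP_TX_DESC_ID_PAGE_MASK) |
	       (((uint32_t)offset << DP_TX_DESC_ID_OFFSET_OS) &
			DP_TX_DESC_ID_OFFSET_MASK);
}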

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/*
 * With flow control V2 each flow pool carries its own flow_pool_lock,
 * so the generic descriptor-pool lock macros compile to no-ops.
 */
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *soc);
void dp_tx_flow_control_deinit(struct dp_soc *soc);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
				uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get a descriptor from the flow pool freelist
 * @pool: flow pool
 *
 * Caller must hold pool->flow_pool_lock and verify that avail_desc is
 * non-zero before calling.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put a descriptor back on the flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller must hold pool->flow_pool_lock.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}
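
/*
 * Usage sketch (illustrative only, hypothetical helper): a minimal correct
 * caller of the freelist helpers above takes flow_pool_lock and checks
 * availability itself. dp_tx_desc_alloc() below is the real caller and
 * additionally handles the pause thresholds.
 */
static inline struct dp_tx_desc_s *dp_tx_try_get_desc_example(
		struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (pool->avail_desc)
		tx_desc = dp_tx_get_desc_flow_pool(pool);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return tx_desc;
}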

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool to allocate from
 *
 * Return: allocated descriptor, or NULL if the pool is exhausted
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: pool the descriptor belongs to
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			pool->status = FLOW_POOL_INACTIVE;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!\n",
				 __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is in INACTIVE state!!\n",
				 __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool to allocate from
 *
 * Return: allocated descriptor, or NULL if the pool is exhausted
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	soc->tx_desc[desc_pool_id].freelist =
		soc->tx_desc[desc_pool_id].freelist->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;

	DP_STATS_INC(soc, tx.desc_in_use, 1);
	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *                               from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool to allocate from
 * @num_requested: number of descriptors requested
 *
 * Allocate multiple tx descriptors and link them into a NULL-terminated
 * chain.
 *
 * Return: h_desc, pointer to the first descriptor in the chain
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, soc->tx_desc[desc_pool_id].num_free,
			num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	/* mark every descriptor in the batch as allocated */
	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: pool the descriptor belongs to
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	DP_STATS_DEC(soc, tx.desc_in_use, 1);
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
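
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * a chain returned by dp_tx_desc_alloc_multiple() is NULL-terminated, and
 * each node is returned to the pool individually via dp_tx_desc_free().
 */
static inline void dp_tx_desc_free_chain_example(struct dp_soc *soc,
		struct dp_tx_desc_s *chain, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *next;

	while (chain) {
		next = chain->next;	/* dp_tx_desc_free() rewrites ->next */
		dp_tx_desc_free(soc, chain, desc_pool_id);
		chain = next;
	}
}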
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_desc_find() - find a tx descriptor from its cookie fields
 * @soc: handle for the device sending the data
 * @pool_id: pool id decoded from the descriptor ID
 * @page_id: page id decoded from the descriptor ID
 * @offset: offset within the page, decoded from the descriptor ID
 *
 * Use the fields of a tx descriptor ID (cookie) to locate the
 * corresponding descriptor object.
 *
 * Return: the descriptor that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
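
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * recover a descriptor from a full descriptor ID by splitting the cookie
 * into its fields with the masks defined above and passing the parts to
 * dp_tx_desc_find().
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find_by_id_example(
		struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}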

/**
 * dp_tx_ext_desc_alloc() - Get a tx extension descriptor from the pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: extension descriptor, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release a tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: extension descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                                  attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor list to be freed
 * @desc_pool_id: target pool id
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head = elem, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(head);

	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool to allocate the TSO descriptor from
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, TSO segment memory pointer, or NULL if the pool is empty
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool the TSO descriptor belongs to
 * @tso_seg: TSO segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tso_num_seg_alloc() - allocate a TSO num-segment element
 * @soc: device soc instance
 * @pool_id: pool to allocate the element from
 *
 * Return: tso_num_seg element, or NULL if the pool is empty
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - return a TSO num-segment element to its pool
 * @soc: device soc instance
 * @pool_id: pool the element belongs to
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
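
/*
 * Usage sketch (illustrative only, hypothetical helper): a TSO send path
 * typically needs one num-seg element for the packet plus seg elements for
 * its segments, and must release whatever it took if any allocation fails.
 * QDF_STATUS_E_NOMEM is the generic QDF out-of-memory code.
 */
static inline QDF_STATUS dp_tx_tso_pair_alloc_example(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t **num_seg,
		struct qdf_tso_seg_elem_t **seg)
{
	*num_seg = dp_tso_num_seg_alloc(soc, pool_id);
	if (!*num_seg)
		return QDF_STATUS_E_NOMEM;

	*seg = dp_tx_tso_desc_alloc(soc, pool_id);
	if (!*seg) {
		/* unwind the partial allocation */
		dp_tso_num_seg_free(soc, pool_id, *num_seg);
		*num_seg = NULL;
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}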
#endif

/**
 * dp_tx_me_alloc_buf() - Allocate a ME buffer from the pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: allocated ME buffer, or NULL if the pool is empty
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/**
 * dp_tx_me_free_buf() - Free a ME buffer and return it to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: allocated ME buffer
 *
 * Return: none
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
#endif /* DP_TX_DESC_H */