xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h (revision 6ecd284e5a94a1c96e26d571dd47419ac305990d)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef DP_TX_DESC_H
20 #define DP_TX_DESC_H
21 
22 #include "dp_types.h"
23 #include "dp_tx.h"
24 #include "dp_internal.h"
25 
/*
 * 21 bits cookie
 * 2 bits pool id   (0 ~ 3)
 * 10 bits page id  (0 ~ 1023)
 * 5 bits offset id (0 ~ 31; desc size = 128, num descs per page = 4096/128 = 32)
 */
/* TODO: is a ring ID also needed in the descriptor ID? */
33 #define DP_TX_DESC_ID_POOL_MASK    0x018000
34 #define DP_TX_DESC_ID_POOL_OS      15
35 #define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
36 #define DP_TX_DESC_ID_PAGE_OS      5
37 #define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
38 #define DP_TX_DESC_ID_OFFSET_OS    0
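
/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * how the pool/page/offset fields could be packed into a descriptor ID
 * using the masks and shifts defined above.
 */
static inline uint32_t dp_tx_desc_id_pack_example(uint8_t pool_id,
						  uint16_t page_id,
						  uint16_t offset)
{
	return ((pool_id << DP_TX_DESC_ID_POOL_OS) & DP_TX_DESC_ID_POOL_MASK) |
	       ((page_id << DP_TX_DESC_ID_PAGE_OS) & DP_TX_DESC_ID_PAGE_MASK) |
	       ((offset << DP_TX_DESC_ID_OFFSET_OS) &
		DP_TX_DESC_ID_OFFSET_MASK);
}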
39 
40 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
41 #define TX_DESC_LOCK_CREATE(lock)
42 #define TX_DESC_LOCK_DESTROY(lock)
43 #define TX_DESC_LOCK_LOCK(lock)
44 #define TX_DESC_LOCK_UNLOCK(lock)
45 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
46 #define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
47 #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
48 #define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
49 #define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
50 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
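
/*
 * Illustrative usage of the lock macros above (hypothetical caller): with
 * QCA_LL_TX_FLOW_CONTROL_V2 these compile to nothing, since the flow-control
 * variants of the alloc/free routines below take pool->flow_pool_lock
 * themselves.
 *
 *	TX_DESC_LOCK_CREATE(&soc->tx_desc[pool_id].lock);
 *	TX_DESC_LOCK_LOCK(&soc->tx_desc[pool_id].lock);
 *	...manipulate soc->tx_desc[pool_id].freelist...
 *	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[pool_id].lock);
 *	TX_DESC_LOCK_DESTROY(&soc->tx_desc[pool_id].lock);
 */
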
51 #define MAX_POOL_BUFF_COUNT 10000
52 
53 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
54 		uint16_t num_elem);
55 QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
56 QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
57 		uint16_t num_elem);
58 QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
59 QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
60 		uint16_t num_elem);
61 void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
62 QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
63 		uint16_t num_elem);
64 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
65 
66 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *handle);
void dp_tx_flow_control_deinit(struct dp_soc *handle);
69 
70 QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
71 	tx_pause_callback pause_cb);
72 QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
73 				uint8_t vdev_id);
74 void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
75 			   uint8_t vdev_id);
76 void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
77 struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
78 	uint8_t flow_pool_id, uint16_t flow_pool_size);
79 
80 QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
81 	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
82 void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
83 	uint8_t flow_type, uint8_t flow_pool_id);
84 
85 /**
86  * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
87  * @pool: flow pool
88  *
89  * Caller needs to take lock and do sanity checks.
90  *
91  * Return: tx descriptor
92  */
93 static inline
94 struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
95 {
96 	struct dp_tx_desc_s *tx_desc = pool->freelist;
97 
98 	pool->freelist = pool->freelist->next;
99 	pool->avail_desc--;
100 	return tx_desc;
101 }
102 
103 /**
 * dp_tx_put_desc_flow_pool() - put descriptor back on the flow pool freelist
105  * @pool: flow pool
106  * @tx_desc: tx descriptor
107  *
108  * Caller needs to take lock and do sanity checks.
109  *
110  * Return: none
111  */
112 static inline
113 void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
114 			struct dp_tx_desc_s *tx_desc)
115 {
116 	tx_desc->next = pool->freelist;
117 	pool->freelist = tx_desc;
118 	pool->avail_desc++;
119 }
120 
121 
/**
 * dp_tx_desc_alloc() - Allocate a software Tx descriptor from a given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool to allocate from
 *
 * Return: pointer to the allocated descriptor, or NULL if the pool is empty
 */
130 static inline struct dp_tx_desc_s *
131 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
132 {
133 	struct dp_tx_desc_s *tx_desc = NULL;
134 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
135 
136 	if (pool) {
137 		qdf_spin_lock_bh(&pool->flow_pool_lock);
138 		if (pool->avail_desc) {
139 			tx_desc = dp_tx_get_desc_flow_pool(pool);
140 			tx_desc->pool_id = desc_pool_id;
141 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
142 			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
143 				pool->status = FLOW_POOL_ACTIVE_PAUSED;
144 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
145 				/* pause network queues */
146 				soc->pause_cb(desc_pool_id,
147 					       WLAN_STOP_ALL_NETIF_QUEUE,
148 					       WLAN_DATA_FLOW_CONTROL);
149 			} else {
150 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
151 			}
152 		} else {
153 			pool->pkt_drop_no_desc++;
154 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
155 		}
156 	} else {
157 		soc->pool_stats.pkt_drop_no_pool++;
158 	}
159 
160 
161 	return tx_desc;
162 }
163 
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to be freed
 * @desc_pool_id: ID of the descriptor pool the descriptor belongs to
 *
 * Return: None
 */
173 static inline void
174 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
175 		uint8_t desc_pool_id)
176 {
177 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
178 
179 	qdf_spin_lock_bh(&pool->flow_pool_lock);
180 	dp_tx_put_desc_flow_pool(pool, tx_desc);
181 	switch (pool->status) {
182 	case FLOW_POOL_ACTIVE_PAUSED:
183 		if (pool->avail_desc > pool->start_th) {
184 			soc->pause_cb(pool->flow_pool_id,
185 				       WLAN_WAKE_ALL_NETIF_QUEUE,
186 				       WLAN_DATA_FLOW_CONTROL);
187 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
188 		}
189 		break;
190 	case FLOW_POOL_INVALID:
191 		if (pool->avail_desc == pool->pool_size) {
192 			dp_tx_desc_pool_free(soc, desc_pool_id);
193 			pool->status = FLOW_POOL_INACTIVE;
194 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
195 			qdf_print("%s %d pool is freed!!\n",
196 				 __func__, __LINE__);
197 			return;
198 		}
199 		break;
200 
201 	case FLOW_POOL_ACTIVE_UNPAUSED:
202 		break;
203 	default:
204 		qdf_print("%s %d pool is INACTIVE State!!\n",
205 				 __func__, __LINE__);
206 		break;
	}
208 
209 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
210 
211 }
212 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
213 
214 static inline void dp_tx_flow_control_init(struct dp_soc *handle)
215 {
216 }
217 
218 static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
219 {
220 }
221 
222 static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
223 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
224 	uint16_t flow_pool_size)
225 {
226 	return QDF_STATUS_SUCCESS;
227 }
228 
229 static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
230 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
231 {
232 }
233 
/**
 * dp_tx_desc_alloc() - Allocate a software Tx descriptor from a given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool to allocate from
 *
 * Return: pointer to the allocated descriptor, or NULL if the pool is empty
 */
242 static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
243 						uint8_t desc_pool_id)
244 {
245 	struct dp_tx_desc_s *tx_desc = NULL;
246 
247 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
248 
249 	tx_desc = soc->tx_desc[desc_pool_id].freelist;
250 
251 	/* Pool is exhausted */
252 	if (!tx_desc) {
253 		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
254 		return NULL;
255 	}
256 
257 	soc->tx_desc[desc_pool_id].freelist =
258 		soc->tx_desc[desc_pool_id].freelist->next;
259 	soc->tx_desc[desc_pool_id].num_allocated++;
260 	soc->tx_desc[desc_pool_id].num_free--;
261 
262 	DP_STATS_INC(soc, tx.desc_in_use, 1);
263 	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
264 
265 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
266 
267 	return tx_desc;
268 }
269 
270 /**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *                               from a given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool to allocate from
 * @num_requested: number of descriptors requested
 *
 * Allocates multiple tx descriptors and links them into a single list.
 *
 * Return: h_desc, pointer to the first descriptor of the batch, or NULL if
 *	   the pool cannot satisfy the request
 */
281 static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
282 		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
283 {
284 	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
285 	uint8_t count;
286 
287 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
288 
289 	if ((num_requested == 0) ||
290 			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
291 		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
292 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
293 			"%s, No Free Desc: Available(%d) num_requested(%d)",
294 			__func__, soc->tx_desc[desc_pool_id].num_free,
295 			num_requested);
296 		return NULL;
297 	}
298 
299 	h_desc = soc->tx_desc[desc_pool_id].freelist;
300 
	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* mark the last descriptor of the batch and detach it from the list */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;
313 
314 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
315 	return h_desc;
316 }
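
/*
 * Minimal usage sketch (hypothetical caller): the batch returned by
 * dp_tx_desc_alloc_multiple() is a NULL-terminated singly linked list that
 * the caller walks via the ->next pointers, returning any unused entries
 * with dp_tx_desc_free().
 *
 *	struct dp_tx_desc_s *batch, *next;
 *
 *	batch = dp_tx_desc_alloc_multiple(soc, pool_id, num_frames);
 *	while (batch) {
 *		next = batch->next;
 *		...use batch for one frame, or return it with
 *		   dp_tx_desc_free(soc, batch, pool_id)...
 *		batch = next;
 *	}
 */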
317 
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to be freed
 * @desc_pool_id: ID of the descriptor pool the descriptor belongs to
 *
 * Return: None
 */
325 static inline void
326 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
327 		uint8_t desc_pool_id)
328 {
329 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
330 
331 	tx_desc->flags = 0;
332 	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
333 	soc->tx_desc[desc_pool_id].freelist = tx_desc;
334 	DP_STATS_DEC(soc, tx.desc_in_use, 1);
335 	soc->tx_desc[desc_pool_id].num_allocated--;
336 	soc->tx_desc[desc_pool_id].num_free++;
337 
338 
339 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
340 }
341 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
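
/*
 * Minimal usage sketch (hypothetical helper, not part of the driver API):
 * a TX path pairs dp_tx_desc_alloc() with dp_tx_desc_free(); with flow
 * control V2 the pool thresholds drive the netif pause/wake callbacks seen
 * above.
 */
static inline QDF_STATUS dp_tx_desc_usage_example(struct dp_soc *soc,
						  uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (!tx_desc)
		return QDF_STATUS_E_RESOURCES;

	/* ...program the descriptor and hand the frame to hardware... */

	/* on TX completion the descriptor is returned to the pool */
	dp_tx_desc_free(soc, tx_desc, desc_pool_id);

	return QDF_STATUS_SUCCESS;
}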
342 
/**
 * dp_tx_desc_find() - find dp tx descriptor from cookie fields
 * @soc: handle for the device sending the data
 * @pool_id: pool id extracted from the descriptor ID
 * @page_id: page id extracted from the descriptor ID
 * @offset: offset within the page, extracted from the descriptor ID
 *
 * Use the fields of a tx descriptor ID to locate the descriptor object.
 *
 * Return: the descriptor object that has the specified ID
 */
352 static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
353 		uint8_t pool_id, uint16_t page_id, uint16_t offset)
354 {
355 	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
356 
357 	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
358 		tx_desc_pool->elem_size * offset;
359 }
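
/*
 * Illustrative sketch (hypothetical helper): the cookie masks defined at the
 * top of this file can be used to derive the dp_tx_desc_find() arguments
 * from a descriptor ID reported in a TX completion.
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find_by_id_example(
		struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}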
360 
361 /**
362  * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
363  * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: pointer to the allocated extension descriptor, or NULL if none free
367  */
368 static inline
369 struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
370 		uint8_t desc_pool_id)
371 {
372 	struct dp_tx_ext_desc_elem_s *c_elem;
373 
374 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
375 	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
376 		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
377 		return NULL;
378 	}
379 	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
380 	soc->tx_ext_desc[desc_pool_id].freelist =
381 		soc->tx_ext_desc[desc_pool_id].freelist->next;
382 	soc->tx_ext_desc[desc_pool_id].num_free--;
383 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
384 	return c_elem;
385 }
386 
387 /**
388  * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
389  * @soc: handle for the device sending the data
 * @elem: extension descriptor to be released
 * @desc_pool_id: target pool id
392  *
393  * Return: None
394  */
395 static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
396 	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
397 {
398 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
399 	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
400 	soc->tx_ext_desc[desc_pool_id].freelist = elem;
401 	soc->tx_ext_desc[desc_pool_id].num_free++;
402 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
403 	return;
404 }
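
/*
 * Minimal usage sketch (hypothetical caller): a TX extension descriptor is
 * pulled from the pool for a single frame and returned on completion.
 *
 *	struct dp_tx_ext_desc_elem_s *ext_desc;
 *
 *	ext_desc = dp_tx_ext_desc_alloc(soc, pool_id);
 *	if (!ext_desc)
 *		return QDF_STATUS_E_RESOURCES;
 *	...fill the extension TLVs referenced by the frame...
 *	dp_tx_ext_desc_free(soc, ext_desc, pool_id);
 */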
405 
406 /**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                                  attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the list of extension descriptors to be freed
 * @desc_pool_id: pool id the descriptors belong to
 * @num_free: number of descriptors to be freed
413  *
414  * Return: none
415  */
416 static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
417 		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
418 		uint8_t num_free)
419 {
420 	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
421 	uint8_t freed = num_free;
422 
	head = elem;
	c_elem = head;
	tail = head;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(head);
429 	while (c_elem && freed) {
430 		tail = c_elem;
431 		c_elem = c_elem->next;
432 		freed--;
433 	}
434 
	/* caller should always guarantee a list of at least num_free nodes */
436 	qdf_assert_always(tail);
437 
438 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
439 	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
440 	soc->tx_ext_desc[desc_pool_id].freelist = head;
441 	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
442 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
443 
444 	return;
445 }
446 
447 #if defined(FEATURE_TSO)
448 /**
449  * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
450  * @soc: device soc instance
 * @pool_id: pool id from which the TSO segment should be allocated
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, pointer to the TSO segment, or NULL if none are free
457  */
458 static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
459 		struct dp_soc *soc, uint8_t pool_id)
460 {
461 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
462 
463 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
464 	if (soc->tx_tso_desc[pool_id].freelist) {
465 		soc->tx_tso_desc[pool_id].num_free--;
466 		tso_seg = soc->tx_tso_desc[pool_id].freelist;
467 		soc->tx_tso_desc[pool_id].freelist =
468 			soc->tx_tso_desc[pool_id].freelist->next;
469 	}
470 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
471 
472 	return tso_seg;
473 }
474 
475 /**
476  * dp_tx_tso_desc_free() - function to free a TSO segment
477  * @soc: device soc instance
 * @pool_id: pool id to which the TSO segment is returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the
 * soc
483  *
484  * Return: none
485  */
486 static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
487 		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
488 {
489 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
490 	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
491 	soc->tx_tso_desc[pool_id].freelist = tso_seg;
492 	soc->tx_tso_desc[pool_id].num_free++;
493 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
494 }
495 
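/**
 * dp_tso_num_seg_alloc() - function to allocate a TSO num-seg element
 * @soc: device soc instance
 * @pool_id: pool id from which the element should be allocated
 *
 * Return: pointer to the allocated TSO num-seg element, or NULL if none free
 */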
496 static inline
497 struct qdf_tso_num_seg_elem_t  *dp_tso_num_seg_alloc(struct dp_soc *soc,
498 		uint8_t pool_id)
499 {
500 	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
501 
502 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
503 	if (soc->tx_tso_num_seg[pool_id].freelist) {
504 		soc->tx_tso_num_seg[pool_id].num_free--;
505 		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
506 		soc->tx_tso_num_seg[pool_id].freelist =
507 			soc->tx_tso_num_seg[pool_id].freelist->next;
508 	}
509 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
510 
511 	return tso_num_seg;
512 }
513 
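/**
 * dp_tso_num_seg_free() - function to return a TSO num-seg element to the pool
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: TSO num-seg element to be freed
 *
 * Return: none
 */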
514 static inline
515 void dp_tso_num_seg_free(struct dp_soc *soc,
516 		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
517 {
518 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
519 	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
520 	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
521 	soc->tx_tso_num_seg[pool_id].num_free++;
522 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
523 }
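
/*
 * Illustrative sketch (hypothetical helper): a caller segmenting one TSO
 * packet would typically pull one qdf_tso_seg_elem_t per segment, chain the
 * elements, and return them with dp_tx_tso_desc_free() on completion or on
 * allocation failure.
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_seg_list_example(
		struct dp_soc *soc, uint8_t pool_id, uint32_t num_segs)
{
	struct qdf_tso_seg_elem_t *head = NULL, *seg;
	uint32_t i;

	for (i = 0; i < num_segs; i++) {
		seg = dp_tx_tso_desc_alloc(soc, pool_id);
		if (!seg) {
			/* roll the partial chain back into the pool */
			while (head) {
				seg = head;
				head = head->next;
				dp_tx_tso_desc_free(soc, pool_id, seg);
			}
			return NULL;
		}
		seg->next = head;
		head = seg;
	}

	return head;
}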
524 #endif
525 
526 /*
 * dp_tx_me_alloc_buf() - Allocate a descriptor from the ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: pointer to dp_tx_me_buf_t, or NULL if the pool is exhausted
531  */
532 static inline struct dp_tx_me_buf_t*
533 dp_tx_me_alloc_buf(struct dp_pdev *pdev)
534 {
535 	struct dp_tx_me_buf_t *buf = NULL;
536 	qdf_spin_lock_bh(&pdev->tx_mutex);
537 	if (pdev->me_buf.freelist) {
538 		buf = pdev->me_buf.freelist;
539 		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
540 		pdev->me_buf.buf_in_use++;
541 	} else {
542 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
543 				"Error allocating memory in pool");
544 		qdf_spin_unlock_bh(&pdev->tx_mutex);
545 		return NULL;
546 	}
547 	qdf_spin_unlock_bh(&pdev->tx_mutex);
548 	return buf;
549 }
550 
551 /*
 * dp_tx_me_free_buf() - Free an ME descriptor and return it to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: allocated ME buffer to be returned to the pool
 *
 * Return: void
557  */
558 static inline void
559 dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
560 {
561 	qdf_spin_lock_bh(&pdev->tx_mutex);
562 	buf->next = pdev->me_buf.freelist;
563 	pdev->me_buf.freelist = buf;
564 	pdev->me_buf.buf_in_use--;
565 	qdf_spin_unlock_bh(&pdev->tx_mutex);
566 }
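
/*
 * Minimal usage sketch (hypothetical caller): the multicast enhancement path
 * takes one ME buffer per converted destination and returns it once the
 * cloned frame has been handed to the TX path.
 *
 *	struct dp_tx_me_buf_t *mc_uc_buf = dp_tx_me_alloc_buf(pdev);
 *
 *	if (!mc_uc_buf)
 *		return QDF_STATUS_E_NOMEM;
 *	...copy the destination MAC into the buffer and build the clone...
 *	dp_tx_me_free_buf(pdev, mc_uc_buf);
 */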
567 #endif /* DP_TX_DESC_H */
568