1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef DP_TX_DESC_H
21 #define DP_TX_DESC_H
22 
23 #include "dp_types.h"
24 #include "dp_tx.h"
25 #include "dp_internal.h"
26 
27 /**
28  * 21 bits cookie (tx descriptor ID) layout:
29  * 2 bits pool id (0 ~ 3),
30  * 10 bits page id (0 ~ 1023),
31  * 5 bits offset id (0 ~ 31); desc size = 128, num descs per page = 4096 / 128 = 32
32  */
33 /* TODO: is a ring ID also needed in the cookie? */
34 #define DP_TX_DESC_ID_POOL_MASK    0x018000
35 #define DP_TX_DESC_ID_POOL_OS      15
36 #define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
37 #define DP_TX_DESC_ID_PAGE_OS      5
38 #define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
39 #define DP_TX_DESC_ID_OFFSET_OS    0
40 
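/*
 * Illustrative decode of a tx descriptor ID using the masks above (the
 * ID value is an arbitrary example chosen for this sketch):
 *
 *   uint32_t desc_id = 0x104AB;
 *   uint8_t  pool_id = (desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *                      DP_TX_DESC_ID_POOL_OS;          // 2
 *   uint16_t page_id = (desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *                      DP_TX_DESC_ID_PAGE_OS;          // 37
 *   uint16_t offset  = (desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *                      DP_TX_DESC_ID_OFFSET_OS;        // 11
 */
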
41 /**
42  * Compile-time assert on the tx descriptor size
43  *
44  * If this assert is hit, update POOL_MASK and
45  * PAGE_MASK according to the new descriptor size.
46  *
47  * For the current PAGE mask the allowed size range of tx_desc
48  * is (PAGE_SIZE >> 6, PAGE_SIZE >> 5], i.e. 65 .. 128 bytes for a 4 KB page
49  */
50 QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
51 			((sizeof(struct dp_tx_desc_s)) <=
52 			 (PAGE_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
53 			((sizeof(struct dp_tx_desc_s)) >
54 			 (PAGE_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1))));
55 
56 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
57 #define TX_DESC_LOCK_CREATE(lock)
58 #define TX_DESC_LOCK_DESTROY(lock)
59 #define TX_DESC_LOCK_LOCK(lock)
60 #define TX_DESC_LOCK_UNLOCK(lock)
61 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
62 	((pool)->status == FLOW_POOL_INACTIVE)
63 #ifdef QCA_AC_BASED_FLOW_CONTROL
64 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
65 	dp_tx_flow_pool_member_clean(_tx_desc_pool)
66 
67 #else /* !QCA_AC_BASED_FLOW_CONTROL */
68 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
69 do {                                                   \
70 	(_tx_desc_pool)->elem_size = 0;                \
71 	(_tx_desc_pool)->freelist = NULL;              \
72 	(_tx_desc_pool)->pool_size = 0;                \
73 	(_tx_desc_pool)->avail_desc = 0;               \
74 	(_tx_desc_pool)->start_th = 0;                 \
75 	(_tx_desc_pool)->stop_th = 0;                  \
76 	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
77 } while (0)
78 #endif /* QCA_AC_BASED_FLOW_CONTROL */
79 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
80 #define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
81 #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
82 #define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
83 #define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
84 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
85 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
86 do {                                                   \
87 	(_tx_desc_pool)->elem_size = 0;                \
88 	(_tx_desc_pool)->num_allocated = 0;            \
89 	(_tx_desc_pool)->freelist = NULL;              \
90 	(_tx_desc_pool)->elem_count = 0;               \
91 	(_tx_desc_pool)->num_free = 0;                 \
92 } while (0)
93 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
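
/*
 * Typical usage of the lock helpers above (illustrative sketch): freelist
 * manipulation in the non-flow-control path is bracketed by the
 * TX_DESC_LOCK_* macros, which expand to nothing when
 * QCA_LL_TX_FLOW_CONTROL_V2 is enabled (the per-pool flow_pool_lock is
 * taken explicitly in that path instead):
 *
 *   TX_DESC_LOCK_LOCK(&pool->lock);
 *   ... manipulate pool->freelist ...
 *   TX_DESC_LOCK_UNLOCK(&pool->lock);
 */
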
94 #define MAX_POOL_BUFF_COUNT 10000
95 
96 #ifdef DP_TX_TRACKING
97 static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
98 					uint32_t magic_pattern)
99 {
100 	tx_desc->magic = magic_pattern;
101 }
102 #else
103 static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
104 					uint32_t magic_pattern)
105 {
106 }
107 #endif
108 
109 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
110 				 uint16_t num_elem);
111 QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
112 				uint16_t num_elem);
113 void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
114 void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
115 
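/*
 * Illustrative pool lifecycle (a sketch of the expected call order, not a
 * verbatim copy of any caller): memory is allocated first, then the pool
 * is initialized; teardown runs in the reverse order.
 *
 *   if (dp_tx_desc_pool_alloc(soc, pool_id, num_elem) != QDF_STATUS_SUCCESS)
 *           return QDF_STATUS_E_NOMEM;
 *   if (dp_tx_desc_pool_init(soc, pool_id, num_elem) != QDF_STATUS_SUCCESS) {
 *           dp_tx_desc_pool_free(soc, pool_id);
 *           return QDF_STATUS_E_FAILURE;
 *   }
 *   ...
 *   dp_tx_desc_pool_deinit(soc, pool_id);
 *   dp_tx_desc_pool_free(soc, pool_id);
 */
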
116 QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
117 				     uint16_t num_elem);
118 QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
119 				    uint16_t num_elem);
120 void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
121 void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
122 
123 QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
124 				     uint16_t num_elem);
125 QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
126 				    uint16_t num_elem);
127 void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
128 void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
129 
130 QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
131 		uint16_t num_elem);
132 QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id,
133 				       uint16_t num_elem);
134 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
135 void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
136 
137 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
138 void dp_tx_flow_control_init(struct dp_soc *);
139 void dp_tx_flow_control_deinit(struct dp_soc *);
140 
141 QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
142 	tx_pause_callback pause_cb);
143 QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
144 			       uint8_t vdev_id);
145 void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
146 			   uint8_t vdev_id);
147 void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
148 struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
149 	uint8_t flow_pool_id, uint16_t flow_pool_size);
150 
151 QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
152 	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
153 void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
154 	uint8_t flow_type, uint8_t flow_pool_id);
155 
156 /**
157  * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
158  * @pool: flow pool
159  *
160  * Caller needs to take lock and do sanity checks.
161  *
162  * Return: tx descriptor
163  */
164 static inline
165 struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
166 {
167 	struct dp_tx_desc_s *tx_desc = pool->freelist;
168 
169 	pool->freelist = pool->freelist->next;
170 	pool->avail_desc--;
171 	return tx_desc;
172 }
173 
174 /**
175  * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
176  * @pool: flow pool
177  * @tx_desc: tx descriptor
178  *
179  * Caller needs to take lock and do sanity checks.
180  *
181  * Return: none
182  */
183 static inline
184 void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
185 			struct dp_tx_desc_s *tx_desc)
186 {
187 	tx_desc->next = pool->freelist;
188 	pool->freelist = tx_desc;
189 	pool->avail_desc++;
190 }
191 
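/*
 * Both helpers above assume the caller already holds pool->flow_pool_lock;
 * an illustrative (hypothetical) caller looks roughly like:
 *
 *   qdf_spin_lock_bh(&pool->flow_pool_lock);
 *   if (pool->avail_desc)
 *           tx_desc = dp_tx_get_desc_flow_pool(pool);
 *   ...
 *   dp_tx_put_desc_flow_pool(pool, tx_desc);
 *   qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */
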
192 #ifdef QCA_AC_BASED_FLOW_CONTROL
193 
194 /**
195  * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
196  *
197  * @pool: flow pool
198  *
199  * Return: None
200  */
201 static inline void
202 dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
203 {
204 	pool->elem_size = 0;
205 	pool->freelist = NULL;
206 	pool->pool_size = 0;
207 	pool->avail_desc = 0;
208 	qdf_mem_zero(pool->start_th, FL_TH_MAX);
209 	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
210 	pool->status = FLOW_POOL_INACTIVE;
211 }
212 
213 /**
214  * dp_tx_is_threshold_reached() - Check if the current available descriptor count meets a stop threshold
215  *
216  * @pool: flow pool
217  * @avail_desc: available descriptor number
218  *
219  * Return: true if threshold is met, false if not
220  */
221 static inline bool
222 dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
223 {
224 	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
225 		return true;
226 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
227 		return true;
228 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
229 		return true;
230 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
231 		return true;
232 	else
233 		return false;
234 }
235 
236 /**
237  * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
238  *
239  * @soc: dp soc
240  * @pool: flow pool
241  */
242 static inline void
243 dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
244 			     struct dp_tx_desc_pool_s *pool)
245 {
246 	if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
247 		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
248 		return;
249 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
250 		   pool->avail_desc > pool->stop_th[DP_TH_VI]) {
251 		pool->status = FLOW_POOL_BE_BK_PAUSED;
252 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
253 		   pool->avail_desc > pool->stop_th[DP_TH_VO]) {
254 		pool->status = FLOW_POOL_VI_PAUSED;
255 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
256 		   pool->avail_desc > pool->stop_th[DP_TH_HI]) {
257 		pool->status = FLOW_POOL_VO_PAUSED;
258 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
259 		pool->status = FLOW_POOL_ACTIVE_PAUSED;
260 	}
261 
262 	switch (pool->status) {
263 	case FLOW_POOL_ACTIVE_PAUSED:
264 		soc->pause_cb(pool->flow_pool_id,
265 			      WLAN_NETIF_PRIORITY_QUEUE_OFF,
266 			      WLAN_DATA_FLOW_CTRL_PRI);
267 		/* fallthrough */
268 
269 	case FLOW_POOL_VO_PAUSED:
270 		soc->pause_cb(pool->flow_pool_id,
271 			      WLAN_NETIF_VO_QUEUE_OFF,
272 			      WLAN_DATA_FLOW_CTRL_VO);
273 		/* fallthrough */
274 
275 	case FLOW_POOL_VI_PAUSED:
276 		soc->pause_cb(pool->flow_pool_id,
277 			      WLAN_NETIF_VI_QUEUE_OFF,
278 			      WLAN_DATA_FLOW_CTRL_VI);
279 		/* fallthrough */
280 
281 	case FLOW_POOL_BE_BK_PAUSED:
282 		soc->pause_cb(pool->flow_pool_id,
283 			      WLAN_NETIF_BE_BK_QUEUE_OFF,
284 			      WLAN_DATA_FLOW_CTRL_BE_BK);
285 		break;
286 	default:
287 		dp_err("Invalid pool status:%u to adjust", pool->status);
288 	}
289 }
290 
291 /**
292  * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
293  *
294  * @soc: Handle to DP SoC structure
295  * @desc_pool_id: ID of the flow control pool
296  *
297  * Return: TX descriptor allocated or NULL
298  */
299 static inline struct dp_tx_desc_s *
300 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
301 {
302 	struct dp_tx_desc_s *tx_desc = NULL;
303 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
304 	bool is_pause = false;
305 	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
306 	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
307 	enum netif_reason_type reason;
308 
309 	if (qdf_likely(pool)) {
310 		qdf_spin_lock_bh(&pool->flow_pool_lock);
311 		if (qdf_likely(pool->avail_desc &&
312 		    pool->status != FLOW_POOL_INVALID &&
313 		    pool->status != FLOW_POOL_INACTIVE)) {
314 			tx_desc = dp_tx_get_desc_flow_pool(pool);
315 			tx_desc->pool_id = desc_pool_id;
316 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
317 			dp_tx_desc_set_magic(tx_desc,
318 					     DP_TX_MAGIC_PATTERN_INUSE);
319 			is_pause = dp_tx_is_threshold_reached(pool,
320 							      pool->avail_desc);
321 
322 			if (qdf_unlikely(pool->status ==
323 					 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
324 				dp_tx_adjust_flow_pool_state(soc, pool);
325 				is_pause = false;
326 			}
327 
328 			if (qdf_unlikely(is_pause)) {
329 				switch (pool->status) {
330 				case FLOW_POOL_ACTIVE_UNPAUSED:
331 					/* pause network BE/BK queue */
332 					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
333 					reason = WLAN_DATA_FLOW_CTRL_BE_BK;
334 					level = DP_TH_BE_BK;
335 					pool->status = FLOW_POOL_BE_BK_PAUSED;
336 					break;
337 				case FLOW_POOL_BE_BK_PAUSED:
338 					/* pause network VI queue */
339 					act = WLAN_NETIF_VI_QUEUE_OFF;
340 					reason = WLAN_DATA_FLOW_CTRL_VI;
341 					level = DP_TH_VI;
342 					pool->status = FLOW_POOL_VI_PAUSED;
343 					break;
344 				case FLOW_POOL_VI_PAUSED:
345 					/* pause network VO queue */
346 					act = WLAN_NETIF_VO_QUEUE_OFF;
347 					reason = WLAN_DATA_FLOW_CTRL_VO;
348 					level = DP_TH_VO;
349 					pool->status = FLOW_POOL_VO_PAUSED;
350 					break;
351 				case FLOW_POOL_VO_PAUSED:
352 					/* pause network HI PRI queue */
353 					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
354 					reason = WLAN_DATA_FLOW_CTRL_PRI;
355 					level = DP_TH_HI;
356 					pool->status = FLOW_POOL_ACTIVE_PAUSED;
357 					break;
358 				case FLOW_POOL_ACTIVE_PAUSED:
359 					act = WLAN_NETIF_ACTION_TYPE_NONE;
360 					break;
361 				default:
362 					dp_err_rl("pool status is %d!",
363 						  pool->status);
364 					break;
365 				}
366 
367 				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
368 					pool->latest_pause_time[level] =
369 						qdf_get_system_timestamp();
370 					soc->pause_cb(desc_pool_id,
371 						      act,
372 						      reason);
373 				}
374 			}
375 		} else {
376 			pool->pkt_drop_no_desc++;
377 		}
378 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
379 	} else {
380 		soc->pool_stats.pkt_drop_no_pool++;
381 	}
382 
383 	return tx_desc;
384 }
385 
386 /**
387  * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
388  *
389  * @soc: Handle to DP SoC structure
390  * @tx_desc: the tx descriptor to be freed
391  * @desc_pool_id: ID of the flow control pool
392  *
393  * Return: None
394  */
395 static inline void
396 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
397 		uint8_t desc_pool_id)
398 {
399 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
400 	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
401 	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
402 	enum netif_reason_type reason;
403 
404 	qdf_spin_lock_bh(&pool->flow_pool_lock);
405 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
406 	tx_desc->nbuf = NULL;
407 	tx_desc->flags = 0;
408 	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
409 	tx_desc->timestamp = 0;
410 	dp_tx_put_desc_flow_pool(pool, tx_desc);
411 	switch (pool->status) {
412 	case FLOW_POOL_ACTIVE_PAUSED:
413 		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
414 			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
415 			reason = WLAN_DATA_FLOW_CTRL_PRI;
416 			pool->status = FLOW_POOL_VO_PAUSED;
417 
418 			/* Update maximum pause duration for HI queue */
419 			pause_dur = unpause_time -
420 					pool->latest_pause_time[DP_TH_HI];
421 			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
422 				pool->max_pause_time[DP_TH_HI] = pause_dur;
423 		}
424 		break;
425 	case FLOW_POOL_VO_PAUSED:
426 		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
427 			act = WLAN_NETIF_VO_QUEUE_ON;
428 			reason = WLAN_DATA_FLOW_CTRL_VO;
429 			pool->status = FLOW_POOL_VI_PAUSED;
430 
431 			/* Update maximum pause duration for VO queue */
432 			pause_dur = unpause_time -
433 					pool->latest_pause_time[DP_TH_VO];
434 			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
435 				pool->max_pause_time[DP_TH_VO] = pause_dur;
436 		}
437 		break;
438 	case FLOW_POOL_VI_PAUSED:
439 		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
440 			act = WLAN_NETIF_VI_QUEUE_ON;
441 			reason = WLAN_DATA_FLOW_CTRL_VI;
442 			pool->status = FLOW_POOL_BE_BK_PAUSED;
443 
444 			/* Update maximum pause duration for VI queue */
445 			pause_dur = unpause_time -
446 					pool->latest_pause_time[DP_TH_VI];
447 			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
448 				pool->max_pause_time[DP_TH_VI] = pause_dur;
449 		}
450 		break;
451 	case FLOW_POOL_BE_BK_PAUSED:
452 		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
453 			act = WLAN_NETIF_BE_BK_QUEUE_ON;
454 			reason = WLAN_DATA_FLOW_CTRL_BE_BK;
455 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
456 
457 			/* Update maximum pause duration for BE_BK queue */
458 			pause_dur = unpause_time -
459 					pool->latest_pause_time[DP_TH_BE_BK];
460 			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
461 				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
462 		}
463 		break;
464 	case FLOW_POOL_INVALID:
465 		if (pool->avail_desc == pool->pool_size) {
466 			dp_tx_desc_pool_deinit(soc, desc_pool_id);
467 			dp_tx_desc_pool_free(soc, desc_pool_id);
468 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
469 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
470 				  "%s %d pool is freed!!",
471 				  __func__, __LINE__);
472 			return;
473 		}
474 		break;
475 
476 	case FLOW_POOL_ACTIVE_UNPAUSED:
477 		break;
478 	default:
479 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
480 			  "%s %d pool is INACTIVE State!!",
481 			  __func__, __LINE__);
482 		break;
483 	};
484 
485 	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
486 		soc->pause_cb(pool->flow_pool_id,
487 			      act, reason);
488 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
489 }
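
/*
 * Illustrative pairing of the AC flow-control alloc/free helpers above in
 * a transmit path (a sketch only; real callers add more error handling):
 *
 *   tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *   if (!tx_desc)
 *           return QDF_STATUS_E_RESOURCES;
 *   ... hand the frame to hardware ...
 *
 *   On completion (or if the enqueue fails) the descriptor is returned:
 *   dp_tx_desc_free(soc, tx_desc, desc_pool_id);
 */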
490 #else /* QCA_AC_BASED_FLOW_CONTROL */
491 
492 static inline bool
493 dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
494 {
495 	if (qdf_unlikely(avail_desc < pool->stop_th))
496 		return true;
497 	else
498 		return false;
499 }
500 
501 /**
502  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
503  *
504  * @soc: Handle to DP SoC structure
505  * @desc_pool_id: ID of the flow pool to allocate from
506  *
507  * Return: TX descriptor allocated or NULL
508  */
509 static inline struct dp_tx_desc_s *
510 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
511 {
512 	struct dp_tx_desc_s *tx_desc = NULL;
513 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
514 
515 	if (pool) {
516 		qdf_spin_lock_bh(&pool->flow_pool_lock);
517 		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
518 		    pool->avail_desc) {
519 			tx_desc = dp_tx_get_desc_flow_pool(pool);
520 			tx_desc->pool_id = desc_pool_id;
521 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
522 			dp_tx_desc_set_magic(tx_desc,
523 					     DP_TX_MAGIC_PATTERN_INUSE);
524 			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
525 				pool->status = FLOW_POOL_ACTIVE_PAUSED;
526 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
527 				/* pause network queues */
528 				soc->pause_cb(desc_pool_id,
529 					       WLAN_STOP_ALL_NETIF_QUEUE,
530 					       WLAN_DATA_FLOW_CONTROL);
531 			} else {
532 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
533 			}
534 
535 			/*
536 			 * If one packet is going to be sent, PM usage count
537 			 * needs to be incremented by one to prevent future
538 			 * runtime suspend. This should be tied with the
539 			 * success of allocating one descriptor. It will be
540 			 * decremented after the packet has been sent.
541 			 */
542 			hif_pm_runtime_get_noresume(
543 				soc->hif_handle,
544 				RTPM_ID_DP_TX_DESC_ALLOC_FREE);
545 		} else {
546 			pool->pkt_drop_no_desc++;
547 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
548 		}
549 	} else {
550 		soc->pool_stats.pkt_drop_no_pool++;
551 	}
552 
553 
554 	return tx_desc;
555 }
556 
557 /**
558  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
559  *
560  * @soc: Handle to DP SoC structure
561  * @tx_desc: the tx descriptor to be freed
562  * @desc_pool_id: ID of the flow pool
563  *
564  * Return: None
565  */
566 static inline void
567 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
568 		uint8_t desc_pool_id)
569 {
570 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
571 
572 	qdf_spin_lock_bh(&pool->flow_pool_lock);
573 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
574 	tx_desc->nbuf = NULL;
575 	tx_desc->flags = 0;
576 	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
577 	tx_desc->timestamp = 0;
578 	dp_tx_put_desc_flow_pool(pool, tx_desc);
579 	switch (pool->status) {
580 	case FLOW_POOL_ACTIVE_PAUSED:
581 		if (pool->avail_desc > pool->start_th) {
582 			soc->pause_cb(pool->flow_pool_id,
583 				       WLAN_WAKE_ALL_NETIF_QUEUE,
584 				       WLAN_DATA_FLOW_CONTROL);
585 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
586 		}
587 		break;
588 	case FLOW_POOL_INVALID:
589 		if (pool->avail_desc == pool->pool_size) {
590 			dp_tx_desc_pool_deinit(soc, desc_pool_id);
591 			dp_tx_desc_pool_free(soc, desc_pool_id);
592 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
593 			qdf_print("%s %d pool is freed!!",
594 				  __func__, __LINE__);
595 			goto out;
596 		}
597 		break;
598 
599 	case FLOW_POOL_ACTIVE_UNPAUSED:
600 		break;
601 	default:
602 		qdf_print("%s %d pool is INACTIVE State!!",
603 			  __func__, __LINE__);
604 		break;
605 	};
606 
607 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
608 
609 out:
610 	/*
611 	 * Decrement PM usage count if the packet has been sent. This
612 	 * should be tied with the success of freeing one descriptor.
613 	 */
614 	hif_pm_runtime_put(soc->hif_handle,
615 			   RTPM_ID_DP_TX_DESC_ALLOC_FREE);
616 }
617 
618 #endif /* QCA_AC_BASED_FLOW_CONTROL */
619 
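/**
 * dp_tx_desc_thresh_reached() - check if the flow pool of a vdev has hit
 *				 a pause threshold
 * @soc_hdl: CDP SoC handle
 * @vdev_id: id of the vdev whose flow pool is checked
 *
 * Return: true if the pool's available descriptor count has reached a
 *	   stop threshold, false otherwise (or if the vdev is not found)
 */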
620 static inline bool
621 dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
622 {
623 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
624 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
625 						     DP_MOD_ID_CDP);
626 	struct dp_tx_desc_pool_s *pool;
627 	bool status;
628 
629 	if (!vdev)
630 		return false;
631 
632 	pool = vdev->pool;
633 	status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
634 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
635 
636 	return status;
637 }
638 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
639 
640 static inline void dp_tx_flow_control_init(struct dp_soc *handle)
641 {
642 }
643 
644 static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
645 {
646 }
647 
648 static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
649 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
650 	uint16_t flow_pool_size)
651 {
652 	return QDF_STATUS_SUCCESS;
653 }
654 
655 static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
656 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
657 {
658 }
659 
660 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
661 static inline
662 void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
663 {
664 	if (tx_desc)
665 		prefetch(tx_desc);
666 }
667 #else
668 static inline
669 void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
670 {
671 }
672 #endif
673 
674 /**
675  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
676  *
677  * @soc: Handle to DP SoC structure
678  * @desc_pool_id: pool id to allocate from
679  *
680  * Return: TX descriptor allocated or NULL if the pool is exhausted
681  */
682 static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
683 						uint8_t desc_pool_id)
684 {
685 	struct dp_tx_desc_s *tx_desc = NULL;
686 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
687 
688 	TX_DESC_LOCK_LOCK(&pool->lock);
689 
690 	tx_desc = pool->freelist;
691 
692 	/* Pool is exhausted */
693 	if (!tx_desc) {
694 		TX_DESC_LOCK_UNLOCK(&pool->lock);
695 		return NULL;
696 	}
697 
698 	pool->freelist = pool->freelist->next;
699 	pool->num_allocated++;
700 	pool->num_free--;
701 	dp_tx_prefetch_desc(pool->freelist);
702 
703 	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
704 
705 	TX_DESC_LOCK_UNLOCK(&pool->lock);
706 
707 	return tx_desc;
708 }
709 
710 /**
711  * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
712  *                            from given pool
713  * @soc: Handle to DP SoC structure
714  * @desc_pool_id: pool id from which the descriptors are allocated
715  * @num_requested: number of descriptors required
716  *
717  * Allocate multiple tx descriptors and link them into a list.
718  *
719  * Return: h_desc, pointer to the first descriptor of the batch, or NULL
720  */
721 static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
722 		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
723 {
724 	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
725 	uint8_t count;
726 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
727 
728 	TX_DESC_LOCK_LOCK(&pool->lock);
729 
730 	if ((num_requested == 0) ||
731 			(pool->num_free < num_requested)) {
732 		TX_DESC_LOCK_UNLOCK(&pool->lock);
733 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
734 			"%s, No Free Desc: Available(%d) num_requested(%d)",
735 			__func__, pool->num_free,
736 			num_requested);
737 		return NULL;
738 	}
739 
740 	h_desc = pool->freelist;
741 
742 	/* h_desc should never be NULL since num_free > requested */
743 	qdf_assert_always(h_desc);
744 
745 	c_desc = h_desc;
746 	for (count = 0; count < (num_requested - 1); count++) {
747 		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
748 		c_desc = c_desc->next;
749 	}
750 	pool->num_free -= num_requested;
751 	pool->num_allocated += num_requested;
752 	pool->freelist = c_desc->next;
753 	c_desc->next = NULL;
754 
755 	TX_DESC_LOCK_UNLOCK(&pool->lock);
756 	return h_desc;
757 }
758 
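/*
 * Illustrative use of the batch allocator above (a sketch; "n" and the
 * local variables are hypothetical names). The returned descriptors are
 * linked through ->next, with the last next pointer set to NULL:
 *
 *   struct dp_tx_desc_s *head, *cur;
 *
 *   head = dp_tx_desc_alloc_multiple(soc, desc_pool_id, n);
 *   if (!head)
 *           return QDF_STATUS_E_RESOURCES;
 *   for (cur = head; cur; cur = cur->next)
 *           ... fill in each descriptor ...
 */
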
759 /**
760  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
761  *
762  * @soc: Handle to DP SoC structure
763  * @tx_desc: the tx descriptor to be freed
764  * @desc_pool_id: ID of the pool to return the descriptor to
765  */
766 static inline void
767 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
768 		uint8_t desc_pool_id)
769 {
770 	struct dp_tx_desc_pool_s *pool = NULL;
771 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
772 	tx_desc->nbuf = NULL;
773 	tx_desc->flags = 0;
774 
775 	pool = &soc->tx_desc[desc_pool_id];
776 	TX_DESC_LOCK_LOCK(&pool->lock);
777 	tx_desc->next = pool->freelist;
778 	pool->freelist = tx_desc;
779 	pool->num_allocated--;
780 	pool->num_free++;
781 	TX_DESC_LOCK_UNLOCK(&pool->lock);
782 }
783 
784 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
785 
786 #ifdef QCA_DP_TX_DESC_ID_CHECK
787 /**
788  * dp_tx_is_desc_id_valid() - check whether the tx desc id is valid
789  *
790  * @soc: Handle to DP SoC structure
791  * @tx_desc_id: tx descriptor id to be validated
792  *
793  * Return: true if the id is valid, false otherwise
794  */
795 static inline bool
796 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
797 {
798 	uint8_t pool_id;
799 	uint16_t page_id, offset;
800 	struct dp_tx_desc_pool_s *pool;
801 
802 	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
803 			DP_TX_DESC_ID_POOL_OS;
804 	/* Pool ID is out of limit */
805 	if (pool_id > wlan_cfg_get_num_tx_desc_pool(
806 				soc->wlan_cfg_ctx)) {
807 		QDF_TRACE(QDF_MODULE_ID_DP,
808 			  QDF_TRACE_LEVEL_FATAL,
809 			  "%s:Tx Comp pool id %d not valid",
810 			  __func__,
811 			  pool_id);
812 		goto warn_exit;
813 	}
814 
815 	pool = &soc->tx_desc[pool_id];
816 	/* the pool is freed */
817 	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
818 		QDF_TRACE(QDF_MODULE_ID_DP,
819 			  QDF_TRACE_LEVEL_FATAL,
820 			  "%s:the pool %d has been freed",
821 			  __func__,
822 			  pool_id);
823 		goto warn_exit;
824 	}
825 
826 	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
827 				DP_TX_DESC_ID_PAGE_OS;
828 	/* the page id is out of limit */
829 	if (page_id >= pool->desc_pages.num_pages) {
830 		QDF_TRACE(QDF_MODULE_ID_DP,
831 			  QDF_TRACE_LEVEL_FATAL,
832 			  "%s:the page id %d invalid, pool id %d, num_page %d",
833 			  __func__,
834 			  page_id,
835 			  pool_id,
836 			  pool->desc_pages.num_pages);
837 		goto warn_exit;
838 	}
839 
840 	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
841 				DP_TX_DESC_ID_OFFSET_OS;
842 	/* the offset is out of limit */
843 	if (offset >= pool->desc_pages.num_element_per_page) {
844 		QDF_TRACE(QDF_MODULE_ID_DP,
845 			  QDF_TRACE_LEVEL_FATAL,
846 			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
847 			  __func__,
848 			  offset,
849 			  pool_id,
850 			  pool->desc_pages.num_element_per_page);
851 		goto warn_exit;
852 	}
853 
854 	return true;
855 
856 warn_exit:
857 	QDF_TRACE(QDF_MODULE_ID_DP,
858 		  QDF_TRACE_LEVEL_FATAL,
859 		  "%s:Tx desc id 0x%x not valid",
860 		  __func__,
861 		  tx_desc_id);
862 	qdf_assert_always(0);
863 	return false;
864 }
865 
866 #else
867 static inline bool
868 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
869 {
870 	return true;
871 }
872 #endif /* QCA_DP_TX_DESC_ID_CHECK */
873 
874 #ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
875 static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
876 						    struct dp_tx_desc_s *desc,
877 						    uint8_t allow_fast_comp)
878 {
879 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
880 	    qdf_likely(allow_fast_comp)) {
881 		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
882 	}
883 }
884 #else
885 static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
886 						    struct dp_tx_desc_s *desc,
887 						    uint8_t allow_fast_comp)
888 {
889 }
890 #endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */
891 
892 /**
893  * dp_tx_desc_find() - find dp tx descriptor from cookie fields
894  * @soc: handle for the device sending the data
895  * @pool_id: pool id decoded from the tx descriptor ID
896  * @page_id: page id decoded from the tx descriptor ID
897  * @offset: offset within the page decoded from the tx descriptor ID
898  *
899  * Return: the descriptor object at the given pool/page/offset
900  */
901 static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
902 		uint8_t pool_id, uint16_t page_id, uint16_t offset)
903 {
904 	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];
905 
906 	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
907 		tx_desc_pool->elem_size * offset;
908 }
909 
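/*
 * Illustrative completion-side lookup (a sketch combining the ID masks,
 * dp_tx_is_desc_id_valid() and dp_tx_desc_find(); local variable names
 * are hypothetical):
 *
 *   if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
 *           return;
 *   pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *                   DP_TX_DESC_ID_POOL_OS;
 *   page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *                   DP_TX_DESC_ID_PAGE_OS;
 *   offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *                   DP_TX_DESC_ID_OFFSET_OS;
 *   tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 */
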
910 /**
911  * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
912  * @soc: handle for the device sending the data
913  * @desc_pool_id: target pool id
914  *
915  * Return: allocated tx extension descriptor, or NULL if the pool is empty
916  */
917 static inline
918 struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
919 		uint8_t desc_pool_id)
920 {
921 	struct dp_tx_ext_desc_elem_s *c_elem;
922 
923 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
924 	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
925 		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
926 		return NULL;
927 	}
928 	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
929 	soc->tx_ext_desc[desc_pool_id].freelist =
930 		soc->tx_ext_desc[desc_pool_id].freelist->next;
931 	soc->tx_ext_desc[desc_pool_id].num_free--;
932 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
933 	return c_elem;
934 }
935 
936 /**
937  * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
938  * @soc: handle for the device sending the data
939  * @elem: ext descriptor to be released
940  * @desc_pool_id: target pool id
941  *
942  * Return: None
943  */
944 static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
945 	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
946 {
947 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
948 	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
949 	soc->tx_ext_desc[desc_pool_id].freelist = elem;
950 	soc->tx_ext_desc[desc_pool_id].num_free++;
951 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
952 	return;
953 }
954 
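/*
 * Illustrative alloc/free pairing for the extension descriptor helpers
 * above (a sketch; programming of the extension descriptor itself is
 * elided):
 *
 *   struct dp_tx_ext_desc_elem_s *ext_desc;
 *
 *   ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
 *   if (!ext_desc)
 *           return QDF_STATUS_E_RESOURCES;
 *   ... program the extension descriptor ...
 *   dp_tx_ext_desc_free(soc, ext_desc, desc_pool_id);
 */
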
955 /**
956  * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
957  *                           attach them to the free list
958  * @soc: Handle to DP SoC structure
959  * @elem: head of the ext descriptor list to be freed
960  * @desc_pool_id: pool to which the descriptors are returned
961  * @num_free: number of descriptors to be freed
962  *
963  * Return: none
964  */
965 static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
966 		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
967 		uint8_t num_free)
968 {
969 	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
970 	uint8_t freed = num_free;
971 
972 	/* caller should always guarantee a list of at least num_free nodes */
973 	qdf_assert_always(elem);
974 
975 	head = elem;
976 	c_elem = head;
977 	tail = head;
978 	while (c_elem && freed) {
979 		tail = c_elem;
980 		c_elem = c_elem->next;
981 		freed--;
982 	}
983 
984 	/* caller should always guarantee a list of at least num_free nodes */
985 	qdf_assert_always(tail);
986 
987 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
988 	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
989 	soc->tx_ext_desc[desc_pool_id].freelist = head;
990 	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
991 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
992 
993 	return;
994 }
995 
996 #if defined(FEATURE_TSO)
997 /**
998  * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
999  * @soc: device soc instance
1000  * @pool_id: pool from which the TSO descriptor should be allocated
1001  *
1002  * Allocates a TSO segment element from the free list held in
1003  * the soc
1004  *
1005  * Return: tso_seg, tso segment memory pointer, or NULL if the free list is empty
1006  */
1007 static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
1008 		struct dp_soc *soc, uint8_t pool_id)
1009 {
1010 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
1011 
1012 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
1013 	if (soc->tx_tso_desc[pool_id].freelist) {
1014 		soc->tx_tso_desc[pool_id].num_free--;
1015 		tso_seg = soc->tx_tso_desc[pool_id].freelist;
1016 		soc->tx_tso_desc[pool_id].freelist =
1017 			soc->tx_tso_desc[pool_id].freelist->next;
1018 	}
1019 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
1020 
1021 	return tso_seg;
1022 }
1023 
1024 /**
1025  * dp_tx_tso_desc_free() - function to free a TSO segment
1026  * @soc: device soc instance
1027  * @pool_id: pool to which the TSO descriptor should be returned
1028  * @tso_seg: tso segment memory pointer
1029  *
1030  * Returns a TSO segment element to the free list held in the
1031  * soc
1032  *
1033  * Return: none
1034  */
1035 static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
1036 		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
1037 {
1038 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
1039 	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
1040 	soc->tx_tso_desc[pool_id].freelist = tso_seg;
1041 	soc->tx_tso_desc[pool_id].num_free++;
1042 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
1043 }
1044 
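/**
 * dp_tso_num_seg_alloc() - allocate a TSO num-segments element from the pool
 * @soc: device soc instance
 * @pool_id: pool id from which the element should be allocated
 *
 * Return: pointer to the allocated element, or NULL if the free list is empty
 */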
1045 static inline
1046 struct qdf_tso_num_seg_elem_t  *dp_tso_num_seg_alloc(struct dp_soc *soc,
1047 		uint8_t pool_id)
1048 {
1049 	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
1050 
1051 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1052 	if (soc->tx_tso_num_seg[pool_id].freelist) {
1053 		soc->tx_tso_num_seg[pool_id].num_free--;
1054 		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
1055 		soc->tx_tso_num_seg[pool_id].freelist =
1056 			soc->tx_tso_num_seg[pool_id].freelist->next;
1057 	}
1058 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1059 
1060 	return tso_num_seg;
1061 }
1062 
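/**
 * dp_tso_num_seg_free() - return a TSO num-segments element to the free list
 * @soc: device soc instance
 * @pool_id: pool id to which the element should be returned
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */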
1063 static inline
1064 void dp_tso_num_seg_free(struct dp_soc *soc,
1065 		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
1066 {
1067 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1068 	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
1069 	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
1070 	soc->tx_tso_num_seg[pool_id].num_free++;
1071 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1072 }
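
/*
 * Illustrative pairing of the TSO pool helpers above (a sketch only; how
 * the elements are filled in and linked to the frame is elided):
 *
 *   tso_num_seg = dp_tso_num_seg_alloc(soc, pool_id);
 *   tso_seg = dp_tx_tso_desc_alloc(soc, pool_id);
 *   ...
 *   dp_tx_tso_desc_free(soc, pool_id, tso_seg);
 *   dp_tso_num_seg_free(soc, pool_id, tso_num_seg);
 */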
1073 #endif
1074 
1075 /*
1076  * dp_tx_me_alloc_buf() - Allocate a buffer from the ME buffer pool
1077  * @pdev: DP_PDEV handle for datapath
1078  *
1079  * Return: pointer to the allocated dp_tx_me_buf_t, or NULL if the pool is empty
1080  */
1081 static inline struct dp_tx_me_buf_t*
1082 dp_tx_me_alloc_buf(struct dp_pdev *pdev)
1083 {
1084 	struct dp_tx_me_buf_t *buf = NULL;
1085 	qdf_spin_lock_bh(&pdev->tx_mutex);
1086 	if (pdev->me_buf.freelist) {
1087 		buf = pdev->me_buf.freelist;
1088 		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
1089 		pdev->me_buf.buf_in_use++;
1090 	} else {
1091 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1092 				"Error allocating memory in pool");
1093 		qdf_spin_unlock_bh(&pdev->tx_mutex);
1094 		return NULL;
1095 	}
1096 	qdf_spin_unlock_bh(&pdev->tx_mutex);
1097 	return buf;
1098 }
1099 
1100 /*
1101  * dp_tx_me_free_buf() - Unmap the buffer holding the destination
1102  * address, free the ME descriptor and add it back to the free pool
1103  * @pdev: DP_PDEV handle for datapath
1104  * @buf: allocated ME buffer
1105  *
1106  * Return: none
1107  */
1108 static inline void
1109 dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
1110 {
1111 	/*
1112 	 * If the buf containing mac address was mapped,
1113 	 * it must be unmapped before freeing the me_buf.
1114 	 * The "paddr_macbuf" member in the me_buf structure
1115 	 * holds the mapped physical address and it must be
1116 	 * set to 0 after unmapping.
1117 	 */
1118 	if (buf->paddr_macbuf) {
1119 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
1120 					    buf->paddr_macbuf,
1121 					    QDF_DMA_TO_DEVICE,
1122 					    QDF_MAC_ADDR_SIZE);
1123 		buf->paddr_macbuf = 0;
1124 	}
1125 	qdf_spin_lock_bh(&pdev->tx_mutex);
1126 	buf->next = pdev->me_buf.freelist;
1127 	pdev->me_buf.freelist = buf;
1128 	pdev->me_buf.buf_in_use--;
1129 	qdf_spin_unlock_bh(&pdev->tx_mutex);
1130 }
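
/*
 * Illustrative ME buffer usage (a sketch; mapping of the destination MAC
 * address into buf->paddr_macbuf is elided):
 *
 *   struct dp_tx_me_buf_t *me_buf;
 *
 *   me_buf = dp_tx_me_alloc_buf(pdev);
 *   if (!me_buf)
 *           return QDF_STATUS_E_RESOURCES;
 *   ... copy and map the destination address ...
 *   dp_tx_me_free_buf(pdev, me_buf);
 */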
1131 #endif /* DP_TX_DESC_H */
1132