1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef DP_TX_DESC_H
21 #define DP_TX_DESC_H
22 
23 #include "dp_types.h"
24 #include "dp_tx.h"
25 #include "dp_internal.h"
26 
27 /**
28  * 21 bits cookie
29  * 2 bits pool id 0 ~ 3,
30  * 10 bits page id 0 ~ 1023
31  * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
32  */
33 /* TODO: is a Ring ID needed in the cookie? */
34 #define DP_TX_DESC_ID_POOL_MASK    0x018000
35 #define DP_TX_DESC_ID_POOL_OS      15
36 #define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
37 #define DP_TX_DESC_ID_PAGE_OS      5
38 #define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
39 #define DP_TX_DESC_ID_OFFSET_OS    0
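
/*
 * Example (illustration only): decoding a hypothetical descriptor ID of
 * 0x100E5 with the masks above yields
 *
 *	pool_id = (0x100E5 & DP_TX_DESC_ID_POOL_MASK) >> DP_TX_DESC_ID_POOL_OS     = 2
 *	page_id = (0x100E5 & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS     = 7
 *	offset  = (0x100E5 & DP_TX_DESC_ID_OFFSET_MASK) >> DP_TX_DESC_ID_OFFSET_OS = 5
 *
 * dp_tx_is_desc_id_valid() below performs exactly this decomposition.
 */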
40 
41 /**
42  * Compile-time assertion on the tx descriptor size.
43  *
44  * If this assertion fires, update DP_TX_DESC_ID_POOL_MASK and
45  * DP_TX_DESC_ID_PAGE_MASK for the new descriptor size.
46  *
47  * With the current PAGE mask, the allowed size range of tx_desc
48  * is 128 to 256 bytes.
49  */
50 QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
51 			((sizeof(struct dp_tx_desc_s)) <=
52 			 (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
53 			((sizeof(struct dp_tx_desc_s)) >
54 			 (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)))
55 		       );
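
/*
 * With DP_TX_DESC_ID_PAGE_OS = 5 the assertion above is equivalent to
 *
 *	DP_BLOCKMEM_SIZE / 64 < sizeof(struct dp_tx_desc_s) <= DP_BLOCKMEM_SIZE / 32
 *
 * which matches the 128-256 byte range quoted above when DP_BLOCKMEM_SIZE
 * is 8 KB.
 */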
56 
57 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
58 #define TX_DESC_LOCK_CREATE(lock)
59 #define TX_DESC_LOCK_DESTROY(lock)
60 #define TX_DESC_LOCK_LOCK(lock)
61 #define TX_DESC_LOCK_UNLOCK(lock)
62 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
63 	((pool)->status == FLOW_POOL_INACTIVE)
64 #ifdef QCA_AC_BASED_FLOW_CONTROL
65 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
66 	dp_tx_flow_pool_member_clean(_tx_desc_pool)
67 
68 #else /* !QCA_AC_BASED_FLOW_CONTROL */
69 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
70 do {                                                   \
71 	(_tx_desc_pool)->elem_size = 0;                \
72 	(_tx_desc_pool)->freelist = NULL;              \
73 	(_tx_desc_pool)->pool_size = 0;                \
74 	(_tx_desc_pool)->avail_desc = 0;               \
75 	(_tx_desc_pool)->start_th = 0;                 \
76 	(_tx_desc_pool)->stop_th = 0;                  \
77 	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
78 } while (0)
79 #endif /* QCA_AC_BASED_FLOW_CONTROL */
80 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
81 #define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
82 #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
83 #define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
84 #define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
85 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
86 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
87 do {                                                   \
88 	(_tx_desc_pool)->elem_size = 0;                \
89 	(_tx_desc_pool)->num_allocated = 0;            \
90 	(_tx_desc_pool)->freelist = NULL;              \
91 	(_tx_desc_pool)->elem_count = 0;               \
92 	(_tx_desc_pool)->num_free = 0;                 \
93 } while (0)
94 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
95 #define MAX_POOL_BUFF_COUNT 10000
96 
97 #ifdef DP_TX_TRACKING
98 static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
99 					uint32_t magic_pattern)
100 {
101 	tx_desc->magic = magic_pattern;
102 }
103 #else
104 static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
105 					uint32_t magic_pattern)
106 {
107 }
108 #endif
109 
110 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
111 				 uint32_t num_elem);
112 QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
113 				uint32_t num_elem);
114 void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
115 void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
116 
117 QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
118 				     uint32_t num_elem);
119 QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
120 				    uint32_t num_elem);
121 void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
122 void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
123 
124 QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
125 				     uint32_t num_elem);
126 QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
127 				    uint32_t num_elem);
128 void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
129 void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
130 
131 QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
132 		uint32_t num_elem);
133 QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id,
134 				       uint32_t num_elem);
135 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
136 void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
137 
138 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
139 void dp_tx_flow_control_init(struct dp_soc *);
140 void dp_tx_flow_control_deinit(struct dp_soc *);
141 
142 QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
143 	tx_pause_callback pause_cb);
144 QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
145 			       uint8_t vdev_id);
146 void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
147 			   uint8_t vdev_id);
148 void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
149 struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
150 	uint8_t flow_pool_id, uint32_t flow_pool_size);
151 
152 QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
153 	uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size);
154 void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
155 	uint8_t flow_type, uint8_t flow_pool_id);
156 
157 /**
158  * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
159  * @pool: flow pool
160  *
161  * Caller needs to take lock and do sanity checks.
162  *
163  * Return: tx descriptor
164  */
165 static inline
166 struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
167 {
168 	struct dp_tx_desc_s *tx_desc = pool->freelist;
169 
170 	pool->freelist = pool->freelist->next;
171 	pool->avail_desc--;
172 	return tx_desc;
173 }
174 
175 /**
176  * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
177  * @pool: flow pool
178  * @tx_desc: tx descriptor
179  *
180  * Caller needs to take lock and do sanity checks.
181  *
182  * Return: none
183  */
184 static inline
185 void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
186 			struct dp_tx_desc_s *tx_desc)
187 {
188 	tx_desc->next = pool->freelist;
189 	pool->freelist = tx_desc;
190 	pool->avail_desc++;
191 }
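
/*
 * Example usage (sketch only): both helpers above expect the caller to hold
 * the pool lock, as dp_tx_desc_alloc()/dp_tx_desc_free() below do:
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	tx_desc = dp_tx_get_desc_flow_pool(pool);
 *	...
 *	dp_tx_put_desc_flow_pool(pool, tx_desc);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */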
192 
193 #ifdef QCA_AC_BASED_FLOW_CONTROL
194 
195 /**
196  * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
197  *
198  * @pool: flow pool
199  *
200  * Return: None
201  */
202 static inline void
203 dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
204 {
205 	pool->elem_size = 0;
206 	pool->freelist = NULL;
207 	pool->pool_size = 0;
208 	pool->avail_desc = 0;
209 	qdf_mem_zero(pool->start_th, FL_TH_MAX);
210 	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
211 	pool->status = FLOW_POOL_INACTIVE;
212 }
213 
214 /**
215  * dp_tx_is_threshold_reached() - Check if current avail desc count meets a stop threshold
216  *
217  * @pool: flow pool
218  * @avail_desc: available descriptor number
219  *
220  * Return: true if threshold is met, false if not
221  */
222 static inline bool
223 dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
224 {
225 	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
226 		return true;
227 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
228 		return true;
229 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
230 		return true;
231 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
232 		return true;
233 	else
234 		return false;
235 }
236 
237 /**
238  * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
239  *
240  * @soc: dp soc
241  * @pool: flow pool
242  */
243 static inline void
244 dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
245 			     struct dp_tx_desc_pool_s *pool)
246 {
247 	if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
248 		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
249 		return;
250 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
251 		   pool->avail_desc > pool->stop_th[DP_TH_VI]) {
252 		pool->status = FLOW_POOL_BE_BK_PAUSED;
253 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
254 		   pool->avail_desc > pool->stop_th[DP_TH_VO]) {
255 		pool->status = FLOW_POOL_VI_PAUSED;
256 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
257 		   pool->avail_desc > pool->stop_th[DP_TH_HI]) {
258 		pool->status = FLOW_POOL_VO_PAUSED;
259 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
260 		pool->status = FLOW_POOL_ACTIVE_PAUSED;
261 	}
262 
263 	switch (pool->status) {
264 	case FLOW_POOL_ACTIVE_PAUSED:
265 		soc->pause_cb(pool->flow_pool_id,
266 			      WLAN_NETIF_PRIORITY_QUEUE_OFF,
267 			      WLAN_DATA_FLOW_CTRL_PRI);
268 		fallthrough;
269 
270 	case FLOW_POOL_VO_PAUSED:
271 		soc->pause_cb(pool->flow_pool_id,
272 			      WLAN_NETIF_VO_QUEUE_OFF,
273 			      WLAN_DATA_FLOW_CTRL_VO);
274 		fallthrough;
275 
276 	case FLOW_POOL_VI_PAUSED:
277 		soc->pause_cb(pool->flow_pool_id,
278 			      WLAN_NETIF_VI_QUEUE_OFF,
279 			      WLAN_DATA_FLOW_CTRL_VI);
280 		fallthrough;
281 
282 	case FLOW_POOL_BE_BK_PAUSED:
283 		soc->pause_cb(pool->flow_pool_id,
284 			      WLAN_NETIF_BE_BK_QUEUE_OFF,
285 			      WLAN_DATA_FLOW_CTRL_BE_BK);
286 		break;
287 	default:
288 		dp_err("Invalid pool status:%u to adjust", pool->status);
289 	}
290 }
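
/*
 * Example: if avail_desc drops to a value above stop_th[DP_TH_VO] but at or
 * below stop_th[DP_TH_VI], the pool becomes FLOW_POOL_VI_PAUSED and the
 * switch above turns off the VI queue and then, via fallthrough, the BE/BK
 * queue, leaving only VO and high-priority traffic flowing.
 */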
291 
292 /**
293  * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
294  *
295  * @soc: Handle to DP SoC structure
296  * @desc_pool_id: ID of the flow control pool
297  *
298  * Return: TX descriptor allocated or NULL
299  */
300 static inline struct dp_tx_desc_s *
301 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
302 {
303 	struct dp_tx_desc_s *tx_desc = NULL;
304 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
305 	bool is_pause = false;
306 	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
307 	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
308 	enum netif_reason_type reason;
309 
310 	if (qdf_likely(pool)) {
311 		qdf_spin_lock_bh(&pool->flow_pool_lock);
312 		if (qdf_likely(pool->avail_desc &&
313 		    pool->status != FLOW_POOL_INVALID &&
314 		    pool->status != FLOW_POOL_INACTIVE)) {
315 			tx_desc = dp_tx_get_desc_flow_pool(pool);
316 			tx_desc->pool_id = desc_pool_id;
317 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
318 			dp_tx_desc_set_magic(tx_desc,
319 					     DP_TX_MAGIC_PATTERN_INUSE);
320 			is_pause = dp_tx_is_threshold_reached(pool,
321 							      pool->avail_desc);
322 
323 			if (qdf_unlikely(pool->status ==
324 					 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
325 				dp_tx_adjust_flow_pool_state(soc, pool);
326 				is_pause = false;
327 			}
328 
329 			if (qdf_unlikely(is_pause)) {
330 				switch (pool->status) {
331 				case FLOW_POOL_ACTIVE_UNPAUSED:
332 					/* pause network BE/BK queue */
333 					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
334 					reason = WLAN_DATA_FLOW_CTRL_BE_BK;
335 					level = DP_TH_BE_BK;
336 					pool->status = FLOW_POOL_BE_BK_PAUSED;
337 					break;
338 				case FLOW_POOL_BE_BK_PAUSED:
339 					/* pause network VI queue */
340 					act = WLAN_NETIF_VI_QUEUE_OFF;
341 					reason = WLAN_DATA_FLOW_CTRL_VI;
342 					level = DP_TH_VI;
343 					pool->status = FLOW_POOL_VI_PAUSED;
344 					break;
345 				case FLOW_POOL_VI_PAUSED:
346 					/* pause network VO queue */
347 					act = WLAN_NETIF_VO_QUEUE_OFF;
348 					reason = WLAN_DATA_FLOW_CTRL_VO;
349 					level = DP_TH_VO;
350 					pool->status = FLOW_POOL_VO_PAUSED;
351 					break;
352 				case FLOW_POOL_VO_PAUSED:
353 					/* pause network HI PRI queue */
354 					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
355 					reason = WLAN_DATA_FLOW_CTRL_PRI;
356 					level = DP_TH_HI;
357 					pool->status = FLOW_POOL_ACTIVE_PAUSED;
358 					break;
359 				case FLOW_POOL_ACTIVE_PAUSED:
360 					act = WLAN_NETIF_ACTION_TYPE_NONE;
361 					break;
362 				default:
363 					dp_err_rl("pool status is %d!",
364 						  pool->status);
365 					break;
366 				}
367 
368 				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
369 					pool->latest_pause_time[level] =
370 						qdf_get_system_timestamp();
371 					soc->pause_cb(desc_pool_id,
372 						      act,
373 						      reason);
374 				}
375 			}
376 		} else {
377 			pool->pkt_drop_no_desc++;
378 		}
379 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
380 	} else {
381 		soc->pool_stats.pkt_drop_no_pool++;
382 	}
383 
384 	return tx_desc;
385 }
386 
387 /**
388  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
389  *
390  * @soc: Handle to DP SoC structure
391  * @tx_desc: the tx descriptor to be freed
392  * @desc_pool_id: ID of the flow control pool
393  *
394  * Return: None
395  */
396 static inline void
397 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
398 		uint8_t desc_pool_id)
399 {
400 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
401 	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
402 	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
403 	enum netif_reason_type reason;
404 
405 	qdf_spin_lock_bh(&pool->flow_pool_lock);
406 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
407 	tx_desc->nbuf = NULL;
408 	tx_desc->flags = 0;
409 	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
410 	dp_tx_put_desc_flow_pool(pool, tx_desc);
411 	switch (pool->status) {
412 	case FLOW_POOL_ACTIVE_PAUSED:
413 		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
414 			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
415 			reason = WLAN_DATA_FLOW_CTRL_PRI;
416 			pool->status = FLOW_POOL_VO_PAUSED;
417 
418 			/* Update maximum pause duration for HI queue */
419 			pause_dur = unpause_time -
420 					pool->latest_pause_time[DP_TH_HI];
421 			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
422 				pool->max_pause_time[DP_TH_HI] = pause_dur;
423 		}
424 		break;
425 	case FLOW_POOL_VO_PAUSED:
426 		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
427 			act = WLAN_NETIF_VO_QUEUE_ON;
428 			reason = WLAN_DATA_FLOW_CTRL_VO;
429 			pool->status = FLOW_POOL_VI_PAUSED;
430 
431 			/* Update maximum pause duration for VO queue */
432 			pause_dur = unpause_time -
433 					pool->latest_pause_time[DP_TH_VO];
434 			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
435 				pool->max_pause_time[DP_TH_VO] = pause_dur;
436 		}
437 		break;
438 	case FLOW_POOL_VI_PAUSED:
439 		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
440 			act = WLAN_NETIF_VI_QUEUE_ON;
441 			reason = WLAN_DATA_FLOW_CTRL_VI;
442 			pool->status = FLOW_POOL_BE_BK_PAUSED;
443 
444 			/* Update maximum pause duration for VI queue */
445 			pause_dur = unpause_time -
446 					pool->latest_pause_time[DP_TH_VI];
447 			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
448 				pool->max_pause_time[DP_TH_VI] = pause_dur;
449 		}
450 		break;
451 	case FLOW_POOL_BE_BK_PAUSED:
452 		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
453 			act = WLAN_NETIF_BE_BK_QUEUE_ON;
454 			reason = WLAN_DATA_FLOW_CTRL_BE_BK;
455 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
456 
457 			/* Update maximum pause duration for BE_BK queue */
458 			pause_dur = unpause_time -
459 					pool->latest_pause_time[DP_TH_BE_BK];
460 			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
461 				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
462 		}
463 		break;
464 	case FLOW_POOL_INVALID:
465 		if (pool->avail_desc == pool->pool_size) {
466 			dp_tx_desc_pool_deinit(soc, desc_pool_id);
467 			dp_tx_desc_pool_free(soc, desc_pool_id);
468 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
469 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
470 				  "%s %d pool is freed!!",
471 				  __func__, __LINE__);
472 			return;
473 		}
474 		break;
475 
476 	case FLOW_POOL_ACTIVE_UNPAUSED:
477 		break;
478 	default:
479 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
480 			  "%s %d pool is INACTIVE State!!",
481 			  __func__, __LINE__);
482 		break;
483 	};
484 
485 	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
486 		soc->pause_cb(pool->flow_pool_id,
487 			      act, reason);
488 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
489 }
490 #else /* QCA_AC_BASED_FLOW_CONTROL */
491 
492 static inline bool
493 dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
494 {
495 	if (qdf_unlikely(avail_desc < pool->stop_th))
496 		return true;
497 	else
498 		return false;
499 }
500 
501 /**
502  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
503  *
504  * @soc: Handle to DP SoC structure
505  * @desc_pool_id: ID of the flow control pool
506  *
507  * Return: TX descriptor allocated or NULL
508  */
509 static inline struct dp_tx_desc_s *
510 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
511 {
512 	struct dp_tx_desc_s *tx_desc = NULL;
513 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
514 
515 	if (pool) {
516 		qdf_spin_lock_bh(&pool->flow_pool_lock);
517 		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
518 		    pool->avail_desc) {
519 			tx_desc = dp_tx_get_desc_flow_pool(pool);
520 			tx_desc->pool_id = desc_pool_id;
521 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
522 			dp_tx_desc_set_magic(tx_desc,
523 					     DP_TX_MAGIC_PATTERN_INUSE);
524 			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
525 				pool->status = FLOW_POOL_ACTIVE_PAUSED;
526 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
527 				/* pause network queues */
528 				soc->pause_cb(desc_pool_id,
529 					       WLAN_STOP_ALL_NETIF_QUEUE,
530 					       WLAN_DATA_FLOW_CONTROL);
531 			} else {
532 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
533 			}
534 		} else {
535 			pool->pkt_drop_no_desc++;
536 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
537 		}
538 	} else {
539 		soc->pool_stats.pkt_drop_no_pool++;
540 	}
541 
542 	return tx_desc;
543 }
544 
545 /**
546  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
547  *
548  * @soc: Handle to DP SoC structure
549  * @tx_desc: the tx descriptor to be freed
550  * @desc_pool_id: ID of the flow control pool
551  *
552  * Return: None
553  */
554 static inline void
555 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
556 		uint8_t desc_pool_id)
557 {
558 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
559 
560 	qdf_spin_lock_bh(&pool->flow_pool_lock);
561 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
562 	tx_desc->nbuf = NULL;
563 	tx_desc->flags = 0;
564 	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
565 	dp_tx_put_desc_flow_pool(pool, tx_desc);
566 	switch (pool->status) {
567 	case FLOW_POOL_ACTIVE_PAUSED:
568 		if (pool->avail_desc > pool->start_th) {
569 			soc->pause_cb(pool->flow_pool_id,
570 				       WLAN_WAKE_ALL_NETIF_QUEUE,
571 				       WLAN_DATA_FLOW_CONTROL);
572 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
573 		}
574 		break;
575 	case FLOW_POOL_INVALID:
576 		if (pool->avail_desc == pool->pool_size) {
577 			dp_tx_desc_pool_deinit(soc, desc_pool_id);
578 			dp_tx_desc_pool_free(soc, desc_pool_id);
579 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
580 			qdf_print("%s %d pool is freed!!",
581 				  __func__, __LINE__);
582 			return;
583 		}
584 		break;
585 
586 	case FLOW_POOL_ACTIVE_UNPAUSED:
587 		break;
588 	default:
589 		qdf_print("%s %d pool is INACTIVE State!!",
590 			  __func__, __LINE__);
591 		break;
592 	};
593 
594 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
595 }
596 
597 #endif /* QCA_AC_BASED_FLOW_CONTROL */
598 
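/**
 * dp_tx_desc_thresh_reached() - Check if the vdev's flow pool has hit a stop threshold
 * @soc_hdl: CDP soc handle
 * @vdev_id: ID of the vdev whose flow pool is checked
 *
 * Return: true if the pool's available descriptor count has reached a stop
 *	   threshold, false otherwise (including when the vdev is not found)
 */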
599 static inline bool
600 dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
601 {
602 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
603 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
604 						     DP_MOD_ID_CDP);
605 	struct dp_tx_desc_pool_s *pool;
606 	bool status;
607 
608 	if (!vdev)
609 		return false;
610 
611 	pool = vdev->pool;
612 	status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
613 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
614 
615 	return status;
616 }
617 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
618 
619 static inline void dp_tx_flow_control_init(struct dp_soc *handle)
620 {
621 }
622 
623 static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
624 {
625 }
626 
627 static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
628 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
629 	uint32_t flow_pool_size)
630 {
631 	return QDF_STATUS_SUCCESS;
632 }
633 
634 static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
635 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
636 {
637 }
638 
639 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
640 static inline
641 void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
642 {
643 	if (tx_desc)
644 		prefetch(tx_desc);
645 }
646 #else
647 static inline
648 void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
649 {
650 }
651 #endif
652 
653 /**
654  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
655  *
656  * @soc: Handle to DP SoC structure
657  * @desc_pool_id: ID of the descriptor pool
658  *
659  * Return: TX descriptor allocated or NULL
660  */
661 static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
662 						uint8_t desc_pool_id)
663 {
664 	struct dp_tx_desc_s *tx_desc = NULL;
665 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
666 
667 	TX_DESC_LOCK_LOCK(&pool->lock);
668 
669 	tx_desc = pool->freelist;
670 
671 	/* Pool is exhausted */
672 	if (!tx_desc) {
673 		TX_DESC_LOCK_UNLOCK(&pool->lock);
674 		return NULL;
675 	}
676 
677 	pool->freelist = pool->freelist->next;
678 	pool->num_allocated++;
679 	pool->num_free--;
680 	dp_tx_prefetch_desc(pool->freelist);
681 
682 	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
683 
684 	TX_DESC_LOCK_UNLOCK(&pool->lock);
685 
686 	return tx_desc;
687 }
688 
689 /**
690  * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
691  *                            from given pool
692  * @soc: Handle to DP SoC structure
693  * @desc_pool_id: ID of the pool to allocate from
694  * @num_requested: number of descriptors required
695  *
696  * Allocate multiple tx descriptors and link them into a chain.
697  *
698  * Return: h_desc, pointer to the first descriptor in the chain
699  */
700 static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
701 		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
702 {
703 	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
704 	uint8_t count;
705 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
706 
707 	TX_DESC_LOCK_LOCK(&pool->lock);
708 
709 	if ((num_requested == 0) ||
710 			(pool->num_free < num_requested)) {
711 		TX_DESC_LOCK_UNLOCK(&pool->lock);
712 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
713 			"%s, No Free Desc: Available(%d) num_requested(%d)",
714 			__func__, pool->num_free,
715 			num_requested);
716 		return NULL;
717 	}
718 
719 	h_desc = pool->freelist;
720 
721 	/* h_desc should never be NULL since num_free > requested */
722 	qdf_assert_always(h_desc);
723 
724 	c_desc = h_desc;
725 	for (count = 0; count < (num_requested - 1); count++) {
726 		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
727 		c_desc = c_desc->next;
728 	}
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; /* last descriptor in the chain */
729 	pool->num_free -= num_requested;
730 	pool->num_allocated += num_requested;
731 	pool->freelist = c_desc->next;
732 	c_desc->next = NULL;
733 
734 	TX_DESC_LOCK_UNLOCK(&pool->lock);
735 	return h_desc;
736 }
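
/*
 * Example usage (sketch only): the returned descriptors are linked through
 * tx_desc->next and the chain is NULL-terminated, so a caller can walk it as
 *
 *	struct dp_tx_desc_s *desc = dp_tx_desc_alloc_multiple(soc, pool_id, n);
 *	struct dp_tx_desc_s *next;
 *
 *	while (desc) {
 *		next = desc->next;
 *		(program and queue desc here)
 *		desc = next;
 *	}
 */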
737 
738 /**
739  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
740  *
741  * @soc: Handle to DP SoC structure
742  * @tx_desc: the tx descriptor to be freed
743  * @desc_pool_id: ID of the descriptor pool
744  */
745 static inline void
746 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
747 		uint8_t desc_pool_id)
748 {
749 	struct dp_tx_desc_pool_s *pool = NULL;
750 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
751 	tx_desc->nbuf = NULL;
752 	tx_desc->flags = 0;
753 
754 	pool = &soc->tx_desc[desc_pool_id];
755 	TX_DESC_LOCK_LOCK(&pool->lock);
756 	tx_desc->next = pool->freelist;
757 	pool->freelist = tx_desc;
758 	pool->num_allocated--;
759 	pool->num_free++;
760 	TX_DESC_LOCK_UNLOCK(&pool->lock);
761 }
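
/*
 * Example usage (sketch only, non-flow-control build): the alloc/free pair
 * above manages the per-pool freelist under TX_DESC_LOCK internally, so a
 * caller simply pairs the two calls:
 *
 *	struct dp_tx_desc_s *tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *
 *	if (tx_desc) {
 *		(fill the descriptor, hand the frame to hardware, and once
 *		 transmit completion is processed release it)
 *		dp_tx_desc_free(soc, tx_desc, desc_pool_id);
 *	}
 */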
762 
763 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
764 
765 #ifdef QCA_DP_TX_DESC_ID_CHECK
766 /**
767  * dp_tx_is_desc_id_valid() - check whether the tx desc id is valid
768  *
769  * @soc: Handle to DP SoC structure
770  * @tx_desc_id: descriptor ID (cookie) to validate
771  *
772  * Return: true or false
773  */
774 static inline bool
775 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
776 {
777 	uint8_t pool_id;
778 	uint16_t page_id, offset;
779 	struct dp_tx_desc_pool_s *pool;
780 
781 	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
782 			DP_TX_DESC_ID_POOL_OS;
783 	/* Pool ID is out of limit */
784 	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
785 				soc->wlan_cfg_ctx)) {
786 		QDF_TRACE(QDF_MODULE_ID_DP,
787 			  QDF_TRACE_LEVEL_FATAL,
788 			  "%s:Tx Comp pool id %d not valid",
789 			  __func__,
790 			  pool_id);
791 		goto warn_exit;
792 	}
793 
794 	pool = &soc->tx_desc[pool_id];
795 	/* the pool is freed */
796 	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
797 		QDF_TRACE(QDF_MODULE_ID_DP,
798 			  QDF_TRACE_LEVEL_FATAL,
799 			  "%s:the pool %d has been freed",
800 			  __func__,
801 			  pool_id);
802 		goto warn_exit;
803 	}
804 
805 	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
806 				DP_TX_DESC_ID_PAGE_OS;
807 	/* the page id is out of limit */
808 	if (page_id >= pool->desc_pages.num_pages) {
809 		QDF_TRACE(QDF_MODULE_ID_DP,
810 			  QDF_TRACE_LEVEL_FATAL,
811 			  "%s:the page id %d invalid, pool id %d, num_page %d",
812 			  __func__,
813 			  page_id,
814 			  pool_id,
815 			  pool->desc_pages.num_pages);
816 		goto warn_exit;
817 	}
818 
819 	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
820 				DP_TX_DESC_ID_OFFSET_OS;
821 	/* the offset is out of limit */
822 	if (offset >= pool->desc_pages.num_element_per_page) {
823 		QDF_TRACE(QDF_MODULE_ID_DP,
824 			  QDF_TRACE_LEVEL_FATAL,
825 			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
826 			  __func__,
827 			  offset,
828 			  pool_id,
829 			  pool->desc_pages.num_element_per_page);
830 		goto warn_exit;
831 	}
832 
833 	return true;
834 
835 warn_exit:
836 	QDF_TRACE(QDF_MODULE_ID_DP,
837 		  QDF_TRACE_LEVEL_FATAL,
838 		  "%s:Tx desc id 0x%x not valid",
839 		  __func__,
840 		  tx_desc_id);
841 	qdf_assert_always(0);
842 	return false;
843 }
844 
845 #else
846 static inline bool
847 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
848 {
849 	return true;
850 }
851 #endif /* QCA_DP_TX_DESC_ID_CHECK */
852 
853 #ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
854 static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
855 						    struct dp_tx_desc_s *desc,
856 						    uint8_t allow_fast_comp)
857 {
858 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
859 	    qdf_likely(allow_fast_comp)) {
860 		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
861 	}
862 }
863 #else
864 static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
865 						    struct dp_tx_desc_s *desc,
866 						    uint8_t allow_fast_comp)
867 {
868 }
869 #endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */
870 
871 /**
872  * dp_tx_desc_find() - find dp tx descriptor from cookie fields
873  * @soc: handle for the device sending the data
874  * @pool_id: descriptor pool id decoded from the descriptor ID
875  * @page_id: page id decoded from the descriptor ID
876  * @offset: offset within the page decoded from the descriptor ID
877  *
878  * Return: the descriptor object that has the specified ID
879  */
880 static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
881 		uint8_t pool_id, uint16_t page_id, uint16_t offset)
882 {
883 	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];
884 
885 	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
886 		tx_desc_pool->elem_size * offset;
887 }
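
/*
 * Example: with elem_size = 128 and offset = 5 the lookup above returns
 * cacheable_pages[page_id] + 640, i.e. the sixth descriptor in that page.
 * The pool/page/offset arguments are the same fields dp_tx_is_desc_id_valid()
 * decodes from a descriptor ID.
 */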
888 
889 /**
890  * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
891  * @soc: handle for the device sending the data
892  * @desc_pool_id: target pool id
893  *
894  * Return: ext descriptor element on success, NULL if the pool is empty
895  */
896 static inline
897 struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
898 		uint8_t desc_pool_id)
899 {
900 	struct dp_tx_ext_desc_elem_s *c_elem;
901 
902 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
903 	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
904 		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
905 		return NULL;
906 	}
907 	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
908 	soc->tx_ext_desc[desc_pool_id].freelist =
909 		soc->tx_ext_desc[desc_pool_id].freelist->next;
910 	soc->tx_ext_desc[desc_pool_id].num_free--;
911 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
912 	return c_elem;
913 }
914 
915 /**
916  * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
917  * @soc: handle for the device sending the data
918  * @elem: ext descriptor to be released
919  * @desc_pool_id: target pool id
920  *
921  * Return: None
922  */
923 static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
924 	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
925 {
926 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
927 	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
928 	soc->tx_ext_desc[desc_pool_id].freelist = elem;
929 	soc->tx_ext_desc[desc_pool_id].num_free++;
930 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
931 	return;
932 }
933 
934 /**
935  * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
936  *                           attach them to the free list
937  * @soc: Handle to DP SoC structure
938  * @elem: head of the descriptor list to be freed
939  * @desc_pool_id: ID of the pool the descriptors are returned to
940  * @num_free: number of descriptors to be freed
941  *
942  * Return: none
943  */
944 static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
945 		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
946 		uint8_t num_free)
947 {
948 	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
949 	uint8_t freed = num_free;
950 
951 	/* caller must always provide a list of at least num_free nodes */
952 	qdf_assert_always(elem);
953 
954 	head = elem;
955 	c_elem = head;
956 	tail = head;
957 	while (c_elem && freed) {
958 		tail = c_elem;
959 		c_elem = c_elem->next;
960 		freed--;
961 	}
962 
963 	/* caller must always provide a list of at least num_free nodes */
964 	qdf_assert_always(tail);
965 
966 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
967 	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
968 	soc->tx_ext_desc[desc_pool_id].freelist = head;
969 	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
970 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
971 
972 	return;
973 }
974 
975 #if defined(FEATURE_TSO)
976 /**
977  * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
978  * @soc: device soc instance
979  * @pool_id: pool id from which the TSO descriptor should be allocated
980  *
981  * Allocates a TSO segment element from the free list held in
982  * the soc
983  *
984  * Return: tso_seg, tso segment memory pointer
985  */
986 static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
987 		struct dp_soc *soc, uint8_t pool_id)
988 {
989 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
990 
991 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
992 	if (soc->tx_tso_desc[pool_id].freelist) {
993 		soc->tx_tso_desc[pool_id].num_free--;
994 		tso_seg = soc->tx_tso_desc[pool_id].freelist;
995 		soc->tx_tso_desc[pool_id].freelist =
996 			soc->tx_tso_desc[pool_id].freelist->next;
997 	}
998 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
999 
1000 	return tso_seg;
1001 }
1002 
1003 /**
1004  * dp_tx_tso_desc_free() - function to free a TSO segment
1005  * @soc: device soc instance
1006  * @pool_id: pool id to which the TSO descriptor is returned
1007  * @tso_seg: tso segment memory pointer
1008  *
1009  * Returns a TSO segment element to the free list held in the
1010  * soc
1011  *
1012  * Return: none
1013  */
1014 static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
1015 		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
1016 {
1017 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
1018 	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
1019 	soc->tx_tso_desc[pool_id].freelist = tso_seg;
1020 	soc->tx_tso_desc[pool_id].num_free++;
1021 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
1022 }
1023 
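/**
 * dp_tso_num_seg_alloc() - Allocate a TSO num-seg element from the pool
 * @soc: device soc instance
 * @pool_id: pool id from which the element should be allocated
 *
 * Return: qdf_tso_num_seg_elem_t pointer on success, NULL if the freelist is empty
 */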
1024 static inline
1025 struct qdf_tso_num_seg_elem_t  *dp_tso_num_seg_alloc(struct dp_soc *soc,
1026 		uint8_t pool_id)
1027 {
1028 	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
1029 
1030 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1031 	if (soc->tx_tso_num_seg[pool_id].freelist) {
1032 		soc->tx_tso_num_seg[pool_id].num_free--;
1033 		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
1034 		soc->tx_tso_num_seg[pool_id].freelist =
1035 			soc->tx_tso_num_seg[pool_id].freelist->next;
1036 	}
1037 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1038 
1039 	return tso_num_seg;
1040 }
1041 
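/**
 * dp_tso_num_seg_free() - Return a TSO num-seg element to the pool freelist
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */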
1042 static inline
1043 void dp_tso_num_seg_free(struct dp_soc *soc,
1044 		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
1045 {
1046 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1047 	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
1048 	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
1049 	soc->tx_tso_num_seg[pool_id].num_free++;
1050 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1051 }
1052 #endif
1053 
1054 /*
1055  * dp_tx_me_alloc_buf() - Allocate a descriptor from the ME pool
1056  * @pdev: DP_PDEV handle for datapath
1057  *
1058  * Return: dp_tx_me_buf_t (buf) on success, NULL if the pool is empty
1059  */
1060 static inline struct dp_tx_me_buf_t*
1061 dp_tx_me_alloc_buf(struct dp_pdev *pdev)
1062 {
1063 	struct dp_tx_me_buf_t *buf = NULL;
1064 	qdf_spin_lock_bh(&pdev->tx_mutex);
1065 	if (pdev->me_buf.freelist) {
1066 		buf = pdev->me_buf.freelist;
1067 		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
1068 		pdev->me_buf.buf_in_use++;
1069 	} else {
1070 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1071 				"Error allocating memory in pool");
1072 		qdf_spin_unlock_bh(&pdev->tx_mutex);
1073 		return NULL;
1074 	}
1075 	qdf_spin_unlock_bh(&pdev->tx_mutex);
1076 	return buf;
1077 }
1078 
1079 /*
1080  * dp_tx_me_free_buf() - Unmap the buffer holding the dest
1081  * address, free me descriptor and add it to the free-pool
1082  * @pdev: DP_PDEV handle for datapath
1083  * @buf: Allocated ME buffer
1084  *
1085  * Return: void
1086  */
1087 static inline void
1088 dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
1089 {
1090 	/*
1091 	 * If the buf containing mac address was mapped,
1092 	 * it must be unmapped before freeing the me_buf.
1093 	 * The "paddr_macbuf" member in the me_buf structure
1094 	 * holds the mapped physical address and it must be
1095 	 * set to 0 after unmapping.
1096 	 */
1097 	if (buf->paddr_macbuf) {
1098 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
1099 					    buf->paddr_macbuf,
1100 					    QDF_DMA_TO_DEVICE,
1101 					    QDF_MAC_ADDR_SIZE);
1102 		buf->paddr_macbuf = 0;
1103 	}
1104 	qdf_spin_lock_bh(&pdev->tx_mutex);
1105 	buf->next = pdev->me_buf.freelist;
1106 	pdev->me_buf.freelist = buf;
1107 	pdev->me_buf.buf_in_use--;
1108 	qdf_spin_unlock_bh(&pdev->tx_mutex);
1109 }
1110 #endif /* DP_TX_DESC_H */
1111