xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h (revision 8b3dca18206e1a0461492f082fa6e270b092c035)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef DP_TX_DESC_H
21 #define DP_TX_DESC_H
22 
23 #include "dp_types.h"
24 #include "dp_tx.h"
25 #include "dp_internal.h"
26 
27 /*
28  * Tx descriptor ID (cookie) layout - 21 bits:
29  * 2 bits pool id (0 ~ 3),
30  * 10 bits page id (0 ~ 1023),
31  * 5 bits offset id (0 ~ 31) (desc size = 128, num descs per page = 4096/128 = 32)
32  */
33 /* ???Ring ID needed??? */
34 #define DP_TX_DESC_ID_POOL_MASK    0x018000
35 #define DP_TX_DESC_ID_POOL_OS      15
36 #define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
37 #define DP_TX_DESC_ID_PAGE_OS      5
38 #define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
39 #define DP_TX_DESC_ID_OFFSET_OS    0
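
/*
 * Illustrative decomposition of a descriptor ID using the masks above
 * (example value only, not taken from the driver): id = 0x80E3 gives
 *   pool_id = (0x80E3 & DP_TX_DESC_ID_POOL_MASK) >> DP_TX_DESC_ID_POOL_OS     = 1
 *   page_id = (0x80E3 & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS     = 7
 *   offset  = (0x80E3 & DP_TX_DESC_ID_OFFSET_MASK) >> DP_TX_DESC_ID_OFFSET_OS = 3
 */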
40 
41 /*
42  * Compile-time assert on tx descriptor size.
43  *
44  * If this assert is hit, please update POOL_MASK and
45  * PAGE_MASK according to the updated size.
46  *
47  * With the current PAGE mask, the allowed tx_desc size
48  * range is between 128 and 256 bytes.
49  */
50 QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
51 			((sizeof(struct dp_tx_desc_s)) <=
52 			 (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
53 			((sizeof(struct dp_tx_desc_s)) >
54 			 (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)))
55 		       );
56 
57 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
58 #define TX_DESC_LOCK_CREATE(lock)
59 #define TX_DESC_LOCK_DESTROY(lock)
60 #define TX_DESC_LOCK_LOCK(lock)
61 #define TX_DESC_LOCK_UNLOCK(lock)
62 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
63 	((pool)->status == FLOW_POOL_INACTIVE)
64 #ifdef QCA_AC_BASED_FLOW_CONTROL
65 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
66 	dp_tx_flow_pool_member_clean(_tx_desc_pool)
67 
68 #else /* !QCA_AC_BASED_FLOW_CONTROL */
69 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
70 do {                                                   \
71 	(_tx_desc_pool)->elem_size = 0;                \
72 	(_tx_desc_pool)->freelist = NULL;              \
73 	(_tx_desc_pool)->pool_size = 0;                \
74 	(_tx_desc_pool)->avail_desc = 0;               \
75 	(_tx_desc_pool)->start_th = 0;                 \
76 	(_tx_desc_pool)->stop_th = 0;                  \
77 	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
78 } while (0)
79 #endif /* QCA_AC_BASED_FLOW_CONTROL */
80 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
81 #define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
82 #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
83 #define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
84 #define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
85 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
86 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
87 do {                                                   \
88 	(_tx_desc_pool)->elem_size = 0;                \
89 	(_tx_desc_pool)->num_allocated = 0;            \
90 	(_tx_desc_pool)->freelist = NULL;              \
91 	(_tx_desc_pool)->elem_count = 0;               \
92 	(_tx_desc_pool)->num_free = 0;                 \
93 } while (0)
94 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
95 #define MAX_POOL_BUFF_COUNT 10000
96 
97 #ifdef DP_TX_TRACKING
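/**
 * dp_tx_desc_set_magic() - stamp a debug magic pattern into a tx descriptor
 * @tx_desc: tx descriptor to be stamped
 * @magic_pattern: pattern to write (e.g. DP_TX_MAGIC_PATTERN_INUSE/FREE)
 *
 * Compiled to a no-op when DP_TX_TRACKING is not enabled.
 *
 * Return: None
 */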
98 static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
99 					uint32_t magic_pattern)
100 {
101 	tx_desc->magic = magic_pattern;
102 }
103 #else
104 static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
105 					uint32_t magic_pattern)
106 {
107 }
108 #endif
109 
110 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
111 				 uint32_t num_elem);
112 QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
113 				uint32_t num_elem);
114 void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
115 void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
116 
117 QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
118 				     uint32_t num_elem);
119 QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
120 				    uint32_t num_elem);
121 void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
122 void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
123 
124 QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
125 				     uint32_t num_elem);
126 QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
127 				    uint32_t num_elem);
128 void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
129 void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
130 
131 QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
132 		uint32_t num_elem);
133 QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id,
134 				       uint32_t num_elem);
135 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
136 void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
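
/*
 * Typical pool lifetime, as suggested by the alloc/init/deinit/free naming
 * above (illustrative ordering only; the .c implementations are authoritative):
 *
 *   dp_tx_desc_pool_alloc(soc, pool_id, num_elem);
 *   dp_tx_desc_pool_init(soc, pool_id, num_elem);
 *   ... dp_tx_desc_alloc()/dp_tx_desc_free() at runtime ...
 *   dp_tx_desc_pool_deinit(soc, pool_id);
 *   dp_tx_desc_pool_free(soc, pool_id);
 *
 * The ext, tso and tso_num_seg pools follow the same pattern.
 */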
137 
138 #ifdef DP_UMAC_HW_RESET_SUPPORT
139 void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
140 #endif
141 
142 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
143 void dp_tx_flow_control_init(struct dp_soc *);
144 void dp_tx_flow_control_deinit(struct dp_soc *);
145 
146 QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
147 	tx_pause_callback pause_cb);
148 QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
149 			       uint8_t vdev_id);
150 void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
151 			   uint8_t vdev_id);
152 void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
153 struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
154 	uint8_t flow_pool_id, uint32_t flow_pool_size);
155 
156 QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
157 	uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size);
158 void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
159 	uint8_t flow_type, uint8_t flow_pool_id);
160 
161 /**
162  * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
163  * @pool: flow pool
164  *
165  * Caller needs to take lock and do sanity checks.
166  *
167  * Return: tx descriptor
168  */
169 static inline
170 struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
171 {
172 	struct dp_tx_desc_s *tx_desc = pool->freelist;
173 
174 	pool->freelist = pool->freelist->next;
175 	pool->avail_desc--;
176 	return tx_desc;
177 }
178 
179 /**
180  * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
181  * @pool: flow pool
182  * @tx_desc: tx descriptor
183  *
184  * Caller needs to take lock and do sanity checks.
185  *
186  * Return: none
187  */
188 static inline
189 void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
190 			struct dp_tx_desc_s *tx_desc)
191 {
192 	tx_desc->next = pool->freelist;
193 	pool->freelist = tx_desc;
194 	pool->avail_desc++;
195 }
196 
197 #ifdef QCA_AC_BASED_FLOW_CONTROL
198 
199 /**
200  * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
201  *
202  * @pool: flow pool
203  *
204  * Return: None
205  */
206 static inline void
207 dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
208 {
209 	pool->elem_size = 0;
210 	pool->freelist = NULL;
211 	pool->pool_size = 0;
212 	pool->avail_desc = 0;
213 	qdf_mem_zero(pool->start_th, FL_TH_MAX);
214 	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
215 	pool->status = FLOW_POOL_INACTIVE;
216 }
217 
218 /**
219  * dp_tx_is_threshold_reached() - Check if current avail desc count meets a threshold
220  *
221  * @pool: flow pool
222  * @avail_desc: available descriptor number
223  *
224  * Return: true if threshold is met, false if not
225  */
226 static inline bool
227 dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
228 {
229 	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
230 		return true;
231 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
232 		return true;
233 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
234 		return true;
235 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
236 		return true;
237 	else
238 		return false;
239 }
240 
241 /**
242  * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
243  *
244  * @soc: dp soc
245  * @pool: flow pool
246  */
247 static inline void
248 dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
249 			     struct dp_tx_desc_pool_s *pool)
250 {
251 	if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
252 		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
253 		return;
254 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
255 		   pool->avail_desc > pool->stop_th[DP_TH_VI]) {
256 		pool->status = FLOW_POOL_BE_BK_PAUSED;
257 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
258 		   pool->avail_desc > pool->stop_th[DP_TH_VO]) {
259 		pool->status = FLOW_POOL_VI_PAUSED;
260 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
261 		   pool->avail_desc > pool->stop_th[DP_TH_HI]) {
262 		pool->status = FLOW_POOL_VO_PAUSED;
263 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
264 		pool->status = FLOW_POOL_ACTIVE_PAUSED;
265 	}
266 
267 	switch (pool->status) {
268 	case FLOW_POOL_ACTIVE_PAUSED:
269 		soc->pause_cb(pool->flow_pool_id,
270 			      WLAN_NETIF_PRIORITY_QUEUE_OFF,
271 			      WLAN_DATA_FLOW_CTRL_PRI);
272 		fallthrough;
273 
274 	case FLOW_POOL_VO_PAUSED:
275 		soc->pause_cb(pool->flow_pool_id,
276 			      WLAN_NETIF_VO_QUEUE_OFF,
277 			      WLAN_DATA_FLOW_CTRL_VO);
278 		fallthrough;
279 
280 	case FLOW_POOL_VI_PAUSED:
281 		soc->pause_cb(pool->flow_pool_id,
282 			      WLAN_NETIF_VI_QUEUE_OFF,
283 			      WLAN_DATA_FLOW_CTRL_VI);
284 		fallthrough;
285 
286 	case FLOW_POOL_BE_BK_PAUSED:
287 		soc->pause_cb(pool->flow_pool_id,
288 			      WLAN_NETIF_BE_BK_QUEUE_OFF,
289 			      WLAN_DATA_FLOW_CTRL_BE_BK);
290 		break;
291 	default:
292 		dp_err("Invalid pool status:%u to adjust", pool->status);
293 	}
294 }
295 
296 /**
297  * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
298  *
299  * @soc: Handle to DP SoC structure
300  * @desc_pool_id: ID of the flow control pool
301  *
302  * Return: TX descriptor allocated or NULL
303  */
304 static inline struct dp_tx_desc_s *
305 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
306 {
307 	struct dp_tx_desc_s *tx_desc = NULL;
308 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
309 	bool is_pause = false;
310 	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
311 	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
312 	enum netif_reason_type reason;
313 
314 	if (qdf_likely(pool)) {
315 		qdf_spin_lock_bh(&pool->flow_pool_lock);
316 		if (qdf_likely(pool->avail_desc &&
317 		    pool->status != FLOW_POOL_INVALID &&
318 		    pool->status != FLOW_POOL_INACTIVE)) {
319 			tx_desc = dp_tx_get_desc_flow_pool(pool);
320 			tx_desc->pool_id = desc_pool_id;
321 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
322 			dp_tx_desc_set_magic(tx_desc,
323 					     DP_TX_MAGIC_PATTERN_INUSE);
324 			is_pause = dp_tx_is_threshold_reached(pool,
325 							      pool->avail_desc);
326 
327 			if (qdf_unlikely(pool->status ==
328 					 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
329 				dp_tx_adjust_flow_pool_state(soc, pool);
330 				is_pause = false;
331 			}
332 
333 			if (qdf_unlikely(is_pause)) {
334 				switch (pool->status) {
335 				case FLOW_POOL_ACTIVE_UNPAUSED:
336 					/* pause network BE/BK queues */
337 					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
338 					reason = WLAN_DATA_FLOW_CTRL_BE_BK;
339 					level = DP_TH_BE_BK;
340 					pool->status = FLOW_POOL_BE_BK_PAUSED;
341 					break;
342 				case FLOW_POOL_BE_BK_PAUSED:
343 					/* pause network VI queue */
344 					act = WLAN_NETIF_VI_QUEUE_OFF;
345 					reason = WLAN_DATA_FLOW_CTRL_VI;
346 					level = DP_TH_VI;
347 					pool->status = FLOW_POOL_VI_PAUSED;
348 					break;
349 				case FLOW_POOL_VI_PAUSED:
350 					/* pause network VO queue */
351 					act = WLAN_NETIF_VO_QUEUE_OFF;
352 					reason = WLAN_DATA_FLOW_CTRL_VO;
353 					level = DP_TH_VO;
354 					pool->status = FLOW_POOL_VO_PAUSED;
355 					break;
356 				case FLOW_POOL_VO_PAUSED:
357 					/* pause network HI PRI queue */
358 					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
359 					reason = WLAN_DATA_FLOW_CTRL_PRI;
360 					level = DP_TH_HI;
361 					pool->status = FLOW_POOL_ACTIVE_PAUSED;
362 					break;
363 				case FLOW_POOL_ACTIVE_PAUSED:
364 					act = WLAN_NETIF_ACTION_TYPE_NONE;
365 					break;
366 				default:
367 					dp_err_rl("pool status is %d!",
368 						  pool->status);
369 					break;
370 				}
371 
372 				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
373 					pool->latest_pause_time[level] =
374 						qdf_get_system_timestamp();
375 					soc->pause_cb(desc_pool_id,
376 						      act,
377 						      reason);
378 				}
379 			}
380 		} else {
381 			pool->pkt_drop_no_desc++;
382 		}
383 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
384 	} else {
385 		dp_err_rl("NULL desc pool pool_id %d", desc_pool_id);
386 		soc->pool_stats.pkt_drop_no_pool++;
387 	}
388 
389 	return tx_desc;
390 }
391 
392 /**
393  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
394  *
395  * @soc: Handle to DP SoC structure
396  * @tx_desc: the tx descriptor to be freed
397  * @desc_pool_id: ID of the flow control pool
398  *
399  * Return: None
400  */
401 static inline void
402 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
403 		uint8_t desc_pool_id)
404 {
405 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
406 	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
407 	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
408 	enum netif_reason_type reason;
409 
410 	qdf_spin_lock_bh(&pool->flow_pool_lock);
411 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
412 	tx_desc->nbuf = NULL;
413 	tx_desc->flags = 0;
414 	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
415 	dp_tx_put_desc_flow_pool(pool, tx_desc);
416 	switch (pool->status) {
417 	case FLOW_POOL_ACTIVE_PAUSED:
418 		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
419 			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
420 			reason = WLAN_DATA_FLOW_CTRL_PRI;
421 			pool->status = FLOW_POOL_VO_PAUSED;
422 
423 			/* Update maximum pause duration for HI queue */
424 			pause_dur = unpause_time -
425 					pool->latest_pause_time[DP_TH_HI];
426 			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
427 				pool->max_pause_time[DP_TH_HI] = pause_dur;
428 		}
429 		break;
430 	case FLOW_POOL_VO_PAUSED:
431 		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
432 			act = WLAN_NETIF_VO_QUEUE_ON;
433 			reason = WLAN_DATA_FLOW_CTRL_VO;
434 			pool->status = FLOW_POOL_VI_PAUSED;
435 
436 			/* Update maximum pause duration for VO queue */
437 			pause_dur = unpause_time -
438 					pool->latest_pause_time[DP_TH_VO];
439 			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
440 				pool->max_pause_time[DP_TH_VO] = pause_dur;
441 		}
442 		break;
443 	case FLOW_POOL_VI_PAUSED:
444 		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
445 			act = WLAN_NETIF_VI_QUEUE_ON;
446 			reason = WLAN_DATA_FLOW_CTRL_VI;
447 			pool->status = FLOW_POOL_BE_BK_PAUSED;
448 
449 			/* Update maximum pause duration for VI queue */
450 			pause_dur = unpause_time -
451 					pool->latest_pause_time[DP_TH_VI];
452 			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
453 				pool->max_pause_time[DP_TH_VI] = pause_dur;
454 		}
455 		break;
456 	case FLOW_POOL_BE_BK_PAUSED:
457 		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
458 			act = WLAN_NETIF_BE_BK_QUEUE_ON;
459 			reason = WLAN_DATA_FLOW_CTRL_BE_BK;
460 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
461 
462 			/* Update maximum pause duration for BE_BK queue */
463 			pause_dur = unpause_time -
464 					pool->latest_pause_time[DP_TH_BE_BK];
465 			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
466 				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
467 		}
468 		break;
469 	case FLOW_POOL_INVALID:
470 		if (pool->avail_desc == pool->pool_size) {
471 			dp_tx_desc_pool_deinit(soc, desc_pool_id);
472 			dp_tx_desc_pool_free(soc, desc_pool_id);
473 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
474 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
475 				  "%s %d pool is freed!!",
476 				  __func__, __LINE__);
477 			return;
478 		}
479 		break;
480 
481 	case FLOW_POOL_ACTIVE_UNPAUSED:
482 		break;
483 	default:
484 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
485 			  "%s %d pool is INACTIVE State!!",
486 			  __func__, __LINE__);
487 		break;
488 	};
489 
490 	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
491 		soc->pause_cb(pool->flow_pool_id,
492 			      act, reason);
493 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
494 }
495 #else /* QCA_AC_BASED_FLOW_CONTROL */
496 
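/**
 * dp_tx_is_threshold_reached() - check if avail desc dropped below stop threshold
 * @pool: flow pool
 * @avail_desc: available descriptor count
 *
 * Return: true if the stop threshold is reached, false otherwise
 */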
497 static inline bool
498 dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
499 {
500 	if (qdf_unlikely(avail_desc < pool->stop_th))
501 		return true;
502 	else
503 		return false;
504 }
505 
506 /**
507  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
508  *
509  * @soc: Handle to DP SoC structure
510  * @desc_pool_id: ID of the flow control pool
511  *
512  * Return: TX descriptor allocated or NULL
513  */
514 static inline struct dp_tx_desc_s *
515 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
516 {
517 	struct dp_tx_desc_s *tx_desc = NULL;
518 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
519 
520 	if (pool) {
521 		qdf_spin_lock_bh(&pool->flow_pool_lock);
522 		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
523 		    pool->avail_desc) {
524 			tx_desc = dp_tx_get_desc_flow_pool(pool);
525 			tx_desc->pool_id = desc_pool_id;
526 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
527 			dp_tx_desc_set_magic(tx_desc,
528 					     DP_TX_MAGIC_PATTERN_INUSE);
529 			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
530 				pool->status = FLOW_POOL_ACTIVE_PAUSED;
531 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
532 				/* pause network queues */
533 				soc->pause_cb(desc_pool_id,
534 					       WLAN_STOP_ALL_NETIF_QUEUE,
535 					       WLAN_DATA_FLOW_CONTROL);
536 			} else {
537 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
538 			}
539 		} else {
540 			pool->pkt_drop_no_desc++;
541 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
542 		}
543 	} else {
544 		soc->pool_stats.pkt_drop_no_pool++;
545 	}
546 
547 	return tx_desc;
548 }
549 
550 /**
551  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
552  *
553  * @soc: Handle to DP SoC structure
554  * @tx_desc: the tx descriptor to be freed
555  * @desc_pool_id: ID of the flow control pool
556  *
557  * Return: None
558  */
559 static inline void
560 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
561 		uint8_t desc_pool_id)
562 {
563 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
564 
565 	qdf_spin_lock_bh(&pool->flow_pool_lock);
566 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
567 	tx_desc->nbuf = NULL;
568 	tx_desc->flags = 0;
569 	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
570 	dp_tx_put_desc_flow_pool(pool, tx_desc);
571 	switch (pool->status) {
572 	case FLOW_POOL_ACTIVE_PAUSED:
573 		if (pool->avail_desc > pool->start_th) {
574 			soc->pause_cb(pool->flow_pool_id,
575 				       WLAN_WAKE_ALL_NETIF_QUEUE,
576 				       WLAN_DATA_FLOW_CONTROL);
577 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
578 		}
579 		break;
580 	case FLOW_POOL_INVALID:
581 		if (pool->avail_desc == pool->pool_size) {
582 			dp_tx_desc_pool_deinit(soc, desc_pool_id);
583 			dp_tx_desc_pool_free(soc, desc_pool_id);
584 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
585 			qdf_print("%s %d pool is freed!!",
586 				  __func__, __LINE__);
587 			return;
588 		}
589 		break;
590 
591 	case FLOW_POOL_ACTIVE_UNPAUSED:
592 		break;
593 	default:
594 		qdf_print("%s %d pool is INACTIVE State!!",
595 			  __func__, __LINE__);
596 		break;
597 	};
598 
599 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
600 }
601 
602 #endif /* QCA_AC_BASED_FLOW_CONTROL */
603 
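/**
 * dp_tx_desc_thresh_reached() - check if the vdev's flow pool hit its threshold
 * @soc_hdl: CDP SoC handle
 * @vdev_id: id of the vdev whose pool is checked
 *
 * Return: true if the pause threshold is reached, false otherwise or if the
 *	   vdev cannot be found
 */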
604 static inline bool
605 dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
606 {
607 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
608 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
609 						     DP_MOD_ID_CDP);
610 	struct dp_tx_desc_pool_s *pool;
611 	bool status;
612 
613 	if (!vdev)
614 		return false;
615 
616 	pool = vdev->pool;
617 	status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
618 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
619 
620 	return status;
621 }
622 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
623 
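/*
 * QCA_LL_TX_FLOW_CONTROL_V2 disabled: the flow-control hooks below reduce to
 * no-op stubs and descriptors are served from simple per-pool freelists.
 */
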
624 static inline void dp_tx_flow_control_init(struct dp_soc *handle)
625 {
626 }
627 
628 static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
629 {
630 }
631 
632 static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
633 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
634 	uint32_t flow_pool_size)
635 {
636 	return QDF_STATUS_SUCCESS;
637 }
638 
639 static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
640 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
641 {
642 }
643 
644 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
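/**
 * dp_tx_prefetch_desc() - prefetch a tx descriptor into the CPU cache
 * @tx_desc: tx descriptor to prefetch (may be NULL)
 *
 * No-op when QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH is not enabled.
 *
 * Return: None
 */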
645 static inline
646 void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
647 {
648 	if (tx_desc)
649 		prefetch(tx_desc);
650 }
651 #else
652 static inline
653 void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
654 {
655 }
656 #endif
657 
658 /**
659  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
660  *
661  * @soc: Handle to DP SoC structure
662  * @desc_pool_id: pool id to allocate the descriptor from
663  *
664  * Return: TX descriptor allocated or NULL if the pool is exhausted
665  */
666 static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
667 						uint8_t desc_pool_id)
668 {
669 	struct dp_tx_desc_s *tx_desc = NULL;
670 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
671 
672 	TX_DESC_LOCK_LOCK(&pool->lock);
673 
674 	tx_desc = pool->freelist;
675 
676 	/* Pool is exhausted */
677 	if (!tx_desc) {
678 		TX_DESC_LOCK_UNLOCK(&pool->lock);
679 		return NULL;
680 	}
681 
682 	pool->freelist = pool->freelist->next;
683 	pool->num_allocated++;
684 	pool->num_free--;
685 	dp_tx_prefetch_desc(pool->freelist);
686 
687 	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
688 
689 	TX_DESC_LOCK_UNLOCK(&pool->lock);
690 
691 	return tx_desc;
692 }
693 
694 /**
695  * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx Descriptors
696  *                            from given pool
697  * @soc: Handle to DP SoC structure
698  * @desc_pool_id: pool id to allocate the descriptors from
699  * @num_requested: number of descriptors required
700  *
701  * Allocate multiple tx descriptors and link them into a list.
702  *
703  * Return: h_desc, pointer to the first descriptor of the list
704  */
705 static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
706 		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
707 {
708 	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
709 	uint8_t count;
710 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
711 
712 	TX_DESC_LOCK_LOCK(&pool->lock);
713 
714 	if ((num_requested == 0) ||
715 			(pool->num_free < num_requested)) {
716 		TX_DESC_LOCK_UNLOCK(&pool->lock);
717 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
718 			"%s, No Free Desc: Available(%d) num_requested(%d)",
719 			__func__, pool->num_free,
720 			num_requested);
721 		return NULL;
722 	}
723 
724 	h_desc = pool->freelist;
725 
726 	/* h_desc should never be NULL since num_free >= num_requested */
727 	qdf_assert_always(h_desc);
728 
729 	c_desc = h_desc;
730 	for (count = 0; count < (num_requested - 1); count++) {
731 		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
732 		c_desc = c_desc->next;
733 	}
	/* mark the tail descriptor too; all num_requested descs are handed out */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
734 	pool->num_free -= num_requested;
735 	pool->num_allocated += num_requested;
736 	pool->freelist = c_desc->next;
737 	c_desc->next = NULL;
738 
739 	TX_DESC_LOCK_UNLOCK(&pool->lock);
740 	return h_desc;
741 }
742 
743 /**
744  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
745  *
746  * @soc: Handle to DP SoC structure
747  * @tx_desc: the tx descriptor to be freed
748  * @desc_pool_id: pool id the descriptor should be returned to
749  */
750 static inline void
751 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
752 		uint8_t desc_pool_id)
753 {
754 	struct dp_tx_desc_pool_s *pool = NULL;
755 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
756 	tx_desc->nbuf = NULL;
757 	tx_desc->flags = 0;
758 
759 	pool = &soc->tx_desc[desc_pool_id];
760 	TX_DESC_LOCK_LOCK(&pool->lock);
761 	tx_desc->next = pool->freelist;
762 	pool->freelist = tx_desc;
763 	pool->num_allocated--;
764 	pool->num_free++;
765 	TX_DESC_LOCK_UNLOCK(&pool->lock);
766 }
767 
768 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
769 
770 #ifdef QCA_DP_TX_DESC_ID_CHECK
771 /**
772  * dp_tx_is_desc_id_valid() - check whether the tx desc id is valid
773  *
774  * @soc: Handle to DP SoC structure
775  * @tx_desc_id: tx descriptor id (cookie) to be validated
776  *
777  * Return: true or false
778  */
779 static inline bool
780 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
781 {
782 	uint8_t pool_id;
783 	uint16_t page_id, offset;
784 	struct dp_tx_desc_pool_s *pool;
785 
786 	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
787 			DP_TX_DESC_ID_POOL_OS;
788 	/* Pool ID is out of limit */
789 	if (pool_id > wlan_cfg_get_num_tx_desc_pool(
790 				soc->wlan_cfg_ctx)) {
791 		QDF_TRACE(QDF_MODULE_ID_DP,
792 			  QDF_TRACE_LEVEL_FATAL,
793 			  "%s:Tx Comp pool id %d not valid",
794 			  __func__,
795 			  pool_id);
796 		goto warn_exit;
797 	}
798 
799 	pool = &soc->tx_desc[pool_id];
800 	/* the pool is freed */
801 	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
802 		QDF_TRACE(QDF_MODULE_ID_DP,
803 			  QDF_TRACE_LEVEL_FATAL,
804 			  "%s:the pool %d has been freed",
805 			  __func__,
806 			  pool_id);
807 		goto warn_exit;
808 	}
809 
810 	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
811 				DP_TX_DESC_ID_PAGE_OS;
812 	/* the page id is out of limit */
813 	if (page_id >= pool->desc_pages.num_pages) {
814 		QDF_TRACE(QDF_MODULE_ID_DP,
815 			  QDF_TRACE_LEVEL_FATAL,
816 			  "%s:the page id %d invalid, pool id %d, num_page %d",
817 			  __func__,
818 			  page_id,
819 			  pool_id,
820 			  pool->desc_pages.num_pages);
821 		goto warn_exit;
822 	}
823 
824 	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
825 				DP_TX_DESC_ID_OFFSET_OS;
826 	/* the offset is out of limit */
827 	if (offset >= pool->desc_pages.num_element_per_page) {
828 		QDF_TRACE(QDF_MODULE_ID_DP,
829 			  QDF_TRACE_LEVEL_FATAL,
830 			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
831 			  __func__,
832 			  offset,
833 			  pool_id,
834 			  pool->desc_pages.num_element_per_page);
835 		goto warn_exit;
836 	}
837 
838 	return true;
839 
840 warn_exit:
841 	QDF_TRACE(QDF_MODULE_ID_DP,
842 		  QDF_TRACE_LEVEL_FATAL,
843 		  "%s:Tx desc id 0x%x not valid",
844 		  __func__,
845 		  tx_desc_id);
846 	qdf_assert_always(0);
847 	return false;
848 }
849 
850 #else
851 static inline bool
852 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
853 {
854 	return true;
855 }
856 #endif /* QCA_DP_TX_DESC_ID_CHECK */
857 
858 #ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
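/**
 * dp_tx_desc_update_fast_comp_flag() - mark a descriptor for simple (fast) completion
 * @soc: Handle to DP SoC structure
 * @desc: tx descriptor to be updated
 * @allow_fast_comp: non-zero if fast completion is allowed for this frame
 *
 * Descriptors destined for FW (DP_TX_DESC_FLAG_TO_FW) are never marked simple.
 *
 * Return: None
 */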
859 static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
860 						    struct dp_tx_desc_s *desc,
861 						    uint8_t allow_fast_comp)
862 {
863 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
864 	    qdf_likely(allow_fast_comp)) {
865 		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
866 	}
867 }
868 #else
869 static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
870 						    struct dp_tx_desc_s *desc,
871 						    uint8_t allow_fast_comp)
872 {
873 }
874 #endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */
875 
876 /**
877  * dp_tx_desc_find() - find dp tx descriptor from cookie fields
878  * @soc: handle for the device sending the data
879  * @pool_id: descriptor pool id decoded from the descriptor ID (cookie)
880  * @page_id: page id decoded from the descriptor ID
881  * @offset: element offset within the page decoded from the descriptor ID
882  *
883  * Return: the descriptor object identified by the given cookie fields
884  */
885 static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
886 		uint8_t pool_id, uint16_t page_id, uint16_t offset)
887 {
888 	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];
889 
890 	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
891 		tx_desc_pool->elem_size * offset;
892 }
893 
894 /**
895  * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
896  * @soc: handle for the device sending the data
897  * @desc_pool_id: target pool id
898  *
899  * Return: ext descriptor pointer, or NULL if the pool is exhausted
900  */
901 static inline
902 struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
903 		uint8_t desc_pool_id)
904 {
905 	struct dp_tx_ext_desc_elem_s *c_elem;
906 
907 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
908 	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
909 		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
910 		return NULL;
911 	}
912 	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
913 	soc->tx_ext_desc[desc_pool_id].freelist =
914 		soc->tx_ext_desc[desc_pool_id].freelist->next;
915 	soc->tx_ext_desc[desc_pool_id].num_free--;
916 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
917 	return c_elem;
918 }
919 
920 /**
921  * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
922  * @soc: handle for the device sending the data
923  * @elem: ext descriptor to be released
924  * @desc_pool_id: target pool id
925  *
926  * Return: None
927  */
928 static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
929 	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
930 {
931 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
932 	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
933 	soc->tx_ext_desc[desc_pool_id].freelist = elem;
934 	soc->tx_ext_desc[desc_pool_id].num_free++;
935 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
936 	return;
937 }
938 
939 /**
940  * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
941  *                           attach them to the free list
942  * @soc: Handle to DP SoC structure
943  * @elem: head of the descriptor list to be freed
944  * @desc_pool_id: pool id the descriptors should be returned to
945  * @num_free: number of descriptors to be freed
946  *
947  * Return: none
948  */
949 static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
950 		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
951 		uint8_t num_free)
952 {
953 	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
954 	uint8_t freed = num_free;
955 
956 	/* caller should always guarantee a list of at least num_free nodes */
957 	qdf_assert_always(elem);
958 
959 	head = elem;
960 	c_elem = head;
961 	tail = head;
962 	while (c_elem && freed) {
963 		tail = c_elem;
964 		c_elem = c_elem->next;
965 		freed--;
966 	}
967 
968 	/* caller should always guarantee a list of at least num_free nodes */
969 	qdf_assert_always(tail);
970 
971 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
972 	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
973 	soc->tx_ext_desc[desc_pool_id].freelist = head;
974 	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
975 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
976 
977 	return;
978 }
979 
980 #if defined(FEATURE_TSO)
981 /**
982  * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
983  * @soc: device soc instance
984  * @pool_id: pool id from which the TSO segment should be allocated
985  *
986  * Allocates a TSO segment element from the free list held in
987  * the soc
988  *
989  * Return: tso_seg, tso segment memory pointer
990  */
991 static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
992 		struct dp_soc *soc, uint8_t pool_id)
993 {
994 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
995 
996 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
997 	if (soc->tx_tso_desc[pool_id].freelist) {
998 		soc->tx_tso_desc[pool_id].num_free--;
999 		tso_seg = soc->tx_tso_desc[pool_id].freelist;
1000 		soc->tx_tso_desc[pool_id].freelist =
1001 			soc->tx_tso_desc[pool_id].freelist->next;
1002 	}
1003 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
1004 
1005 	return tso_seg;
1006 }
1007 
1008 /**
1009  * dp_tx_tso_desc_free() - function to free a TSO segment
1010  * @soc: device soc instance
1011  * @pool_id: pool id to which the TSO segment should be returned
1012  * @tso_seg: tso segment memory pointer
1013  *
1014  * Returns a TSO segment element to the free list held in the
1015  * soc
1016  *
1017  * Return: none
1018  */
1019 static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
1020 		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
1021 {
1022 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
1023 	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
1024 	soc->tx_tso_desc[pool_id].freelist = tso_seg;
1025 	soc->tx_tso_desc[pool_id].num_free++;
1026 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
1027 }
1028 
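/**
 * dp_tso_num_seg_alloc() - allocate a TSO num-segment element from the pool
 * @soc: device soc instance
 * @pool_id: pool id from which the element should be allocated
 *
 * Return: tso_num_seg element, or NULL if the freelist is empty
 */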
1029 static inline
1030 struct qdf_tso_num_seg_elem_t  *dp_tso_num_seg_alloc(struct dp_soc *soc,
1031 		uint8_t pool_id)
1032 {
1033 	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
1034 
1035 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1036 	if (soc->tx_tso_num_seg[pool_id].freelist) {
1037 		soc->tx_tso_num_seg[pool_id].num_free--;
1038 		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
1039 		soc->tx_tso_num_seg[pool_id].freelist =
1040 			soc->tx_tso_num_seg[pool_id].freelist->next;
1041 	}
1042 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1043 
1044 	return tso_num_seg;
1045 }
1046 
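/**
 * dp_tso_num_seg_free() - return a TSO num-segment element to the pool freelist
 * @soc: device soc instance
 * @pool_id: pool id to which the element should be returned
 * @tso_num_seg: tso num-segment element to be freed
 *
 * Return: None
 */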
1047 static inline
1048 void dp_tso_num_seg_free(struct dp_soc *soc,
1049 		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
1050 {
1051 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1052 	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
1053 	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
1054 	soc->tx_tso_num_seg[pool_id].num_free++;
1055 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1056 }
1057 #endif
1058 
1059 /**
1060  * dp_tx_me_alloc_buf() - Allocate a buffer from the ME pool
1061  * @pdev: DP_PDEV handle for datapath
1062  *
1063  * Return: dp_tx_me_buf_t pointer (buf), or NULL if the pool is exhausted
1064  */
1065 static inline struct dp_tx_me_buf_t*
1066 dp_tx_me_alloc_buf(struct dp_pdev *pdev)
1067 {
1068 	struct dp_tx_me_buf_t *buf = NULL;
1069 	qdf_spin_lock_bh(&pdev->tx_mutex);
1070 	if (pdev->me_buf.freelist) {
1071 		buf = pdev->me_buf.freelist;
1072 		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
1073 		pdev->me_buf.buf_in_use++;
1074 	} else {
1075 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1076 				"Error allocating memory in pool");
1077 		qdf_spin_unlock_bh(&pdev->tx_mutex);
1078 		return NULL;
1079 	}
1080 	qdf_spin_unlock_bh(&pdev->tx_mutex);
1081 	return buf;
1082 }
1083 
1084 /**
1085  * dp_tx_me_free_buf() - Unmap the buffer holding the dest
1086  * address, free the me descriptor and add it to the free-pool
1087  * @pdev: DP_PDEV handle for datapath
1088  * @buf: Allocated ME BUF
1089  *
1090  * Return: None
1091  */
1092 static inline void
1093 dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
1094 {
1095 	/*
1096 	 * If the buf containing mac address was mapped,
1097 	 * it must be unmapped before freeing the me_buf.
1098 	 * The "paddr_macbuf" member in the me_buf structure
1099 	 * holds the mapped physical address and it must be
1100 	 * set to 0 after unmapping.
1101 	 */
1102 	if (buf->paddr_macbuf) {
1103 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
1104 					    buf->paddr_macbuf,
1105 					    QDF_DMA_TO_DEVICE,
1106 					    QDF_MAC_ADDR_SIZE);
1107 		buf->paddr_macbuf = 0;
1108 	}
1109 	qdf_spin_lock_bh(&pdev->tx_mutex);
1110 	buf->next = pdev->me_buf.freelist;
1111 	pdev->me_buf.freelist = buf;
1112 	pdev->me_buf.buf_in_use--;
1113 	qdf_spin_unlock_bh(&pdev->tx_mutex);
1114 }
1115 #endif /* DP_TX_DESC_H */
1116