/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/**
 * 21 bits cookie
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* ???Ring ID needed??? */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0

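/*
 * Illustrative sketch (not part of the driver API): composing a tx
 * descriptor cookie from its fields, the inverse of the decode done in
 * dp_tx_is_desc_id_valid() below. The pool_id/page_id/offset variables
 * are hypothetical.
 *
 *	uint32_t tx_desc_id = ((uint32_t)pool_id << DP_TX_DESC_ID_POOL_OS) |
 *			      ((uint32_t)page_id << DP_TX_DESC_ID_PAGE_OS) |
 *			      ((uint32_t)offset << DP_TX_DESC_ID_OFFSET_OS);
 */
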
/**
 * Compile-time assert on the tx descriptor size.
 *
 * If this assert is hit, update POOL_MASK and PAGE_MASK
 * to match the updated size.
 *
 * For the current PAGE mask, the allowed size range of a
 * tx_desc is between 128 and 256 bytes.
 */
QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
			((sizeof(struct dp_tx_desc_s)) <=
			 (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
			((sizeof(struct dp_tx_desc_s)) >
			 (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)))
		       );

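/*
 * Editorial note: the assert above is equivalent to
 *
 *	DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)
 *		< sizeof(struct dp_tx_desc_s)
 *		<= DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS
 *
 * which matches the (128, 256] byte range quoted above if
 * DP_BLOCKMEM_SIZE is 8192 (an inference, not verified here).
 */
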
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

#ifdef DP_TX_TRACKING
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
	tx_desc->magic = magic_pattern;
}
#else
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
}
#endif

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem);
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem);
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint32_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				    uint32_t num_elem);
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint32_t num_elem);
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				    uint32_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint32_t num_elem);
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id,
				       uint32_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

#ifdef DP_UMAC_HW_RESET_SUPPORT
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint32_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

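/*
 * Illustrative sketch (not part of the driver): the locking and sanity
 * checks the two helpers above expect their caller to perform, mirroring
 * what dp_tx_desc_alloc()/dp_tx_desc_free() below do. The no_desc label
 * is hypothetical.
 *
 *	struct dp_tx_desc_s *tx_desc;
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (!pool->avail_desc) {
 *		qdf_spin_unlock_bh(&pool->flow_pool_lock);
 *		goto no_desc;
 *	}
 *	tx_desc = dp_tx_get_desc_flow_pool(pool);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 *
 *	... use tx_desc, then on completion: ...
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	dp_tx_put_desc_flow_pool(pool, tx_desc);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */
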
#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 *
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if the current avail desc count
 *				  meets a stop threshold
 *
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

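/*
 * Editorial note (inferred from dp_tx_adjust_flow_pool_state() below):
 * the per-AC stop thresholds are assumed to be ordered
 *
 *	stop_th[DP_TH_BE_BK] > stop_th[DP_TH_VI] >
 *	stop_th[DP_TH_VO] > stop_th[DP_TH_HI]
 *
 * so that, as avail_desc drains, the BE/BK queues pause first and the
 * high-priority queue pauses last.
 */
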
/**
 * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
 *
 * @soc: dp soc
 * @pool: flow pool
 */
static inline void
dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
			     struct dp_tx_desc_pool_s *pool)
{
	if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		return;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
		   pool->avail_desc > pool->stop_th[DP_TH_VI]) {
		pool->status = FLOW_POOL_BE_BK_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
		   pool->avail_desc > pool->stop_th[DP_TH_VO]) {
		pool->status = FLOW_POOL_VI_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
		   pool->avail_desc > pool->stop_th[DP_TH_HI]) {
		pool->status = FLOW_POOL_VO_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
		pool->status = FLOW_POOL_ACTIVE_PAUSED;
	}

	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_PRIORITY_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_PRI);
		fallthrough;

	case FLOW_POOL_VO_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VO_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_VO);
		fallthrough;

	case FLOW_POOL_VI_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VI_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_VI);
		fallthrough;

	case FLOW_POOL_BE_BK_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_BE_BK_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_BE_BK);
		break;
	default:
		dp_err("Invalid pool status:%u to adjust", pool->status);
	}
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
	enum netif_reason_type reason;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc &&
		    pool->status != FLOW_POOL_INVALID &&
		    pool->status != FLOW_POOL_INACTIVE)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			dp_tx_desc_set_magic(tx_desc,
					     DP_TX_MAGIC_PATTERN_INUSE);
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(pool->status ==
					 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
				dp_tx_adjust_flow_pool_state(soc, pool);
				is_pause = false;
			}

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_BE_BK;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_VI;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_VO;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_PRI;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      reason);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
	enum netif_reason_type reason;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_PRI;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_VO;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_VI;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_NETIF_BE_BK_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_BE_BK;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, reason);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
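
/*
 * Illustrative sketch (not part of the driver): a typical caller pairs
 * dp_tx_desc_alloc() with dp_tx_desc_free(); pausing and unpausing of
 * the netif queues happens inside those helpers. The send step is
 * hypothetical.
 *
 *	struct dp_tx_desc_s *tx_desc;
 *
 *	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *	if (!tx_desc)
 *		return QDF_STATUS_E_RESOURCES;
 *
 *	... program and enqueue tx_desc to hardware; then on completion
 *	    (or on failure to enqueue): ...
 *
 *	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
 */
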
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			dp_tx_desc_set_magic(tx_desc,
					     DP_TX_MAGIC_PATTERN_INUSE);
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_tx_desc_pool_s *pool;
	bool status;

	if (!vdev)
		return false;

	pool = vdev->pool;
	status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return status;
}
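
/*
 * Illustrative sketch (not part of the driver): an upper layer can poll
 * dp_tx_desc_thresh_reached() to back off before handing more frames
 * down to the DP layer. The calling context is hypothetical.
 *
 *	if (dp_tx_desc_thresh_reached(soc_hdl, vdev_id)) {
 *		... descriptors are running low: drop, queue locally,
 *		    or propagate backpressure to the network stack ...
 *	}
 */
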
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint32_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
	if (tx_desc)
		prefetch(tx_desc);
}
#else
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	tx_desc = pool->freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		return NULL;
	}

	pool->freelist = pool->freelist->next;
	pool->num_allocated++;
	pool->num_free--;
	dp_tx_prefetch_desc(pool->freelist);

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
 *                            from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocate multiple tx descriptors and link them into a list.
 *
 * Return: h_desc, pointer to the first descriptor of the list
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	if ((num_requested == 0) ||
			(pool->num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, pool->num_free,
			num_requested);
		return NULL;
	}

	h_desc = pool->freelist;

	/* h_desc should never be NULL since num_free > requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}

	/* mark the last descriptor too, and account for all num_requested
	 * elements rather than the num_requested - 1 walked by the loop
	 */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	pool->num_free -= num_requested;
	pool->num_allocated += num_requested;
	pool->freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&pool->lock);
	return h_desc;
}

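/*
 * Illustrative sketch (not part of the driver): consuming a batch from
 * dp_tx_desc_alloc_multiple(). The list is singly linked via ->next and
 * each element is returned individually with dp_tx_desc_free(). The
 * batch size of 8 is hypothetical.
 *
 *	struct dp_tx_desc_s *head, *cur, *next;
 *
 *	head = dp_tx_desc_alloc_multiple(soc, desc_pool_id, 8);
 *	if (!head)
 *		return QDF_STATUS_E_RESOURCES;
 *
 *	for (cur = head; cur; cur = next) {
 *		next = cur->next;
 *		... use cur, then on completion: ...
 *		dp_tx_desc_free(soc, cur, desc_pool_id);
 *	}
 */
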
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to be freed
 * @desc_pool_id: pool id to return the descriptor to
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = NULL;

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;

	pool = &soc->tx_desc[desc_pool_id];
	TX_DESC_LOCK_LOCK(&pool->lock);
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->num_allocated--;
	pool->num_free++;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}

#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether the tx desc id is valid
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: tx descriptor id to be validated
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of limit: valid ids are 0 .. num_pools - 1 */
	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

#ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
	    qdf_likely(allow_fast_comp)) {
		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
	}
}
#else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
}
#endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */

/**
 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
 * @soc: handle for the device sending the data
 * @pool_id: pool id of the descriptor
 * @page_id: page id within the pool
 * @offset: element offset within the page
 *
 * Use the pool/page/offset fields decoded from a tx descriptor ID
 * (cookie) to find the corresponding descriptor object.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}

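/*
 * Illustrative sketch (not part of the driver): resolving a completion
 * cookie back to its descriptor by combining the ID masks defined above
 * with dp_tx_desc_find(). The tx_desc_id variable is hypothetical.
 *
 *	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *				DP_TX_DESC_ID_POOL_OS;
 *	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *				DP_TX_DESC_ID_PAGE_OS;
 *	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *				DP_TX_DESC_ID_OFFSET_OS;
 *	struct dp_tx_desc_s *tx_desc;
 *
 *	if (dp_tx_is_desc_id_valid(soc, tx_desc_id))
 *		tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 */
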
/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: ext descriptor pointer, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

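/*
 * Illustrative sketch (not part of the driver): an extension descriptor
 * is typically allocated alongside a software tx descriptor and released
 * with it; the msdu_ext_desc linkage shown here is an assumption for
 * illustration.
 *
 *	struct dp_tx_ext_desc_elem_s *ext;
 *
 *	ext = dp_tx_ext_desc_alloc(soc, desc_pool_id);
 *	if (!ext)
 *		return QDF_STATUS_E_RESOURCES;
 *	tx_desc->msdu_ext_desc = ext;	(assumed field, for illustration)
 *
 *	... later, when tearing the tx descriptor down: ...
 *
 *	dp_tx_ext_desc_free(soc, ext, desc_pool_id);
 */
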
/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                           attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the tx extension descriptor list to be freed
 * @desc_pool_id: pool id the descriptors should be returned to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which the TSO descriptor should be picked
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id the TSO descriptor should be returned to
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the
 * soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

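/*
 * Illustrative sketch (not part of the driver): a TSO jumbo frame is
 * chopped into segments, each backed by one element from the pool above.
 * The segmentation step and the seg member usage are assumptions for
 * illustration.
 *
 *	struct qdf_tso_seg_elem_t *seg;
 *
 *	seg = dp_tx_tso_desc_alloc(soc, pool_id);
 *	if (!seg)
 *		return QDF_STATUS_E_RESOURCES;
 *
 *	... fill seg with the per-segment fragment info, transmit,
 *	    then on completion: ...
 *
 *	dp_tx_tso_desc_free(soc, pool_id, seg);
 */
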
/**
 * dp_tso_num_seg_alloc() - function to allocate a TSO num_seg element
 * @soc: device soc instance
 * @pool_id: pool id from which the element should be picked
 *
 * Return: tso_num_seg, num_seg tracking element, or NULL if the
 * freelist is empty
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - function to return a TSO num_seg element to
 * its pool
 * @soc: device soc instance
 * @pool_id: pool id the element should be returned to
 * @tso_num_seg: num_seg tracking element to be freed
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif

/*
 * dp_tx_me_alloc_buf() - Alloc descriptor from me pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: allocated dp_tx_me_buf_t, or NULL if the freelist is empty
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/*
 * dp_tx_me_free_buf() - Unmap the buffer holding the dest
 * address, free the me descriptor and add it to the free-pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME BUF
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	/*
	 * If the buf containing the mac address was mapped,
	 * it must be unmapped before freeing the me_buf.
	 * The "paddr_macbuf" member in the me_buf structure
	 * holds the mapped physical address and it must be
	 * set to 0 after unmapping.
	 */
	if (buf->paddr_macbuf) {
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
					    buf->paddr_macbuf,
					    QDF_DMA_TO_DEVICE,
					    QDF_MAC_ADDR_SIZE);
		buf->paddr_macbuf = 0;
	}
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
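
/*
 * Illustrative sketch (not part of the driver): the multicast-echo
 * helpers above pair up like the other pools here; the copy/map step in
 * the middle is hypothetical.
 *
 *	struct dp_tx_me_buf_t *mc_uc_buf;
 *
 *	mc_uc_buf = dp_tx_me_alloc_buf(pdev);
 *	if (!mc_uc_buf)
 *		return QDF_STATUS_E_RESOURCES;
 *
 *	... copy the destination mac into the buffer and, if needed,
 *	    DMA-map it, recording the address in mc_uc_buf->paddr_macbuf ...
 *
 *	dp_tx_me_free_buf(pdev, mc_uc_buf);
 */
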
#endif /* DP_TX_DESC_H */