/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/*
 * 21 bits cookie
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* TODO: Is a ring ID needed? */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0

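/*
 * Illustrative sketch (not part of the driver API): how the fields above
 * pack into a single descriptor ID. The helper name is hypothetical.
 *
 *	static inline uint32_t dp_tx_desc_id_pack(uint8_t pool_id,
 *						  uint16_t page_id,
 *						  uint16_t offset)
 *	{
 *		return ((pool_id << DP_TX_DESC_ID_POOL_OS) &
 *			DP_TX_DESC_ID_POOL_MASK) |
 *		       ((page_id << DP_TX_DESC_ID_PAGE_OS) &
 *			DP_TX_DESC_ID_PAGE_MASK) |
 *		       ((offset << DP_TX_DESC_ID_OFFSET_OS) &
 *			DP_TX_DESC_ID_OFFSET_MASK);
 *	}
 */
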
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
				uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

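/*
 * Usage sketch for the two helpers above (mirrors the dp_tx_desc_alloc()
 * and dp_tx_desc_free() implementations below); the caller owns the lock:
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc)
 *		tx_desc = dp_tx_get_desc_flow_pool(pool);
 *	...
 *	dp_tx_put_desc_flow_pool(pool, tx_desc);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */
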
#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 *
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - check if available descriptor count hits
 *				  a stop threshold
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

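/*
 * Illustration with hypothetical threshold values: if stop_th[] were
 * {BE_BK: 128, VI: 96, VO: 64, HI: 32}, the check above fires exactly when
 * avail_desc drops to 128, 96, 64 or 32, letting the caller pause one
 * access-category queue per level instead of stopping all queues at once.
 */
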
/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      WLAN_DATA_FLOW_CONTROL);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

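/*
 * Summary of the pool status transitions driven by dp_tx_desc_alloc() above
 * and dp_tx_desc_free() below (derived from their switch statements):
 *
 *	ACTIVE_UNPAUSED --stop_th[BE_BK] hit--> BE_BK_PAUSED
 *	BE_BK_PAUSED    --stop_th[VI] hit-----> VI_PAUSED
 *	VI_PAUSED       --stop_th[VO] hit-----> VO_PAUSED
 *	VO_PAUSED       --stop_th[HI] hit-----> ACTIVE_PAUSED
 *
 * dp_tx_desc_free() walks the same chain in reverse, one level at a time,
 * once avail_desc rises above the matching start_th[] level.
 */
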
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_WAKE_NON_PRIORITY_QUEUE;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, WLAN_DATA_FLOW_CONTROL);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* !QCA_AC_BASED_FLOW_CONTROL */

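/**
 * dp_tx_is_threshold_reached() - check if available descriptor count fell
 *				  below the stop threshold
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if the threshold is reached, false if not
 */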
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(soc->hif_handle);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			goto out;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

out:
	/*
	 * Decrement PM usage count if the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
	hif_pm_runtime_put(soc->hif_handle);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */

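/**
 * dp_tx_desc_thresh_reached() - check if a vdev's flow pool hit a threshold
 * @vdev: cdp vdev handle
 *
 * Return: true if the vdev's pool has reached a stop threshold
 */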
static inline bool
dp_tx_desc_thresh_reached(struct cdp_vdev *vdev)
{
	struct dp_vdev *dp_vdev = (struct dp_vdev *)vdev;
	struct dp_tx_desc_pool_s *pool;

	if (!vdev)
		return false;

	pool = dp_vdev->pool;

	return dp_tx_is_threshold_reached(pool, pool->avail_desc);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	soc->tx_desc[desc_pool_id].freelist =
		soc->tx_desc[desc_pool_id].freelist->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

	return tx_desc;
}

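/*
 * Usage sketch (illustrative; the caller and error handling are
 * hypothetical):
 *
 *	struct dp_tx_desc_s *desc = dp_tx_desc_alloc(soc, pool_id);
 *
 *	if (!desc)
 *		return QDF_STATUS_E_RESOURCES;	// pool exhausted
 *	...
 *	dp_tx_desc_free(soc, desc, pool_id);	// return it to the freelist
 */
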
/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *                               from the given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id from which the descriptors should be picked
 * @num_requested: number of descriptors required
 *
 * Allocate multiple tx descriptors and link them into a list.
 *
 * Return: pointer to the first descriptor in the linked batch, or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, soc->tx_desc[desc_pool_id].num_free,
			num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free > requested */
	qdf_assert_always(h_desc);

	/* walk to the last of the num_requested descriptors, marking each */
	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* the loop stops one short of the last descriptor; mark it too */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}

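/*
 * Usage sketch (illustrative): the returned batch is a NULL-terminated
 * singly linked list of num_requested descriptors.
 *
 *	struct dp_tx_desc_s *desc = dp_tx_desc_alloc_multiple(soc, pool_id, n);
 *
 *	while (desc) {
 *		struct dp_tx_desc_s *next = desc->next;
 *		...use desc...
 *		dp_tx_desc_free(soc, desc, pool_id);
 *		desc = next;
 *	}
 */
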
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to be freed
 * @desc_pool_id: ID of the descriptor pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether a tx descriptor ID is valid
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: descriptor ID to be validated
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of limit: valid ids are 0 .. num_pools - 1 */
	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

/**
 * dp_tx_desc_find() - find dp tx descriptor from cookie fields
 * @soc: handle for the device sending the data
 * @pool_id: descriptor pool id
 * @page_id: page id within the pool
 * @offset: element offset within the page
 *
 * Use the unpacked fields of a tx descriptor ID to find the corresponding
 * descriptor object.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}

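/*
 * Usage sketch (illustrative): resolving a completion cookie back to its
 * descriptor with the ID masks defined at the top of this file:
 *
 *	if (dp_tx_is_desc_id_valid(soc, tx_desc_id)) {
 *		uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *					DP_TX_DESC_ID_POOL_OS;
 *		uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *					DP_TX_DESC_ID_PAGE_OS;
 *		uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *					DP_TX_DESC_ID_OFFSET_OS;
 *
 *		tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 *	}
 */
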
/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: ext descriptor pointer, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                                  attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor list to be freed
 * @desc_pool_id: id of the pool the descriptors should return to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which the TSO descriptor should be picked
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer, or NULL if the pool is empty
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to which the TSO descriptor should be returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

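/**
 * dp_tso_num_seg_alloc() - allocate a TSO num-seg element from the pool
 * @soc: device soc instance
 * @pool_id: pool id from which the element should be picked
 *
 * Return: tso_num_seg element pointer, or NULL if the pool is exhausted
 */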
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

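/**
 * dp_tso_num_seg_free() - return a TSO num-seg element to the pool freelist
 * @soc: device soc instance
 * @pool_id: pool id to which the element should be returned
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */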
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif /* FEATURE_TSO */

/*
 * dp_tx_me_alloc_buf() - Alloc descriptor from me pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: dp_tx_me_buf_t (buf)
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/*
 * dp_tx_me_free_buf() - Free me descriptor and add it to pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME BUF
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
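
/*
 * Usage sketch (illustrative): the two helpers above form an alloc/free
 * pair guarded by pdev->tx_mutex.
 *
 *	struct dp_tx_me_buf_t *mc_uc_buf = dp_tx_me_alloc_buf(pdev);
 *
 *	if (!mc_uc_buf)
 *		return;		// freelist empty
 *	...
 *	dp_tx_me_free_buf(pdev, mc_uc_buf);
 */
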
#endif /* DP_TX_DESC_H */