/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/**
 * 21 bits cookie
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* ???Ring ID needed??? */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
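
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver
 * API): composing a software tx descriptor ID from the mask/shift macros
 * above. For example, pool 1, page 26, offset 5 yields ID 0x008345;
 * dp_tx_is_desc_id_valid() below decodes an ID with the inverse operation.
 */
static inline uint32_t dp_tx_desc_id_example_compose(uint8_t pool_id,
						     uint16_t page_id,
						     uint16_t offset)
{
	return ((uint32_t)pool_id << DP_TX_DESC_ID_POOL_OS) |
	       ((uint32_t)page_id << DP_TX_DESC_ID_PAGE_OS) |
	       ((uint32_t)offset << DP_TX_DESC_ID_OFFSET_OS);
}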

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint16_t num_elem);
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				    uint16_t num_elem);
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint16_t num_elem);
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				    uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id,
				       uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 *
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if current avail desc meets threshold
 *
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      WLAN_DATA_FLOW_CONTROL);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_WAKE_NON_PRIORITY_QUEUE;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	};

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, WLAN_DATA_FLOW_CONTROL);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(
				soc->hif_handle,
				RTPM_ID_DP_TX_DESC_ALLOC_FREE);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			goto out;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	};

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

out:
	/*
	 * Decrement PM usage count if the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
	hif_pm_runtime_put(soc->hif_handle,
			   RTPM_ID_DP_TX_DESC_ALLOC_FREE);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
	struct dp_tx_desc_pool_s *pool;

	if (!vdev)
		return false;

	pool = vdev->pool;

	return dp_tx_is_threshold_reached(pool, pool->avail_desc);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	tx_desc = pool->freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		return NULL;
	}

	pool->freelist = pool->freelist->next;
	pool->num_allocated++;
	pool->num_free--;

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
 *                            from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id from which descriptors should be picked
 * @num_requested: number of descriptors required
 *
 * Allocate multiple tx descriptors and link them into a list.
 *
 * Return: h_desc, pointer to the first descriptor in the list
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	if ((num_requested == 0) ||
			(pool->num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, pool->num_free,
			num_requested);
		return NULL;
	}

	h_desc = pool->freelist;

	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* the batch spans h_desc..c_desc, i.e. num_requested descriptors */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	pool->num_free -= num_requested;
	pool->num_allocated += num_requested;
	pool->freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&pool->lock);
	return h_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the descriptor pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = NULL;

	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;

	pool = &soc->tx_desc[desc_pool_id];
	TX_DESC_LOCK_LOCK(&pool->lock);
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->num_allocated--;
	pool->num_free++;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}

#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether the tx descriptor ID is valid
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: tx descriptor ID to be validated
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of limit */
	if (pool_id > wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

#ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
	    qdf_likely(allow_fast_comp)) {
		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
	}
}
#else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
}
#endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */

/**
 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
 * @soc: handle for the device sending the data
 * @pool_id: descriptor pool ID decoded from the tx descriptor ID (cookie)
 * @page_id: page ID decoded from the tx descriptor ID
 * @offset: offset within the page, decoded from the tx descriptor ID
 *
 * Use the fields of a tx descriptor ID to find the corresponding
 * descriptor object.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
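
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver
 * API): a tx descriptor ID taken from a completion cookie can be decoded
 * with the mask/shift macros at the top of this file, in the same way as
 * dp_tx_is_desc_id_valid(), and then resolved with dp_tx_desc_find().
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_find_by_id_example(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}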

/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: ext descriptor pointer, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return;
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                           attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the ext descriptor list to be freed
 * @desc_pool_id: pool id to which the descriptors are returned
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);

	return;
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which the TSO segment should be allocated
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to which the TSO segment should be returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the
 * soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif

/*
 * dp_tx_me_alloc_buf() - Allocate a descriptor from the ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: dp_tx_me_buf_t (buf)
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/*
 * dp_tx_me_free_buf() - Free an ME descriptor and add it back to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME buffer
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
#endif /* DP_TX_DESC_H */