xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h (revision 3b7d2086205cc4b82a36a180614a8914e54e8fed)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/*
 * 21 bits cookie:
 * 2 bits pool id (0 ~ 3),
 * 10 bits page id (0 ~ 1023),
 * 5 bits offset id (0 ~ 31); Desc size = 128, Num descs per page = 4096/128 = 32.
 * A hedged sketch of composing an ID from these fields follows the mask
 * definitions below.
 */
/* TODO: Is a Ring ID also needed in the cookie? */

/* TODO: Need to revisit this change for Rhine */
#ifdef WLAN_SOFTUMAC_SUPPORT
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FF0
#define DP_TX_DESC_ID_PAGE_OS      4
#define DP_TX_DESC_ID_OFFSET_MASK  0x00000F
#define DP_TX_DESC_ID_OFFSET_OS    0
#else
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
#endif /* WLAN_SOFTUMAC_SUPPORT */
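
/*
 * Hedged illustration (not part of the driver): a minimal sketch of how the
 * pool/page/offset fields described above combine into a tx descriptor ID
 * using the masks just defined. The helper name dp_tx_desc_id_pack_example
 * is hypothetical and exists only for this sketch.
 */
static inline uint32_t dp_tx_desc_id_pack_example(uint8_t pool_id,
						  uint16_t page_id,
						  uint16_t offset)
{
	/* shift each field into position, then mask to its allotted bits */
	return ((pool_id << DP_TX_DESC_ID_POOL_OS) & DP_TX_DESC_ID_POOL_MASK) |
	       ((page_id << DP_TX_DESC_ID_PAGE_OS) & DP_TX_DESC_ID_PAGE_MASK) |
	       ((offset << DP_TX_DESC_ID_OFFSET_OS) &
		DP_TX_DESC_ID_OFFSET_MASK);
}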

/*
 * Compile-time assert on the tx descriptor size.
 *
 * If this assert is hit, update POOL_MASK and PAGE_MASK according to the
 * updated size.
 *
 * For the current PAGE mask, the allowed tx_desc size range is between
 * 128 and 256 bytes.
 */
QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
			((sizeof(struct dp_tx_desc_s)) <=
			 (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
			((sizeof(struct dp_tx_desc_s)) >
			 (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)))
		       );

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

#ifdef DP_TX_TRACKING
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
	tx_desc->magic = magic_pattern;
}
#else
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 * @spcl_tx_desc: true for the special descriptor pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within the host for the tx data path).
 * The number of tx descriptors required can be large: with, for example,
 * 1024 clients across 3 radios, the number of outstanding MSDUs stored in
 * TQM queues and LMAC queues becomes significant.
 *
 * To avoid allocating a large contiguous block of memory, this function
 * uses the qdf multi_page_alloc facility to allocate memory in multiple
 * pages. It then iterates through the allocated pages and links each
 * descriptor to the next, taking care of page boundaries (a hedged sketch
 * of this linking follows the prototype below).
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternatively, multiple pools can be used for multiple VDEVs for
 * VDEV-level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success; an error status code otherwise
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem, bool spcl_tx_desc);

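/*
 * Hedged illustration (not part of the driver): a minimal sketch of the
 * page-by-page freelist linking described above, under the assumption that
 * the pool's memory is a table of `num_pages` page pointers, each page
 * holding `descs_per_page` descriptors of `elem_size` bytes. All names in
 * this helper are hypothetical.
 */
static inline void
dp_tx_desc_link_pages_example(void **pages, uint32_t num_pages,
			      uint32_t descs_per_page, uint32_t elem_size,
			      struct dp_tx_desc_s **freelist)
{
	struct dp_tx_desc_s *desc;
	uint32_t page, idx;

	*freelist = (struct dp_tx_desc_s *)pages[0];
	desc = *freelist;
	for (page = 0; page < num_pages; page++) {
		for (idx = 0; idx < descs_per_page; idx++) {
			/* the very last descriptor terminates the list */
			if (page == (num_pages - 1) &&
			    idx == (descs_per_page - 1)) {
				desc->next = NULL;
				return;
			}
			/* hop to the next page at each page boundary */
			if (idx == (descs_per_page - 1))
				desc->next =
					(struct dp_tx_desc_s *)pages[page + 1];
			else
				desc->next = (struct dp_tx_desc_s *)
					((uint8_t *)desc + elem_size);
			desc = desc->next;
		}
	}
}
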
/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to initialize
 * @num_elem: Number of descriptor elements per pool
 * @spcl_tx_desc: true for the special descriptor pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem, bool spcl_tx_desc);

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 * @spcl_tx_desc: true for the special descriptor pool
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
			  bool spcl_tx_desc);

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 * @spcl_tx_desc: true for the special descriptor pool
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
			    bool spcl_tx_desc);

/**
 * dp_tx_ext_desc_pool_alloc_by_id() - allocate TX extension Descriptor pool
 *                                     based on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc,
					   uint32_t num_elem,
					   uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem);

/**
 * dp_tx_ext_desc_pool_init_by_id() - initialize Tx extension Descriptor pool
 *                                    based on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem);

/**
 * dp_tx_ext_desc_pool_free_by_id() - free TX extension Descriptor pool
 *                                    based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_ext_desc_pool_deinit_by_id() - deinit Tx extension Descriptor pool
 *                                      based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_desc_pool_alloc_by_id() - allocate TSO Descriptor pool based
 *                                     on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
					   uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem);

/**
 * dp_tx_tso_desc_pool_init_by_id() - initialize TSO Descriptor pool
 *                                    based on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem);

/**
 * dp_tx_tso_desc_pool_free_by_id() - free TSO Descriptor pool based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_desc_pool_deinit_by_id() - deinitialize TSO Descriptor pool
 *                                      based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_num_seg_pool_alloc_by_id() - Allocate descriptors that track the
 *                             fragments in each tso segment, based on pool ID
 * @soc: handle to dp soc structure
 * @num_elem: total number of descriptors to be allocated
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem);

/**
 * dp_tx_tso_num_seg_pool_init_by_id() - Initialize descriptors that track the
 *                              fragments in each tso segment, based on pool ID
 * @soc: handle to dp soc structure
 * @num_elem: total number of descriptors to be initialized
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc,
					     uint32_t num_elem,
					     uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem);

/**
 * dp_tx_tso_num_seg_pool_free_by_id() - free descriptors that track the
 *                              fragments in each tso segment, based on pool ID
 * @soc: handle to dp soc structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_num_seg_pool_deinit_by_id() - de-initialize descriptors that track
 *                           the fragments in each tso segment, based on pool ID
 * @soc: handle to dp soc structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_pool_cleanup() - Clean up the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @nbuf_list: nbuf list for delayed free
 */
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
#endif

/**
 * dp_tx_desc_clear() - Clear contents of tx desc
 * @tx_desc: descriptor to free
 *
 * Return: none
 */
static inline void
dp_tx_desc_clear(struct dp_tx_desc_s *tx_desc)
{
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	tx_desc->next = NULL;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint32_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take the lock and do sanity checks (see the hedged usage
 * sketch below).
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take the lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

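/*
 * Hedged usage sketch (not part of the driver): how a caller is expected to
 * wrap dp_tx_get_desc_flow_pool()/dp_tx_put_desc_flow_pool() with the pool
 * lock and the sanity checks mentioned above. Illustrative only; the helper
 * name is hypothetical.
 */
static inline struct dp_tx_desc_s *
dp_tx_get_desc_flow_pool_example(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	/* sanity: pool must be active and hold at least one descriptor */
	if (pool->avail_desc && pool->status != FLOW_POOL_INACTIVE)
		tx_desc = dp_tx_get_desc_flow_pool(pool);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return tx_desc;
}
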
static inline void
dp_tx_desc_free_list(struct dp_tx_desc_pool_s *pool,
		     struct dp_tx_desc_s *head_desc,
		     struct dp_tx_desc_s *tail_desc,
		     uint32_t fast_desc_count)
{
}

#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if current avail desc meets a threshold
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
 * @soc: dp soc
 * @pool: flow pool
 */
static inline void
dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
			     struct dp_tx_desc_pool_s *pool)
{
	if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		return;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
		   pool->avail_desc > pool->stop_th[DP_TH_VI]) {
		pool->status = FLOW_POOL_BE_BK_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
		   pool->avail_desc > pool->stop_th[DP_TH_VO]) {
		pool->status = FLOW_POOL_VI_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
		   pool->avail_desc > pool->stop_th[DP_TH_HI]) {
		pool->status = FLOW_POOL_VO_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
		pool->status = FLOW_POOL_ACTIVE_PAUSED;
	}

	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_PRIORITY_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_PRI);
		fallthrough;

	case FLOW_POOL_VO_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VO_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_VO);
		fallthrough;

	case FLOW_POOL_VI_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VI_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_VI);
		fallthrough;

	case FLOW_POOL_BE_BK_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_BE_BK_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_BE_BK);
		break;
	default:
		dp_err("Invalid pool status:%u to adjust", pool->status);
	}
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
	enum netif_reason_type reason;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc &&
		    pool->status != FLOW_POOL_INVALID &&
		    pool->status != FLOW_POOL_INACTIVE)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			dp_tx_desc_set_magic(tx_desc,
					     DP_TX_MAGIC_PATTERN_INUSE);
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(pool->status ==
					 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
				dp_tx_adjust_flow_pool_state(soc, pool);
				is_pause = false;
			}

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_BE_BK;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_VI;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_VO;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_PRI;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      reason);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		dp_err_rl("NULL desc pool pool_id %d", desc_pool_id);
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
	enum netif_reason_type reason;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_PRI;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_VO;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_VI;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_NETIF_BE_BK_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_BE_BK;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id, false);
			dp_tx_desc_pool_free(soc, desc_pool_id, false);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, reason);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

static inline void
dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		     uint8_t desc_pool_id)
{
}

static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
							 uint8_t desc_pool_id)
{
	return NULL;
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: Descriptor pool ID
 *
 * Return: Tx descriptor or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			dp_tx_desc_set_magic(tx_desc,
					     DP_TX_MAGIC_PATTERN_INUSE);
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					      WLAN_STOP_ALL_NETIF_QUEUE,
					      WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
							 uint8_t desc_pool_id)
{
	return NULL;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: Descriptor to free
 * @desc_pool_id: Descriptor pool Id
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				      WLAN_WAKE_ALL_NETIF_QUEUE,
				      WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id, false);
			dp_tx_desc_pool_free(soc, desc_pool_id, false);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

static inline void
dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		     uint8_t desc_pool_id)
{
}
#endif /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_tx_desc_pool_s *pool;
	bool status;

	if (!vdev)
		return false;

	pool = vdev->pool;
	status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return status;
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint32_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
	if (tx_desc)
		prefetch(tx_desc);
}
#else
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id
 *
 * Return: Tx Descriptor or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = NULL;

	pool = dp_get_tx_desc_pool(soc, desc_pool_id);

	TX_DESC_LOCK_LOCK(&pool->lock);

	tx_desc = pool->freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		return NULL;
	}

	pool->freelist = pool->freelist->next;
	pool->num_allocated++;
	pool->num_free--;
	dp_tx_prefetch_desc(pool->freelist);

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return tx_desc;
}

static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
							 uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = NULL;

	pool = dp_get_spcl_tx_desc_pool(soc, desc_pool_id);

	TX_DESC_LOCK_LOCK(&pool->lock);

	tx_desc = pool->freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		return NULL;
	}

	pool->freelist = pool->freelist->next;
	pool->num_allocated++;
	pool->num_free--;
	dp_tx_prefetch_desc(pool->freelist);

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	tx_desc->flags |= DP_TX_DESC_FLAG_SPECIAL;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
 *                            from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocates multiple tx descriptors and links them into a list (a hedged
 * usage sketch follows dp_tx_desc_free() below).
 *
 * Return: first descriptor pointer or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;
	struct dp_tx_desc_pool_s *pool = NULL;

	pool = dp_get_tx_desc_pool(soc, desc_pool_id);

	TX_DESC_LOCK_LOCK(&pool->lock);

	if ((num_requested == 0) ||
	    (pool->num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, pool->num_free,
			num_requested);
		return NULL;
	}

	h_desc = pool->freelist;

	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* mark the last descriptor of the batch as allocated too */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	pool->num_free -= num_requested;
	pool->num_allocated += num_requested;
	pool->freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&pool->lock);
	return h_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: ID of the free pool
 *
 * Return: none
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = NULL;

	dp_tx_desc_clear(tx_desc);
	pool = dp_get_tx_desc_pool(soc, desc_pool_id);
	TX_DESC_LOCK_LOCK(&pool->lock);
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->num_allocated--;
	pool->num_free++;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}

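/*
 * Hedged usage sketch (not part of the driver): allocating a linked batch
 * with dp_tx_desc_alloc_multiple() and returning each element with
 * dp_tx_desc_free(). Pool id 0 and the batch size of 4 are arbitrary
 * choices for illustration; the helper name is hypothetical.
 */
static inline void dp_tx_desc_batch_example(struct dp_soc *soc)
{
	struct dp_tx_desc_s *head, *next;

	head = dp_tx_desc_alloc_multiple(soc, 0, 4);
	if (!head)
		return;	/* fewer than 4 descriptors were free */

	while (head) {
		/* save the link first: dp_tx_desc_free() clears ->next */
		next = head->next;
		dp_tx_desc_free(soc, head, 0);
		head = next;
	}
}
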
static inline void
dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		     uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = NULL;

	dp_tx_desc_clear(tx_desc);

	pool = dp_get_spcl_tx_desc_pool(soc, desc_pool_id);
	TX_DESC_LOCK_LOCK(&pool->lock);
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->num_allocated--;
	pool->num_free++;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}

static inline void
dp_tx_desc_free_list(struct dp_tx_desc_pool_s *pool,
		     struct dp_tx_desc_s *head_desc,
		     struct dp_tx_desc_s *tail_desc,
		     uint32_t fast_desc_count)
{
	TX_DESC_LOCK_LOCK(&pool->lock);
	pool->num_allocated -= fast_desc_count;
	pool->num_free += fast_desc_count;
	tail_desc->next = pool->freelist;
	pool->freelist = head_desc;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}

#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check if the tx desc id is valid
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: tx descriptor id (cookie)
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of limit; pool ids are 0-based */
	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

#ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
	    qdf_likely(allow_fast_comp)) {
		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
	}
}
#else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
}
#endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */

/**
 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
 * @soc: handle for the device sending the data
 * @pool_id: descriptor pool ID
 * @page_id: page index within the pool
 * @offset: descriptor index within the page
 *
 * Use page and offset to find the corresponding descriptor object in
 * the given descriptor pool. A hedged sketch of deriving these fields
 * from a descriptor ID follows this function.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}

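/*
 * Hedged illustration (not part of the driver): resolving a completion
 * cookie back to its descriptor by splitting the id with the masks defined
 * at the top of this file and passing the parts to dp_tx_desc_find(). The
 * helper name is hypothetical.
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_from_id_example(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}
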
/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: TX extension descriptor, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: extension descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                           attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the list of descriptors to be freed
 * @desc_pool_id: target pool id
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* the caller must guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* the caller must guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool from which to allocate the TSO descriptor
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool to which the TSO descriptor is returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the
 * soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif

/**
 * dp_tx_me_alloc_buf() - Alloc descriptor from me pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: tx descriptor on success, NULL on error
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/**
 * dp_tx_me_free_buf() - Unmap the buffer holding the dest
 * address, free the me descriptor and add it to the free-pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME BUF
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	/*
	 * If the buf containing the mac address was mapped,
	 * it must be unmapped before freeing the me_buf.
	 * The "paddr_macbuf" member in the me_buf structure
	 * holds the mapped physical address and it must be
	 * set to 0 after unmapping.
	 */
	if (buf->paddr_macbuf) {
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
					    buf->paddr_macbuf,
					    QDF_DMA_TO_DEVICE,
					    QDF_MAC_ADDR_SIZE);
		buf->paddr_macbuf = 0;
	}
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
#endif /* DP_TX_DESC_H */