/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/*
 * 21-bit cookie layout:
 *  1 bit  special pool indicator
 *  3 bits unused
 *  2 bits pool id (0 ~ 3)
 * 10 bits page id (0 ~ 1023)
 *  5 bits offset id (0 ~ 31)
 *         (desc size = 128, num descs per page = 4096/128 = 32)
 */
/* TODO: Is a Ring ID needed? */

/* TODO: Need to revisit this change for Rhine */
#ifdef WLAN_SOFTUMAC_SUPPORT
#define DP_TX_DESC_ID_SPCL_MASK    0x100000
#define DP_TX_DESC_ID_SPCL_OS      20
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FF0
#define DP_TX_DESC_ID_PAGE_OS      4
#define DP_TX_DESC_ID_OFFSET_MASK  0x00000F
#define DP_TX_DESC_ID_OFFSET_OS    0
#else
#define DP_TX_DESC_ID_SPCL_MASK    0x100000
#define DP_TX_DESC_ID_SPCL_OS      20
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
#endif /* WLAN_SOFTUMAC_SUPPORT */
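
/*
 * Illustrative sketch only (not part of the driver API): how a tx
 * descriptor ID (cookie) is composed from and decomposed into its
 * fields using the masks and offsets above. The local variables are
 * hypothetical.
 *
 *	desc_id = ((pool_id << DP_TX_DESC_ID_POOL_OS) &
 *		   DP_TX_DESC_ID_POOL_MASK) |
 *		  ((page_id << DP_TX_DESC_ID_PAGE_OS) &
 *		   DP_TX_DESC_ID_PAGE_MASK) |
 *		  ((offset << DP_TX_DESC_ID_OFFSET_OS) &
 *		   DP_TX_DESC_ID_OFFSET_MASK);
 *
 *	pool_id = (desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *			DP_TX_DESC_ID_POOL_OS;
 */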

/*
 * Compile-time assert on tx desc size.
 *
 * If this assert is hit, update POOL_MASK and PAGE_MASK
 * according to the updated size.
 *
 * For the current PAGE mask, the allowed tx_desc size range
 * is between 128 and 256.
 */
QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
			((sizeof(struct dp_tx_desc_s)) <=
			 (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
			((sizeof(struct dp_tx_desc_s)) >
			 (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)))
		       );

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

#ifdef DP_TX_TRACKING
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
	tx_desc->magic = magic_pattern;
}
#else
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 * @spcl_tx_desc: if special desc
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required is large: with many clients
 * (e.g. 1024 clients x 3 radios), the number of outstanding MSDUs
 * stored in TQM queues and LMAC queues is significant.
 *
 * To avoid allocating a large contiguous memory block, this function
 * uses the qdf multi_page_alloc facility to allocate memory in
 * multiple pages. It then iterates through the memory allocated across
 * pages and links each descriptor to the next, taking care of page
 * boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for
 * VDEV-level flow control.
 *
 * Return: Status code. 0 for success.
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem, bool spcl_tx_desc);

/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 * @spcl_tx_desc: if special desc
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem, bool spcl_tx_desc);

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 * @spcl_tx_desc: if special desc
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
			  bool spcl_tx_desc);

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 * @spcl_tx_desc: if special desc
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
			    bool spcl_tx_desc);

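/*
 * Typical pool lifecycle, as a usage sketch (illustrative only; assumes
 * a valid @soc and a configuration-derived @num_elem, with error
 * handling elided):
 *
 *	if (dp_tx_desc_pool_alloc(soc, pool_id, num_elem, false) ==
 *	    QDF_STATUS_SUCCESS &&
 *	    dp_tx_desc_pool_init(soc, pool_id, num_elem, false) ==
 *	    QDF_STATUS_SUCCESS) {
 *		... use the pool ...
 *		dp_tx_desc_pool_deinit(soc, pool_id, false);
 *		dp_tx_desc_pool_free(soc, pool_id, false);
 *	}
 */
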
/**
 * dp_tx_ext_desc_pool_alloc_by_id() - allocate TX extension Descriptor pool
 *                                     based on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc,
					   uint32_t num_elem,
					   uint8_t pool_id);
/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem);

/**
 * dp_tx_ext_desc_pool_init_by_id() - initialize Tx extension Descriptor pool
 *                                    based on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem);

/**
 * dp_tx_ext_desc_pool_free_by_id() - free TX extension Descriptor pool
 *                                    based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_ext_desc_pool_deinit_by_id() - deinit Tx extension Descriptor pool
 *                                      based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_desc_pool_alloc_by_id() - allocate TSO Descriptor pool based
 *                                     on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
					   uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem);

/**
 * dp_tx_tso_desc_pool_init_by_id() - initialize TSO Descriptor pool
 *                                    based on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem);

/**
 * dp_tx_tso_desc_pool_free_by_id() - free TSO Descriptor pool based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_desc_pool_deinit_by_id() - deinitialize TSO Descriptor pool
 *                                      based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_num_seg_pool_alloc_by_id() - Allocate descriptors that track the
 *                             fragments in each tso segment, based on pool ID
 * @soc: handle to dp soc structure
 * @num_elem: total number of descriptors to be allocated
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem);

/**
 * dp_tx_tso_num_seg_pool_init_by_id() - Initialize descriptors that track the
 *                              fragments in each tso segment, based on pool ID
 * @soc: handle to dp soc structure
 * @num_elem: total number of descriptors to be initialized
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc,
					     uint32_t num_elem,
					     uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem);

/**
 * dp_tx_tso_num_seg_pool_free_by_id() - free descriptors that track the
 *                              fragments in each tso segment, based on pool ID
 * @soc: handle to dp soc structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_num_seg_pool_deinit_by_id() - de-initialize descriptors that track
 *                           the fragments in each tso segment, based on pool ID
 * @soc: handle to dp soc structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_pool_cleanup() - Clean up the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @nbuf_list: nbuf list for delayed free
 */
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
#endif

/**
 * dp_tx_desc_clear() - Clear contents of tx desc
 * @tx_desc: descriptor to free
 *
 * Return: none
 */
static inline void
dp_tx_desc_clear(struct dp_tx_desc_s *tx_desc)
{
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	tx_desc->next = NULL;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint32_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

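/*
 * Both helpers above assume the caller already holds the pool lock and
 * has validated the pool; a minimal usage sketch (illustrative only):
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc)
 *		tx_desc = dp_tx_get_desc_flow_pool(pool);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 *
 *	... use tx_desc ...
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	dp_tx_put_desc_flow_pool(pool, tx_desc);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */
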
static inline void
dp_tx_desc_free_list(struct dp_tx_desc_pool_s *pool,
		     struct dp_tx_desc_s *head_desc,
		     struct dp_tx_desc_s *tail_desc,
		     uint32_t fast_desc_count)
{
}

#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if the available descriptor count
 *				  has hit a pause threshold
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
 * @soc: dp soc
 * @pool: flow pool
 */
static inline void
dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
			     struct dp_tx_desc_pool_s *pool)
{
	if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		return;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
		   pool->avail_desc > pool->stop_th[DP_TH_VI]) {
		pool->status = FLOW_POOL_BE_BK_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
		   pool->avail_desc > pool->stop_th[DP_TH_VO]) {
		pool->status = FLOW_POOL_VI_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
		   pool->avail_desc > pool->stop_th[DP_TH_HI]) {
		pool->status = FLOW_POOL_VO_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
		pool->status = FLOW_POOL_ACTIVE_PAUSED;
	}

	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_PRIORITY_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_PRI);
		fallthrough;

	case FLOW_POOL_VO_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VO_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_VO);
		fallthrough;

	case FLOW_POOL_VI_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VI_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_VI);
		fallthrough;

	case FLOW_POOL_BE_BK_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_BE_BK_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_BE_BK);
		break;
	default:
		dp_err("Invalid pool status:%u to adjust", pool->status);
	}
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
	enum netif_reason_type reason;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc &&
		    pool->status != FLOW_POOL_INVALID &&
		    pool->status != FLOW_POOL_INACTIVE)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			dp_tx_desc_set_magic(tx_desc,
					     DP_TX_MAGIC_PATTERN_INUSE);
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(pool->status ==
					 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
				dp_tx_adjust_flow_pool_state(soc, pool);
				is_pause = false;
			}

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_BE_BK;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_VI;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_VO;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_PRI;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      reason);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		dp_err_rl("NULL desc pool pool_id %d", desc_pool_id);
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
	enum netif_reason_type reason;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_PRI;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_VO;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_VI;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_NETIF_BE_BK_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_BE_BK;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id, false);
			dp_tx_desc_pool_free(soc, desc_pool_id, false);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			dp_err_rl("pool %d is freed!!", desc_pool_id);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED_REATTACH:
		fallthrough;
	default:
		dp_err_rl("pool %d status: %d",
			  desc_pool_id, pool->status);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, reason);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

static inline void
dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		     uint8_t desc_pool_id)
{
}

static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
							 uint8_t desc_pool_id)
{
	return NULL;
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: Descriptor pool ID
 *
 * Return: Tx descriptor or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			dp_tx_desc_set_magic(tx_desc,
					     DP_TX_MAGIC_PATTERN_INUSE);
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
							 uint8_t desc_pool_id)
{
	return NULL;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: Descriptor to free
 * @desc_pool_id: Descriptor pool Id
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id, false);
			dp_tx_desc_pool_free(soc, desc_pool_id, false);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

static inline void
dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		     uint8_t desc_pool_id)
{
}
#endif /* QCA_AC_BASED_FLOW_CONTROL */

/**
 * dp_tx_desc_thresh_reached() - Check if the vdev's flow pool has hit
 *				 its pause threshold
 * @soc_hdl: CDP SoC handle
 * @vdev_id: vdev id
 *
 * Return: true if the threshold is reached, false otherwise
 */
static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_tx_desc_pool_s *pool;
	bool status;

	if (!vdev)
		return false;

	pool = vdev->pool;
	status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return status;
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint32_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
	if (tx_desc)
		prefetch(tx_desc);
}
#else
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id
 *
 * Return: Tx Descriptor or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = NULL;

	pool = dp_get_tx_desc_pool(soc, desc_pool_id);

	TX_DESC_LOCK_LOCK(&pool->lock);

	tx_desc = pool->freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		return NULL;
	}

	pool->freelist = pool->freelist->next;
	pool->num_allocated++;
	pool->num_free--;
	dp_tx_prefetch_desc(pool->freelist);

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return tx_desc;
}

static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
							 uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = NULL;

	pool = dp_get_spcl_tx_desc_pool(soc, desc_pool_id);

	TX_DESC_LOCK_LOCK(&pool->lock);

	tx_desc = pool->freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		return NULL;
	}

	pool->freelist = pool->freelist->next;
	pool->num_allocated++;
	pool->num_free--;
	dp_tx_prefetch_desc(pool->freelist);

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	tx_desc->flags |= DP_TX_DESC_FLAG_SPECIAL;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
 *                            from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocate multiple tx descriptors and link them into a list.
 *
 * Return: first descriptor pointer or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;
	struct dp_tx_desc_pool_s *pool = NULL;

	pool = dp_get_tx_desc_pool(soc, desc_pool_id);

	TX_DESC_LOCK_LOCK(&pool->lock);

	if ((num_requested == 0) ||
			(pool->num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, pool->num_free,
			num_requested);
		return NULL;
	}

	h_desc = pool->freelist;

	/* h_desc should never be NULL since num_free > requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* mark the tail descriptor allocated as well and account for the
	 * full batch of num_requested descriptors
	 */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	pool->num_free -= num_requested;
	pool->num_allocated += num_requested;
	pool->freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&pool->lock);
	return h_desc;
}

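/*
 * Usage sketch for batch allocation (illustrative only): the returned
 * descriptors are linked through their next pointers and the list is
 * NULL-terminated, so a caller can walk it as below.
 *
 *	struct dp_tx_desc_s *desc;
 *
 *	desc = dp_tx_desc_alloc_multiple(soc, pool_id, num);
 *	while (desc) {
 *		... fill and enqueue desc ...
 *		desc = desc->next;
 *	}
 */
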
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: ID of the free pool
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = NULL;

	dp_tx_desc_clear(tx_desc);
	pool = dp_get_tx_desc_pool(soc, desc_pool_id);
	TX_DESC_LOCK_LOCK(&pool->lock);
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->num_allocated--;
	pool->num_free++;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}

static inline void
dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		     uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = NULL;

	dp_tx_desc_clear(tx_desc);

	pool = dp_get_spcl_tx_desc_pool(soc, desc_pool_id);
	TX_DESC_LOCK_LOCK(&pool->lock);
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->num_allocated--;
	pool->num_free++;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}

static inline void
dp_tx_desc_free_list(struct dp_tx_desc_pool_s *pool,
		     struct dp_tx_desc_s *head_desc,
		     struct dp_tx_desc_s *tail_desc,
		     uint32_t fast_desc_count)
{
	TX_DESC_LOCK_LOCK(&pool->lock);
	pool->num_allocated -= fast_desc_count;
	pool->num_free += fast_desc_count;
	tail_desc->next = pool->freelist;
	pool->freelist = head_desc;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}

#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check if the tx desc id is valid
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: tx descriptor id (cookie)
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of limit */
	if (pool_id > wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

#ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
	    qdf_likely(allow_fast_comp))
		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;

	if (qdf_likely(desc->nbuf->is_from_recycler) &&
	    qdf_likely(desc->nbuf->fast_xmit))
		desc->flags |= DP_TX_DESC_FLAG_FAST;
}

#else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
}
#endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */

/**
 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
 * @soc: handle for the device sending the data
 * @pool_id: pool id
 * @page_id: page id
 * @offset: offset from base address
 * @spcl_pool: bit to indicate if this is a special pool
 *
 * Use page and offset to find the corresponding descriptor object in
 * the given descriptor pool.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline
struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
				     uint8_t pool_id, uint16_t page_id,
				     uint16_t offset, bool spcl_pool)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	tx_desc_pool = spcl_pool ? dp_get_spcl_tx_desc_pool(soc, pool_id) :
				dp_get_tx_desc_pool(soc, pool_id);

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}

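/*
 * Usage sketch (illustrative only): recovering a descriptor from a
 * completion cookie by decomposing the ID with the masks defined at the
 * top of this file and then calling dp_tx_desc_find().
 *
 *	pool_id = (desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *			DP_TX_DESC_ID_POOL_OS;
 *	page_id = (desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *			DP_TX_DESC_ID_PAGE_OS;
 *	offset = (desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *			DP_TX_DESC_ID_OFFSET_OS;
 *	tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset, false);
 */
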
/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: tx extension descriptor on success, NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                           attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the tx extension descriptor list to be freed
 * @desc_pool_id: id of the pool the descriptors belong to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

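/*
 * Usage sketch for the extension descriptor helpers (illustrative only;
 * assumes a valid @soc):
 *
 *	struct dp_tx_ext_desc_elem_s *elem;
 *
 *	elem = dp_tx_ext_desc_alloc(soc, desc_pool_id);
 *	if (elem) {
 *		... program scatter-gather / TSO info into elem ...
 *		dp_tx_ext_desc_free(soc, elem, desc_pool_id);
 *	}
 */
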
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which to pick up the tso descriptor
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to which the tso descriptor is returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the
 * soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

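/*
 * Usage sketch (illustrative only): TSO segment elements are taken from
 * and returned to the per-pool freelist in LIFO order.
 *
 *	struct qdf_tso_seg_elem_t *seg;
 *
 *	seg = dp_tx_tso_desc_alloc(soc, pool_id);
 *	if (seg) {
 *		... fill the segment from the jumbo frame ...
 *		dp_tx_tso_desc_free(soc, pool_id, seg);
 *	}
 */
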
/**
 * dp_tso_num_seg_alloc() - function to allocate a TSO num-seg element
 * @soc: device soc instance
 * @pool_id: pool id from which to pick up the element
 *
 * Return: tso num-seg element, or NULL if the pool is exhausted
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - function to return a TSO num-seg element to its pool
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif

/**
 * dp_tx_me_alloc_buf() - Alloc descriptor from me pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: tx descriptor on success, NULL on error
 */
static inline struct dp_tx_me_buf_t *
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/**
 * dp_tx_me_free_buf() - Unmap the buffer holding the dest
 * address, free the me descriptor and add it to the free-pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME BUF
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	/*
	 * If the buf containing the mac address was mapped,
	 * it must be unmapped before freeing the me_buf.
	 * The "paddr_macbuf" member in the me_buf structure
	 * holds the mapped physical address and it must be
	 * set to 0 after unmapping.
	 */
	if (buf->paddr_macbuf) {
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
					    buf->paddr_macbuf,
					    QDF_DMA_TO_DEVICE,
					    QDF_MAC_ADDR_SIZE);
		buf->paddr_macbuf = 0;
	}
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
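
/*
 * Usage sketch (illustrative only): a multicast-echo buffer is taken
 * from the pdev freelist, used to hold a destination MAC address, and
 * returned once the cloned frame has been sent.
 *
 *	struct dp_tx_me_buf_t *me_buf = dp_tx_me_alloc_buf(pdev);
 *
 *	if (me_buf) {
 *		... copy the destination address and map it for DMA ...
 *		dp_tx_me_free_buf(pdev, me_buf);
 *	}
 */
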
#endif /* DP_TX_DESC_H */