xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h (revision dae10a5fbc53d54c53c4ba24fa018ad8b1e7c008)
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
	#ifdef TX_PER_VDEV_DESC_POOL
		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
	#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/*
 * The 21-bit Tx descriptor ID (cookie) uses its low 17 bits as follows:
 *  2 bits  pool id   (0 ~ 3)
 * 10 bits  page id   (0 ~ 1023)
 *  5 bits  offset id (0 ~ 31; desc size = 128, descs per page = 4096/128 = 32)
 */
/* TODO: decide whether a ring ID field is needed in the remaining bits */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
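
/*
 * Illustrative sketch (not part of the driver): composing a descriptor ID
 * from its fields with the shifts above; dp_tx_is_desc_id_valid() below
 * performs the reverse decode. Variable names are local to the example.
 *
 *	uint32_t tx_desc_id = (pool_id << DP_TX_DESC_ID_POOL_OS) |
 *			      (page_id << DP_TX_DESC_ID_PAGE_OS) |
 *			      (offset << DP_TX_DESC_ID_OFFSET_OS);
 */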

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *soc);
void dp_tx_flow_control_deinit(struct dp_soc *soc);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
				uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take the lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take the lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}
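
/*
 * Illustrative sketch (not part of the driver): the two helpers above are
 * lockless by design; callers serialize on pool->flow_pool_lock and check
 * availability first, e.g.:
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc)
 *		tx_desc = dp_tx_get_desc_flow_pool(pool);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */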

#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of a TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	/* zero the full per-AC threshold arrays */
	qdf_mem_zero(pool->start_th, sizeof(pool->start_th));
	qdf_mem_zero(pool->stop_th, sizeof(pool->stop_th));
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if avail_desc meets a stop threshold
 * @pool: flow pool
 * @avail_desc: available descriptor count
 *
 * Return: true if a stop threshold is met, false otherwise
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_WAKE_NON_PRIORITY_QUEUE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queues */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "%s %d pool is in status %d!",
						  __func__, __LINE__,
						  pool->status);
					break;
				}
				pool->latest_pause_time[level] =
					qdf_get_system_timestamp();
				soc->pause_cb(desc_pool_id,
					      act, WLAN_DATA_FLOW_CONTROL);
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_WAKE_NON_PRIORITY_QUEUE;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is in INACTIVE state!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, WLAN_DATA_FLOW_CONTROL);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
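
/*
 * Illustrative sketch (not part of the driver): typical pairing of the two
 * helpers above on the transmit path; programming the descriptor and the
 * hardware enqueue are elided.
 *
 *	struct dp_tx_desc_s *desc;
 *
 *	desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *	if (!desc)
 *		return QDF_STATUS_E_RESOURCES;	// pool empty or paused
 *	// ... program desc and enqueue it to the hardware ring ...
 *	// on Tx completion (or on enqueue failure):
 *	dp_tx_desc_free(soc, desc, desc_pool_id);
 */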
#else /* QCA_AC_BASED_FLOW_CONTROL */
/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated, or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(soc->hif_handle);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			goto out;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is in INACTIVE state!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

out:
	/*
	 * Decrement the PM usage count once the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
	hif_pm_runtime_put(soc->hif_handle);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool
 *
 * Return: TX descriptor allocated, or NULL if the pool is exhausted
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	soc->tx_desc[desc_pool_id].freelist =
		soc->tx_desc[desc_pool_id].freelist->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;

	tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *                               from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id from which the descriptors are allocated
 * @num_requested: number of descriptors requested
 *
 * Allocate multiple tx descriptors and link them into a chain.
 *
 * Return: head of the allocated descriptor chain, or NULL on failure
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, soc->tx_desc[desc_pool_id].num_free,
			num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free > requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* mark the last descriptor of the chain as well */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}
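
/*
 * Illustrative sketch (not part of the driver): consuming the
 * NULL-terminated chain returned by dp_tx_desc_alloc_multiple().
 *
 *	struct dp_tx_desc_s *head, *next;
 *
 *	head = dp_tx_desc_alloc_multiple(soc, desc_pool_id, 4);
 *	while (head) {
 *		next = head->next;
 *		// ... use head ...
 *		dp_tx_desc_free(soc, head, desc_pool_id);
 *		head = next;
 *	}
 */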

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to be freed
 * @desc_pool_id: ID of the descriptor pool
 *
 * Return: none
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether a tx descriptor ID is valid
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: descriptor ID to validate
 *
 * Return: true if the ID decodes to a valid pool/page/offset; asserts
 *	   and returns false otherwise
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of limit; valid ids are 0 .. num_pools - 1 */
	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool %d, num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

/**
 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
 * @soc: handle for the device sending the data
 * @pool_id: id of the descriptor pool
 * @page_id: page on which the descriptor resides
 * @offset: index of the descriptor within its page
 *
 * Use the fields decoded from a tx descriptor ID (cookie) to find the
 * corresponding descriptor object.
 *
 * Return: the descriptor object at the given pool/page/offset
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
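
/*
 * Illustrative sketch (not part of the driver): decoding a completion
 * cookie with the DP_TX_DESC_ID_* masks above and looking up the
 * descriptor object.
 *
 *	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *			DP_TX_DESC_ID_POOL_OS;
 *	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *			DP_TX_DESC_ID_PAGE_OS;
 *	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *			DP_TX_DESC_ID_OFFSET_OS;
 *	tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 */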

/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: allocated ext descriptor element, or NULL if the pool is empty
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                                  attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the ext descriptor list to be freed
 * @desc_pool_id: pool id to which the descriptors are returned
 * @num_free: number of descriptors to free
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}
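
/*
 * Illustrative sketch (not part of the driver): returning a short chain of
 * ext descriptors in one locked operation; elem0..elem2 are hypothetical.
 *
 *	elem0->next = elem1;
 *	elem1->next = elem2;
 *	dp_tx_ext_desc_free_multiple(soc, elem0, desc_pool_id, 3);
 */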

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which the TSO descriptor is picked
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to which the TSO descriptor is returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the
 * soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tso_num_seg_alloc() - function to allocate a TSO num-seg element
 * @soc: device soc instance
 * @pool_id: pool id from which the element is picked
 *
 * Return: allocated element, or NULL if the pool is empty
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - function to return a TSO num-seg element to the pool
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif /* FEATURE_TSO */
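
/*
 * Illustrative sketch (not part of the driver, FEATURE_TSO builds only):
 * one TSO segment element is taken per segment of a jumbo frame and
 * returned on Tx completion.
 *
 *	struct qdf_tso_seg_elem_t *seg;
 *
 *	seg = dp_tx_tso_desc_alloc(soc, pool_id);
 *	if (!seg)
 *		return QDF_STATUS_E_RESOURCES;	// TSO pool exhausted
 *	// ... fill seg and transmit ...
 *	dp_tx_tso_desc_free(soc, pool_id, seg);
 */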

/*
 * dp_tx_me_alloc_buf() - Allocate a buffer from the ME (mcast enhancement)
 *			  pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: pointer to the allocated ME buffer, or NULL if the pool is empty
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/*
 * dp_tx_me_free_buf() - Free the ME buffer and return it to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: allocated ME buffer
 *
 * Return: none
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
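
/*
 * Illustrative sketch (not part of the driver): ME buffers back the
 * per-destination copies made during multicast-to-unicast conversion.
 *
 *	struct dp_tx_me_buf_t *mc_uc_buf;
 *
 *	mc_uc_buf = dp_tx_me_alloc_buf(pdev);
 *	if (!mc_uc_buf)
 *		return;			// freelist exhausted
 *	// ... build the unicast copy using the buffer ...
 *	dp_tx_me_free_buf(pdev, mc_uc_buf);
 */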
#endif /* DP_TX_DESC_H */