/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/**
 * 21 bits cookie
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* TODO: determine whether a ring ID also needs to be encoded in the cookie */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
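
/*
 * Editor's illustrative sketch (not part of the driver): composing a tx
 * descriptor ID (cookie) from the pool/page/offset fields defined above.
 * The function name is hypothetical; it is shown only to document the
 * field layout.
 */
static inline uint32_t dp_tx_desc_id_pack_example(uint8_t pool_id,
						  uint16_t page_id,
						  uint16_t offset)
{
	return ((pool_id << DP_TX_DESC_ID_POOL_OS) &
			DP_TX_DESC_ID_POOL_MASK) |
	       ((page_id << DP_TX_DESC_ID_PAGE_OS) &
			DP_TX_DESC_ID_PAGE_MASK) |
	       ((offset << DP_TX_DESC_ID_OFFSET_OS) &
			DP_TX_DESC_ID_OFFSET_MASK);
}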

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}
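
/*
 * Editor's illustrative sketch (not part of the driver): the two helpers
 * above assume the caller already holds pool->flow_pool_lock and has
 * checked the freelist. A locked wrapper honoring that contract could look
 * like this; the function name is hypothetical.
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_locked_example(
		struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	/* sanity check: the freelist may be exhausted */
	if (pool->avail_desc)
		tx_desc = dp_tx_get_desc_flow_pool(pool);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return tx_desc;
}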

#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 *
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if the available descriptor count
 *                                has hit one of the stop thresholds
 *
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      WLAN_DATA_FLOW_CONTROL);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_WAKE_NON_PRIORITY_QUEUE;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, WLAN_DATA_FLOW_CONTROL);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
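
/*
 * Editor's illustrative usage sketch (not part of the driver): each frame
 * borrows one descriptor via dp_tx_desc_alloc() and returns it on TX
 * completion via dp_tx_desc_free(); the AC-based pause/unpause state
 * machine above is driven entirely inside those calls. The function name
 * here is hypothetical.
 */
static inline QDF_STATUS dp_tx_send_one_example(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (!tx_desc)
		return QDF_STATUS_E_NOMEM; /* pool missing or exhausted */

	/* ... program the descriptor and hand the frame to HW here ... */

	/* normally called from the TX completion path */
	dp_tx_desc_free(soc, tx_desc, desc_pool_id);

	return QDF_STATUS_SUCCESS;
}
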
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(soc->hif_handle);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			goto out;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

out:
	/*
	 * Decrement PM usage count if the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
	hif_pm_runtime_put(soc->hif_handle);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
	struct dp_tx_desc_pool_s *pool;

	if (!vdev)
		return false;

	pool = vdev->pool;

	return dp_tx_is_threshold_reached(pool, pool->avail_desc);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	soc->tx_desc[desc_pool_id].freelist =
		soc->tx_desc[desc_pool_id].freelist->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
 *                            from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocates multiple tx descriptors and links them into a list.
 *
 * Return: h_desc, pointer to the first descriptor in the list
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, soc->tx_desc[desc_pool_id].num_free,
			num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free >= requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* mark the tail descriptor; the loop above stops one short of it */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}
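
/*
 * Editor's illustrative usage sketch (not part of the driver): a batch from
 * dp_tx_desc_alloc_multiple() comes back as a NULL-terminated singly linked
 * list; completed descriptors are still returned one at a time through
 * dp_tx_desc_free(). The function name here is hypothetical.
 */
static inline void dp_tx_desc_batch_example(struct dp_soc *soc,
					    uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *head;
	struct dp_tx_desc_s *desc;

	head = dp_tx_desc_alloc_multiple(soc, desc_pool_id, 4);
	if (!head)
		return; /* fewer than 4 descriptors were free */

	for (desc = head; desc; desc = desc->next) {
		/* ... program each descriptor and queue it to HW here ... */
	}
}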

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to be freed
 * @desc_pool_id: pool id to return the descriptor to
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether a tx desc id is valid
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: the tx descriptor id to validate
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of limit; valid ids are 0 .. num_pools - 1 */
	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool %d, num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

/**
 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
 * @soc: handle for the device sending the data
 * @pool_id: pool id of the descriptor
 * @page_id: page id of the descriptor
 * @offset: index of the descriptor within its page
 *
 * Use the pool/page/offset fields decoded from a tx descriptor ID (cookie)
 * to find the corresponding descriptor object.
 *
 * Return: the descriptor object with the specified pool/page/offset
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
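
/*
 * Editor's illustrative sketch (not part of the driver): decoding a tx
 * descriptor ID with the DP_TX_DESC_ID_* masks above and resolving it to a
 * descriptor object. The function name is hypothetical.
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_find_by_id_example(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
		return NULL;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}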

/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: ext descriptor element, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                           attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor list to be freed
 * @desc_pool_id: pool id to return the descriptors to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}
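
/*
 * Editor's illustrative usage sketch (not part of the driver): extension
 * descriptors carry scatter/gather and TSO metadata and are borrowed and
 * returned just like regular descriptors. The function name is hypothetical.
 */
static inline void dp_tx_ext_desc_example(struct dp_soc *soc,
					  uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *elem;

	elem = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	if (!elem)
		return; /* pool exhausted */

	/* ... fill the extension descriptor here ... */

	dp_tx_ext_desc_free(soc, elem, desc_pool_id);
}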

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which a TSO descriptor is allocated
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to which the TSO descriptor is returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the
 * soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tso_num_seg_alloc() - function to allocate a TSO num-seg element
 * @soc: device soc instance
 * @pool_id: pool id from which the element is allocated
 *
 * Return: tso num-seg element, or NULL if the pool is exhausted
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - function to free a TSO num-seg element
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: tso num-seg element pointer
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
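
/*
 * Editor's illustrative usage sketch (not part of the driver): per-packet
 * TSO bookkeeping pairs one num-seg element with one or more segment
 * elements; both must go back to their pools on completion or error. The
 * function name is hypothetical.
 */
static inline void dp_tx_tso_example(struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *num_seg;
	struct qdf_tso_seg_elem_t *seg;

	num_seg = dp_tso_num_seg_alloc(soc, pool_id);
	seg = dp_tx_tso_desc_alloc(soc, pool_id);

	if (num_seg && seg) {
		/* ... fill the segment for the HW here ... */
	}

	if (seg)
		dp_tx_tso_desc_free(soc, pool_id, seg);
	if (num_seg)
		dp_tso_num_seg_free(soc, pool_id, num_seg);
}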
#endif

/*
 * dp_tx_me_alloc_buf() - Allocate an ME buffer from the ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: dp_tx_me_buf_t (buf)
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/*
 * dp_tx_me_free_buf() - Free ME buffer and add it back to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME BUF
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
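
/*
 * Editor's illustrative usage sketch (not part of the driver): multicast
 * enhancement (ME) buffers are borrowed from the pdev pool and returned
 * once the cloned frame has been sent. The function name is hypothetical.
 */
static inline void dp_tx_me_example(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *me_buf = dp_tx_me_alloc_buf(pdev);

	if (!me_buf)
		return; /* pool exhausted */

	/* ... use the buffer for the cloned frame here ... */

	dp_tx_me_free_buf(pdev, me_buf);
}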
#endif /* DP_TX_DESC_H */