/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/**
 * Tx descriptor ID (cookie) layout, as encoded by the masks below:
 * bits [16:15] - pool id (0 ~ 3)
 * bits [14:5]  - page id (0 ~ 1023)
 * bits [4:0]   - offset within page (0 ~ 31; with a 128-byte descriptor,
 *                a 4096-byte page holds 4096/128 = 32 descriptors)
 */
/* TODO: is a ring ID field needed in the cookie? */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
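
/*
 * Worked example (derived from the masks above; the ID value is
 * illustrative only): a descriptor ID of 0x9ABC decodes as
 *   pool_id = (0x9ABC & DP_TX_DESC_ID_POOL_MASK) >> DP_TX_DESC_ID_POOL_OS = 1
 *   page_id = (0x9ABC & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS = 213
 *   offset  = 0x9ABC & DP_TX_DESC_ID_OFFSET_MASK                          = 28
 */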

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
				uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get a descriptor from the flow pool freelist
 * @pool: flow pool
 *
 * The caller must hold the pool lock and ensure the freelist is not empty.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put a descriptor back on the flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * The caller must hold the pool lock.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

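/*
 * A minimal usage sketch for the two freelist helpers above (hypothetical
 * caller code; it mirrors how dp_tx_desc_alloc()/dp_tx_desc_free() below
 * use them):
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc)
 *		tx_desc = dp_tx_get_desc_flow_pool(pool);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */
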
#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - clean the members of the TX flow pool
 * @pool: flow pool
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	/* zero the full threshold arrays, not just FL_TH_MAX bytes */
	qdf_mem_zero(pool->start_th, sizeof(pool->start_th));
	qdf_mem_zero(pool->stop_th, sizeof(pool->stop_th));
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - check if the available descriptor count
 *				  has hit a stop threshold
 * @pool: flow pool
 * @avail_desc: number of available descriptors
 *
 * Return: true if a threshold is met, false otherwise
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - allocate a software Tx descriptor from the given pool
 * @soc: handle to the DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: allocated TX descriptor, or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_WAKE_NON_PRIORITY_QUEUE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queues */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "%s %d pool status is %d!",
						  __func__, __LINE__,
						  pool->status);
					break;
				}
				pool->latest_pause_time[level] =
					qdf_get_system_timestamp();
				soc->pause_cb(desc_pool_id,
					      act, WLAN_DATA_FLOW_CONTROL);
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - free a tx descriptor and return it to the free list
 * @soc: handle to the DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: none
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_WAKE_NON_PRIORITY_QUEUE;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is in INACTIVE state!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, WLAN_DATA_FLOW_CONTROL);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

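/**
 * dp_tx_is_threshold_reached() - check if the available descriptor count
 *				  has dropped below the stop threshold
 * @pool: flow pool
 * @avail_desc: number of available descriptors
 *
 * Return: true if the threshold is met, false otherwise
 */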
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - allocate a software Tx descriptor from the given pool
 * @soc: handle to the DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: allocated TX descriptor, or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(soc->hif_handle);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - free a tx descriptor and return it to the free list
 * @soc: handle to the DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: none
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			goto out;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is in INACTIVE state!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

out:
	/*
	 * Decrement PM usage count if the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
	hif_pm_runtime_put(soc->hif_handle);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */

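/**
 * dp_tx_desc_thresh_reached() - check if a vdev's pool has hit its
 *				 flow-control threshold
 * @vdev: cdp vdev handle
 *
 * Return: true if the pool threshold is reached, false otherwise
 */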
static inline bool
dp_tx_desc_thresh_reached(struct cdp_vdev *vdev)
{
	struct dp_vdev *dp_vdev = (struct dp_vdev *)vdev;
	struct dp_tx_desc_pool_s *pool;

	if (!vdev)
		return false;

	pool = dp_vdev->pool;

	return dp_tx_is_threshold_reached(pool, pool->avail_desc);
}

#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - allocate a software Tx descriptor from the given pool
 * @soc: handle to the DP SoC structure
 * @desc_pool_id: ID of the pool to allocate from
 *
 * Return: allocated TX descriptor, or NULL if the pool is exhausted
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	soc->tx_desc[desc_pool_id].freelist =
		soc->tx_desc[desc_pool_id].freelist->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;

	tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - allocate a batch of software Tx descriptors
 *				 from the given pool
 * @soc: handle to the DP SoC structure
 * @desc_pool_id: ID of the pool to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocates multiple Tx descriptors and links them into a list.
 *
 * Return: pointer to the first descriptor of the linked batch, or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, soc->tx_desc[desc_pool_id].num_free,
			num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	/*
	 * Walk to the last requested descriptor, marking each one as
	 * allocated; the walk stops on the last node so it can be
	 * detached from the freelist below.
	 */
	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	/* account for all num_requested descriptors, not num_requested - 1 */
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}

/**
 * dp_tx_desc_free() - free a tx descriptor and return it to the free list
 * @soc: handle to the DP SoC structure
 * @tx_desc: descriptor to be freed
 * @desc_pool_id: ID of the pool the descriptor belongs to
 *
 * Return: none
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether a tx descriptor ID is valid
 * @soc: handle to the DP SoC structure
 * @tx_desc_id: descriptor ID (cookie) to validate
 *
 * Return: true if the ID is valid, false otherwise
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of limit; valid ids are 0 .. num_pools - 1 */
	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

/**
 * dp_tx_desc_find() - find a dp tx descriptor from its cookie fields
 * @soc: handle for the device sending the data
 * @pool_id: descriptor pool ID
 * @page_id: page within the pool
 * @offset: descriptor index within the page
 *
 * Use the decomposed fields of a tx descriptor ID to look up the
 * corresponding descriptor object.
 *
 * Return: the descriptor object with the specified cookie fields
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
748 
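/*
 * Sketch of how dp_tx_desc_find() pairs with the cookie macros defined
 * above (illustrative caller code, not part of this header's API):
 *
 *	pool_id = (id & DP_TX_DESC_ID_POOL_MASK) >> DP_TX_DESC_ID_POOL_OS;
 *	page_id = (id & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS;
 *	offset  = (id & DP_TX_DESC_ID_OFFSET_MASK) >> DP_TX_DESC_ID_OFFSET_OS;
 *	tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 */
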
/**
 * dp_tx_ext_desc_alloc() - get a tx extension descriptor from the pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: ext descriptor element, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - release a tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - free multiple tx extension descriptors and
 *				    attach them to the free list
 * @soc: handle to DP SoC structure
 * @elem: head of the descriptor list to be freed
 * @desc_pool_id: id of the pool to return the descriptors to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - allocate a TSO segment element
 * @soc: device soc instance
 * @pool_id: pool id from which the TSO descriptor is taken
 *
 * Allocates a TSO segment element from the free list held in
 * the soc.
 *
 * Return: tso_seg, tso segment memory pointer; NULL if the list is empty
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - free a TSO segment element
 * @soc: device soc instance
 * @pool_id: pool id to which the TSO descriptor is returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc.
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}
883 
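/**
 * dp_tso_num_seg_alloc() - allocate a "TSO num seg" element
 * @soc: device soc instance
 * @pool_id: pool id from which the element is taken
 *
 * Return: qdf_tso_num_seg_elem_t pointer, or NULL if the list is empty
 */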
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}
901 
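/**
 * dp_tso_num_seg_free() - return a "TSO num seg" element to the free list
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */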
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif /* FEATURE_TSO */

/**
 * dp_tx_me_alloc_buf() - allocate an ME buffer from the pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: dp_tx_me_buf_t pointer, or NULL if the freelist is empty
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/**
 * dp_tx_me_free_buf() - free an ME buffer and return it to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: allocated ME buffer
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
#endif /* DP_TX_DESC_H */