/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
	#ifdef TX_PER_VDEV_DESC_POOL
		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
	#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/*
 * TX descriptor ID (cookie) layout
 *
 * The cookie is 21 bits wide; the layout below uses the low 17 bits:
 * 2 bits pool id (0 ~ 3),
 * 10 bits page id (0 ~ 1023),
 * 5 bits offset id (0 ~ 31; desc size = 128, descs per 4096-byte page = 32)
 */
/* TODO: determine whether a ring ID field is also needed in the cookie */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0

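/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * unpacking a descriptor ID cookie into its pool/page/offset fields with
 * the masks above. dp_tx_is_desc_id_valid() further down performs the
 * same unpacking when it validates a completion cookie.
 */
static inline void
dp_tx_desc_id_unpack_example(uint32_t tx_desc_id, uint8_t *pool_id,
			     uint16_t *page_id, uint16_t *offset)
{
	/* each field is isolated by its mask, then shifted down by its OS */
	*pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	*page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
			DP_TX_DESC_ID_PAGE_OS;
	*offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
			DP_TX_DESC_ID_OFFSET_OS;
}
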
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
				uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}
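
/*
 * Usage sketch (hypothetical, for illustration only): the get/put helpers
 * above assume the caller holds flow_pool_lock and has checked that
 * descriptors are available, exactly as dp_tx_desc_alloc() does below.
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_get_locked_example(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (pool->avail_desc)
		tx_desc = dp_tx_get_desc_flow_pool(pool);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return tx_desc;
}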

#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 *
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if the available descriptor count
 *                                has hit a per-AC stop threshold
 *
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      WLAN_DATA_FLOW_CONTROL);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_WAKE_NON_PRIORITY_QUEUE;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, WLAN_DATA_FLOW_CONTROL);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool to allocate from
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(soc->hif_handle);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the descriptor pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			goto out;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

out:
	/*
	 * Decrement PM usage count if the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
	hif_pm_runtime_put(soc->hif_handle);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */

/**
 * dp_tx_desc_thresh_reached() - Check if a vdev's pool hit a stop threshold
 * @vdev: CDP vdev handle
 *
 * Return: true if the pool's available descriptor count has reached a
 *         flow control stop threshold, false otherwise
 */
static inline bool
dp_tx_desc_thresh_reached(struct cdp_vdev *vdev)
{
	struct dp_vdev *dp_vdev = (struct dp_vdev *)vdev;
	struct dp_tx_desc_pool_s *pool;

	if (!vdev)
		return false;

	pool = dp_vdev->pool;

	return dp_tx_is_threshold_reached(pool, pool->avail_desc);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 *
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool to allocate from
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	soc->tx_desc[desc_pool_id].freelist =
		soc->tx_desc[desc_pool_id].freelist->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx Descriptors
 *                            from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the pool to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocates multiple tx descriptors and links them into a chain.
 *
 * Return: h_desc, pointer to the first descriptor of the chain, or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, soc->tx_desc[desc_pool_id].num_free,
			num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free > requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* mark the last descriptor of the batch and account for all of them */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}
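
/*
 * Usage sketch (hypothetical, for illustration): the batch returned by
 * dp_tx_desc_alloc_multiple() is a NULL-terminated singly linked chain,
 * so a plain next-pointer walk visits every allocated descriptor.
 */
static inline uint8_t
dp_tx_desc_batch_count_example(struct dp_tx_desc_s *h_desc)
{
	uint8_t count = 0;

	while (h_desc) {
		count++;
		h_desc = h_desc->next;
	}

	return count;
}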

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: ID of the descriptor pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether the given tx descriptor ID is valid
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: TX descriptor ID (cookie) to validate
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of limit */
	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

/**
 * dp_tx_desc_find() - find dp tx descriptor from cookie fields
 * @soc: handle for the device sending the data
 * @pool_id: descriptor pool ID from the cookie
 * @page_id: page index within the pool from the cookie
 * @offset: descriptor index within the page from the cookie
 *
 * Use the unpacked fields of a tx descriptor ID to find the
 * corresponding descriptor object.
 *
 * Return: the descriptor object that has the specified ID fields
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
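
/*
 * Usage sketch (hypothetical, for illustration): resolving a completion
 * cookie to its descriptor by unpacking the ID fields with the masks
 * defined above and handing them to dp_tx_desc_find().
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_from_id_example(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}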

/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: ext descriptor allocated, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to release
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}
/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                           attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor list to be freed
 * @desc_pool_id: ID of the pool to return the descriptors to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head = elem, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(head);

	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: ID of the pool to pick the TSO descriptor from
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: ID of the pool to return the TSO descriptor to
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the
 * soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}
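
/*
 * Usage sketch (hypothetical, for illustration): a TSO segment element is
 * drawn from the per-pool freelist for the lifetime of one send and
 * returned to the same pool on completion.
 */
static inline void
dp_tx_tso_seg_roundtrip_example(struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg;

	tso_seg = dp_tx_tso_desc_alloc(soc, pool_id);
	if (tso_seg)
		dp_tx_tso_desc_free(soc, pool_id, tso_seg);
}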

/**
 * dp_tso_num_seg_alloc() - allocate a TSO num-seg element from the pool
 * @soc: device soc instance
 * @pool_id: ID of the pool to allocate from
 *
 * Return: pointer to the allocated element, or NULL if the freelist is empty
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - return a TSO num-seg element to the pool
 * @soc: device soc instance
 * @pool_id: ID of the pool to return the element to
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif

/*
 * dp_tx_me_alloc_buf() - Allocate descriptor from ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: pointer to a dp_tx_me_buf_t, or NULL if the pool is exhausted
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/*
 * dp_tx_me_free_buf() - Free ME descriptor and add it to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: allocated ME buffer
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
#endif /* DP_TX_DESC_H */