xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h (revision 45a38684b07295822dc8eba39e293408f203eec8)
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/**
 * 21 bits cookie (only the low 17 bits are currently used):
 * 2 bits pool id    0 ~ 3,
 * 10 bits page id   0 ~ 1023
 * 5 bits offset id  0 ~ 31 (desc size = 128, num descs per page = 4096/128 = 32)
 */
/* TODO: confirm whether a ring ID also needs to be encoded in the cookie */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
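
/*
 * Example: composing a descriptor ID from its fields with the masks and
 * shifts above. An illustrative sketch only; the variable names are
 * hypothetical and the real encode sites live in the tx data path.
 *
 *	uint32_t tx_desc_id;
 *
 *	tx_desc_id = (pool_id << DP_TX_DESC_ID_POOL_OS) |
 *		     (page_id << DP_TX_DESC_ID_PAGE_OS) |
 *		     (offset << DP_TX_DESC_ID_OFFSET_OS);
 */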

/**
 * Compile-time assert on the tx descriptor size.
 *
 * If this assert is hit, update DP_TX_DESC_ID_POOL_MASK and
 * DP_TX_DESC_ID_PAGE_MASK to match the updated size.
 *
 * With the current PAGE mask, the allowed size of a tx_desc lies in the
 * range (PAGE_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1),
 * PAGE_SIZE >> DP_TX_DESC_ID_PAGE_OS].
 */
QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
			((sizeof(struct dp_tx_desc_s)) <=
			 (PAGE_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
			((sizeof(struct dp_tx_desc_s)) >
			 (PAGE_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1))));

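/*
 * For example, with 4 KB pages and DP_TX_DESC_ID_PAGE_OS = 5, the assert
 * above bounds sizeof(struct dp_tx_desc_s) to (64, 128] bytes; a
 * 128-byte descriptor then gives 4096 / 128 = 32 descriptors per page,
 * matching the 5-bit offset field of the cookie.
 */
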
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/*
 * Under flow control V2 each flow pool is protected by its own
 * flow_pool_lock (see dp_tx_desc_alloc()/dp_tx_desc_free() below), so
 * the generic pool lock macros compile away to nothing.
 */
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint16_t num_elem);
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				    uint16_t num_elem);
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint16_t num_elem);
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				    uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
					uint16_t num_elem);
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id,
				       uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

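/*
 * Example: the typical lifecycle of a tx descriptor pool, as suggested
 * by the alloc/init/deinit/free split above. A hedged sketch only; the
 * real call sites (and their error handling) live in the soc
 * attach/detach paths.
 *
 *	if (dp_tx_desc_pool_alloc(soc, pool_id, num_elem) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	if (dp_tx_desc_pool_init(soc, pool_id, num_elem) !=
 *	    QDF_STATUS_SUCCESS) {
 *		dp_tx_desc_pool_free(soc, pool_id);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 *	...
 *	dp_tx_desc_pool_deinit(soc, pool_id);
 *	dp_tx_desc_pool_free(soc, pool_id);
 */
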
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *soc);
void dp_tx_flow_control_deinit(struct dp_soc *soc);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get a descriptor from the flow pool
 * @pool: flow pool
 *
 * Caller must hold the pool lock and perform the sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put a descriptor back on the flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller must hold the pool lock and perform the sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

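/*
 * Example: the expected calling pattern for the two helpers above.
 * Illustrative only; dp_tx_desc_alloc()/dp_tx_desc_free() below are the
 * real users and add threshold handling on top.
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc)
 *		tx_desc = dp_tx_get_desc_flow_pool(pool);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 *
 *	...
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	dp_tx_put_desc_flow_pool(pool, tx_desc);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */
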
#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of the TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	/* zero the full threshold arrays, not just FL_TH_MAX bytes */
	qdf_mem_zero(pool->start_th, sizeof(pool->start_th));
	qdf_mem_zero(pool->stop_th, sizeof(pool->stop_th));
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if the available descriptor count
 *				  has hit a stop threshold
 * @pool: flow pool
 * @avail_desc: available descriptor count
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	/*
	 * Equality checks are sufficient here: avail_desc only moves by
	 * one per alloc/free while flow_pool_lock is held, so every
	 * threshold value is observed exactly when it is crossed.
	 */
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a software Tx descriptor from the given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated, or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queues */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      WLAN_DATA_FLOW_CONTROL);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

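/*
 * A worked example of the AC pause ladder above, assuming stop
 * thresholds of stop_th[DP_TH_BE_BK] = 100, [DP_TH_VI] = 75,
 * [DP_TH_VO] = 50 and [DP_TH_HI] = 25 (illustrative values only):
 *
 *	avail_desc == 100 -> BE/BK queues paused
 *	avail_desc ==  75 -> VI queue paused as well
 *	avail_desc ==  50 -> VO queue paused as well
 *	avail_desc ==  25 -> HI priority queue paused; pool fully paused
 *
 * dp_tx_desc_free() below walks the same ladder in reverse using the
 * start_th[] values.
 */
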
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_WAKE_NON_PRIORITY_QUEUE;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is in INACTIVE state!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, WLAN_DATA_FLOW_CONTROL);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a software Tx descriptor from the given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated, or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					      WLAN_STOP_ALL_NETIF_QUEUE,
					      WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(
				soc->hif_handle,
				RTPM_ID_DP_TX_DESC_ALLOC_FREE);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				      WLAN_WAKE_ALL_NETIF_QUEUE,
				      WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			goto out;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is in INACTIVE state!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

out:
	/*
	 * Decrement the PM usage count once the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
	hif_pm_runtime_put(soc->hif_handle,
			   RTPM_ID_DP_TX_DESC_ALLOC_FREE);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
	struct dp_tx_desc_pool_s *pool;

	if (!vdev)
		return false;

	pool = vdev->pool;

	return dp_tx_is_threshold_reached(pool, pool->avail_desc);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a software Tx descriptor from the given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool
 *
 * Return: TX descriptor allocated, or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	tx_desc = pool->freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		return NULL;
	}

	pool->freelist = pool->freelist->next;
	pool->num_allocated++;
	pool->num_free--;

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *				 from the given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the pool to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocates multiple tx descriptors and links them into a list.
 *
 * Return: pointer to the first descriptor of the linked batch, or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	if ((num_requested == 0) ||
			(pool->num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, pool->num_free,
			num_requested);
		return NULL;
	}

	h_desc = pool->freelist;

	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* mark the last descriptor of the batch as allocated too */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	pool->num_free -= num_requested;
	pool->num_allocated += num_requested;
	pool->freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&pool->lock);
	return h_desc;
}

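/*
 * Example: requesting a batch and walking the returned list. A hedged
 * sketch only; num_seg and the consuming loop body are hypothetical.
 *
 *	struct dp_tx_desc_s *desc, *next;
 *
 *	desc = dp_tx_desc_alloc_multiple(soc, desc_pool_id, num_seg);
 *	if (!desc)
 *		return QDF_STATUS_E_NOMEM;
 *	while (desc) {
 *		next = desc->next;
 *		...
 *		desc = next;
 *	}
 */
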
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the descriptor pool
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = NULL;

	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;

	pool = &soc->tx_desc[desc_pool_id];
	TX_DESC_LOCK_LOCK(&pool->lock);
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->num_allocated--;
	pool->num_free++;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}

#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether a tx descriptor ID is valid
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: the tx descriptor ID to validate
 *
 * Return: true if the ID is valid, false otherwise
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* the pool id is out of range (valid ids are 0 .. num_pools - 1) */
	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of range */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of range */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

#ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
	    qdf_likely(allow_fast_comp)) {
		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
	}
}
#else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
}
#endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */

/**
 * dp_tx_desc_find() - find a dp tx descriptor from its cookie fields
 * @soc: handle for the device sending the data
 * @pool_id: descriptor pool ID extracted from the cookie
 * @page_id: descriptor page ID extracted from the cookie
 * @offset: descriptor offset within the page, extracted from the cookie
 *
 * Use the decomposed fields of a tx descriptor ID to find the
 * corresponding descriptor object.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}

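/*
 * Example: resolving a completion cookie back to its descriptor. An
 * illustrative sketch of how the ID masks at the top of this file feed
 * dp_tx_desc_find(); the real lookup lives in the tx completion path.
 *
 *	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *				DP_TX_DESC_ID_POOL_OS;
 *	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *				DP_TX_DESC_ID_PAGE_OS;
 *	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *				DP_TX_DESC_ID_OFFSET_OS;
 *	struct dp_tx_desc_s *tx_desc;
 *
 *	if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
 *		return;
 *	tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 */
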
/**
 * dp_tx_ext_desc_alloc() - Get a tx extension descriptor from the pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool ID
 *
 * Return: ext descriptor pointer, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release a tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool ID
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

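/*
 * Example pairing of the two helpers above (an illustrative sketch;
 * the surrounding error handling is elided):
 *
 *	struct dp_tx_ext_desc_elem_s *ext;
 *
 *	ext = dp_tx_ext_desc_alloc(soc, desc_pool_id);
 *	if (!ext)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	dp_tx_ext_desc_free(soc, ext, desc_pool_id);
 */
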
/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *				    attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor list to be freed
 * @desc_pool_id: ID of the pool to return the descriptors to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: ID of the pool to pick a TSO descriptor from
 *
 * Allocates a TSO segment element from the free list held in
 * the soc.
 *
 * Return: pointer to a TSO segment element, or NULL if none are free
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: ID of the pool to return the TSO descriptor to
 * @tso_seg: TSO segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc.
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tso_num_seg_alloc() - allocate a TSO num-segment element from the pool
 * @soc: device soc instance
 * @pool_id: ID of the pool to pick the element from
 *
 * Return: pointer to a qdf_tso_num_seg_elem_t element, or NULL if none
 * are free
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - return a TSO num-segment element to the pool
 * @soc: device soc instance
 * @pool_id: ID of the pool to return the element to
 * @tso_num_seg: the element to be freed
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif

/*
 * dp_tx_me_alloc_buf() - allocate a descriptor from the ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: pointer to a dp_tx_me_buf_t buffer, or NULL on failure
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/*
 * dp_tx_me_free_buf() - free an ME descriptor and return it to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: the allocated ME buffer to be freed
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
#endif /* DP_TX_DESC_H */