/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/**
 * 21 bits cookie
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* TODO: is a Ring ID needed? */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
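
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * how the masks above compose a descriptor ID from its pool/page/offset
 * fields. dp_tx_is_desc_id_valid() below performs the inverse decode.
 */
static inline uint32_t dp_tx_desc_id_compose_example(uint8_t pool_id,
						     uint16_t page_id,
						     uint16_t offset)
{
	return ((pool_id << DP_TX_DESC_ID_POOL_OS) & DP_TX_DESC_ID_POOL_MASK) |
	       ((page_id << DP_TX_DESC_ID_PAGE_OS) & DP_TX_DESC_ID_PAGE_MASK) |
	       ((offset << DP_TX_DESC_ID_OFFSET_OS) &
		DP_TX_DESC_ID_OFFSET_MASK);
}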

/**
 * Compile-time assert on the tx descriptor size.
 *
 * If this assert is hit, update DP_TX_DESC_ID_POOL_MASK and
 * DP_TX_DESC_ID_PAGE_MASK to match the updated size.
 *
 * With the current PAGE mask, the allowed tx_desc size range is
 * 128 to 256 bytes.
 */
QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
			((sizeof(struct dp_tx_desc_s)) <=
			 (PAGE_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
			((sizeof(struct dp_tx_desc_s)) >
			 (PAGE_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1))));

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint16_t num_elem);
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				    uint16_t num_elem);
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint16_t num_elem);
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				    uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
					uint16_t num_elem);
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id,
				       uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *soc);
void dp_tx_flow_control_deinit(struct dp_soc *soc);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

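/*
 * Minimal usage sketch (illustrative only; the real callers are
 * dp_tx_desc_alloc()/dp_tx_desc_free() below): the locking and sanity
 * checks the two helpers above expect from their caller.
 */
static inline struct dp_tx_desc_s *
dp_tx_flow_pool_get_example(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (pool->avail_desc)		/* freelist is non-empty */
		tx_desc = dp_tx_get_desc_flow_pool(pool);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return tx_desc;
}
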
#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - check if avail_desc has hit a stop threshold
 * @pool: flow pool
 * @avail_desc: available descriptor count
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      WLAN_DATA_FLOW_CONTROL);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_WAKE_NON_PRIORITY_QUEUE;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, WLAN_DATA_FLOW_CONTROL);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(
				soc->hif_handle,
				RTPM_ID_DP_TX_DESC_ALLOC_FREE);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			goto out;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

out:
	/*
	 * Decrement PM usage count if the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
	hif_pm_runtime_put(soc->hif_handle,
			   RTPM_ID_DP_TX_DESC_ALLOC_FREE);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */

/**
 * dp_tx_desc_thresh_reached() - check if a vdev's pool has hit its threshold
 * @soc_hdl: CDP SoC handle
 * @vdev_id: ID of the vdev whose pool is checked
 *
 * Return: true if the pool's stop threshold is reached, false otherwise
 */
static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_tx_desc_pool_s *pool;
	bool status;

	if (!vdev)
		return false;

	pool = vdev->pool;
	status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return status;
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	tx_desc = pool->freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		return NULL;
	}

	pool->freelist = pool->freelist->next;
	pool->num_allocated++;
	pool->num_free--;

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *                               from the given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the pool to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocates multiple tx descriptors and links them into a list.
 *
 * Return: pointer to the first descriptor in the linked batch
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	if ((num_requested == 0) ||
			(pool->num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, pool->num_free,
			num_requested);
		return NULL;
	}

	h_desc = pool->freelist;

	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	/* walk to the last requested descriptor, marking each as allocated */
	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	pool->num_free -= num_requested;
	pool->num_allocated += num_requested;
	pool->freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&pool->lock);
	return h_desc;
}

648 
649 /**
650  * dp_tx_desc_free() - Fee a tx descriptor and attach it to free list
651  *
652  * @soc Handle to DP SoC structure
653  * @pool_id
654  * @tx_desc
655  */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = NULL;

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;

	pool = &soc->tx_desc[desc_pool_id];
	TX_DESC_LOCK_LOCK(&pool->lock);
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->num_allocated--;
	pool->num_free++;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}

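/*
 * Illustrative usage sketch (hypothetical, not part of the driver API):
 * consuming a batch from dp_tx_desc_alloc_multiple(). The batch is a
 * singly linked list whose last element has a NULL ->next; each element
 * must eventually be returned with dp_tx_desc_free().
 */
static inline void dp_tx_desc_batch_example(struct dp_soc *soc,
					    uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *desc, *next;

	desc = dp_tx_desc_alloc_multiple(soc, desc_pool_id, 4);
	if (!desc)
		return;		/* fewer than 4 descriptors were free */

	while (desc) {
		next = desc->next;	/* save; free rewrites ->next */
		/* ... use the descriptor, then return it ... */
		dp_tx_desc_free(soc, desc, desc_pool_id);
		desc = next;
	}
}
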
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether a tx descriptor ID is valid
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: the tx descriptor ID to validate
 *
 * Return: true if the ID is valid, false otherwise
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of range */
	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of range */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of range */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

#ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
/**
 * dp_tx_desc_update_fast_comp_flag() - mark a descriptor eligible for the
 *                                      simple (fast) completion path
 * @soc: Handle to DP SoC structure
 * @desc: tx descriptor to update
 * @allow_fast_comp: non-zero if fast completion is allowed for this frame
 *
 * Return: None
 */
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
	    qdf_likely(allow_fast_comp)) {
		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
	}
}
#else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
}
#endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */

/**
 * dp_tx_desc_find() - find a dp tx descriptor from its cookie components
 * @soc: handle for the device sending the data
 * @pool_id: descriptor pool ID
 * @page_id: page ID within the pool
 * @offset: descriptor index within the page
 *
 * Use the components of a tx descriptor ID to find the corresponding
 * descriptor object.
 *
 * Return: the descriptor object at the given pool/page/offset
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}

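/*
 * Illustrative sketch (hypothetical, not part of the driver API): mapping
 * a completion cookie back to its descriptor with the ID masks and
 * dp_tx_desc_find(), after validating it with dp_tx_is_desc_id_valid().
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_from_id_example(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
		return NULL;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}
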
/**
 * dp_tx_ext_desc_alloc() - Get a tx extension descriptor from the pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: allocated extension descriptor element, or NULL if the pool
 *         is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release a tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: extension descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                                  attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor chain to be freed
 * @desc_pool_id: target pool id
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

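/*
 * Illustrative sketch (hypothetical, not part of the driver API):
 * allocating a two-element extension descriptor chain and returning the
 * whole chain in a single dp_tx_ext_desc_free_multiple() call.
 */
static inline void dp_tx_ext_desc_chain_example(struct dp_soc *soc,
						uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *head, *elem;

	head = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	if (!head)
		return;

	elem = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	if (!elem) {
		dp_tx_ext_desc_free(soc, head, desc_pool_id);
		return;
	}

	head->next = elem;
	elem->next = NULL;

	dp_tx_ext_desc_free_multiple(soc, head, desc_pool_id, 2);
}
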
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - allocate a TSO segment element
 * @soc: device soc instance
 * @pool_id: pool id from which the TSO descriptor is taken
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - free a TSO segment element
 * @soc: device soc instance
 * @pool_id: pool id to which the TSO descriptor is returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the
 * soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tso_num_seg_alloc() - allocate a TSO num-segment element
 * @soc: device soc instance
 * @pool_id: pool id from which the element is taken
 *
 * Return: tso_num_seg element, or NULL if the pool is exhausted
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - return a TSO num-segment element to its pool
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
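
/*
 * Illustrative sketch (hypothetical, not part of the driver API): the
 * alloc/free pairing for TSO segment elements. A segment taken from a
 * pool must be returned to the same pool_id.
 */
static inline void dp_tx_tso_seg_cycle_example(struct dp_soc *soc,
					       uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg;

	tso_seg = dp_tx_tso_desc_alloc(soc, pool_id);
	if (!tso_seg)
		return;		/* pool exhausted */

	/* ... fill in the segment for the hardware here ... */

	dp_tx_tso_desc_free(soc, pool_id, tso_seg);
}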
#endif

/**
 * dp_tx_me_alloc_buf() - allocate a buffer from the ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: pointer to the allocated dp_tx_me_buf_t, or NULL if the
 *         freelist is exhausted
 */
static inline struct dp_tx_me_buf_t *
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/**
 * dp_tx_me_free_buf() - Unmap the buffer holding the dest
 * address, free the me descriptor and add it to the free-pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: allocated ME BUF
 *
 * Return: None
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	/*
	 * If the buf containing the mac address was mapped,
	 * it must be unmapped before freeing the me_buf.
	 * The "paddr_macbuf" member in the me_buf structure
	 * holds the mapped physical address and it must be
	 * set to 0 after unmapping.
	 */
	if (buf->paddr_macbuf) {
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
					    buf->paddr_macbuf,
					    QDF_DMA_TO_DEVICE,
					    QDF_MAC_ADDR_SIZE);
		buf->paddr_macbuf = 0;
	}
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
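
/*
 * Illustrative sketch (hypothetical, not part of the driver API): the
 * alloc/free pairing for ME buffers. dp_tx_me_free_buf() performs the
 * DMA unmap when needed, so callers only release the buffer itself.
 */
static inline void dp_tx_me_buf_cycle_example(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = dp_tx_me_alloc_buf(pdev);

	if (!buf)
		return;		/* freelist exhausted */

	/* ... fill the buffer with the destination MAC and map it ... */

	dp_tx_me_free_buf(pdev, buf);
}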
#endif /* DP_TX_DESC_H */