1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef DP_TX_DESC_H
20 #define DP_TX_DESC_H
21 
22 #include "dp_types.h"
23 #include "dp_tx.h"
24 #include "dp_internal.h"
25 
26 #ifdef TX_PER_PDEV_DESC_POOL
27 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
28 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
29 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
30 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
31 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
32 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
33 #else
34 	#ifdef TX_PER_VDEV_DESC_POOL
35 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
36 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
37 	#endif /* TX_PER_VDEV_DESC_POOL */
38 #endif /* TX_PER_PDEV_DESC_POOL */
39 
/*
 * Tx descriptor ID (cookie) layout; the cookie field is 21 bits, of which
 * the fields below use the lower 17:
 *   bits [16:15] pool id  (0..3)
 *   bits [14:5]  page id  (0..1023)
 *   bits [4:0]   offset   (0..31) (desc size = 128, descs per page = 4096/128 = 32)
 */
/* TODO: decide whether a ring ID field is also needed in the cookie */
47 #define DP_TX_DESC_ID_POOL_MASK    0x018000
48 #define DP_TX_DESC_ID_POOL_OS      15
49 #define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
50 #define DP_TX_DESC_ID_PAGE_OS      5
51 #define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
52 #define DP_TX_DESC_ID_OFFSET_OS    0
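
/*
 * Illustrative sketch (not part of the driver API): how a Tx descriptor ID
 * breaks down into its pool/page/offset fields using the masks above. The
 * local variable names here are hypothetical; the same extraction is done by
 * dp_tx_is_desc_id_valid() and consumed by dp_tx_desc_find() further below.
 *
 *   uint32_t tx_desc_id = ...;   // cookie reported in the Tx completion
 *   uint8_t  pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *                      DP_TX_DESC_ID_POOL_OS;
 *   uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *                      DP_TX_DESC_ID_PAGE_OS;
 *   uint16_t offset  = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *                      DP_TX_DESC_ID_OFFSET_OS;
 */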
53 
54 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
55 #define TX_DESC_LOCK_CREATE(lock)
56 #define TX_DESC_LOCK_DESTROY(lock)
57 #define TX_DESC_LOCK_LOCK(lock)
58 #define TX_DESC_LOCK_UNLOCK(lock)
59 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
60 	((pool)->status == FLOW_POOL_INACTIVE)
61 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
62 do {                                                   \
63 	(_tx_desc_pool)->elem_size = 0;                \
64 	(_tx_desc_pool)->freelist = NULL;              \
65 	(_tx_desc_pool)->pool_size = 0;                \
66 	(_tx_desc_pool)->avail_desc = 0;               \
67 	(_tx_desc_pool)->start_th = 0;                 \
68 	(_tx_desc_pool)->stop_th = 0;                  \
69 	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
70 } while (0)
71 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
72 #define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
73 #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
74 #define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
75 #define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
76 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
77 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
78 do {                                                   \
79 	(_tx_desc_pool)->elem_size = 0;                \
80 	(_tx_desc_pool)->num_allocated = 0;            \
81 	(_tx_desc_pool)->freelist = NULL;              \
82 	(_tx_desc_pool)->elem_count = 0;               \
83 	(_tx_desc_pool)->num_free = 0;                 \
84 } while (0)
85 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
86 #define MAX_POOL_BUFF_COUNT 10000
87 
88 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
89 		uint16_t num_elem);
90 QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
91 QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
92 		uint16_t num_elem);
93 QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
94 QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
95 		uint16_t num_elem);
96 void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
97 QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
98 		uint16_t num_elem);
99 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
100 
101 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
102 void dp_tx_flow_control_init(struct dp_soc *);
103 void dp_tx_flow_control_deinit(struct dp_soc *);
104 
105 QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
106 	tx_pause_callback pause_cb);
107 QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
108 				uint8_t vdev_id);
109 void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
110 			   uint8_t vdev_id);
111 void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
112 struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
113 	uint8_t flow_pool_id, uint16_t flow_pool_size);
114 
115 QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
116 	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
117 void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
118 	uint8_t flow_type, uint8_t flow_pool_id);
119 
120 /**
121  * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
122  * @pool: flow pool
123  *
 * Caller needs to take the lock and do sanity checks.
125  *
126  * Return: tx descriptor
127  */
128 static inline
129 struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
130 {
131 	struct dp_tx_desc_s *tx_desc = pool->freelist;
132 
133 	pool->freelist = pool->freelist->next;
134 	pool->avail_desc--;
135 	return tx_desc;
136 }
137 
/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take the lock and do sanity checks.
 *
 * Return: none
 */
147 static inline
148 void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
149 			struct dp_tx_desc_s *tx_desc)
150 {
151 	tx_desc->next = pool->freelist;
152 	pool->freelist = tx_desc;
153 	pool->avail_desc++;
154 }
155 
156 
/**
 * dp_tx_desc_alloc() - Allocate a software Tx descriptor from the given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool to allocate from
 *
 * Return: pointer to the allocated Tx descriptor, or NULL on failure
 */
165 static inline struct dp_tx_desc_s *
166 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
167 {
168 	struct dp_tx_desc_s *tx_desc = NULL;
169 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
170 
171 	if (pool) {
172 		qdf_spin_lock_bh(&pool->flow_pool_lock);
173 		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
174 		    pool->avail_desc) {
175 			tx_desc = dp_tx_get_desc_flow_pool(pool);
176 			tx_desc->pool_id = desc_pool_id;
177 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
178 			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
179 				pool->status = FLOW_POOL_ACTIVE_PAUSED;
180 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
181 				/* pause network queues */
182 				soc->pause_cb(desc_pool_id,
183 					       WLAN_STOP_ALL_NETIF_QUEUE,
184 					       WLAN_DATA_FLOW_CONTROL);
185 			} else {
186 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
187 			}
188 
189 			/*
190 			 * If one packet is going to be sent, PM usage count
191 			 * needs to be incremented by one to prevent future
192 			 * runtime suspend. This should be tied with the
193 			 * success of allocating one descriptor. It will be
194 			 * decremented after the packet has been sent.
195 			 */
196 			hif_pm_runtime_get_noresume(soc->hif_handle);
197 		} else {
198 			pool->pkt_drop_no_desc++;
199 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
200 		}
201 	} else {
202 		soc->pool_stats.pkt_drop_no_pool++;
203 	}
204 
205 
206 	return tx_desc;
207 }
208 
/**
 * dp_tx_desc_free() - Free a Tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: ID of the descriptor pool the descriptor belongs to
 *
 * Return: None
 */
218 static inline void
219 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
220 		uint8_t desc_pool_id)
221 {
222 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
223 
224 	qdf_spin_lock_bh(&pool->flow_pool_lock);
225 	tx_desc->flags = 0;
226 	dp_tx_put_desc_flow_pool(pool, tx_desc);
227 	switch (pool->status) {
228 	case FLOW_POOL_ACTIVE_PAUSED:
229 		if (pool->avail_desc > pool->start_th) {
230 			soc->pause_cb(pool->flow_pool_id,
231 				       WLAN_WAKE_ALL_NETIF_QUEUE,
232 				       WLAN_DATA_FLOW_CONTROL);
233 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
234 		}
235 		break;
236 	case FLOW_POOL_INVALID:
237 		if (pool->avail_desc == pool->pool_size) {
238 			dp_tx_desc_pool_free(soc, desc_pool_id);
239 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
240 			qdf_print("%s %d pool is freed!!",
241 				  __func__, __LINE__);
242 			goto out;
243 		}
244 		break;
245 
246 	case FLOW_POOL_ACTIVE_UNPAUSED:
247 		break;
248 	default:
249 		qdf_print("%s %d pool is INACTIVE State!!",
250 			  __func__, __LINE__);
251 		break;
	}
253 
254 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
255 
256 out:
	/*
	 * Decrement PM usage count if the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
261 	hif_pm_runtime_put(soc->hif_handle);
262 }
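
/*
 * Illustrative usage sketch (assumption, not a definitive call sequence):
 * with QCA_LL_TX_FLOW_CONTROL_V2 the Tx path typically pairs the two helpers
 * above around a hardware enqueue attempt. Allocation may pause the netif
 * queues via soc->pause_cb() once avail_desc drops below stop_th; freeing
 * wakes them again once avail_desc rises above start_th.
 *
 *   struct dp_tx_desc_s *desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *
 *   if (!desc)
 *           return QDF_STATUS_E_RESOURCES;  // pool exhausted or paused
 *   // ... fill the descriptor and hand the frame to the TCL ring ...
 *   // on Tx completion (or on enqueue failure):
 *   dp_tx_desc_free(soc, desc, desc_pool_id);
 */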
263 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
264 
265 static inline void dp_tx_flow_control_init(struct dp_soc *handle)
266 {
267 }
268 
269 static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
270 {
271 }
272 
273 static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
274 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
275 	uint16_t flow_pool_size)
276 {
277 	return QDF_STATUS_SUCCESS;
278 }
279 
280 static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
281 	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
282 {
283 }
284 
/**
 * dp_tx_desc_alloc() - Allocate a software Tx descriptor from the given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool to allocate from
 *
 * Return: pointer to the allocated Tx descriptor, or NULL if the pool is
 *	   exhausted
 */
293 static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
294 						uint8_t desc_pool_id)
295 {
296 	struct dp_tx_desc_s *tx_desc = NULL;
297 
298 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
299 
300 	tx_desc = soc->tx_desc[desc_pool_id].freelist;
301 
302 	/* Pool is exhausted */
303 	if (!tx_desc) {
304 		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
305 		return NULL;
306 	}
307 
308 	soc->tx_desc[desc_pool_id].freelist =
309 		soc->tx_desc[desc_pool_id].freelist->next;
310 	soc->tx_desc[desc_pool_id].num_allocated++;
311 	soc->tx_desc[desc_pool_id].num_free--;
312 
313 	tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());
314 
315 	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
316 
317 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
318 
319 	return tx_desc;
320 }
321 
/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *                               from the given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the descriptor pool to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocates multiple Tx descriptors and links them into a list.
 *
 * Return: pointer to the first descriptor of the allocated list, or NULL if
 *	   the pool cannot satisfy the request
 */
333 static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
334 		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
335 {
336 	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
337 	uint8_t count;
338 
339 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
340 
341 	if ((num_requested == 0) ||
342 			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
343 		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
344 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
345 			"%s, No Free Desc: Available(%d) num_requested(%d)",
346 			__func__, soc->tx_desc[desc_pool_id].num_free,
347 			num_requested);
348 		return NULL;
349 	}
350 
351 	h_desc = soc->tx_desc[desc_pool_id].freelist;
352 
	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	/* walk the freelist, marking each requested descriptor as allocated */
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* c_desc now points at the last requested descriptor */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;
365 
366 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
367 	return h_desc;
368 }
369 
/**
 * dp_tx_desc_free() - Free a Tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: ID of the descriptor pool the descriptor belongs to
 *
 * Return: None
 */
377 static inline void
378 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
379 		uint8_t desc_pool_id)
380 {
381 	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
382 
383 	tx_desc->flags = 0;
384 	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
385 	soc->tx_desc[desc_pool_id].freelist = tx_desc;
386 	soc->tx_desc[desc_pool_id].num_allocated--;
387 	soc->tx_desc[desc_pool_id].num_free++;
388 
389 
390 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
391 }
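
/*
 * Illustrative sketch (assumption, not a definitive call sequence): the batch
 * variant above hands back num_requested descriptors linked through their
 * ->next pointers; each one is later returned individually with
 * dp_tx_desc_free() when its completion arrives.
 *
 *   struct dp_tx_desc_s *head, *cur;
 *
 *   head = dp_tx_desc_alloc_multiple(soc, desc_pool_id, num_seg);
 *   if (!head)
 *           return QDF_STATUS_E_RESOURCES;
 *   for (cur = head; cur; cur = cur->next) {
 *           // program one segment into each descriptor
 *   }
 */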
392 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
393 
394 #ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether a Tx descriptor ID is valid
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: descriptor ID (cookie) reported in the Tx completion
 *
 * Return: true if the ID decodes to a valid pool/page/offset, false otherwise
 */
403 static inline bool
404 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
405 {
406 	uint8_t pool_id;
407 	uint16_t page_id, offset;
408 	struct dp_tx_desc_pool_s *pool;
409 
410 	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
411 			DP_TX_DESC_ID_POOL_OS;
	/* pool ID out of range (valid pool IDs are 0 .. num_pools - 1) */
	if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
415 		QDF_TRACE(QDF_MODULE_ID_DP,
416 			  QDF_TRACE_LEVEL_FATAL,
417 			  "%s:Tx Comp pool id %d not valid",
418 			  __func__,
419 			  pool_id);
420 		goto warn_exit;
421 	}
422 
423 	pool = &soc->tx_desc[pool_id];
424 	/* the pool is freed */
425 	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
426 		QDF_TRACE(QDF_MODULE_ID_DP,
427 			  QDF_TRACE_LEVEL_FATAL,
428 			  "%s:the pool %d has been freed",
429 			  __func__,
430 			  pool_id);
431 		goto warn_exit;
432 	}
433 
434 	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
435 				DP_TX_DESC_ID_PAGE_OS;
436 	/* the page id is out of limit */
437 	if (page_id >= pool->desc_pages.num_pages) {
438 		QDF_TRACE(QDF_MODULE_ID_DP,
439 			  QDF_TRACE_LEVEL_FATAL,
440 			  "%s:the page id %d invalid, pool id %d, num_page %d",
441 			  __func__,
442 			  page_id,
443 			  pool_id,
444 			  pool->desc_pages.num_pages);
445 		goto warn_exit;
446 	}
447 
448 	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
449 				DP_TX_DESC_ID_OFFSET_OS;
450 	/* the offset is out of limit */
451 	if (offset >= pool->desc_pages.num_element_per_page) {
452 		QDF_TRACE(QDF_MODULE_ID_DP,
453 			  QDF_TRACE_LEVEL_FATAL,
454 			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
455 			  __func__,
456 			  offset,
457 			  pool_id,
458 			  pool->desc_pages.num_element_per_page);
459 		goto warn_exit;
460 	}
461 
462 	return true;
463 
464 warn_exit:
465 	QDF_TRACE(QDF_MODULE_ID_DP,
466 		  QDF_TRACE_LEVEL_FATAL,
467 		  "%s:Tx desc id 0x%x not valid",
468 		  __func__,
469 		  tx_desc_id);
470 	qdf_assert_always(0);
471 	return false;
472 }
473 
474 #else
475 static inline bool
476 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
477 {
478 	return true;
479 }
480 #endif /* QCA_DP_TX_DESC_ID_CHECK */
481 
/**
 * dp_tx_desc_find() - find a DP Tx descriptor from its cookie fields
 * @soc: handle for the device sending the data
 * @pool_id: pool ID extracted from the descriptor ID
 * @page_id: page ID extracted from the descriptor ID
 * @offset: offset within the page extracted from the descriptor ID
 *
 * Use the fields of a Tx descriptor ID (cookie) to locate the corresponding
 * descriptor object.
 *
 * Return: the descriptor object that has the specified ID
 */
491 static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
492 		uint8_t pool_id, uint16_t page_id, uint16_t offset)
493 {
494 	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
495 
496 	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
497 		tx_desc_pool->elem_size * offset;
498 }
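
/*
 * Illustrative sketch (assumption, not a definitive completion handler): a Tx
 * completion typically validates the cookie and then resolves it to the
 * software descriptor using the field extraction shown near the ID masks
 * above.
 *
 *   if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
 *           return;
 *   tx_desc = dp_tx_desc_find(soc,
 *                   (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *                           DP_TX_DESC_ID_POOL_OS,
 *                   (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *                           DP_TX_DESC_ID_PAGE_OS,
 *                   (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *                           DP_TX_DESC_ID_OFFSET_OS);
 */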
499 
/**
 * dp_tx_ext_desc_alloc() - Get a Tx extension descriptor from the pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool ID
 *
 * Return: pointer to a free extension descriptor element, or NULL if the
 *	   pool is exhausted
 */
507 static inline
508 struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
509 		uint8_t desc_pool_id)
510 {
511 	struct dp_tx_ext_desc_elem_s *c_elem;
512 
513 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
514 	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
515 		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
516 		return NULL;
517 	}
518 	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
519 	soc->tx_ext_desc[desc_pool_id].freelist =
520 		soc->tx_ext_desc[desc_pool_id].freelist->next;
521 	soc->tx_ext_desc[desc_pool_id].num_free--;
522 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
523 	return c_elem;
524 }
525 
/**
 * dp_tx_ext_desc_free() - Release a Tx extension descriptor back to the pool
 * @soc: handle for the device sending the data
 * @elem: extension descriptor to be released
 * @desc_pool_id: target pool ID
 *
 * Return: None
 */
534 static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
535 	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
536 {
537 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
538 	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
539 	soc->tx_ext_desc[desc_pool_id].freelist = elem;
540 	soc->tx_ext_desc[desc_pool_id].num_free++;
541 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
542 	return;
543 }
544 
/**
 * dp_tx_ext_desc_free_multiple() - Free multiple Tx extension descriptors and
 *                                  attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor list to be freed
 * @desc_pool_id: target pool ID
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
555 static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
556 		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
557 		uint8_t num_free)
558 {
	struct dp_tx_ext_desc_elem_s *head = elem, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(head);

	c_elem = head;
	tail = head;
568 	while (c_elem && freed) {
569 		tail = c_elem;
570 		c_elem = c_elem->next;
571 		freed--;
572 	}
573 
	/* caller should always guarantee a list of at least num_free nodes */
575 	qdf_assert_always(tail);
576 
577 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
578 	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
579 	soc->tx_ext_desc[desc_pool_id].freelist = head;
580 	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
581 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
582 
583 	return;
584 }
585 
586 #if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - allocate a TSO segment element
 * @soc: device SoC instance
 * @pool_id: pool from which to pick the TSO descriptor
 *
 * Allocates a TSO segment element from the free list held in the soc.
 *
 * Return: tso_seg, TSO segment memory pointer, or NULL if the pool is empty
 */
597 static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
598 		struct dp_soc *soc, uint8_t pool_id)
599 {
600 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
601 
602 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
603 	if (soc->tx_tso_desc[pool_id].freelist) {
604 		soc->tx_tso_desc[pool_id].num_free--;
605 		tso_seg = soc->tx_tso_desc[pool_id].freelist;
606 		soc->tx_tso_desc[pool_id].freelist =
607 			soc->tx_tso_desc[pool_id].freelist->next;
608 	}
609 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
610 
611 	return tso_seg;
612 }
613 
/**
 * dp_tx_tso_desc_free() - free a TSO segment element
 * @soc: device SoC instance
 * @pool_id: pool to which the TSO descriptor is returned
 * @tso_seg: TSO segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc.
 *
 * Return: none
 */
625 static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
626 		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
627 {
628 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
629 	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
630 	soc->tx_tso_desc[pool_id].freelist = tso_seg;
631 	soc->tx_tso_desc[pool_id].num_free++;
632 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
633 }
634 
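/**
 * dp_tso_num_seg_alloc() - allocate a "TSO num segments" element
 * @soc: device SoC instance
 * @pool_id: pool from which to pick the element
 *
 * Allocates a qdf_tso_num_seg_elem_t from the free list held in the soc,
 * mirroring dp_tx_tso_desc_alloc() for per-packet segment-count bookkeeping.
 *
 * Return: pointer to the allocated element, or NULL if the pool is empty
 */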
635 static inline
636 struct qdf_tso_num_seg_elem_t  *dp_tso_num_seg_alloc(struct dp_soc *soc,
637 		uint8_t pool_id)
638 {
639 	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
640 
641 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
642 	if (soc->tx_tso_num_seg[pool_id].freelist) {
643 		soc->tx_tso_num_seg[pool_id].num_free--;
644 		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
645 		soc->tx_tso_num_seg[pool_id].freelist =
646 			soc->tx_tso_num_seg[pool_id].freelist->next;
647 	}
648 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
649 
650 	return tso_num_seg;
651 }
652 
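/**
 * dp_tso_num_seg_free() - return a "TSO num segments" element to the pool
 * @soc: device SoC instance
 * @pool_id: pool to which the element is returned
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */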
653 static inline
654 void dp_tso_num_seg_free(struct dp_soc *soc,
655 		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
656 {
657 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
658 	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
659 	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
660 	soc->tx_tso_num_seg[pool_id].num_free++;
661 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
662 }
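
/*
 * Illustrative sketch (assumption, not a definitive TSO path): when a frame
 * is segmented, one qdf_tso_num_seg_elem_t tracks the packet and one
 * qdf_tso_seg_elem_t is taken per segment; both are returned on completion.
 *
 *   struct qdf_tso_num_seg_elem_t *num_seg = dp_tso_num_seg_alloc(soc, pool_id);
 *   struct qdf_tso_seg_elem_t *seg = dp_tx_tso_desc_alloc(soc, pool_id);
 *
 *   if (!num_seg || !seg)
 *           goto fail;   // return whichever element was taken and bail out
 *   // ... fill the segment(s) and transmit ...
 *   dp_tx_tso_desc_free(soc, pool_id, seg);
 *   dp_tso_num_seg_free(soc, pool_id, num_seg);
 */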
663 #endif
664 
/**
 * dp_tx_me_alloc_buf() - Allocate an ME buffer descriptor from the pdev ME pool
 * @pdev: DP_PDEV handle for the datapath
 *
 * Return: pointer to the allocated dp_tx_me_buf_t, or NULL if the pool is empty
 */
671 static inline struct dp_tx_me_buf_t*
672 dp_tx_me_alloc_buf(struct dp_pdev *pdev)
673 {
674 	struct dp_tx_me_buf_t *buf = NULL;
675 	qdf_spin_lock_bh(&pdev->tx_mutex);
676 	if (pdev->me_buf.freelist) {
677 		buf = pdev->me_buf.freelist;
678 		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
679 		pdev->me_buf.buf_in_use++;
680 	} else {
681 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
682 				"Error allocating memory in pool");
683 		qdf_spin_unlock_bh(&pdev->tx_mutex);
684 		return NULL;
685 	}
686 	qdf_spin_unlock_bh(&pdev->tx_mutex);
687 	return buf;
688 }
689 
/**
 * dp_tx_me_free_buf() - Free an ME buffer descriptor and return it to the pool
 * @pdev: DP_PDEV handle for the datapath
 * @buf: allocated ME buffer
 *
 * Return: none
 */
697 static inline void
698 dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
699 {
700 	qdf_spin_lock_bh(&pdev->tx_mutex);
701 	buf->next = pdev->me_buf.freelist;
702 	pdev->me_buf.freelist = buf;
703 	pdev->me_buf.buf_in_use--;
704 	qdf_spin_unlock_bh(&pdev->tx_mutex);
705 }
706 #endif /* DP_TX_DESC_H */
707