/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      (DP_BLOCKMEM_SIZE /
		       sizeof(union dp_rx_desc_list_elem_t))
		      <= (1 << DP_RX_DESC_PAGE_ID_SHIFT));

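/*
 * The assertion above guarantees that the offset portion of an rx
 * descriptor cookie (the low DP_RX_DESC_PAGE_ID_SHIFT bits) is wide
 * enough to address every descriptor within one DP_BLOCKMEM_SIZE page.
 * A worked example with illustrative values (not taken from this tree):
 * with DP_BLOCKMEM_SIZE = 4096, a 64-byte descriptor element and
 * DP_RX_DESC_PAGE_ID_SHIFT = 8, a page holds 4096 / 64 = 64 elements,
 * and 64 <= (1 << 8) = 256, so an 8-bit offset field suffices.
 */
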
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 *		       QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	rx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

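/*
 * A minimal lifecycle sketch for this pool API. The calling context
 * (soc, pool_id, num_elem, rx_desc_pool) is hypothetical; only the
 * dp_rx_desc_* calls are defined in this file:
 *
 *	if (dp_rx_desc_pool_alloc(soc, num_elem, rx_desc_pool) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	dp_rx_desc_pool_init(soc, pool_id, num_elem, rx_desc_pool);
 *
 * and teardown mirrors setup in reverse order:
 *
 *	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
 *	dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);
 *	dp_rx_desc_pool_free(soc, rx_desc_pool);
 */
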
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * The cookie below is sized from the REO destination ring:
		 * reo_destination_ring -> buffer_addr_info ->
		 * sw_buffer_cookie is 21 bits wide, of which 20 are used:
		 * 8 bits - offset within the page
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	return QDF_STATUS_SUCCESS;
}

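/*
 * A worked cookie example for the layout documented above, assuming
 * DP_RX_DESC_POOL_ID_SHIFT = 16 and DP_RX_DESC_PAGE_ID_SHIFT = 8
 * (illustrative values; the real shifts come from the DP headers):
 *
 *	pool_id = 2, page_id = 3, offset = 5
 *	id = (2 << 16) | (3 << 8) | 5 = 0x20305
 *
 * and decoding reverses the shifts with masks of the field widths:
 *
 *	pool_id = (id >> 16) & 0xF;
 *	page_id = (id >> 8) & 0xFF;
 *	offset  = id & 0xFF;
 */
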
/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			convert the pool of memory into a list of
 *			rx descriptors and create locks to access this
 *			list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* the first element of the first cacheable page heads the freelist */
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

/*
 * dp_rx_desc_find() - find the software rx descriptor at a given
 *			(page, offset) location within the pool
 *
 * @page_id: index of the page holding the descriptor
 * @offset: index of the descriptor within that page
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: pointer to the rx descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

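/*
 * A sketch of how a cookie read back from the REO ring resolves to its
 * software descriptor through dp_rx_desc_find(). The numeric shifts and
 * masks mirror the layout documented in dp_rx_desc_pool_init_generic()
 * and are written out here only for illustration:
 *
 *	page_id = (cookie >> 8) & 0xFF;
 *	offset = cookie & 0xFF;
 *	rx_desc = &dp_rx_desc_find(page_id, offset, rx_desc_pool)->rx_desc;
 */
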
static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
					  struct rx_desc_pool *rx_desc_pool,
					  qdf_nbuf_t *nbuf_unmap_list,
					  qdf_nbuf_t *nbuf_free_list)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			if (!rx_desc->unmapped) {
				/* still DMA mapped: unmap before freeing */
				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
						  rx_desc->nbuf);
				rx_desc->unmapped = 1;
			} else {
				DP_RX_HEAD_APPEND(*nbuf_free_list,
						  rx_desc->nbuf);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size)
{
	qdf_nbuf_t nbuf = nbuf_unmap_list;
	qdf_nbuf_t next;

	while (nbuf) {
		next = nbuf->next;

		if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
						      false, __func__,
						      __LINE__))
			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}

	nbuf = nbuf_free_list;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}
}

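/*
 * Note the two-phase teardown built from the two helpers above:
 * dp_rx_desc_nbuf_collect() walks the pool under the pool spinlock and
 * only links the in-use nbufs onto local lists, while the potentially
 * slow per-buffer work (IPA/SMMU unmap, DMA unmap, free) is done by
 * dp_rx_desc_nbuf_cleanup() after the lock is dropped. This keeps the
 * BH-disabling lock hold time bounded no matter how many buffers are
 * still outstanding.
 */
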
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;

	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				     sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id)
{
	int i;

	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		/* cookie: array index in the low 18 bits, pool id above */
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}
	return QDF_STATUS_SUCCESS;
}

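/*
 * In this single-array build the cookie is simply the array index with
 * the pool id placed above it. A worked example with pool_id = 1 and
 * i = 100 (the decode masks below follow from the 18-bit shift and are
 * shown for illustration only):
 *
 *	cookie = 100 | (1 << 18) = 0x40064
 *	pool_id = cookie >> 18;		(= 1)
 *	index = cookie & 0x3FFFF;	(= 100)
 */
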
/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			convert the pool of memory into a list of
 *			rx descriptors and create locks to access this
 *			list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist; clear the whole array first */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	qdf_mem_zero(rx_desc_pool->freelist,
		     rx_desc_pool->pool_size *
		     sizeof(union dp_rx_desc_list_elem_t));

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

			dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t pool_id)
{
	/* intentionally a no-op */
}

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descriptors requested from the freelist
 * @desc_list: head of the descriptor list carved from the freelist
 *	       (output parameter)
 * @tail: pointer to the last descriptor of that list (output parameter)
 *
 * Return: number of descriptors allocated from the free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			/* freelist exhausted: hand back what was gathered */
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);

/*
 * dp_rx_add_desc_list_to_free_list() - append an unused desc list back to
 *					the freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	/* splice the caller's list in front of the current freelist */
	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);
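
/*
 * A minimal replenish-loop sketch tying the two freelist APIs together.
 * The caller-side variables (soc, pool_id, rx_desc_pool, num_req) are
 * hypothetical; only the two dp_rx_* calls are defined in this file:
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint16_t got;
 *
 *	got = dp_rx_get_free_desc_list(soc, pool_id, rx_desc_pool,
 *				       num_req, &desc_list, &tail);
 *
 * After attaching 'got' buffers to hardware, any descriptors left on
 * desc_list are handed back:
 *
 *	if (desc_list)
 *		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *						 pool_id, rx_desc_pool);
 */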
600