xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *					rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 *		       QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;

	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

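/**
 * dp_rx_desc_pool_init_generic() - Generic implementation of the per-arch
 *			rx descriptor pool init hook: walk the pre-linked
 *			freelist and program each descriptor's cookie
 *			(pool id, page id, offset), pool id and in_use state
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 * @pool_id: pool_id which is one of 3 mac_ids
 *
 * Return: QDF_STATUS_SUCCESS
 */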
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
				  struct rx_desc_pool *rx_desc_pool,
				  uint32_t pool_id)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * Below cookie size is from REO destination ring
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			convert the pool of memory into a list of
 *			rx descriptors and create locks to access this
 *			list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

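/**
 * dp_rx_desc_find() - Locate an rx descriptor element by its page id and
 *			offset within the multi-page descriptor pool
 *
 * @page_id: index of the descriptor page
 * @offset: index of the descriptor element within that page
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: pointer to the rx descriptor element
 */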
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

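/**
 * dp_rx_desc_nbuf_collect() - Scan the descriptor pool and gather the nbufs
 *			still held by in-use descriptors; nbufs that are
 *			still mapped are queued on @nbuf_unmap_list,
 *			already-unmapped ones on @nbuf_free_list
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 * @nbuf_unmap_list: output list of nbufs that still need to be unmapped
 * @nbuf_free_list: output list of nbufs that only need to be freed
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if the pool has no pages
 */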
static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
					  struct rx_desc_pool *rx_desc_pool,
					  qdf_nbuf_t *nbuf_unmap_list,
					  qdf_nbuf_t *nbuf_free_list)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			if (!rx_desc->unmapped) {
				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
						  rx_desc->nbuf);
				rx_desc->unmapped = 1;
			} else {
				DP_RX_HEAD_APPEND(*nbuf_free_list,
						  rx_desc->nbuf);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}

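/**
 * dp_rx_desc_nbuf_cleanup() - Unmap (SMMU and DMA) and free the nbufs on
 *			@nbuf_unmap_list, then free the nbufs on
 *			@nbuf_free_list
 *
 * @soc: core txrx main context
 * @nbuf_unmap_list: nbufs to unmap and free
 * @nbuf_free_list: nbufs to free without unmapping
 * @buf_size: rx buffer size used for the unmap operations
 *
 * Return: None
 */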
static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size)
{
	qdf_nbuf_t nbuf = nbuf_unmap_list;
	qdf_nbuf_t next;

	while (nbuf) {
		next = nbuf->next;
		if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
						      false))
			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}

	nbuf = nbuf_free_list;
	while (nbuf) {
		next = nbuf->next;
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
}

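/**
 * dp_rx_desc_nbuf_and_pool_free() - Free the nbufs still attached to in-use
 *			rx descriptors and destroy the pool lock
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */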
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

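/**
 * dp_rx_desc_nbuf_free() - Free the nbufs still attached to in-use rx
 *			descriptors, leaving the pool memory and lock intact
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */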
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

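/**
 * dp_rx_desc_pool_free() - Free the multi-page memory backing the rx
 *			descriptor pool
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */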
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;

	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

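/**
 * dp_rx_desc_pool_deinit() - De-initialize the rx descriptor pool: reset the
 *			freelist and pool size, invoke the arch-specific
 *			deinit hook and destroy the pool lock
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 * @pool_id: pool_id which is one of 3 mac_ids
 *
 * Return: None
 */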
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon dest frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *					rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				     sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

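/**
 * dp_rx_desc_pool_init_generic() - Generic implementation of the per-arch
 *			rx descriptor pool init hook: link the descriptor
 *			array into a freelist and program each descriptor's
 *			cookie, pool id and in_use state
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 * @pool_id: pool_id which is one of 3 mac_ids
 *
 * Return: QDF_STATUS_SUCCESS
 */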
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
				  struct rx_desc_pool *rx_desc_pool,
				  uint32_t pool_id)
{
	int i;

	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			convert the pool of memory into a list of
 *			rx descriptors and create locks to access this
 *			list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	qdf_mem_zero(rx_desc_pool->freelist,
		     rx_desc_pool->pool_size *
		     sizeof(union dp_rx_desc_list_elem_t));

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

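/**
 * dp_rx_desc_nbuf_and_pool_free() - Unmap and free the nbufs still attached
 *			to in-use rx descriptors, free the descriptor array
 *			and destroy the pool lock
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */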
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc, nbuf,
							rx_desc_pool->buf_size,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev, nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

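/**
 * dp_rx_desc_nbuf_free() - Unmap and free the nbufs still attached to in-use
 *			rx descriptors, leaving the descriptor array allocated
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */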
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(
						soc, nbuf,
						rx_desc_pool->buf_size,
						false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev, nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif

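/**
 * dp_rx_desc_pool_free() - Free the rx descriptor array backing the pool
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */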
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

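/**
 * dp_rx_desc_pool_deinit() - De-initialize the rx descriptor pool: reset the
 *			freelist and pool size, invoke the arch-specific
 *			deinit hook and destroy the pool lock
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 * @pool_id: pool_id which is one of 3 mac_ids
 *
 * Return: None
 */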
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon dest frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

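/**
 * dp_rx_desc_pool_deinit_generic() - Generic implementation of the per-arch
 *			rx descriptor pool deinit hook; no per-descriptor
 *			work is needed here, so this is a no-op
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 * @pool_id: pool_id which is one of 3 mac_ids
 *
 * Return: None
 */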
void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: pointer to the last desc of the free list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);

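/*
 * Illustrative usage (sketch only, not taken from this file): a ring
 * replenish path would typically reserve descriptors from the freelist and
 * hand back whatever it could not consume. num_req and mac_id below are
 * placeholder values supplied by the caller:
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint16_t num_alloc;
 *
 *	num_alloc = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
 *					     num_req, &desc_list, &tail);
 *	... attach nbufs to the num_alloc reserved descriptors ...
 *	if any reserved descriptors were left unused:
 *		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *						 mac_id, rx_desc_pool);
 */
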
/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *					freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
	"temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
	temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);
609