/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      (DP_BLOCKMEM_SIZE /
		       sizeof(union dp_rx_desc_list_elem_t))
		      <= (1 << DP_RX_DESC_PAGE_ID_SHIFT));
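
/*
 * Editor's note (illustrative, not driver code): the compile-time assert
 * above guarantees that the number of descriptor elements per block fits
 * in the low DP_RX_DESC_PAGE_ID_SHIFT bits of the cookie (the "offset"
 * field). For example, with a hypothetical 4096-byte DP_BLOCKMEM_SIZE and
 * a 128-byte list element, a block holds 32 elements, and
 * 32 <= (1 << 8) = 256 holds with room to spare.
 */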

/**
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM if no pages were allocated
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/**
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM or QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	rx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);
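
/*
 * Example (illustrative sketch, not part of the driver): a typical caller
 * allocates the pool and then verifies it before use; "num_elem" and the
 * error handling here are placeholders.
 *
 *	if (!QDF_IS_STATUS_SUCCESS(dp_rx_desc_pool_alloc(soc, num_elem,
 *							 rx_desc_pool)))
 *		return QDF_STATUS_E_NOMEM;
 *	if (!QDF_IS_STATUS_SUCCESS(dp_rx_desc_pool_is_allocated(rx_desc_pool)))
 *		return QDF_STATUS_E_NOMEM;
 */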

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
				  struct rx_desc_pool *rx_desc_pool,
				  uint32_t pool_id)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * The cookie below lands in the sw_buffer_cookie field of
		 * the REO destination ring
		 * (reo_destination_ring -> buffer_addr_info ->
		 *  sw_buffer_cookie), which is 21 bits wide.
		 * 20 of those bits are used here:
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	return QDF_STATUS_SUCCESS;
}
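
/*
 * Example (illustrative sketch): the fields packed into the cookie above
 * can be recovered by masking and shifting; the literal field widths below
 * follow the comment above and are assumptions for illustration, not
 * necessarily the helper macros used by the rest of the driver.
 *
 *	pool_id = (cookie >> DP_RX_DESC_POOL_ID_SHIFT) & 0xF;
 *	page_id = (cookie >> DP_RX_DESC_PAGE_ID_SHIFT) & 0xFF;
 *	offset  = cookie & ((1 << DP_RX_DESC_PAGE_ID_SHIFT) - 1);
 */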

/**
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			    convert the pool of memory into a list of
 *			    rx descriptors and create the lock used to
 *			    access this list.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);
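
/*
 * Example (illustrative sketch): the expected pool lifecycle from a
 * caller's point of view; the attach/detach context markers are
 * hypothetical.
 *
 *	dp_rx_desc_pool_alloc(soc, num_elem, rx_desc_pool);      (attach)
 *	dp_rx_desc_pool_init(soc, pool_id, num_elem, rx_desc_pool);
 *	...the RX path takes and returns descriptors...
 *	dp_rx_desc_nbuf_free(soc, rx_desc_pool, false);          (detach)
 *	dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);
 *	dp_rx_desc_pool_free(soc, rx_desc_pool);
 */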

union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}
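
/*
 * Example (illustrative sketch): dp_rx_desc_find() is fed a page ID and
 * offset, typically derived from a linear descriptor index as done in
 * dp_rx_desc_nbuf_collect() below:
 *
 *	page_id = i / num_desc_per_page;
 *	offset = i % num_desc_per_page;
 *	rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
 *	rx_desc = &rx_desc_elem->rx_desc;
 */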

static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
					  struct rx_desc_pool *rx_desc_pool,
					  qdf_nbuf_t *nbuf_unmap_list,
					  qdf_nbuf_t *nbuf_free_list)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found in this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			if (!rx_desc->unmapped) {
				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
						  rx_desc->nbuf);
				rx_desc->unmapped = 1;
			} else {
				DP_RX_HEAD_APPEND(*nbuf_free_list,
						  rx_desc->nbuf);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size,
				    bool is_mon_pool)
{
	qdf_nbuf_t nbuf = nbuf_unmap_list;
	qdf_nbuf_t next;

	while (nbuf) {
		next = nbuf->next;

		if (!is_mon_pool)
			dp_audio_smmu_unmap(soc->osdev,
					    QDF_NBUF_CB_PADDR(nbuf),
					    buf_size);

		if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
						      false, __func__,
						      __LINE__))
			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}

	nbuf = nbuf_free_list;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size, false);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size, is_mon_pool);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);
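
/*
 * Example (illustrative sketch): teardown order for a multi-page pool;
 * dp_rx_desc_nbuf_and_pool_free() above performs the first step plus the
 * lock destruction in one call for non-monitor pools.
 *
 *	dp_rx_desc_nbuf_free(soc, rx_desc_pool, is_mon_pool);
 *	dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);
 *	dp_rx_desc_pool_free(soc, rx_desc_pool);
 */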

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;

	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
#else
/**
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM if the pool array was never allocated
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/**
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM on allocation failure
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				     sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
				  struct rx_desc_pool *rx_desc_pool,
				  uint32_t pool_id)
{
	int i;

	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			    convert the pool of memory into a list of
 *			    rx descriptors and create the lock used to
 *			    access this list.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	/* zero the whole descriptor array, not just pool_size bytes */
	qdf_mem_zero(rx_desc_pool->freelist,
		     rx_desc_pool->pool_size *
		     sizeof(union dp_rx_desc_list_elem_t));

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

			dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

/**
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: pointer to the last desc taken from the freelist
 *	  (output parameter)
 *
 * Return: number of descs allocated from the free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);
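
/*
 * Example (illustrative sketch): reserving descriptors for a replenish;
 * "head", "tail" and "num_req" are local placeholders.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *	uint16_t got;
 *
 *	got = dp_rx_get_free_desc_list(soc, pool_id, rx_desc_pool,
 *				       num_req, &head, &tail);
 *	if (!got)
 *		return;	<-- freelist exhausted, nothing reserved
 */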

/**
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *					the freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK, (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);
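
/*
 * Example (illustrative sketch, continuing the one above): descriptors
 * left over after a partial replenish are handed back so the freelist is
 * not leaked; "head"/"tail" are the same placeholders as before.
 *
 *	if (head)
 *		dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
 *						 pool_id, rx_desc_pool);
 */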
608