/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      (DP_BLOCKMEM_SIZE /
		       sizeof(union dp_rx_desc_list_elem_t))
		      <= (1 << DP_RX_DESC_PAGE_ID_SHIFT));

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 *		       QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	rx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * The cookie format below matches the REO destination ring
		 * field reo_destination_ring -> buffer_addr_info ->
		 * sw_buffer_cookie, which is 21 bits wide; 20 of those
		 * bits are used here:
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	return QDF_STATUS_SUCCESS;
}
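
/*
 * Illustrative sketch, not part of the driver (compiled out behind an
 * example-only guard): given the 8/8/4 cookie layout documented above,
 * a cookie can be decoded back into its pool ID, page ID and offset
 * using the same shift constants. The mask values are assumptions
 * derived from the field widths in the comment, not taken from the
 * headers.
 */
#ifdef DP_RX_DESC_COOKIE_DECODE_EXAMPLE
static inline void dp_rx_desc_cookie_decode_example(uint32_t cookie,
						    uint32_t *pool_id,
						    uint32_t *page_id,
						    uint32_t *offset)
{
	*pool_id = (cookie >> DP_RX_DESC_POOL_ID_SHIFT) & 0xF;	/* 4 bits */
	*page_id = (cookie >> DP_RX_DESC_PAGE_ID_SHIFT) & 0xFF;	/* 8 bits */
	*offset = cookie & 0xFF;				/* 8 bits */
}
#endif /* DP_RX_DESC_COOKIE_DECODE_EXAMPLE */
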
/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			    convert the pool of memory into a list of
 *			    rx descriptors and create locks to access this
 *			    list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);
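
/*
 * Illustrative sketch, not part of the driver (compiled out behind an
 * example-only guard): the expected bring-up order for a pool is
 * alloc -> init, with dp_rx_desc_pool_is_allocated() as an optional
 * sanity check in between. The wrapper function below and its element
 * count are assumptions for illustration only.
 */
#ifdef DP_RX_DESC_POOL_SETUP_EXAMPLE
static QDF_STATUS dp_rx_desc_pool_setup_example(struct dp_soc *soc,
						uint32_t pool_id,
						uint32_t num_elem,
						struct rx_desc_pool *pool)
{
	QDF_STATUS status;

	/* reserve multi-page backing memory for num_elem descriptors */
	status = dp_rx_desc_pool_alloc(soc, num_elem, pool);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	/* optional: confirm the pages really were allocated */
	if (!QDF_IS_STATUS_SUCCESS(dp_rx_desc_pool_is_allocated(pool))) {
		dp_rx_desc_pool_free(soc, pool);
		return QDF_STATUS_E_NOMEM;
	}

	/* link the pages into a freelist and stamp cookies and pool IDs */
	dp_rx_desc_pool_init(soc, pool_id, num_elem, pool);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_RX_DESC_POOL_SETUP_EXAMPLE */
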
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
					  struct rx_desc_pool *rx_desc_pool,
					  qdf_nbuf_t *nbuf_unmap_list,
					  qdf_nbuf_t *nbuf_free_list)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			if (!rx_desc->unmapped) {
				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
						  rx_desc->nbuf);
				rx_desc->unmapped = 1;
			} else {
				DP_RX_HEAD_APPEND(*nbuf_free_list,
						  rx_desc->nbuf);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size)
{
	qdf_nbuf_t nbuf = nbuf_unmap_list;
	qdf_nbuf_t next;

	while (nbuf) {
		next = nbuf->next;
		if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
						      false))
			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}

	nbuf = nbuf_free_list;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;

	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
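
/*
 * Illustrative sketch, not part of the driver (compiled out behind an
 * example-only guard): the matching teardown order for a live pool is
 * nbuf free -> deinit -> pool free. dp_rx_desc_nbuf_and_pool_free()
 * combines the nbuf release and lock destruction for callers that do
 * not need the intermediate steps. The wrapper function below is an
 * assumption for illustration only.
 */
#ifdef DP_RX_DESC_POOL_TEARDOWN_EXAMPLE
static void dp_rx_desc_pool_teardown_example(struct dp_soc *soc,
					     uint32_t pool_id,
					     struct rx_desc_pool *pool)
{
	/* unmap and free any nbufs still attached to in-use descriptors */
	dp_rx_desc_nbuf_free(soc, pool);
	/* drop the freelist, reset pool state and destroy the lock */
	dp_rx_desc_pool_deinit(soc, pool, pool_id);
	/* release the multi-page descriptor memory itself */
	dp_rx_desc_pool_free(soc, pool);
}
#endif /* DP_RX_DESC_POOL_TEARDOWN_EXAMPLE */
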
#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 *		       QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id)
{
	int i;

	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}
	return QDF_STATUS_SUCCESS;
}
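
/*
 * Illustrative sketch, not part of the driver (compiled out behind an
 * example-only guard): in this single-array layout the cookie is just
 * the array index with the pool ID packed above bit 18, so it decodes
 * with one shift and one mask. The mask is an assumption derived from
 * the shift used above, not a value taken from the headers.
 */
#ifdef DP_RX_DESC_FLAT_COOKIE_EXAMPLE
static inline void dp_rx_flat_cookie_decode_example(uint32_t cookie,
						    uint32_t *pool_id,
						    uint32_t *index)
{
	*pool_id = cookie >> 18;		/* pool ID above bit 18 */
	*index = cookie & ((1U << 18) - 1);	/* array index in low bits */
}
#endif /* DP_RX_DESC_FLAT_COOKIE_EXAMPLE */
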
/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			    convert the pool of memory into a list of
 *			    rx descriptors and create locks to access this
 *			    list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	qdf_mem_zero(rx_desc_pool->freelist, rx_desc_pool->pool_size);

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

			dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t pool_id)
{
}

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: attach the point to last desc of free list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);
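
/*
 * Illustrative sketch, not part of the driver (compiled out behind an
 * example-only guard): dp_rx_get_free_desc_list() may return fewer
 * descriptors than requested once the freelist runs dry, so callers
 * must use the returned count, not the requested one. The wrapper
 * function below is an assumption for illustration only.
 */
#ifdef DP_RX_DESC_TAKE_EXAMPLE
static uint16_t dp_rx_desc_take_example(struct dp_soc *soc, uint32_t pool_id,
					struct rx_desc_pool *pool,
					uint16_t wanted)
{
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint16_t got;

	got = dp_rx_get_free_desc_list(soc, pool_id, pool, wanted,
				       &desc_list, &tail);
	/* got may be < wanted; desc_list then holds exactly got entries */
	return got;
}
#endif /* DP_RX_DESC_TAKE_EXAMPLE */
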
/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *					freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: attach the point to last desc of local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
				      union dp_rx_desc_list_elem_t **tail,
				      uint16_t pool_id,
				      struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);
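
/*
 * Illustrative sketch, not part of the driver (compiled out behind an
 * example-only guard): a replenish path that could not use all of the
 * descriptors it took hands the leftover list back with
 * dp_rx_add_desc_list_to_free_list(). The API expects *tail to point
 * at the last element of the local list; the wrapper below derives it
 * by walking the list and is an assumption for illustration only.
 */
#ifdef DP_RX_DESC_PUT_BACK_EXAMPLE
static void dp_rx_desc_put_back_example(struct dp_soc *soc, uint16_t pool_id,
					struct rx_desc_pool *pool,
					union dp_rx_desc_list_elem_t *unused_list)
{
	union dp_rx_desc_list_elem_t *tail = unused_list;

	if (!unused_list)
		return;

	/* find the last element; the freelist is linked after (*tail) */
	while (tail->next)
		tail = tail->next;

	dp_rx_add_desc_list_to_free_list(soc, &unused_list, &tail,
					 pool_id, pool);
}
#endif /* DP_RX_DESC_PUT_BACK_EXAMPLE */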