/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/*
 * The descriptor cookie encodes (pool_id | page_id | offset).  Every
 * element of a page must be addressable within DP_RX_DESC_PAGE_ID_SHIFT
 * bits of offset, so the element count per DP_BLOCKMEM_SIZE page must
 * not exceed that range.  Enforced at compile time.
 */
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      (DP_BLOCKMEM_SIZE /
		       sizeof(union dp_rx_desc_list_elem_t))
		      <= (1 << DP_RX_DESC_PAGE_ID_SHIFT));

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS if the multi-page backing store exists,
 *	   QDF_STATUS_E_NOMEM otherwise (i.e. num_pages is zero).
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - allocate the multi-page memory backing a rx
 *			     descriptor pool and link the pages
 * @soc: data path SoC handle
 * @num_elem: number of rx descriptors to allocate
 * @rx_desc_pool: rx descriptor pool to populate
 *
 * Pages of DP_BLOCKMEM_SIZE are allocated and then chained into a
 * freelist via qdf_mem_multi_page_link().  On link failure the pages
 * are released again before returning.
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM if page allocation failed,
 *	   QDF_STATUS_E_FAULT if linking the pages overflowed.
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	rx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	/* allocation failure is reported via num_pages == 0 */
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail,size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link,size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	/* undo the page allocation done above on link failure */
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

/*
 * dp_rx_desc_pool_init_generic() - walk the page-linked freelist and
 *				    stamp each descriptor with its cookie
 * @soc: data path SoC handle (unused here, kept for arch-ops signature)
 * @rx_desc_pool: rx descriptor pool whose freelist has been linked
 * @pool_id: id of this pool, encoded into every cookie
 *
 * Return: QDF_STATUS_SUCCESS always.
 */
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
				  struct rx_desc_pool *rx_desc_pool,
				  uint32_t pool_id)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		/* freelist order matches page layout, so the running
		 * count maps directly to (page_id, offset)
		 */
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * Below cookie size is from REO destination ring
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - initialize an allocated rx descriptor pool
 * @soc: data path SoC handle
 * @pool_id: id of the pool being initialized
 * @pool_size: number of descriptors in the pool
 * @rx_desc_pool: rx descriptor pool to initialize
 *
 * Creates the pool lock, points the freelist at the first cacheable
 * page, and delegates per-descriptor setup to the arch-specific
 * dp_rx_desc_pool_init op (typically dp_rx_desc_pool_init_generic).
 * A failure of the arch op is logged but not propagated (void return).
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* freelist head is the first element of the first page */
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

/*
 * dp_rx_desc_find() - compute the address of a descriptor element from
 *		       its (page_id, offset) cookie components
 * @page_id: page index within the pool
 * @offset: element index within that page
 * @rx_desc_pool: rx descriptor pool to look in
 *
 * Return: pointer to the descriptor list element; no bounds checking is
 *	   done here, callers must pass valid cookie components.
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

/*
 * dp_rx_desc_nbuf_collect() - gather the nbufs still attached to in-use
 *			       descriptors into two lists
 * @soc: data path SoC handle (unused here)
 * @rx_desc_pool: rx descriptor pool to sweep
 * @nbuf_unmap_list: out, nbufs that still need DMA unmapping
 * @nbuf_free_list: out, nbufs that were already unmapped and only
 *		    need to be freed
 *
 * Must be called with the pool lock held; marks each collected
 * descriptor as unmapped so a second sweep will not unmap it again.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL when the pool has
 *	   no backing pages.
 */
static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
					  struct rx_desc_pool *rx_desc_pool,
					  qdf_nbuf_t *nbuf_unmap_list,
					  qdf_nbuf_t *nbuf_free_list)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			if (!rx_desc->unmapped) {
				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
						  rx_desc->nbuf);
				rx_desc->unmapped = 1;
			} else {
				DP_RX_HEAD_APPEND(*nbuf_free_list,
						  rx_desc->nbuf);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_nbuf_cleanup() - unmap and free the nbufs collected by
 *			       dp_rx_desc_nbuf_collect()
 * @soc: data path SoC handle
 * @nbuf_unmap_list: nbufs to SMMU/DMA-unmap before freeing
 * @nbuf_free_list: nbufs to free directly
 * @buf_size: rx buffer size used for the unmap length
 * @is_mon_pool: true for monitor pools; monitor buffers skip the audio
 *		 SMMU unmap
 *
 * Runs outside the pool lock so the (potentially slow) unmap calls do
 * not extend the lock hold time.
 */
static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size,
				    bool is_mon_pool)
{
	qdf_nbuf_t nbuf = nbuf_unmap_list;
	qdf_nbuf_t next;

	while (nbuf) {
		next = nbuf->next;

		if (!is_mon_pool)
			dp_audio_smmu_unmap(soc->osdev,
					    QDF_NBUF_CB_PADDR(nbuf),
					    buf_size);

		/* tear down the IPA SMMU mapping only if IPA mapped it */
		if (qdf_atomic_read(&soc->ipa_mapped)) {
			if (dp_ipa_handle_rx_buf_smmu_mapping(
							soc, nbuf, buf_size,
							false, __func__,
							__LINE__))
				dp_info_rl("Unable to unmap nbuf: %pK", nbuf);
		}
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}

	/* these were already unmapped earlier; just release them */
	nbuf = nbuf_free_list;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}
}

/*
 * dp_rx_desc_nbuf_and_pool_free() - release all buffers of a pool and
 *				     destroy the pool lock
 * @soc: data path SoC handle
 * @pool_id: id of the pool (unused in this variant)
 * @rx_desc_pool: rx descriptor pool to tear down
 *
 * Collection happens under the pool lock; the actual unmap/free and the
 * lock destruction happen after it is dropped.  The page memory itself
 * is freed separately via dp_rx_desc_pool_free().
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size, false);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

/*
 * dp_rx_desc_nbuf_free() - release all buffers of a pool without
 *			    destroying the pool itself
 * @soc: data path SoC handle
 * @rx_desc_pool: rx descriptor pool to sweep
 * @is_mon_pool: true for monitor destination pools
 *
 * Same collect-then-cleanup pattern as dp_rx_desc_nbuf_and_pool_free(),
 * but the lock stays alive for continued pool use.
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size, is_mon_pool);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

/*
 * dp_rx_desc_pool_free() - free the multi-page memory backing a pool
 * @soc: data path SoC handle
 * @rx_desc_pool: rx descriptor pool whose pages are released
 *
 * Safe to call on an unallocated pool: returns early when no cacheable
 * pages exist.
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;

	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

/*
 * dp_rx_desc_pool_deinit() - deinitialize a multi-page rx descriptor
 *			      pool and destroy its lock
 * @soc: data path SoC handle
 * @rx_desc_pool: rx descriptor pool to deinitialize
 * @pool_id: id of the pool, forwarded to the arch-specific deinit op
 *
 * Resets the freelist/size bookkeeping under the lock, lets the arch
 * op do its part, then destroys the lock.  Page memory is released
 * separately by dp_rx_desc_pool_free().
 */
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon dest frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
#else
/*
 * Contiguous-array variant: the pool is one qdf_mem_common_alloc'd
 * array of dp_rx_desc_list_elem_t instead of linked multi-pages.
 */

/*
 * dp_rx_desc_pool_is_allocated() - check if the descriptor array has
 *				    been allocated for this pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS if the array exists, QDF_STATUS_E_NOMEM
 *	   otherwise.
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refil");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - allocate the contiguous descriptor array
 * @soc: data path SoC handle (unused in this variant)
 * @pool_size: number of descriptors to allocate
 * @rx_desc_pool: rx descriptor pool to populate
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on
 *	   allocation failure.
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_common_alloc(pool_size *
				     sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

/*
 * dp_rx_desc_pool_init_generic() - chain the array elements into a
 *				    freelist and stamp each cookie
 * @soc: data path SoC handle (unused here)
 * @rx_desc_pool: rx descriptor pool whose array was allocated
 * @pool_id: id of this pool, encoded into bits 18+ of each cookie
 *
 * Return: QDF_STATUS_SUCCESS always.
 */
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
				  struct rx_desc_pool *rx_desc_pool,
				  uint32_t pool_id)
{
	int i;

	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
		/* last element terminates the freelist */
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		/* cookie = array index | pool id in the upper bits */
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - initialize the contiguous-array pool
 * @soc: data path SoC handle
 * @pool_id: id of the pool being initialized
 * @pool_size: number of descriptors in the pool
 * @rx_desc_pool: rx descriptor pool to initialize
 *
 * Creates the pool lock and delegates the per-descriptor setup to the
 * arch-specific init op; an op failure is logged but not propagated.
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	/*
	 * NOTE(review): this zeroes pool_size BYTES, not pool_size
	 * elements (pool_size * sizeof(union dp_rx_desc_list_elem_t)).
	 * Presumably harmless because the arch init op rewrites every
	 * element's next/cookie/pool_id/in_use — but confirm whether
	 * the full array was meant to be cleared here.
	 */
	qdf_mem_zero(rx_desc_pool->freelist, rx_desc_pool->pool_size);

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

#ifdef WLAN_SUPPORT_PPEDS
/*
 * dp_rx_desc_get_nbuf() - return the nbuf owned by descriptor @i,
 *			   preferring the PPE-DS reuse nbuf when present
 */
static inline
qdf_nbuf_t dp_rx_desc_get_nbuf(struct rx_desc_pool *rx_desc_pool, int i)
{
	if (rx_desc_pool->array[i].rx_desc.has_reuse_nbuf)
		return rx_desc_pool->array[i].rx_desc.reuse_nbuf;
	else
		return rx_desc_pool->array[i].rx_desc.nbuf;
}
#else
/*
 * dp_rx_desc_get_nbuf() - return the nbuf owned by descriptor @i
 */
static inline
qdf_nbuf_t dp_rx_desc_get_nbuf(struct rx_desc_pool *rx_desc_pool, int i)
{
	return rx_desc_pool->array[i].rx_desc.nbuf;
}
#endif

/*
 * dp_rx_desc_nbuf_and_pool_free() - unmap/free every in-use nbuf, free
 *				     the descriptor array, destroy the lock
 * @soc: data path SoC handle
 * @pool_id: id of the pool (unused in this variant)
 * @rx_desc_pool: rx descriptor pool to tear down
 *
 * The whole sweep, including the array free, runs under the pool lock;
 * the lock is destroyed last.
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = dp_rx_desc_get_nbuf(rx_desc_pool, i);

			/* unmap once; unmapped flag prevents double unmap */
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_mem_common_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

/*
 * dp_rx_desc_nbuf_free() - unmap and free every in-use nbuf, keeping
 *			    the descriptor array and lock alive
 * @soc: data path SoC handle
 * @rx_desc_pool: rx descriptor pool to sweep
 * @is_mon_pool: unused in this variant (kept for API parity with the
 *		 multi-page implementation)
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = dp_rx_desc_get_nbuf(rx_desc_pool, i);

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

#ifdef DP_RX_MON_MEM_FRAG
/*
 * dp_rx_desc_frag_free() - unmap and free every in-use monitor frag
 *			    buffer attached to the pool
 * @soc: data path SoC handle
 * @rx_desc_pool: rx descriptor pool to sweep
 *
 * Monitor-destination pools carry raw frags (paddr/vaddr pairs) rather
 * than nbufs, so they are unmapped with qdf_mem_unmap_page() and freed
 * with qdf_frag_free().
 */
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

			dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif

/*
 * dp_rx_desc_pool_free() - free the contiguous descriptor array
 * @soc: data path SoC handle (unused in this variant)
 * @rx_desc_pool: rx descriptor pool whose array is released
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_common_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

/*
 * dp_rx_desc_pool_deinit() - deinitialize the contiguous-array pool
 * @soc: data path SoC handle
 * @rx_desc_pool: rx descriptor pool to deinitialize
 * @pool_id: id of the pool, forwarded to the arch-specific deinit op
 *
 * No-op for a pool that was never initialized (pool_size == 0), which
 * also avoids touching a lock that was never created.
 */
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	if (rx_desc_pool->pool_size) {
		qdf_spin_lock_bh(&rx_desc_pool->lock);

		rx_desc_pool->freelist = NULL;
		rx_desc_pool->pool_size = 0;

		/* Deinitialize rx mon dest frag flag */
		rx_desc_pool->rx_mon_dest_frag_enable = false;

		soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool,
						     pool_id);

		qdf_spin_unlock_bh(&rx_desc_pool->lock);
		qdf_spinlock_destroy(&rx_desc_pool->lock);
	}
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

/*
 * dp_rx_desc_pool_deinit_generic() - default no-op arch deinit hook
 * @soc: data path SoC handle (unused)
 * @rx_desc_pool: rx descriptor pool (unused)
 * @pool_id: pool id (unused)
 *
 * Installed as arch_ops.dp_rx_desc_pool_deinit when the architecture
 * needs no per-descriptor teardown.
 */
void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t pool_id)
{
}

/*
 * dp_rx_get_free_desc_list() - detach up to @num_descs descriptors from
 *				the pool freelist
 * @soc: data path SoC handle (unused here)
 * @pool_id: id of the pool (unused here)
 * @rx_desc_pool: rx descriptor pool to take descriptors from
 * @num_descs: number of descriptors requested
 * @desc_list: out, head of the detached chain
 * @tail: out, last element of the detached chain (next set to NULL)
 *
 * If the freelist runs out early, returns the partial count; the chain
 * then ends at the old freelist tail.  Callers must treat *desc_list as
 * valid only for the first `count` elements.
 *
 * NOTE(review): with num_descs == 0 (or an initially empty freelist
 * reached only via the post-loop path) the final `(*tail)->next = NULL`
 * would write through the freelist head / a NULL pointer — presumably
 * callers never request 0 descriptors; confirm at call sites.
 *
 * Return: number of descriptors actually detached.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {

		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	/* terminate the detached chain */
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);

/*
 * dp_rx_add_desc_list_to_free_list() - splice a local descriptor chain
 *					back onto the pool freelist head
 * @soc: data path SoC handle (unused here)
 * @local_desc_list: in/out, head of the chain to return; NULLed on exit
 * @tail: in/out, last element of that chain; NULLed on exit
 * @pool_id: id of the pool (unused here)
 * @rx_desc_pool: rx descriptor pool receiving the chain
 *
 * The returned chain is prepended: its tail is linked to the old
 * freelist head.  Both caller pointers are cleared to signal ownership
 * transfer back to the pool.
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);


	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
	"temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
	temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);