1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "hal_hw_headers.h" 21 #include "dp_types.h" 22 #include "dp_rx.h" 23 #include "dp_tx.h" 24 #include "dp_peer.h" 25 #include "hal_rx.h" 26 #include "hal_api.h" 27 #include "qdf_nbuf.h" 28 #ifdef MESH_MODE_SUPPORT 29 #include "if_meta_hdr.h" 30 #endif 31 #include "dp_internal.h" 32 #include "dp_ipa.h" 33 #include "dp_hist.h" 34 #include "dp_rx_buffer_pool.h" 35 #ifdef WIFI_MONITOR_SUPPORT 36 #include "dp_htt.h" 37 #include <dp_mon.h> 38 #endif 39 #ifdef FEATURE_WDS 40 #include "dp_txrx_wds.h" 41 #endif 42 #ifdef DP_RATETABLE_SUPPORT 43 #include "dp_ratetable.h" 44 #endif 45 46 #ifdef DUP_RX_DESC_WAR 47 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 48 hal_ring_handle_t hal_ring, 49 hal_ring_desc_t ring_desc, 50 struct dp_rx_desc *rx_desc) 51 { 52 void *hal_soc = soc->hal_soc; 53 54 hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc); 55 dp_rx_desc_dump(rx_desc); 56 } 57 #else 58 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 59 hal_ring_handle_t hal_ring_hdl, 60 hal_ring_desc_t ring_desc, 61 struct dp_rx_desc *rx_desc) 62 { 63 hal_soc_handle_t hal_soc = soc->hal_soc; 64 65 dp_rx_desc_dump(rx_desc); 66 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc); 67 hal_srng_dump_ring(hal_soc, hal_ring_hdl); 68 qdf_assert_always(0); 69 } 70 #endif 71 72 #ifndef QCA_HOST_MODE_WIFI_DISABLED 73 #ifdef RX_DESC_SANITY_WAR 74 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, 75 hal_ring_handle_t hal_ring_hdl, 76 hal_ring_desc_t ring_desc, 77 struct dp_rx_desc *rx_desc) 78 { 79 uint8_t return_buffer_manager; 80 81 if (qdf_unlikely(!rx_desc)) { 82 /* 83 * This is an unlikely case where the cookie obtained 84 * from the ring_desc is invalid and hence we are not 85 * able to find the corresponding rx_desc 86 */ 87 goto fail; 88 } 89 90 return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc); 91 if (qdf_unlikely(!(return_buffer_manager == 92 HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) || 93 return_buffer_manager == 94 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) { 95 goto fail; 96 } 97 98 return QDF_STATUS_SUCCESS; 99 100 fail: 101 DP_STATS_INC(soc, rx.err.invalid_cookie, 1); 102 dp_err("Ring Desc:"); 103 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, 104 ring_desc); 105 return QDF_STATUS_E_NULL_VALUE; 106 107 } 108 #endif 109 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 110 111 /** 112 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map 113 * 114 * @dp_soc: struct dp_soc * 115 * @nbuf_frag_info_t: nbuf frag info 116 * @dp_pdev: struct dp_pdev * 117 * @rx_desc_pool: Rx desc pool 118 * 119 * Return: QDF_STATUS 120 */ 121 #ifdef DP_RX_MON_MEM_FRAG 122 static inline QDF_STATUS 123 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 124 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 125 struct dp_pdev *dp_pdev, 126 struct rx_desc_pool *rx_desc_pool) 127 { 128 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 129 130 (nbuf_frag_info_t->virt_addr).vaddr = 131 qdf_frag_alloc(NULL, rx_desc_pool->buf_size); 132 133 if (!((nbuf_frag_info_t->virt_addr).vaddr)) { 134 dp_err("Frag alloc failed"); 135 DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1); 136 return QDF_STATUS_E_NOMEM; 137 } 138 139 ret = qdf_mem_map_page(dp_soc->osdev, 140 (nbuf_frag_info_t->virt_addr).vaddr, 141 QDF_DMA_FROM_DEVICE, 142 rx_desc_pool->buf_size, 143 &nbuf_frag_info_t->paddr); 144 145 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 146 qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr); 147 dp_err("Frag map failed"); 148 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 149 return QDF_STATUS_E_FAULT; 150 } 151 152 return QDF_STATUS_SUCCESS; 153 } 154 #else 155 static inline QDF_STATUS 156 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 157 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 158 struct dp_pdev *dp_pdev, 159 struct rx_desc_pool *rx_desc_pool) 160 { 161 return QDF_STATUS_SUCCESS; 162 } 163 #endif /* DP_RX_MON_MEM_FRAG */ 164 165 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 166 /** 167 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history 168 * @soc: Datapath soc structure 169 * @ring_num: Refill ring number 170 * @num_req: number of buffers requested for refill 171 * @num_refill: number of buffers refilled 172 * 173 * Returns: None 174 */ 175 static inline void 176 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 177 hal_ring_handle_t hal_ring_hdl, 178 uint32_t num_req, uint32_t num_refill) 179 { 180 struct dp_refill_info_record *record; 181 uint32_t idx; 182 uint32_t tp; 183 uint32_t hp; 184 185 if (qdf_unlikely(ring_num >= MAX_PDEV_CNT || 186 !soc->rx_refill_ring_history[ring_num])) 187 return; 188 189 idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index, 190 DP_RX_REFILL_HIST_MAX); 191 192 /* No NULL check needed for record since its an array */ 193 record = &soc->rx_refill_ring_history[ring_num]->entry[idx]; 194 195 hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp); 196 record->timestamp = qdf_get_log_timestamp(); 197 record->num_req = num_req; 198 record->num_refill = num_refill; 199 record->hp = hp; 200 record->tp = tp; 201 } 202 #else 203 static inline void 204 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 205 hal_ring_handle_t hal_ring_hdl, 206 uint32_t num_req, uint32_t num_refill) 207 { 208 } 209 #endif 210 211 /** 212 * dp_pdev_nbuf_alloc_and_map() - Allocate nbuf for desc buffer and map 213 * 214 * @dp_soc: struct dp_soc * 215 * @mac_id: Mac id 216 * @num_entries_avail: num_entries_avail 217 * @nbuf_frag_info_t: nbuf frag info 218 * @dp_pdev: struct dp_pdev * 219 * @rx_desc_pool: Rx desc pool 220 * 221 * Return: QDF_STATUS 222 */ 223 static inline QDF_STATUS 224 dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc, 225 uint32_t mac_id, 226 uint32_t num_entries_avail, 227 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 228 struct dp_pdev *dp_pdev, 229 struct rx_desc_pool *rx_desc_pool) 230 { 231 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 232 233 (nbuf_frag_info_t->virt_addr).nbuf = 234 dp_rx_buffer_pool_nbuf_alloc(dp_soc, 235 mac_id, 236 rx_desc_pool, 237 num_entries_avail); 238 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 239 dp_err("nbuf alloc failed"); 240 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 241 return QDF_STATUS_E_NOMEM; 242 } 243 244 ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool, 245 nbuf_frag_info_t); 246 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 247 dp_rx_buffer_pool_nbuf_free(dp_soc, 248 (nbuf_frag_info_t->virt_addr).nbuf, mac_id); 249 dp_err("nbuf map failed"); 250 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 251 return QDF_STATUS_E_FAULT; 252 } 253 254 nbuf_frag_info_t->paddr = 255 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 256 dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)( 257 (nbuf_frag_info_t->virt_addr).nbuf), 258 rx_desc_pool->buf_size, 259 true, __func__, __LINE__); 260 261 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 262 &nbuf_frag_info_t->paddr, 263 rx_desc_pool); 264 if (ret == QDF_STATUS_E_FAILURE) { 265 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 266 return QDF_STATUS_E_ADDRNOTAVAIL; 267 } 268 269 return QDF_STATUS_SUCCESS; 270 } 271 272 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 273 QDF_STATUS 274 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id, 275 struct dp_srng *dp_rxdma_srng, 276 struct rx_desc_pool *rx_desc_pool) 277 { 278 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 279 uint32_t count; 280 void *rxdma_ring_entry; 281 union dp_rx_desc_list_elem_t *next = NULL; 282 void *rxdma_srng; 283 qdf_nbuf_t nbuf; 284 qdf_dma_addr_t paddr; 285 uint16_t num_entries_avail = 0; 286 uint16_t num_alloc_desc = 0; 287 union dp_rx_desc_list_elem_t *desc_list = NULL; 288 union dp_rx_desc_list_elem_t *tail = NULL; 289 int sync_hw_ptr = 0; 290 291 rxdma_srng = dp_rxdma_srng->hal_srng; 292 293 if (qdf_unlikely(!dp_pdev)) { 294 dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id); 295 return QDF_STATUS_E_FAILURE; 296 } 297 298 if (qdf_unlikely(!rxdma_srng)) { 299 dp_rx_debug("%pK: rxdma srng not initialized", soc); 300 return QDF_STATUS_E_FAILURE; 301 } 302 303 hal_srng_access_start(soc->hal_soc, rxdma_srng); 304 305 num_entries_avail = hal_srng_src_num_avail(soc->hal_soc, 306 rxdma_srng, 307 sync_hw_ptr); 308 309 dp_rx_debug("%pK: no of available entries in rxdma ring: %d", 310 soc, num_entries_avail); 311 312 if (qdf_unlikely(num_entries_avail < 313 ((dp_rxdma_srng->num_entries * 3) / 4))) { 314 hal_srng_access_end(soc->hal_soc, rxdma_srng); 315 return QDF_STATUS_E_FAILURE; 316 } 317 318 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 319 num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id, 320 rx_desc_pool, 321 num_entries_avail, 322 &desc_list, 323 &tail); 324 325 if (!num_alloc_desc) { 326 dp_rx_err("%pK: no free rx_descs in freelist", soc); 327 DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail, 328 num_entries_avail); 329 hal_srng_access_end(soc->hal_soc, rxdma_srng); 330 return QDF_STATUS_E_NOMEM; 331 } 332 333 for (count = 0; count < num_alloc_desc; count++) { 334 next = desc_list->next; 335 qdf_prefetch(next); 336 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 337 if (qdf_unlikely(!nbuf)) { 338 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 339 break; 340 } 341 342 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 343 rx_desc_pool->buf_size); 344 345 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, 346 rxdma_srng); 347 qdf_assert_always(rxdma_ring_entry); 348 349 desc_list->rx_desc.nbuf = nbuf; 350 desc_list->rx_desc.rx_buf_start = nbuf->data; 351 desc_list->rx_desc.unmapped = 0; 352 353 /* rx_desc.in_use should be zero at this time*/ 354 qdf_assert_always(desc_list->rx_desc.in_use == 0); 355 356 desc_list->rx_desc.in_use = 1; 357 desc_list->rx_desc.in_err_state = 0; 358 359 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 360 paddr, 361 desc_list->rx_desc.cookie, 362 rx_desc_pool->owner); 363 364 desc_list = next; 365 } 366 qdf_dsb(); 367 hal_srng_access_end(soc->hal_soc, rxdma_srng); 368 369 /* No need to count the number of bytes received during replenish. 370 * Therefore set replenish.pkts.bytes as 0. 371 */ 372 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 373 DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count)); 374 /* 375 * add any available free desc back to the free list 376 */ 377 if (desc_list) 378 dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail, 379 mac_id, rx_desc_pool); 380 381 return QDF_STATUS_SUCCESS; 382 } 383 384 QDF_STATUS 385 __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id, 386 struct dp_srng *dp_rxdma_srng, 387 struct rx_desc_pool *rx_desc_pool, 388 uint32_t num_req_buffers, 389 union dp_rx_desc_list_elem_t **desc_list, 390 union dp_rx_desc_list_elem_t **tail) 391 { 392 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 393 uint32_t count; 394 void *rxdma_ring_entry; 395 union dp_rx_desc_list_elem_t *next; 396 void *rxdma_srng; 397 qdf_nbuf_t nbuf; 398 qdf_nbuf_t nbuf_next; 399 qdf_nbuf_t nbuf_head = NULL; 400 qdf_nbuf_t nbuf_tail = NULL; 401 qdf_dma_addr_t paddr; 402 403 rxdma_srng = dp_rxdma_srng->hal_srng; 404 405 if (qdf_unlikely(!dp_pdev)) { 406 dp_rx_err("%pK: pdev is null for mac_id = %d", 407 soc, mac_id); 408 return QDF_STATUS_E_FAILURE; 409 } 410 411 if (qdf_unlikely(!rxdma_srng)) { 412 dp_rx_debug("%pK: rxdma srng not initialized", soc); 413 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 414 return QDF_STATUS_E_FAILURE; 415 } 416 417 /* Allocate required number of nbufs */ 418 for (count = 0; count < num_req_buffers; count++) { 419 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 420 if (qdf_unlikely(!nbuf)) { 421 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 422 /* Update num_req_buffers to nbufs allocated count */ 423 num_req_buffers = count; 424 break; 425 } 426 427 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 428 rx_desc_pool->buf_size); 429 430 QDF_NBUF_CB_PADDR(nbuf) = paddr; 431 DP_RX_LIST_APPEND(nbuf_head, 432 nbuf_tail, 433 nbuf); 434 } 435 qdf_dsb(); 436 437 nbuf = nbuf_head; 438 hal_srng_access_start(soc->hal_soc, rxdma_srng); 439 440 for (count = 0; count < num_req_buffers; count++) { 441 next = (*desc_list)->next; 442 nbuf_next = nbuf->next; 443 qdf_prefetch(next); 444 445 rxdma_ring_entry = (struct dp_buffer_addr_info *) 446 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 447 448 if (!rxdma_ring_entry) 449 break; 450 451 (*desc_list)->rx_desc.nbuf = nbuf; 452 (*desc_list)->rx_desc.rx_buf_start = nbuf->data; 453 (*desc_list)->rx_desc.unmapped = 0; 454 455 /* rx_desc.in_use should be zero at this time*/ 456 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 457 458 (*desc_list)->rx_desc.in_use = 1; 459 (*desc_list)->rx_desc.in_err_state = 0; 460 461 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 462 QDF_NBUF_CB_PADDR(nbuf), 463 (*desc_list)->rx_desc.cookie, 464 rx_desc_pool->owner); 465 466 *desc_list = next; 467 nbuf = nbuf_next; 468 } 469 hal_srng_access_end(soc->hal_soc, rxdma_srng); 470 471 /* No need to count the number of bytes received during replenish. 472 * Therefore set replenish.pkts.bytes as 0. 473 */ 474 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 475 DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count)); 476 /* 477 * add any available free desc back to the free list 478 */ 479 if (*desc_list) 480 dp_rx_add_desc_list_to_free_list(soc, desc_list, tail, 481 mac_id, rx_desc_pool); 482 while (nbuf) { 483 nbuf_next = nbuf->next; 484 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf); 485 qdf_nbuf_free(nbuf); 486 nbuf = nbuf_next; 487 } 488 489 return QDF_STATUS_SUCCESS; 490 } 491 492 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc, 493 uint32_t mac_id, 494 struct dp_srng *dp_rxdma_srng, 495 struct rx_desc_pool *rx_desc_pool, 496 uint32_t num_req_buffers) 497 { 498 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 499 uint32_t count; 500 uint32_t nr_descs = 0; 501 void *rxdma_ring_entry; 502 union dp_rx_desc_list_elem_t *next; 503 void *rxdma_srng; 504 qdf_nbuf_t nbuf; 505 qdf_dma_addr_t paddr; 506 union dp_rx_desc_list_elem_t *desc_list = NULL; 507 union dp_rx_desc_list_elem_t *tail = NULL; 508 509 rxdma_srng = dp_rxdma_srng->hal_srng; 510 511 if (qdf_unlikely(!dp_pdev)) { 512 dp_rx_err("%pK: pdev is null for mac_id = %d", 513 soc, mac_id); 514 return QDF_STATUS_E_FAILURE; 515 } 516 517 if (qdf_unlikely(!rxdma_srng)) { 518 dp_rx_debug("%pK: rxdma srng not initialized", soc); 519 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 520 return QDF_STATUS_E_FAILURE; 521 } 522 523 dp_rx_debug("%pK: requested %d buffers for replenish", 524 soc, num_req_buffers); 525 526 nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool, 527 num_req_buffers, &desc_list, &tail); 528 if (!nr_descs) { 529 dp_err("no free rx_descs in freelist"); 530 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 531 return QDF_STATUS_E_NOMEM; 532 } 533 534 dp_debug("got %u RX descs for driver attach", nr_descs); 535 536 hal_srng_access_start(soc->hal_soc, rxdma_srng); 537 538 for (count = 0; count < nr_descs; count++) { 539 next = desc_list->next; 540 qdf_prefetch(next); 541 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 542 if (qdf_unlikely(!nbuf)) { 543 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 544 break; 545 } 546 547 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 548 rx_desc_pool->buf_size); 549 rxdma_ring_entry = (struct dp_buffer_addr_info *) 550 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 551 if (!rxdma_ring_entry) 552 break; 553 554 qdf_assert_always(rxdma_ring_entry); 555 556 desc_list->rx_desc.nbuf = nbuf; 557 desc_list->rx_desc.rx_buf_start = nbuf->data; 558 desc_list->rx_desc.unmapped = 0; 559 560 /* rx_desc.in_use should be zero at this time*/ 561 qdf_assert_always(desc_list->rx_desc.in_use == 0); 562 563 desc_list->rx_desc.in_use = 1; 564 desc_list->rx_desc.in_err_state = 0; 565 566 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 567 paddr, 568 desc_list->rx_desc.cookie, 569 rx_desc_pool->owner); 570 571 desc_list = next; 572 } 573 qdf_dsb(); 574 hal_srng_access_end(soc->hal_soc, rxdma_srng); 575 576 /* No need to count the number of bytes received during replenish. 577 * Therefore set replenish.pkts.bytes as 0. 578 */ 579 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 580 581 return QDF_STATUS_SUCCESS; 582 } 583 #endif 584 585 #ifdef DP_UMAC_HW_RESET_SUPPORT 586 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 587 static inline 588 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, 589 uint32_t buf_size) 590 { 591 return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size); 592 } 593 #else 594 static inline 595 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, 596 uint32_t buf_size) 597 { 598 return qdf_nbuf_get_frag_paddr(nbuf, 0); 599 } 600 #endif 601 602 /* 603 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time 604 * 605 * @soc: core txrx main context 606 * @dp_rxdma_srng: rxdma ring 607 * @rx_desc_pool: rx descriptor pool 608 * @rx_desc:rx descriptor 609 * 610 * Return: void 611 */ 612 static inline 613 void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng, 614 struct rx_desc_pool *rx_desc_pool, 615 struct dp_rx_desc *rx_desc) 616 { 617 void *rxdma_srng; 618 void *rxdma_ring_entry; 619 qdf_dma_addr_t paddr; 620 621 rxdma_srng = dp_rxdma_srng->hal_srng; 622 623 /* No one else should be accessing the srng at this point */ 624 hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng); 625 626 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 627 628 qdf_assert_always(rxdma_ring_entry); 629 rx_desc->in_err_state = 0; 630 631 paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf, 632 rx_desc_pool->buf_size); 633 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr, 634 rx_desc->cookie, rx_desc_pool->owner); 635 636 hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng); 637 } 638 639 /* 640 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring 641 * 642 * @soc: core txrx main context 643 * @nbuf_list: nbuf list for delayed free 644 * 645 * Return: void 646 */ 647 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list) 648 { 649 int mac_id, i, j; 650 union dp_rx_desc_list_elem_t *head = NULL; 651 union dp_rx_desc_list_elem_t *tail = NULL; 652 653 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 654 struct dp_srng *dp_rxdma_srng = 655 &soc->rx_refill_buf_ring[mac_id]; 656 struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id]; 657 uint32_t rx_sw_desc_num = rx_desc_pool->pool_size; 658 /* Only fill up 1/3 of the ring size */ 659 uint32_t num_req_decs; 660 661 if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng || 662 !rx_desc_pool->array) 663 continue; 664 665 num_req_decs = dp_rxdma_srng->num_entries / 3; 666 667 for (i = 0, j = 0; i < rx_sw_desc_num; i++) { 668 struct dp_rx_desc *rx_desc = 669 (struct dp_rx_desc *)&rx_desc_pool->array[i]; 670 671 if (rx_desc->in_use) { 672 if (j < dp_rxdma_srng->num_entries) { 673 dp_rx_desc_replenish(soc, dp_rxdma_srng, 674 rx_desc_pool, 675 rx_desc); 676 } else { 677 dp_rx_nbuf_unmap(soc, rx_desc, 0); 678 rx_desc->unmapped = 0; 679 680 rx_desc->nbuf->next = *nbuf_list; 681 *nbuf_list = rx_desc->nbuf; 682 683 dp_rx_add_to_free_desc_list(&head, 684 &tail, 685 rx_desc); 686 } 687 j++; 688 } 689 } 690 691 if (head) 692 dp_rx_add_desc_list_to_free_list(soc, &head, &tail, 693 mac_id, rx_desc_pool); 694 695 /* If num of descs in use were less, then we need to replenish 696 * the ring with some buffers 697 */ 698 head = NULL; 699 tail = NULL; 700 701 if (j < (num_req_decs - 1)) 702 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 703 rx_desc_pool, 704 ((num_req_decs - 1) - j), 705 &head, &tail, true); 706 } 707 } 708 #endif 709 710 /* 711 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs 712 * called during dp rx initialization 713 * and at the end of dp_rx_process. 714 * 715 * @soc: core txrx main context 716 * @mac_id: mac_id which is one of 3 mac_ids 717 * @dp_rxdma_srng: dp rxdma circular ring 718 * @rx_desc_pool: Pointer to free Rx descriptor pool 719 * @num_req_buffers: number of buffer to be replenished 720 * @desc_list: list of descs if called from dp_rx_process 721 * or NULL during dp rx initialization or out of buffer 722 * interrupt. 723 * @tail: tail of descs list 724 * @req_only: If true don't replenish more than req buffers 725 * @func_name: name of the caller function 726 * Return: return success or failure 727 */ 728 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 729 struct dp_srng *dp_rxdma_srng, 730 struct rx_desc_pool *rx_desc_pool, 731 uint32_t num_req_buffers, 732 union dp_rx_desc_list_elem_t **desc_list, 733 union dp_rx_desc_list_elem_t **tail, 734 bool req_only, const char *func_name) 735 { 736 uint32_t num_alloc_desc; 737 uint16_t num_desc_to_free = 0; 738 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 739 uint32_t num_entries_avail; 740 uint32_t count; 741 uint32_t extra_buffers; 742 int sync_hw_ptr = 1; 743 struct dp_rx_nbuf_frag_info nbuf_frag_info = {0}; 744 void *rxdma_ring_entry; 745 union dp_rx_desc_list_elem_t *next; 746 QDF_STATUS ret; 747 void *rxdma_srng; 748 union dp_rx_desc_list_elem_t *desc_list_append = NULL; 749 union dp_rx_desc_list_elem_t *tail_append = NULL; 750 union dp_rx_desc_list_elem_t *temp_list = NULL; 751 752 rxdma_srng = dp_rxdma_srng->hal_srng; 753 754 if (qdf_unlikely(!dp_pdev)) { 755 dp_rx_err("%pK: pdev is null for mac_id = %d", 756 dp_soc, mac_id); 757 return QDF_STATUS_E_FAILURE; 758 } 759 760 if (qdf_unlikely(!rxdma_srng)) { 761 dp_rx_debug("%pK: rxdma srng not initialized", dp_soc); 762 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 763 return QDF_STATUS_E_FAILURE; 764 } 765 766 dp_verbose_debug("%pK: requested %d buffers for replenish", 767 dp_soc, num_req_buffers); 768 769 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 770 771 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 772 rxdma_srng, 773 sync_hw_ptr); 774 775 dp_verbose_debug("%pK: no of available entries in rxdma ring: %d", 776 dp_soc, num_entries_avail); 777 778 if (!req_only && !(*desc_list) && (num_entries_avail > 779 ((dp_rxdma_srng->num_entries * 3) / 4))) { 780 num_req_buffers = num_entries_avail; 781 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 782 } else if (num_entries_avail < num_req_buffers) { 783 num_desc_to_free = num_req_buffers - num_entries_avail; 784 num_req_buffers = num_entries_avail; 785 } else if ((*desc_list) && 786 dp_rxdma_srng->num_entries - num_entries_avail < 787 CRITICAL_BUFFER_THRESHOLD) { 788 /* set extra buffers to CRITICAL_BUFFER_THRESHOLD only if 789 * total buff requested after adding extra buffers is less 790 * than or equal to num entries available, else set it to max 791 * possible additional buffers available at that moment 792 */ 793 extra_buffers = 794 ((num_req_buffers + CRITICAL_BUFFER_THRESHOLD) > num_entries_avail) ? 795 (num_entries_avail - num_req_buffers) : 796 CRITICAL_BUFFER_THRESHOLD; 797 /* Append some free descriptors to tail */ 798 num_alloc_desc = 799 dp_rx_get_free_desc_list(dp_soc, mac_id, 800 rx_desc_pool, 801 extra_buffers, 802 &desc_list_append, 803 &tail_append); 804 805 if (num_alloc_desc) { 806 temp_list = *desc_list; 807 *desc_list = desc_list_append; 808 tail_append->next = temp_list; 809 num_req_buffers += num_alloc_desc; 810 811 DP_STATS_DEC(dp_pdev, 812 replenish.free_list, 813 num_alloc_desc); 814 } else 815 dp_err_rl("%pK: no free rx_descs in freelist", dp_soc); 816 } 817 818 if (qdf_unlikely(!num_req_buffers)) { 819 num_desc_to_free = num_req_buffers; 820 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 821 goto free_descs; 822 } 823 824 /* 825 * if desc_list is NULL, allocate the descs from freelist 826 */ 827 if (!(*desc_list)) { 828 num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, 829 rx_desc_pool, 830 num_req_buffers, 831 desc_list, 832 tail); 833 834 if (!num_alloc_desc) { 835 dp_rx_err("%pK: no free rx_descs in freelist", dp_soc); 836 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, 837 num_req_buffers); 838 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 839 return QDF_STATUS_E_NOMEM; 840 } 841 842 dp_verbose_debug("%pK: %d rx desc allocated", dp_soc, 843 num_alloc_desc); 844 num_req_buffers = num_alloc_desc; 845 } 846 847 848 count = 0; 849 850 while (count < num_req_buffers) { 851 /* Flag is set while pdev rx_desc_pool initialization */ 852 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 853 ret = dp_pdev_frag_alloc_and_map(dp_soc, 854 &nbuf_frag_info, 855 dp_pdev, 856 rx_desc_pool); 857 else 858 ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc, 859 mac_id, 860 num_entries_avail, &nbuf_frag_info, 861 dp_pdev, rx_desc_pool); 862 863 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 864 if (qdf_unlikely(ret == QDF_STATUS_E_FAULT)) 865 continue; 866 break; 867 } 868 869 count++; 870 871 rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, 872 rxdma_srng); 873 qdf_assert_always(rxdma_ring_entry); 874 875 next = (*desc_list)->next; 876 877 /* Flag is set while pdev rx_desc_pool initialization */ 878 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 879 dp_rx_desc_frag_prep(&((*desc_list)->rx_desc), 880 &nbuf_frag_info); 881 else 882 dp_rx_desc_prep(&((*desc_list)->rx_desc), 883 &nbuf_frag_info); 884 885 /* rx_desc.in_use should be zero at this time*/ 886 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 887 888 (*desc_list)->rx_desc.in_use = 1; 889 (*desc_list)->rx_desc.in_err_state = 0; 890 dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc, 891 func_name, RX_DESC_REPLENISHED); 892 dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d", 893 nbuf_frag_info.virt_addr.nbuf, 894 (unsigned long long)(nbuf_frag_info.paddr), 895 (*desc_list)->rx_desc.cookie); 896 897 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, 898 nbuf_frag_info.paddr, 899 (*desc_list)->rx_desc.cookie, 900 rx_desc_pool->owner); 901 902 *desc_list = next; 903 904 } 905 906 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng, 907 num_req_buffers, count); 908 909 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 910 911 dp_rx_schedule_refill_thread(dp_soc); 912 913 dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u", 914 count, num_desc_to_free); 915 916 /* No need to count the number of bytes received during replenish. 917 * Therefore set replenish.pkts.bytes as 0. 918 */ 919 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 920 DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count); 921 922 free_descs: 923 DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free); 924 /* 925 * add any available free desc back to the free list 926 */ 927 if (*desc_list) 928 dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail, 929 mac_id, rx_desc_pool); 930 931 return QDF_STATUS_SUCCESS; 932 } 933 934 qdf_export_symbol(__dp_rx_buffers_replenish); 935 936 /* 937 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the 938 * pkts to RAW mode simulation to 939 * decapsulate the pkt. 940 * 941 * @vdev: vdev on which RAW mode is enabled 942 * @nbuf_list: list of RAW pkts to process 943 * @txrx_peer: peer object from which the pkt is rx 944 * 945 * Return: void 946 */ 947 void 948 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, 949 struct dp_txrx_peer *txrx_peer) 950 { 951 qdf_nbuf_t deliver_list_head = NULL; 952 qdf_nbuf_t deliver_list_tail = NULL; 953 qdf_nbuf_t nbuf; 954 955 nbuf = nbuf_list; 956 while (nbuf) { 957 qdf_nbuf_t next = qdf_nbuf_next(nbuf); 958 959 DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf); 960 961 DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); 962 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1, 963 qdf_nbuf_len(nbuf)); 964 /* 965 * reset the chfrag_start and chfrag_end bits in nbuf cb 966 * as this is a non-amsdu pkt and RAW mode simulation expects 967 * these bit s to be 0 for non-amsdu pkt. 968 */ 969 if (qdf_nbuf_is_rx_chfrag_start(nbuf) && 970 qdf_nbuf_is_rx_chfrag_end(nbuf)) { 971 qdf_nbuf_set_rx_chfrag_start(nbuf, 0); 972 qdf_nbuf_set_rx_chfrag_end(nbuf, 0); 973 } 974 975 nbuf = next; 976 } 977 978 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head, 979 &deliver_list_tail); 980 981 vdev->osif_rx(vdev->osif_vdev, deliver_list_head); 982 } 983 984 #ifndef QCA_HOST_MODE_WIFI_DISABLED 985 #ifndef FEATURE_WDS 986 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr, 987 struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf) 988 { 989 } 990 #endif 991 992 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES 993 /* 994 * dp_classify_critical_pkts() - API for marking critical packets 995 * @soc: dp_soc context 996 * @vdev: vdev on which packet is to be sent 997 * @nbuf: nbuf that has to be classified 998 * 999 * The function parses the packet, identifies whether its a critical frame and 1000 * marks QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the nbuf. 1001 * Code for marking which frames are CRITICAL is accessed via callback. 1002 * EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical frames. 1003 * 1004 * Return: None 1005 */ 1006 static 1007 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev, 1008 qdf_nbuf_t nbuf) 1009 { 1010 if (vdev->tx_classify_critical_pkt_cb) 1011 vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf); 1012 } 1013 #else 1014 static inline 1015 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev, 1016 qdf_nbuf_t nbuf) 1017 { 1018 } 1019 #endif 1020 1021 #ifdef QCA_OL_TX_MULTIQ_SUPPORT 1022 static inline 1023 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) 1024 { 1025 qdf_nbuf_set_queue_mapping(nbuf, ring_id); 1026 } 1027 #else 1028 static inline 1029 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) 1030 { 1031 } 1032 #endif 1033 1034 /* 1035 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets 1036 * 1037 * @soc: core txrx main context 1038 * @ta_peer : source peer entry 1039 * @rx_tlv_hdr : start address of rx tlvs 1040 * @nbuf : nbuf that has to be intrabss forwarded 1041 * @tid_stats : tid stats pointer 1042 * 1043 * Return: bool: true if it is forwarded else false 1044 */ 1045 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, 1046 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1047 struct cdp_tid_rx_stats *tid_stats) 1048 { 1049 uint16_t len; 1050 qdf_nbuf_t nbuf_copy; 1051 1052 if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr, 1053 nbuf)) 1054 return true; 1055 1056 if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf)) 1057 return false; 1058 1059 /* If the source peer in the isolation list 1060 * then dont forward instead push to bridge stack 1061 */ 1062 if (dp_get_peer_isolation(ta_peer)) 1063 return false; 1064 1065 nbuf_copy = qdf_nbuf_copy(nbuf); 1066 if (!nbuf_copy) 1067 return false; 1068 1069 len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1070 1071 qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb)); 1072 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy); 1073 1074 if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy, 1075 tid_stats)) 1076 return false; 1077 1078 if (dp_tx_send((struct cdp_soc_t *)soc, 1079 ta_peer->vdev->vdev_id, nbuf_copy)) { 1080 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 1081 len); 1082 tid_stats->fail_cnt[INTRABSS_DROP]++; 1083 dp_rx_nbuf_free(nbuf_copy); 1084 } else { 1085 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 1086 len); 1087 tid_stats->intrabss_cnt++; 1088 } 1089 return false; 1090 } 1091 1092 /* 1093 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets 1094 * 1095 * @soc: core txrx main context 1096 * @ta_peer: source peer entry 1097 * @tx_vdev_id: VDEV ID for Intra-BSS TX 1098 * @rx_tlv_hdr: start address of rx tlvs 1099 * @nbuf: nbuf that has to be intrabss forwarded 1100 * @tid_stats: tid stats pointer 1101 * 1102 * Return: bool: true if it is forwarded else false 1103 */ 1104 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, 1105 uint8_t tx_vdev_id, 1106 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1107 struct cdp_tid_rx_stats *tid_stats) 1108 { 1109 uint16_t len; 1110 1111 len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1112 1113 /* linearize the nbuf just before we send to 1114 * dp_tx_send() 1115 */ 1116 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 1117 if (qdf_nbuf_linearize(nbuf) == -ENOMEM) 1118 return false; 1119 1120 nbuf = qdf_nbuf_unshare(nbuf); 1121 if (!nbuf) { 1122 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, 1123 rx.intra_bss.fail, 1124 1, len); 1125 /* return true even though the pkt is 1126 * not forwarded. Basically skb_unshare 1127 * failed and we want to continue with 1128 * next nbuf. 1129 */ 1130 tid_stats->fail_cnt[INTRABSS_DROP]++; 1131 return false; 1132 } 1133 } 1134 1135 qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb)); 1136 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf); 1137 1138 if (!dp_tx_send((struct cdp_soc_t *)soc, 1139 tx_vdev_id, nbuf)) { 1140 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 1141 len); 1142 } else { 1143 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 1144 len); 1145 tid_stats->fail_cnt[INTRABSS_DROP]++; 1146 return false; 1147 } 1148 1149 return true; 1150 } 1151 1152 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1153 1154 #ifdef MESH_MODE_SUPPORT 1155 1156 /** 1157 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats 1158 * 1159 * @vdev: DP Virtual device handle 1160 * @nbuf: Buffer pointer 1161 * @rx_tlv_hdr: start of rx tlv header 1162 * @txrx_peer: pointer to peer 1163 * 1164 * This function allocated memory for mesh receive stats and fill the 1165 * required stats. Stores the memory address in skb cb. 1166 * 1167 * Return: void 1168 */ 1169 1170 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1171 uint8_t *rx_tlv_hdr, 1172 struct dp_txrx_peer *txrx_peer) 1173 { 1174 struct mesh_recv_hdr_s *rx_info = NULL; 1175 uint32_t pkt_type; 1176 uint32_t nss; 1177 uint32_t rate_mcs; 1178 uint32_t bw; 1179 uint8_t primary_chan_num; 1180 uint32_t center_chan_freq; 1181 struct dp_soc *soc = vdev->pdev->soc; 1182 struct dp_peer *peer; 1183 struct dp_peer *primary_link_peer; 1184 struct dp_soc *link_peer_soc; 1185 cdp_peer_stats_param_t buf = {0}; 1186 1187 /* fill recv mesh stats */ 1188 rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s)); 1189 1190 /* upper layers are responsible to free this memory */ 1191 1192 if (!rx_info) { 1193 dp_rx_err("%pK: Memory allocation failed for mesh rx stats", 1194 vdev->pdev->soc); 1195 DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1); 1196 return; 1197 } 1198 1199 rx_info->rs_flags = MESH_RXHDR_VER1; 1200 if (qdf_nbuf_is_rx_chfrag_start(nbuf)) 1201 rx_info->rs_flags |= MESH_RX_FIRST_MSDU; 1202 1203 if (qdf_nbuf_is_rx_chfrag_end(nbuf)) 1204 rx_info->rs_flags |= MESH_RX_LAST_MSDU; 1205 1206 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH); 1207 if (peer) { 1208 if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) { 1209 rx_info->rs_flags |= MESH_RX_DECRYPTED; 1210 rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc, 1211 rx_tlv_hdr); 1212 if (vdev->osif_get_key) 1213 vdev->osif_get_key(vdev->osif_vdev, 1214 &rx_info->rs_decryptkey[0], 1215 &peer->mac_addr.raw[0], 1216 rx_info->rs_keyix); 1217 } 1218 1219 dp_peer_unref_delete(peer, DP_MOD_ID_MESH); 1220 } 1221 1222 primary_link_peer = dp_get_primary_link_peer_by_id(soc, 1223 txrx_peer->peer_id, 1224 DP_MOD_ID_MESH); 1225 1226 if (qdf_likely(primary_link_peer)) { 1227 link_peer_soc = primary_link_peer->vdev->pdev->soc; 1228 dp_monitor_peer_get_stats_param(link_peer_soc, 1229 primary_link_peer, 1230 cdp_peer_rx_snr, &buf); 1231 rx_info->rs_snr = buf.rx_snr; 1232 dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH); 1233 } 1234 1235 rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR; 1236 1237 soc = vdev->pdev->soc; 1238 primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr); 1239 center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16; 1240 1241 if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) { 1242 rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band( 1243 soc->ctrl_psoc, 1244 vdev->pdev->pdev_id, 1245 center_chan_freq); 1246 } 1247 rx_info->rs_channel = primary_chan_num; 1248 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 1249 rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 1250 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 1251 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 1252 rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) | 1253 (bw << 24); 1254 1255 qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO); 1256 1257 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED, 1258 FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"), 1259 rx_info->rs_flags, 1260 rx_info->rs_rssi, 1261 rx_info->rs_channel, 1262 rx_info->rs_ratephy1, 1263 rx_info->rs_keyix, 1264 rx_info->rs_snr); 1265 1266 } 1267 1268 /** 1269 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets 1270 * 1271 * @vdev: DP Virtual device handle 1272 * @nbuf: Buffer pointer 1273 * @rx_tlv_hdr: start of rx tlv header 1274 * 1275 * This checks if the received packet is matching any filter out 1276 * catogery and and drop the packet if it matches. 1277 * 1278 * Return: status(0 indicates drop, 1 indicate to no drop) 1279 */ 1280 1281 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1282 uint8_t *rx_tlv_hdr) 1283 { 1284 union dp_align_mac_addr mac_addr; 1285 struct dp_soc *soc = vdev->pdev->soc; 1286 1287 if (qdf_unlikely(vdev->mesh_rx_filter)) { 1288 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS) 1289 if (hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1290 rx_tlv_hdr)) 1291 return QDF_STATUS_SUCCESS; 1292 1293 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS) 1294 if (hal_rx_mpdu_get_to_ds(soc->hal_soc, 1295 rx_tlv_hdr)) 1296 return QDF_STATUS_SUCCESS; 1297 1298 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS) 1299 if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1300 rx_tlv_hdr) && 1301 !hal_rx_mpdu_get_to_ds(soc->hal_soc, 1302 rx_tlv_hdr)) 1303 return QDF_STATUS_SUCCESS; 1304 1305 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) { 1306 if (hal_rx_mpdu_get_addr1(soc->hal_soc, 1307 rx_tlv_hdr, 1308 &mac_addr.raw[0])) 1309 return QDF_STATUS_E_FAILURE; 1310 1311 if (!qdf_mem_cmp(&mac_addr.raw[0], 1312 &vdev->mac_addr.raw[0], 1313 QDF_MAC_ADDR_SIZE)) 1314 return QDF_STATUS_SUCCESS; 1315 } 1316 1317 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) { 1318 if (hal_rx_mpdu_get_addr2(soc->hal_soc, 1319 rx_tlv_hdr, 1320 &mac_addr.raw[0])) 1321 return QDF_STATUS_E_FAILURE; 1322 1323 if (!qdf_mem_cmp(&mac_addr.raw[0], 1324 &vdev->mac_addr.raw[0], 1325 QDF_MAC_ADDR_SIZE)) 1326 return QDF_STATUS_SUCCESS; 1327 } 1328 } 1329 1330 return QDF_STATUS_E_FAILURE; 1331 } 1332 1333 #else 1334 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1335 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer) 1336 { 1337 } 1338 1339 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1340 uint8_t *rx_tlv_hdr) 1341 { 1342 return QDF_STATUS_E_FAILURE; 1343 } 1344 1345 #endif 1346 1347 #ifdef FEATURE_NAC_RSSI 1348 /** 1349 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac 1350 * @soc: DP SOC handle 1351 * @mpdu: mpdu for which peer is invalid 1352 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1353 * pool_id has same mapping) 1354 * 1355 * return: integer type 1356 */ 1357 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, 1358 uint8_t mac_id) 1359 { 1360 struct dp_invalid_peer_msg msg; 1361 struct dp_vdev *vdev = NULL; 1362 struct dp_pdev *pdev = NULL; 1363 struct ieee80211_frame *wh; 1364 qdf_nbuf_t curr_nbuf, next_nbuf; 1365 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 1366 uint8_t *rx_pkt_hdr = NULL; 1367 int i = 0; 1368 1369 if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) { 1370 dp_rx_debug("%pK: Drop decapped frames", soc); 1371 goto free; 1372 } 1373 1374 /* In RAW packet, packet header will be part of data */ 1375 rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size; 1376 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1377 1378 if (!DP_FRAME_IS_DATA(wh)) { 1379 dp_rx_debug("%pK: NAWDS valid only for data frames", soc); 1380 goto free; 1381 } 1382 1383 if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { 1384 dp_rx_err("%pK: Invalid nbuf length", soc); 1385 goto free; 1386 } 1387 1388 /* In DMAC case the rx_desc_pools are common across PDEVs 1389 * so PDEV cannot be derived from the pool_id. 1390 * 1391 * link_id need to derived from the TLV tag word which is 1392 * disabled by default. For now adding a WAR to get vdev 1393 * with brute force this need to fixed with word based subscription 1394 * support is added by enabling TLV tag word 1395 */ 1396 if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) { 1397 for (i = 0; i < MAX_PDEV_CNT; i++) { 1398 pdev = soc->pdev_list[i]; 1399 1400 if (!pdev || qdf_unlikely(pdev->is_pdev_down)) 1401 continue; 1402 1403 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 1404 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1405 QDF_MAC_ADDR_SIZE) == 0) { 1406 goto out; 1407 } 1408 } 1409 } 1410 } else { 1411 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1412 1413 if (!pdev || qdf_unlikely(pdev->is_pdev_down)) { 1414 dp_rx_err("%pK: PDEV %s", 1415 soc, !pdev ? "not found" : "down"); 1416 goto free; 1417 } 1418 1419 if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) == 1420 QDF_STATUS_SUCCESS) 1421 return 0; 1422 1423 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 1424 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1425 QDF_MAC_ADDR_SIZE) == 0) { 1426 goto out; 1427 } 1428 } 1429 } 1430 1431 if (!vdev) { 1432 dp_rx_err("%pK: VDEV not found", soc); 1433 goto free; 1434 } 1435 out: 1436 msg.wh = wh; 1437 qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size); 1438 msg.nbuf = mpdu; 1439 msg.vdev_id = vdev->vdev_id; 1440 1441 /* 1442 * NOTE: Only valid for HKv1. 1443 * If smart monitor mode is enabled on RE, we are getting invalid 1444 * peer frames with RA as STA mac of RE and the TA not matching 1445 * with any NAC list or the the BSSID.Such frames need to dropped 1446 * in order to avoid HM_WDS false addition. 1447 */ 1448 if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) { 1449 if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) { 1450 dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm", 1451 soc, wh->i_addr1); 1452 goto free; 1453 } 1454 pdev->soc->cdp_soc.ol_ops->rx_invalid_peer( 1455 (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc, 1456 pdev->pdev_id, &msg); 1457 } 1458 1459 free: 1460 /* Drop and free packet */ 1461 curr_nbuf = mpdu; 1462 while (curr_nbuf) { 1463 next_nbuf = qdf_nbuf_next(curr_nbuf); 1464 dp_rx_nbuf_free(curr_nbuf); 1465 curr_nbuf = next_nbuf; 1466 } 1467 1468 return 0; 1469 } 1470 1471 /** 1472 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler 1473 * @soc: DP SOC handle 1474 * @mpdu: mpdu for which peer is invalid 1475 * @mpdu_done: if an mpdu is completed 1476 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1477 * pool_id has same mapping) 1478 * 1479 * return: integer type 1480 */ 1481 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1482 qdf_nbuf_t mpdu, bool mpdu_done, 1483 uint8_t mac_id) 1484 { 1485 /* Only trigger the process when mpdu is completed */ 1486 if (mpdu_done) 1487 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1488 } 1489 #else 1490 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, 1491 uint8_t mac_id) 1492 { 1493 qdf_nbuf_t curr_nbuf, next_nbuf; 1494 struct dp_pdev *pdev; 1495 struct dp_vdev *vdev = NULL; 1496 struct ieee80211_frame *wh; 1497 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 1498 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr); 1499 1500 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1501 1502 if (!DP_FRAME_IS_DATA(wh)) { 1503 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, 1504 "only for data frames"); 1505 goto free; 1506 } 1507 1508 if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { 1509 dp_rx_info_rl("%pK: Invalid nbuf length", soc); 1510 goto free; 1511 } 1512 1513 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1514 if (!pdev) { 1515 dp_rx_info_rl("%pK: PDEV not found", soc); 1516 goto free; 1517 } 1518 1519 qdf_spin_lock_bh(&pdev->vdev_list_lock); 1520 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { 1521 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1522 QDF_MAC_ADDR_SIZE) == 0) { 1523 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1524 goto out; 1525 } 1526 } 1527 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1528 1529 if (!vdev) { 1530 dp_rx_info_rl("%pK: VDEV not found", soc); 1531 goto free; 1532 } 1533 1534 out: 1535 if (soc->cdp_soc.ol_ops->rx_invalid_peer) 1536 soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh); 1537 free: 1538 1539 /* Drop and free packet */ 1540 curr_nbuf = mpdu; 1541 while (curr_nbuf) { 1542 next_nbuf = qdf_nbuf_next(curr_nbuf); 1543 dp_rx_nbuf_free(curr_nbuf); 1544 curr_nbuf = next_nbuf; 1545 } 1546 1547 /* Reset the head and tail pointers */ 1548 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1549 if (pdev) { 1550 pdev->invalid_peer_head_msdu = NULL; 1551 pdev->invalid_peer_tail_msdu = NULL; 1552 } 1553 1554 return 0; 1555 } 1556 1557 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1558 qdf_nbuf_t mpdu, bool mpdu_done, 1559 uint8_t mac_id) 1560 { 1561 /* Process the nbuf */ 1562 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1563 } 1564 #endif 1565 1566 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1567 1568 #ifdef RECEIVE_OFFLOAD 1569 /** 1570 * dp_rx_print_offload_info() - Print offload info from RX TLV 1571 * @soc: dp soc handle 1572 * @msdu: MSDU for which the offload info is to be printed 1573 * 1574 * Return: None 1575 */ 1576 static void dp_rx_print_offload_info(struct dp_soc *soc, 1577 qdf_nbuf_t msdu) 1578 { 1579 dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------"); 1580 dp_verbose_debug("lro_eligible 0x%x", 1581 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu)); 1582 dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu)); 1583 dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu)); 1584 dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu)); 1585 dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu)); 1586 dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu)); 1587 dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu)); 1588 dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu)); 1589 dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu)); 1590 dp_verbose_debug("---------------------------------------------------------"); 1591 } 1592 1593 /** 1594 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb 1595 * @soc: DP SOC handle 1596 * @rx_tlv: RX TLV received for the msdu 1597 * @msdu: msdu for which GRO info needs to be filled 1598 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets 1599 * 1600 * Return: None 1601 */ 1602 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, 1603 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) 1604 { 1605 struct hal_offload_info offload_info; 1606 1607 if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) 1608 return; 1609 1610 if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info)) 1611 return; 1612 1613 *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1; 1614 1615 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible; 1616 QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack; 1617 QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) = 1618 hal_rx_tlv_get_tcp_chksum(soc->hal_soc, 1619 rx_tlv); 1620 QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num; 1621 QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num; 1622 QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win; 1623 QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto; 1624 QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto; 1625 QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset; 1626 QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id; 1627 1628 dp_rx_print_offload_info(soc, msdu); 1629 } 1630 #endif /* RECEIVE_OFFLOAD */ 1631 1632 /** 1633 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf. 1634 * 1635 * @soc: DP soc handle 1636 * @nbuf: pointer to msdu. 1637 * @mpdu_len: mpdu length 1638 * @l3_pad_len: L3 padding length by HW 1639 * 1640 * Return: returns true if nbuf is last msdu of mpdu else returns false. 1641 */ 1642 static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc, 1643 qdf_nbuf_t nbuf, 1644 uint16_t *mpdu_len, 1645 uint32_t l3_pad_len) 1646 { 1647 bool last_nbuf; 1648 uint32_t pkt_hdr_size; 1649 1650 pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len; 1651 1652 if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) { 1653 qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE); 1654 last_nbuf = false; 1655 *mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size); 1656 } else { 1657 qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size)); 1658 last_nbuf = true; 1659 *mpdu_len = 0; 1660 } 1661 1662 return last_nbuf; 1663 } 1664 1665 /** 1666 * dp_get_l3_hdr_pad_len() - get L3 header padding length. 1667 * 1668 * @soc: DP soc handle 1669 * @nbuf: pointer to msdu. 1670 * 1671 * Return: returns padding length in bytes. 1672 */ 1673 static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc, 1674 qdf_nbuf_t nbuf) 1675 { 1676 uint32_t l3_hdr_pad = 0; 1677 uint8_t *rx_tlv_hdr; 1678 struct hal_rx_msdu_metadata msdu_metadata; 1679 1680 while (nbuf) { 1681 if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) { 1682 /* scattered msdu end with continuation is 0 */ 1683 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1684 hal_rx_msdu_metadata_get(soc->hal_soc, 1685 rx_tlv_hdr, 1686 &msdu_metadata); 1687 l3_hdr_pad = msdu_metadata.l3_hdr_pad; 1688 break; 1689 } 1690 nbuf = nbuf->next; 1691 } 1692 1693 return l3_hdr_pad; 1694 } 1695 1696 /** 1697 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across 1698 * multiple nbufs. 1699 * @soc: DP SOC handle 1700 * @nbuf: pointer to the first msdu of an amsdu. 1701 * 1702 * This function implements the creation of RX frag_list for cases 1703 * where an MSDU is spread across multiple nbufs. 1704 * 1705 * Return: returns the head nbuf which contains complete frag_list. 1706 */ 1707 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf) 1708 { 1709 qdf_nbuf_t parent, frag_list, next = NULL; 1710 uint16_t frag_list_len = 0; 1711 uint16_t mpdu_len; 1712 bool last_nbuf; 1713 uint32_t l3_hdr_pad_offset = 0; 1714 1715 /* 1716 * Use msdu len got from REO entry descriptor instead since 1717 * there is case the RX PKT TLV is corrupted while msdu_len 1718 * from REO descriptor is right for non-raw RX scatter msdu. 1719 */ 1720 mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1721 1722 /* 1723 * this is a case where the complete msdu fits in one single nbuf. 1724 * in this case HW sets both start and end bit and we only need to 1725 * reset these bits for RAW mode simulator to decap the pkt 1726 */ 1727 if (qdf_nbuf_is_rx_chfrag_start(nbuf) && 1728 qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1729 qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size); 1730 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1731 return nbuf; 1732 } 1733 1734 l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf); 1735 /* 1736 * This is a case where we have multiple msdus (A-MSDU) spread across 1737 * multiple nbufs. here we create a fraglist out of these nbufs. 1738 * 1739 * the moment we encounter a nbuf with continuation bit set we 1740 * know for sure we have an MSDU which is spread across multiple 1741 * nbufs. We loop through and reap nbufs till we reach last nbuf. 1742 */ 1743 parent = nbuf; 1744 frag_list = nbuf->next; 1745 nbuf = nbuf->next; 1746 1747 /* 1748 * set the start bit in the first nbuf we encounter with continuation 1749 * bit set. This has the proper mpdu length set as it is the first 1750 * msdu of the mpdu. this becomes the parent nbuf and the subsequent 1751 * nbufs will form the frag_list of the parent nbuf. 1752 */ 1753 qdf_nbuf_set_rx_chfrag_start(parent, 1); 1754 /* 1755 * L3 header padding is only needed for the 1st buffer 1756 * in a scattered msdu 1757 */ 1758 last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len, 1759 l3_hdr_pad_offset); 1760 1761 /* 1762 * MSDU cont bit is set but reported MPDU length can fit 1763 * in to single buffer 1764 * 1765 * Increment error stats and avoid SG list creation 1766 */ 1767 if (last_nbuf) { 1768 DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1); 1769 qdf_nbuf_pull_head(parent, 1770 soc->rx_pkt_tlv_size + l3_hdr_pad_offset); 1771 return parent; 1772 } 1773 1774 /* 1775 * this is where we set the length of the fragments which are 1776 * associated to the parent nbuf. We iterate through the frag_list 1777 * till we hit the last_nbuf of the list. 1778 */ 1779 do { 1780 last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0); 1781 qdf_nbuf_pull_head(nbuf, 1782 soc->rx_pkt_tlv_size); 1783 frag_list_len += qdf_nbuf_len(nbuf); 1784 1785 if (last_nbuf) { 1786 next = nbuf->next; 1787 nbuf->next = NULL; 1788 break; 1789 } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1790 dp_err("Invalid packet length\n"); 1791 qdf_assert_always(0); 1792 } 1793 nbuf = nbuf->next; 1794 } while (!last_nbuf); 1795 1796 qdf_nbuf_set_rx_chfrag_start(nbuf, 0); 1797 qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len); 1798 parent->next = next; 1799 1800 qdf_nbuf_pull_head(parent, 1801 soc->rx_pkt_tlv_size + l3_hdr_pad_offset); 1802 return parent; 1803 } 1804 1805 #ifdef DP_RX_SG_FRAME_SUPPORT 1806 /** 1807 * dp_rx_is_sg_supported() - SG packets processing supported or not. 1808 * 1809 * Return: returns true when processing is supported else false. 1810 */ 1811 bool dp_rx_is_sg_supported(void) 1812 { 1813 return true; 1814 } 1815 #else 1816 bool dp_rx_is_sg_supported(void) 1817 { 1818 return false; 1819 } 1820 #endif 1821 1822 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1823 1824 #ifdef QCA_PEER_EXT_STATS 1825 /* 1826 * dp_rx_compute_tid_delay - Computer per TID delay stats 1827 * @peer: DP soc context 1828 * @nbuf: NBuffer 1829 * 1830 * Return: Void 1831 */ 1832 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats, 1833 qdf_nbuf_t nbuf) 1834 { 1835 struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay; 1836 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); 1837 1838 dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack); 1839 } 1840 #endif /* QCA_PEER_EXT_STATS */ 1841 1842 /** 1843 * dp_rx_compute_delay() - Compute and fill in all timestamps 1844 * to pass in correct fields 1845 * 1846 * @vdev: pdev handle 1847 * @tx_desc: tx descriptor 1848 * @tid: tid value 1849 * Return: none 1850 */ 1851 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 1852 { 1853 uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); 1854 int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get()); 1855 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); 1856 uint8_t tid = qdf_nbuf_get_tid_val(nbuf); 1857 uint32_t interframe_delay = 1858 (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp); 1859 struct cdp_tid_rx_stats *rstats = 1860 &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1861 1862 dp_update_delay_stats(NULL, rstats, to_stack, tid, 1863 CDP_DELAY_STATS_REAP_STACK, ring_id, false); 1864 /* 1865 * Update interframe delay stats calculated at deliver_data_ol point. 1866 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so 1867 * interframe delay will not be calculate correctly for 1st frame. 1868 * On the other side, this will help in avoiding extra per packet check 1869 * of vdev->prev_rx_deliver_tstamp. 1870 */ 1871 dp_update_delay_stats(NULL, rstats, interframe_delay, tid, 1872 CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false); 1873 vdev->prev_rx_deliver_tstamp = current_ts; 1874 } 1875 1876 /** 1877 * dp_rx_drop_nbuf_list() - drop an nbuf list 1878 * @pdev: dp pdev reference 1879 * @buf_list: buffer list to be dropepd 1880 * 1881 * Return: int (number of bufs dropped) 1882 */ 1883 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev, 1884 qdf_nbuf_t buf_list) 1885 { 1886 struct cdp_tid_rx_stats *stats = NULL; 1887 uint8_t tid = 0, ring_id = 0; 1888 int num_dropped = 0; 1889 qdf_nbuf_t buf, next_buf; 1890 1891 buf = buf_list; 1892 while (buf) { 1893 ring_id = QDF_NBUF_CB_RX_CTX_ID(buf); 1894 next_buf = qdf_nbuf_queue_next(buf); 1895 tid = qdf_nbuf_get_tid_val(buf); 1896 if (qdf_likely(pdev)) { 1897 stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1898 stats->fail_cnt[INVALID_PEER_VDEV]++; 1899 stats->delivered_to_stack--; 1900 } 1901 dp_rx_nbuf_free(buf); 1902 buf = next_buf; 1903 num_dropped++; 1904 } 1905 1906 return num_dropped; 1907 } 1908 1909 #ifdef QCA_SUPPORT_WDS_EXTENDED 1910 /** 1911 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta 1912 * @soc: core txrx main context 1913 * @vdev: vdev 1914 * @txrx_peer: txrx peer 1915 * @nbuf_head: skb list head 1916 * 1917 * Return: true if packet is delivered to netdev per STA. 1918 */ 1919 static inline bool 1920 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 1921 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 1922 { 1923 /* 1924 * When extended WDS is disabled, frames are sent to AP netdevice. 1925 */ 1926 if (qdf_likely(!vdev->wds_ext_enabled)) 1927 return false; 1928 1929 /* 1930 * There can be 2 cases: 1931 * 1. Send frame to parent netdev if its not for netdev per STA 1932 * 2. If frame is meant for netdev per STA: 1933 * a. Send frame to appropriate netdev using registered fp. 1934 * b. If fp is NULL, drop the frames. 1935 */ 1936 if (!txrx_peer->wds_ext.init) 1937 return false; 1938 1939 if (txrx_peer->osif_rx) 1940 txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head); 1941 else 1942 dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 1943 1944 return true; 1945 } 1946 1947 #else 1948 static inline bool 1949 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 1950 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 1951 { 1952 return false; 1953 } 1954 #endif 1955 1956 #ifdef PEER_CACHE_RX_PKTS 1957 /** 1958 * dp_rx_flush_rx_cached() - flush cached rx frames 1959 * @peer: peer 1960 * @drop: flag to drop frames or forward to net stack 1961 * 1962 * Return: None 1963 */ 1964 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) 1965 { 1966 struct dp_peer_cached_bufq *bufqi; 1967 struct dp_rx_cached_buf *cache_buf = NULL; 1968 ol_txrx_rx_fp data_rx = NULL; 1969 int num_buff_elem; 1970 QDF_STATUS status; 1971 1972 /* 1973 * Flush dp cached frames only for mld peers and legacy peers, as 1974 * link peers don't store cached frames 1975 */ 1976 if (IS_MLO_DP_LINK_PEER(peer)) 1977 return; 1978 1979 if (!peer->txrx_peer) { 1980 dp_err("txrx_peer NULL!! peer mac_addr("QDF_MAC_ADDR_FMT")", 1981 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 1982 return; 1983 } 1984 1985 if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) { 1986 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 1987 return; 1988 } 1989 1990 qdf_spin_lock_bh(&peer->peer_info_lock); 1991 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) 1992 data_rx = peer->vdev->osif_rx; 1993 else 1994 drop = true; 1995 qdf_spin_unlock_bh(&peer->peer_info_lock); 1996 1997 bufqi = &peer->txrx_peer->bufq_info; 1998 1999 qdf_spin_lock_bh(&bufqi->bufq_lock); 2000 qdf_list_remove_front(&bufqi->cached_bufq, 2001 (qdf_list_node_t **)&cache_buf); 2002 while (cache_buf) { 2003 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( 2004 cache_buf->buf); 2005 bufqi->entries -= num_buff_elem; 2006 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2007 if (drop) { 2008 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, 2009 cache_buf->buf); 2010 } else { 2011 /* Flush the cached frames to OSIF DEV */ 2012 status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); 2013 if (status != QDF_STATUS_SUCCESS) 2014 bufqi->dropped = dp_rx_drop_nbuf_list( 2015 peer->vdev->pdev, 2016 cache_buf->buf); 2017 } 2018 qdf_mem_free(cache_buf); 2019 cache_buf = NULL; 2020 qdf_spin_lock_bh(&bufqi->bufq_lock); 2021 qdf_list_remove_front(&bufqi->cached_bufq, 2022 (qdf_list_node_t **)&cache_buf); 2023 } 2024 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2025 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 2026 } 2027 2028 /** 2029 * dp_rx_enqueue_rx() - cache rx frames 2030 * @peer: peer 2031 * @txrx_peer: DP txrx_peer 2032 * @rx_buf_list: cache buffer list 2033 * 2034 * Return: None 2035 */ 2036 static QDF_STATUS 2037 dp_rx_enqueue_rx(struct dp_peer *peer, 2038 struct dp_txrx_peer *txrx_peer, 2039 qdf_nbuf_t rx_buf_list) 2040 { 2041 struct dp_rx_cached_buf *cache_buf; 2042 struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info; 2043 int num_buff_elem; 2044 QDF_STATUS ret = QDF_STATUS_SUCCESS; 2045 struct dp_soc *soc = txrx_peer->vdev->pdev->soc; 2046 struct dp_peer *ta_peer = NULL; 2047 2048 /* 2049 * If peer id is invalid which likely peer map has not completed, 2050 * then need caller provide dp_peer pointer, else it's ok to use 2051 * txrx_peer->peer_id to get dp_peer. 2052 */ 2053 if (peer) { 2054 if (QDF_STATUS_SUCCESS == 2055 dp_peer_get_ref(soc, peer, DP_MOD_ID_RX)) 2056 ta_peer = peer; 2057 } else { 2058 ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2059 DP_MOD_ID_RX); 2060 } 2061 2062 if (!ta_peer) { 2063 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2064 rx_buf_list); 2065 return QDF_STATUS_E_INVAL; 2066 } 2067 2068 dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, 2069 bufqi->dropped); 2070 if (!ta_peer->valid) { 2071 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2072 rx_buf_list); 2073 ret = QDF_STATUS_E_INVAL; 2074 goto fail; 2075 } 2076 2077 qdf_spin_lock_bh(&bufqi->bufq_lock); 2078 if (bufqi->entries >= bufqi->thresh) { 2079 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2080 rx_buf_list); 2081 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2082 ret = QDF_STATUS_E_RESOURCES; 2083 goto fail; 2084 } 2085 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2086 2087 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); 2088 2089 cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); 2090 if (!cache_buf) { 2091 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2092 "Failed to allocate buf to cache rx frames"); 2093 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2094 rx_buf_list); 2095 ret = QDF_STATUS_E_NOMEM; 2096 goto fail; 2097 } 2098 2099 cache_buf->buf = rx_buf_list; 2100 2101 qdf_spin_lock_bh(&bufqi->bufq_lock); 2102 qdf_list_insert_back(&bufqi->cached_bufq, 2103 &cache_buf->node); 2104 bufqi->entries += num_buff_elem; 2105 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2106 2107 fail: 2108 dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX); 2109 return ret; 2110 } 2111 2112 static inline 2113 bool dp_rx_is_peer_cache_bufq_supported(void) 2114 { 2115 return true; 2116 } 2117 #else 2118 static inline 2119 bool dp_rx_is_peer_cache_bufq_supported(void) 2120 { 2121 return false; 2122 } 2123 2124 static inline QDF_STATUS 2125 dp_rx_enqueue_rx(struct dp_peer *peer, 2126 struct dp_txrx_peer *txrx_peer, 2127 qdf_nbuf_t rx_buf_list) 2128 { 2129 return QDF_STATUS_SUCCESS; 2130 } 2131 #endif 2132 2133 #ifndef DELIVERY_TO_STACK_STATUS_CHECK 2134 /** 2135 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2136 * using the appropriate call back functions. 2137 * @soc: soc 2138 * @vdev: vdev 2139 * @peer: peer 2140 * @nbuf_head: skb list head 2141 * @nbuf_tail: skb list tail 2142 * 2143 * Return: None 2144 */ 2145 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2146 struct dp_vdev *vdev, 2147 struct dp_txrx_peer *txrx_peer, 2148 qdf_nbuf_t nbuf_head) 2149 { 2150 if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev, 2151 txrx_peer, nbuf_head))) 2152 return; 2153 2154 /* Function pointer initialized only when FISA is enabled */ 2155 if (vdev->osif_fisa_rx) 2156 /* on failure send it via regular path */ 2157 vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2158 else 2159 vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2160 } 2161 2162 #else 2163 /** 2164 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2165 * using the appropriate call back functions. 2166 * @soc: soc 2167 * @vdev: vdev 2168 * @txrx_peer: txrx peer 2169 * @nbuf_head: skb list head 2170 * @nbuf_tail: skb list tail 2171 * 2172 * Check the return status of the call back function and drop 2173 * the packets if the return status indicates a failure. 2174 * 2175 * Return: None 2176 */ 2177 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2178 struct dp_vdev *vdev, 2179 struct dp_txrx_peer *txrx_peer, 2180 qdf_nbuf_t nbuf_head) 2181 { 2182 int num_nbuf = 0; 2183 QDF_STATUS ret_val = QDF_STATUS_E_FAILURE; 2184 2185 /* Function pointer initialized only when FISA is enabled */ 2186 if (vdev->osif_fisa_rx) 2187 /* on failure send it via regular path */ 2188 ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2189 else if (vdev->osif_rx) 2190 ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2191 2192 if (!QDF_IS_STATUS_SUCCESS(ret_val)) { 2193 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 2194 DP_STATS_INC(soc, rx.err.rejected, num_nbuf); 2195 if (txrx_peer) 2196 DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num, 2197 num_nbuf); 2198 } 2199 } 2200 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ 2201 2202 /* 2203 * dp_rx_validate_rx_callbacks() - validate rx callbacks 2204 * @soc DP soc 2205 * @vdev: DP vdev handle 2206 * @txrx_peer: pointer to the txrx peer object 2207 * nbuf_head: skb list head 2208 * 2209 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 2210 * QDF_STATUS_E_FAILURE 2211 */ 2212 static inline QDF_STATUS 2213 dp_rx_validate_rx_callbacks(struct dp_soc *soc, 2214 struct dp_vdev *vdev, 2215 struct dp_txrx_peer *txrx_peer, 2216 qdf_nbuf_t nbuf_head) 2217 { 2218 int num_nbuf; 2219 2220 if (qdf_unlikely(!vdev || vdev->delete.pending)) { 2221 num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head); 2222 /* 2223 * This is a special case where vdev is invalid, 2224 * so we cannot know the pdev to which this packet 2225 * belonged. Hence we update the soc rx error stats. 2226 */ 2227 DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf); 2228 return QDF_STATUS_E_FAILURE; 2229 } 2230 2231 /* 2232 * highly unlikely to have a vdev without a registered rx 2233 * callback function. if so let us free the nbuf_list. 2234 */ 2235 if (qdf_unlikely(!vdev->osif_rx)) { 2236 if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) { 2237 dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head); 2238 } else { 2239 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, 2240 nbuf_head); 2241 DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf, 2242 vdev->pdev->enhanced_stats_en); 2243 } 2244 return QDF_STATUS_E_FAILURE; 2245 } 2246 2247 return QDF_STATUS_SUCCESS; 2248 } 2249 2250 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 2251 struct dp_vdev *vdev, 2252 struct dp_txrx_peer *txrx_peer, 2253 qdf_nbuf_t nbuf_head, 2254 qdf_nbuf_t nbuf_tail) 2255 { 2256 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2257 QDF_STATUS_SUCCESS) 2258 return QDF_STATUS_E_FAILURE; 2259 2260 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || 2261 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { 2262 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, 2263 &nbuf_tail); 2264 } 2265 2266 dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head); 2267 2268 return QDF_STATUS_SUCCESS; 2269 } 2270 2271 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 2272 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 2273 struct dp_vdev *vdev, 2274 struct dp_txrx_peer *txrx_peer, 2275 qdf_nbuf_t nbuf_head, 2276 qdf_nbuf_t nbuf_tail) 2277 { 2278 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2279 QDF_STATUS_SUCCESS) 2280 return QDF_STATUS_E_FAILURE; 2281 2282 vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head); 2283 2284 return QDF_STATUS_SUCCESS; 2285 } 2286 #endif 2287 2288 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2289 #ifdef VDEV_PEER_PROTOCOL_COUNT 2290 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \ 2291 { \ 2292 qdf_nbuf_t nbuf_local; \ 2293 struct dp_txrx_peer *txrx_peer_local; \ 2294 struct dp_vdev *vdev_local = vdev_hdl; \ 2295 do { \ 2296 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ 2297 break; \ 2298 nbuf_local = nbuf; \ 2299 txrx_peer_local = txrx_peer; \ 2300 if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ 2301 break; \ 2302 else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ 2303 break; \ 2304 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ 2305 (nbuf_local), \ 2306 (txrx_peer_local), 0, 1); \ 2307 } while (0); \ 2308 } 2309 #else 2310 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) 2311 #endif 2312 2313 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER 2314 /** 2315 * dp_rx_rates_stats_update() - update rate stats 2316 * from rx msdu. 2317 * @soc: datapath soc handle 2318 * @nbuf: received msdu buffer 2319 * @rx_tlv_hdr: rx tlv header 2320 * @txrx_peer: datapath txrx_peer handle 2321 * @sgi: Short Guard Interval 2322 * @mcs: Modulation and Coding Set 2323 * @nss: Number of Spatial Streams 2324 * @bw: BandWidth 2325 * @pkt_type: Corresponds to preamble 2326 * 2327 * To be precisely record rates, following factors are considered: 2328 * Exclude specific frames, ARP, DHCP, ssdp, etc. 2329 * Make sure to affect rx throughput as least as possible. 2330 * 2331 * Return: void 2332 */ 2333 static void 2334 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2335 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2336 uint32_t sgi, uint32_t mcs, 2337 uint32_t nss, uint32_t bw, uint32_t pkt_type) 2338 { 2339 uint32_t rix; 2340 uint16_t ratecode; 2341 uint32_t avg_rx_rate; 2342 uint32_t ratekbps; 2343 enum cdp_punctured_modes punc_mode = NO_PUNCTURE; 2344 2345 if (soc->high_throughput || 2346 dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) { 2347 return; 2348 } 2349 2350 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs); 2351 2352 /* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */ 2353 if (qdf_unlikely(pkt_type == DOT11_B)) 2354 nss = 1; 2355 2356 /* here pkt_type corresponds to preamble */ 2357 ratekbps = dp_getrateindex(sgi, 2358 mcs, 2359 nss - 1, 2360 pkt_type, 2361 bw, 2362 punc_mode, 2363 &rix, 2364 &ratecode); 2365 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps); 2366 avg_rx_rate = 2367 dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate, 2368 ratekbps); 2369 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate); 2370 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss); 2371 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs); 2372 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw); 2373 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi); 2374 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type); 2375 } 2376 #else 2377 static inline void 2378 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2379 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2380 uint32_t sgi, uint32_t mcs, 2381 uint32_t nss, uint32_t bw, uint32_t pkt_type) 2382 { 2383 } 2384 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */ 2385 2386 #ifndef QCA_ENHANCED_STATS_SUPPORT 2387 /** 2388 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer 2389 * 2390 * @soc: datapath soc handle 2391 * @nbuf: received msdu buffer 2392 * @rx_tlv_hdr: rx tlv header 2393 * @txrx_peer: datapath txrx_peer handle 2394 * 2395 * Return: void 2396 */ 2397 static inline 2398 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2399 uint8_t *rx_tlv_hdr, 2400 struct dp_txrx_peer *txrx_peer) 2401 { 2402 bool is_ampdu; 2403 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; 2404 uint8_t dst_mcs_idx; 2405 2406 /* 2407 * TODO - For KIWI this field is present in ring_desc 2408 * Try to use ring desc instead of tlv. 2409 */ 2410 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr); 2411 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu); 2412 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); 2413 2414 sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr); 2415 mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 2416 tid = qdf_nbuf_get_tid_val(nbuf); 2417 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 2418 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, 2419 rx_tlv_hdr); 2420 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 2421 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 2422 /* do HW to SW pkt type conversion */ 2423 pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX : 2424 hal_2_dp_pkt_type_map[pkt_type]); 2425 2426 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1, 2427 ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); 2428 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, 2429 ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); 2430 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1); 2431 /* 2432 * only if nss > 0 and pkt_type is 11N/AC/AX, 2433 * then increase index [nss - 1] in array counter. 2434 */ 2435 if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type)) 2436 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1); 2437 2438 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1); 2439 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1, 2440 hal_rx_tlv_mic_err_get(soc->hal_soc, 2441 rx_tlv_hdr)); 2442 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1, 2443 hal_rx_tlv_decrypt_err_get(soc->hal_soc, 2444 rx_tlv_hdr)); 2445 2446 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1); 2447 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1); 2448 2449 dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs); 2450 if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx) 2451 DP_PEER_EXTD_STATS_INC(txrx_peer, 2452 rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx], 2453 1); 2454 2455 dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, 2456 sgi, mcs, nss, bw, pkt_type); 2457 } 2458 #else 2459 static inline 2460 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2461 uint8_t *rx_tlv_hdr, 2462 struct dp_txrx_peer *txrx_peer) 2463 { 2464 } 2465 #endif 2466 2467 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO) 2468 static inline void 2469 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2470 qdf_nbuf_t nbuf) 2471 { 2472 uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf); 2473 2474 if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) { 2475 dp_err_rl("Invalid lmac_id: %u vdev_id: %u", 2476 lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf)); 2477 2478 if (qdf_likely(txrx_peer)) 2479 dp_err_rl("peer_id: %u", txrx_peer->peer_id); 2480 2481 return; 2482 } 2483 2484 /* only count stats per lmac for MLO connection*/ 2485 DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1, 2486 QDF_NBUF_CB_RX_PKT_LEN(nbuf), 2487 txrx_peer->mld_peer); 2488 } 2489 #else 2490 static inline void 2491 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2492 qdf_nbuf_t nbuf) 2493 { 2494 } 2495 #endif 2496 2497 /** 2498 * dp_rx_msdu_stats_update() - update per msdu stats. 2499 * @soc: core txrx main context 2500 * @nbuf: pointer to the first msdu of an amsdu. 2501 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 2502 * @txrx_peer: pointer to the txrx peer object. 2503 * @ring_id: reo dest ring number on which pkt is reaped. 2504 * @tid_stats: per tid rx stats. 2505 * 2506 * update all the per msdu stats for that nbuf. 2507 * Return: void 2508 */ 2509 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2510 uint8_t *rx_tlv_hdr, 2511 struct dp_txrx_peer *txrx_peer, 2512 uint8_t ring_id, 2513 struct cdp_tid_rx_stats *tid_stats) 2514 { 2515 bool is_not_amsdu; 2516 struct dp_vdev *vdev = txrx_peer->vdev; 2517 bool enh_flag; 2518 qdf_ether_header_t *eh; 2519 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2520 2521 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); 2522 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 2523 qdf_nbuf_is_rx_chfrag_end(nbuf); 2524 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, 2525 msdu_len); 2526 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, 2527 is_not_amsdu); 2528 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu); 2529 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1, 2530 qdf_nbuf_is_rx_retry_flag(nbuf)); 2531 dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf); 2532 tid_stats->msdu_cnt++; 2533 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && 2534 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 2535 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2536 enh_flag = vdev->pdev->enhanced_stats_en; 2537 DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag); 2538 tid_stats->mcast_msdu_cnt++; 2539 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 2540 DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag); 2541 tid_stats->bcast_msdu_cnt++; 2542 } 2543 } 2544 2545 txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks(); 2546 2547 dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer); 2548 } 2549 2550 #ifndef WDS_VENDOR_EXTENSION 2551 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, 2552 struct dp_vdev *vdev, 2553 struct dp_txrx_peer *txrx_peer) 2554 { 2555 return 1; 2556 } 2557 #endif 2558 2559 #ifdef RX_DESC_DEBUG_CHECK 2560 /** 2561 * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr 2562 * corruption 2563 * 2564 * @ring_desc: REO ring descriptor 2565 * @rx_desc: Rx descriptor 2566 * 2567 * Return: NONE 2568 */ 2569 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, 2570 hal_ring_desc_t ring_desc, 2571 struct dp_rx_desc *rx_desc) 2572 { 2573 struct hal_buf_info hbi; 2574 2575 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2576 /* Sanity check for possible buffer paddr corruption */ 2577 if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr)) 2578 return QDF_STATUS_SUCCESS; 2579 2580 return QDF_STATUS_E_FAILURE; 2581 } 2582 2583 /** 2584 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer 2585 * out of bound access from H.W 2586 * 2587 * @soc: DP soc 2588 * @pkt_len: Packet length received from H.W 2589 * 2590 * Return: NONE 2591 */ 2592 static inline void 2593 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, 2594 uint32_t pkt_len) 2595 { 2596 struct rx_desc_pool *rx_desc_pool; 2597 2598 rx_desc_pool = &soc->rx_desc_buf[0]; 2599 qdf_assert_always(pkt_len <= rx_desc_pool->buf_size); 2600 } 2601 #else 2602 static inline void 2603 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { } 2604 #endif 2605 2606 #ifdef DP_RX_PKT_NO_PEER_DELIVER 2607 #ifdef DP_RX_UDP_OVER_PEER_ROAM 2608 /** 2609 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received 2610 * during roaming 2611 * @vdev: dp_vdev pointer 2612 * @rx_tlv_hdr: rx tlv header 2613 * @nbuf: pkt skb pointer 2614 * 2615 * This function will check if rx udp data is received from authorised 2616 * roamed peer before peer map indication is received from FW after 2617 * roaming. This is needed for VoIP scenarios in which packet loss 2618 * expected during roaming is minimal. 2619 * 2620 * Return: bool 2621 */ 2622 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2623 uint8_t *rx_tlv_hdr, 2624 qdf_nbuf_t nbuf) 2625 { 2626 char *hdr_desc; 2627 struct ieee80211_frame *wh = NULL; 2628 2629 hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc, 2630 rx_tlv_hdr); 2631 wh = (struct ieee80211_frame *)hdr_desc; 2632 2633 if (vdev->roaming_peer_status == 2634 WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED && 2635 !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2, 2636 QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) || 2637 qdf_nbuf_is_ipv6_udp_pkt(nbuf))) 2638 return true; 2639 2640 return false; 2641 } 2642 #else 2643 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2644 uint8_t *rx_tlv_hdr, 2645 qdf_nbuf_t nbuf) 2646 { 2647 return false; 2648 } 2649 #endif 2650 /** 2651 * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if 2652 * no corresbonding peer found 2653 * @soc: core txrx main context 2654 * @nbuf: pkt skb pointer 2655 * 2656 * This function will try to deliver some RX special frames to stack 2657 * even there is no peer matched found. for instance, LFR case, some 2658 * eapol data will be sent to host before peer_map done. 2659 * 2660 * Return: None 2661 */ 2662 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2663 { 2664 uint16_t peer_id; 2665 uint8_t vdev_id; 2666 struct dp_vdev *vdev = NULL; 2667 uint32_t l2_hdr_offset = 0; 2668 uint16_t msdu_len = 0; 2669 uint32_t pkt_len = 0; 2670 uint8_t *rx_tlv_hdr; 2671 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 2672 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; 2673 bool is_special_frame = false; 2674 struct dp_peer *peer = NULL; 2675 2676 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 2677 if (peer_id > soc->max_peer_id) 2678 goto deliver_fail; 2679 2680 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); 2681 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX); 2682 if (!vdev || vdev->delete.pending) 2683 goto deliver_fail; 2684 2685 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) 2686 goto deliver_fail; 2687 2688 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2689 l2_hdr_offset = 2690 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 2691 2692 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2693 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 2694 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 2695 2696 qdf_nbuf_set_pktlen(nbuf, pkt_len); 2697 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset); 2698 2699 is_special_frame = dp_rx_is_special_frame(nbuf, frame_mask); 2700 if (qdf_likely(vdev->osif_rx)) { 2701 if (is_special_frame || 2702 dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, 2703 nbuf)) { 2704 qdf_nbuf_set_exc_frame(nbuf, 1); 2705 if (QDF_STATUS_SUCCESS != 2706 vdev->osif_rx(vdev->osif_vdev, nbuf)) 2707 goto deliver_fail; 2708 2709 DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1); 2710 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2711 return; 2712 } 2713 } else if (is_special_frame) { 2714 /* 2715 * If MLO connection, txrx_peer for link peer does not exist, 2716 * try to store these RX packets to txrx_peer's bufq of MLD 2717 * peer until vdev->osif_rx is registered from CP and flush 2718 * them to stack. 2719 */ 2720 peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, 2721 DP_MOD_ID_RX); 2722 if (!peer) 2723 goto deliver_fail; 2724 2725 /* only check for MLO connection */ 2726 if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer && 2727 dp_rx_is_peer_cache_bufq_supported()) { 2728 qdf_nbuf_set_exc_frame(nbuf, 1); 2729 2730 if (QDF_STATUS_SUCCESS == 2731 dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) { 2732 DP_STATS_INC(soc, 2733 rx.err.pkt_delivered_no_peer, 2734 1); 2735 } else { 2736 DP_STATS_INC(soc, 2737 rx.err.rx_invalid_peer.num, 2738 1); 2739 } 2740 2741 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2742 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2743 return; 2744 } 2745 2746 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2747 } 2748 2749 deliver_fail: 2750 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2751 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2752 dp_rx_nbuf_free(nbuf); 2753 if (vdev) 2754 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2755 } 2756 #else 2757 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2758 { 2759 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2760 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2761 dp_rx_nbuf_free(nbuf); 2762 } 2763 #endif 2764 2765 /** 2766 * dp_rx_srng_get_num_pending() - get number of pending entries 2767 * @hal_soc: hal soc opaque pointer 2768 * @hal_ring: opaque pointer to the HAL Rx Ring 2769 * @num_entries: number of entries in the hal_ring. 2770 * @near_full: pointer to a boolean. This is set if ring is near full. 2771 * 2772 * The function returns the number of entries in a destination ring which are 2773 * yet to be reaped. The function also checks if the ring is near full. 2774 * If more than half of the ring needs to be reaped, the ring is considered 2775 * approaching full. 2776 * The function useses hal_srng_dst_num_valid_locked to get the number of valid 2777 * entries. It should not be called within a SRNG lock. HW pointer value is 2778 * synced into cached_hp. 2779 * 2780 * Return: Number of pending entries if any 2781 */ 2782 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, 2783 hal_ring_handle_t hal_ring_hdl, 2784 uint32_t num_entries, 2785 bool *near_full) 2786 { 2787 uint32_t num_pending = 0; 2788 2789 num_pending = hal_srng_dst_num_valid_locked(hal_soc, 2790 hal_ring_hdl, 2791 true); 2792 2793 if (num_entries && (num_pending >= num_entries >> 1)) 2794 *near_full = true; 2795 else 2796 *near_full = false; 2797 2798 return num_pending; 2799 } 2800 2801 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2802 2803 #ifdef WLAN_SUPPORT_RX_FISA 2804 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2805 { 2806 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; 2807 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2808 } 2809 #else 2810 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2811 { 2812 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2813 } 2814 #endif 2815 2816 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2817 2818 #ifdef DP_RX_DROP_RAW_FRM 2819 /** 2820 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop 2821 * @nbuf: pkt skb pointer 2822 * 2823 * Return: true - raw frame, dropped 2824 * false - not raw frame, do nothing 2825 */ 2826 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) 2827 { 2828 if (qdf_nbuf_is_raw_frame(nbuf)) { 2829 dp_rx_nbuf_free(nbuf); 2830 return true; 2831 } 2832 2833 return false; 2834 } 2835 #endif 2836 2837 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 2838 /** 2839 * dp_rx_ring_record_entry() - Record an entry into the rx ring history. 2840 * @soc: Datapath soc structure 2841 * @ring_num: REO ring number 2842 * @ring_desc: REO ring descriptor 2843 * 2844 * Returns: None 2845 */ 2846 void 2847 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 2848 hal_ring_desc_t ring_desc) 2849 { 2850 struct dp_buf_info_record *record; 2851 struct hal_buf_info hbi; 2852 uint32_t idx; 2853 2854 if (qdf_unlikely(!soc->rx_ring_history[ring_num])) 2855 return; 2856 2857 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2858 2859 /* buffer_addr_info is the first element of ring_desc */ 2860 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc, 2861 &hbi); 2862 2863 idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index, 2864 DP_RX_HIST_MAX); 2865 2866 /* No NULL check needed for record since its an array */ 2867 record = &soc->rx_ring_history[ring_num]->entry[idx]; 2868 2869 record->timestamp = qdf_get_log_timestamp(); 2870 record->hbi.paddr = hbi.paddr; 2871 record->hbi.sw_cookie = hbi.sw_cookie; 2872 record->hbi.rbm = hbi.rbm; 2873 } 2874 #endif 2875 2876 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 2877 /** 2878 * dp_rx_update_stats() - Update soc level rx packet count 2879 * @soc: DP soc handle 2880 * @nbuf: nbuf received 2881 * 2882 * Returns: none 2883 */ 2884 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) 2885 { 2886 DP_STATS_INC_PKT(soc, rx.ingress, 1, 2887 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2888 } 2889 #endif 2890 2891 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 2892 /** 2893 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture 2894 * @soc : dp_soc handle 2895 * @pdev: dp_pdev handle 2896 * @peer_id: peer_id of the peer for which completion came 2897 * @ppdu_id: ppdu_id 2898 * @netbuf: Buffer pointer 2899 * 2900 * This function is used to deliver rx packet to packet capture 2901 */ 2902 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2903 uint16_t peer_id, uint32_t is_offload, 2904 qdf_nbuf_t netbuf) 2905 { 2906 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2907 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf, 2908 peer_id, is_offload, pdev->pdev_id); 2909 } 2910 2911 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2912 uint32_t is_offload) 2913 { 2914 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2915 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER, 2916 soc, nbuf, HTT_INVALID_VDEV, 2917 is_offload, 0); 2918 } 2919 #endif 2920 2921 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2922 2923 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) 2924 { 2925 QDF_STATUS ret; 2926 2927 if (vdev->osif_rx_flush) { 2928 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); 2929 if (!QDF_IS_STATUS_SUCCESS(ret)) { 2930 dp_err("Failed to flush rx pkts for vdev %d\n", 2931 vdev->vdev_id); 2932 return ret; 2933 } 2934 } 2935 2936 return QDF_STATUS_SUCCESS; 2937 } 2938 2939 static QDF_STATUS 2940 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, 2941 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 2942 struct dp_pdev *dp_pdev, 2943 struct rx_desc_pool *rx_desc_pool) 2944 { 2945 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 2946 2947 (nbuf_frag_info_t->virt_addr).nbuf = 2948 qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, 2949 RX_BUFFER_RESERVATION, 2950 rx_desc_pool->buf_alignment, FALSE); 2951 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 2952 dp_err("nbuf alloc failed"); 2953 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 2954 return ret; 2955 } 2956 2957 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, 2958 (nbuf_frag_info_t->virt_addr).nbuf, 2959 QDF_DMA_FROM_DEVICE, 2960 rx_desc_pool->buf_size); 2961 2962 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 2963 qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf); 2964 dp_err("nbuf map failed"); 2965 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 2966 return ret; 2967 } 2968 2969 nbuf_frag_info_t->paddr = 2970 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 2971 2972 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 2973 &nbuf_frag_info_t->paddr, 2974 rx_desc_pool); 2975 if (ret == QDF_STATUS_E_FAILURE) { 2976 dp_err("nbuf check x86 failed"); 2977 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 2978 return ret; 2979 } 2980 2981 return QDF_STATUS_SUCCESS; 2982 } 2983 2984 QDF_STATUS 2985 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 2986 struct dp_srng *dp_rxdma_srng, 2987 struct rx_desc_pool *rx_desc_pool, 2988 uint32_t num_req_buffers) 2989 { 2990 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 2991 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; 2992 union dp_rx_desc_list_elem_t *next; 2993 void *rxdma_ring_entry; 2994 qdf_dma_addr_t paddr; 2995 struct dp_rx_nbuf_frag_info *nf_info; 2996 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; 2997 uint32_t buffer_index, nbuf_ptrs_per_page; 2998 qdf_nbuf_t nbuf; 2999 QDF_STATUS ret; 3000 int page_idx, total_pages; 3001 union dp_rx_desc_list_elem_t *desc_list = NULL; 3002 union dp_rx_desc_list_elem_t *tail = NULL; 3003 int sync_hw_ptr = 1; 3004 uint32_t num_entries_avail; 3005 3006 if (qdf_unlikely(!dp_pdev)) { 3007 dp_rx_err("%pK: pdev is null for mac_id = %d", 3008 dp_soc, mac_id); 3009 return QDF_STATUS_E_FAILURE; 3010 } 3011 3012 if (qdf_unlikely(!rxdma_srng)) { 3013 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3014 return QDF_STATUS_E_FAILURE; 3015 } 3016 3017 dp_debug("requested %u RX buffers for driver attach", num_req_buffers); 3018 3019 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3020 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 3021 rxdma_srng, 3022 sync_hw_ptr); 3023 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3024 3025 if (!num_entries_avail) { 3026 dp_err("Num of available entries is zero, nothing to do"); 3027 return QDF_STATUS_E_NOMEM; 3028 } 3029 3030 if (num_entries_avail < num_req_buffers) 3031 num_req_buffers = num_entries_avail; 3032 3033 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, 3034 num_req_buffers, &desc_list, &tail); 3035 if (!nr_descs) { 3036 dp_err("no free rx_descs in freelist"); 3037 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 3038 return QDF_STATUS_E_NOMEM; 3039 } 3040 3041 dp_debug("got %u RX descs for driver attach", nr_descs); 3042 3043 /* 3044 * Try to allocate pointers to the nbuf one page at a time. 3045 * Take pointers that can fit in one page of memory and 3046 * iterate through the total descriptors that need to be 3047 * allocated in order of pages. Reuse the pointers that 3048 * have been allocated to fit in one page across each 3049 * iteration to index into the nbuf. 3050 */ 3051 total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE; 3052 3053 /* 3054 * Add an extra page to store the remainder if any 3055 */ 3056 if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE) 3057 total_pages++; 3058 nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE); 3059 if (!nf_info) { 3060 dp_err("failed to allocate nbuf array"); 3061 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3062 QDF_BUG(0); 3063 return QDF_STATUS_E_NOMEM; 3064 } 3065 nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info); 3066 3067 for (page_idx = 0; page_idx < total_pages; page_idx++) { 3068 qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE); 3069 3070 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { 3071 /* 3072 * The last page of buffer pointers may not be required 3073 * completely based on the number of descriptors. Below 3074 * check will ensure we are allocating only the 3075 * required number of descriptors. 3076 */ 3077 if (nr_nbuf_total >= nr_descs) 3078 break; 3079 /* Flag is set while pdev rx_desc_pool initialization */ 3080 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3081 ret = dp_pdev_frag_alloc_and_map(dp_soc, 3082 &nf_info[nr_nbuf], dp_pdev, 3083 rx_desc_pool); 3084 else 3085 ret = dp_pdev_nbuf_alloc_and_map(dp_soc, 3086 &nf_info[nr_nbuf], dp_pdev, 3087 rx_desc_pool); 3088 if (QDF_IS_STATUS_ERROR(ret)) 3089 break; 3090 3091 nr_nbuf_total++; 3092 } 3093 3094 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3095 3096 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { 3097 rxdma_ring_entry = 3098 hal_srng_src_get_next(dp_soc->hal_soc, 3099 rxdma_srng); 3100 qdf_assert_always(rxdma_ring_entry); 3101 3102 next = desc_list->next; 3103 paddr = nf_info[buffer_index].paddr; 3104 nbuf = nf_info[buffer_index].virt_addr.nbuf; 3105 3106 /* Flag is set while pdev rx_desc_pool initialization */ 3107 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3108 dp_rx_desc_frag_prep(&desc_list->rx_desc, 3109 &nf_info[buffer_index]); 3110 else 3111 dp_rx_desc_prep(&desc_list->rx_desc, 3112 &nf_info[buffer_index]); 3113 desc_list->rx_desc.in_use = 1; 3114 dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); 3115 dp_rx_desc_update_dbg_info(&desc_list->rx_desc, 3116 __func__, 3117 RX_DESC_REPLENISHED); 3118 3119 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc ,rxdma_ring_entry, paddr, 3120 desc_list->rx_desc.cookie, 3121 rx_desc_pool->owner); 3122 3123 dp_ipa_handle_rx_buf_smmu_mapping( 3124 dp_soc, nbuf, 3125 rx_desc_pool->buf_size, true, 3126 __func__, __LINE__); 3127 3128 desc_list = next; 3129 } 3130 3131 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, 3132 rxdma_srng, nr_nbuf, nr_nbuf); 3133 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3134 } 3135 3136 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); 3137 qdf_mem_free(nf_info); 3138 3139 if (!nr_nbuf_total) { 3140 dp_err("No nbuf's allocated"); 3141 QDF_BUG(0); 3142 return QDF_STATUS_E_RESOURCES; 3143 } 3144 3145 /* No need to count the number of bytes received during replenish. 3146 * Therefore set replenish.pkts.bytes as 0. 3147 */ 3148 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); 3149 3150 return QDF_STATUS_SUCCESS; 3151 } 3152 3153 qdf_export_symbol(dp_pdev_rx_buffers_attach); 3154 3155 /** 3156 * dp_rx_enable_mon_dest_frag() - Enable frag processing for 3157 * monitor destination ring via frag. 3158 * 3159 * Enable this flag only for monitor destination buffer processing 3160 * if DP_RX_MON_MEM_FRAG feature is enabled. 3161 * If flag is set then frag based function will be called for alloc, 3162 * map, prep desc and free ops for desc buffer else normal nbuf based 3163 * function will be called. 3164 * 3165 * @rx_desc_pool: Rx desc pool 3166 * @is_mon_dest_desc: Is it for monitor dest buffer 3167 * 3168 * Return: None 3169 */ 3170 #ifdef DP_RX_MON_MEM_FRAG 3171 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3172 bool is_mon_dest_desc) 3173 { 3174 rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc; 3175 if (is_mon_dest_desc) 3176 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled"); 3177 } 3178 #else 3179 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3180 bool is_mon_dest_desc) 3181 { 3182 rx_desc_pool->rx_mon_dest_frag_enable = false; 3183 if (is_mon_dest_desc) 3184 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled"); 3185 } 3186 #endif 3187 3188 qdf_export_symbol(dp_rx_enable_mon_dest_frag); 3189 3190 /* 3191 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor 3192 * pool 3193 * 3194 * @pdev: core txrx pdev context 3195 * 3196 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3197 * QDF_STATUS_E_NOMEM 3198 */ 3199 QDF_STATUS 3200 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) 3201 { 3202 struct dp_soc *soc = pdev->soc; 3203 uint32_t rxdma_entries; 3204 uint32_t rx_sw_desc_num; 3205 struct dp_srng *dp_rxdma_srng; 3206 struct rx_desc_pool *rx_desc_pool; 3207 uint32_t status = QDF_STATUS_SUCCESS; 3208 int mac_for_pdev; 3209 3210 mac_for_pdev = pdev->lmac_id; 3211 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3212 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3213 soc, mac_for_pdev); 3214 return status; 3215 } 3216 3217 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3218 rxdma_entries = dp_rxdma_srng->num_entries; 3219 3220 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3221 rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3222 3223 rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE; 3224 status = dp_rx_desc_pool_alloc(soc, 3225 rx_sw_desc_num, 3226 rx_desc_pool); 3227 if (status != QDF_STATUS_SUCCESS) 3228 return status; 3229 3230 return status; 3231 } 3232 3233 /* 3234 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool 3235 * 3236 * @pdev: core txrx pdev context 3237 */ 3238 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) 3239 { 3240 int mac_for_pdev = pdev->lmac_id; 3241 struct dp_soc *soc = pdev->soc; 3242 struct rx_desc_pool *rx_desc_pool; 3243 3244 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3245 3246 dp_rx_desc_pool_free(soc, rx_desc_pool); 3247 } 3248 3249 /* 3250 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors 3251 * 3252 * @pdev: core txrx pdev context 3253 * 3254 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3255 * QDF_STATUS_E_NOMEM 3256 */ 3257 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) 3258 { 3259 int mac_for_pdev = pdev->lmac_id; 3260 struct dp_soc *soc = pdev->soc; 3261 uint32_t rxdma_entries; 3262 uint32_t rx_sw_desc_num; 3263 struct dp_srng *dp_rxdma_srng; 3264 struct rx_desc_pool *rx_desc_pool; 3265 3266 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3267 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3268 /** 3269 * If NSS is enabled, rx_desc_pool is already filled. 3270 * Hence, just disable desc_pool frag flag. 3271 */ 3272 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3273 3274 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3275 soc, mac_for_pdev); 3276 return QDF_STATUS_SUCCESS; 3277 } 3278 3279 if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM) 3280 return QDF_STATUS_E_NOMEM; 3281 3282 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3283 rxdma_entries = dp_rxdma_srng->num_entries; 3284 3285 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 3286 3287 rx_sw_desc_num = 3288 wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3289 3290 rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc); 3291 rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; 3292 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; 3293 /* Disable monitor dest processing via frag */ 3294 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3295 3296 dp_rx_desc_pool_init(soc, mac_for_pdev, 3297 rx_sw_desc_num, rx_desc_pool); 3298 return QDF_STATUS_SUCCESS; 3299 } 3300 3301 /* 3302 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools 3303 * @pdev: core txrx pdev context 3304 * 3305 * This function resets the freelist of rx descriptors and destroys locks 3306 * associated with this list of descriptors. 3307 */ 3308 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) 3309 { 3310 int mac_for_pdev = pdev->lmac_id; 3311 struct dp_soc *soc = pdev->soc; 3312 struct rx_desc_pool *rx_desc_pool; 3313 3314 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3315 3316 dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev); 3317 } 3318 3319 /* 3320 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring 3321 * 3322 * @pdev: core txrx pdev context 3323 * 3324 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3325 * QDF_STATUS_E_NOMEM 3326 */ 3327 QDF_STATUS 3328 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) 3329 { 3330 int mac_for_pdev = pdev->lmac_id; 3331 struct dp_soc *soc = pdev->soc; 3332 struct dp_srng *dp_rxdma_srng; 3333 struct rx_desc_pool *rx_desc_pool; 3334 uint32_t rxdma_entries; 3335 3336 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3337 rxdma_entries = dp_rxdma_srng->num_entries; 3338 3339 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3340 3341 /* Initialize RX buffer pool which will be 3342 * used during low memory conditions 3343 */ 3344 dp_rx_buffer_pool_init(soc, mac_for_pdev); 3345 3346 return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev, 3347 dp_rxdma_srng, 3348 rx_desc_pool, 3349 rxdma_entries - 1); 3350 } 3351 3352 /* 3353 * dp_rx_pdev_buffers_free - Free nbufs (skbs) 3354 * 3355 * @pdev: core txrx pdev context 3356 */ 3357 void 3358 dp_rx_pdev_buffers_free(struct dp_pdev *pdev) 3359 { 3360 int mac_for_pdev = pdev->lmac_id; 3361 struct dp_soc *soc = pdev->soc; 3362 struct rx_desc_pool *rx_desc_pool; 3363 3364 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3365 3366 dp_rx_desc_nbuf_free(soc, rx_desc_pool); 3367 dp_rx_buffer_pool_deinit(soc, mac_for_pdev); 3368 } 3369 3370 #ifdef DP_RX_SPECIAL_FRAME_NEED 3371 bool dp_rx_deliver_special_frame(struct dp_soc *soc, 3372 struct dp_txrx_peer *txrx_peer, 3373 qdf_nbuf_t nbuf, uint32_t frame_mask, 3374 uint8_t *rx_tlv_hdr) 3375 { 3376 uint32_t l2_hdr_offset = 0; 3377 uint16_t msdu_len = 0; 3378 uint32_t skip_len; 3379 3380 l2_hdr_offset = 3381 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 3382 3383 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 3384 skip_len = l2_hdr_offset; 3385 } else { 3386 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 3387 skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size; 3388 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 3389 } 3390 3391 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 3392 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 3393 qdf_nbuf_pull_head(nbuf, skip_len); 3394 3395 if (txrx_peer->vdev) { 3396 dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf, 3397 QDF_TX_RX_STATUS_OK); 3398 } 3399 3400 if (dp_rx_is_special_frame(nbuf, frame_mask)) { 3401 dp_info("special frame, mpdu sn 0x%x", 3402 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr)); 3403 qdf_nbuf_set_exc_frame(nbuf, 1); 3404 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, 3405 nbuf, NULL); 3406 return true; 3407 } 3408 3409 return false; 3410 } 3411 #endif 3412 3413 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET 3414 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev, 3415 uint8_t *rx_tlv, 3416 qdf_nbuf_t nbuf) 3417 { 3418 struct dp_soc *soc; 3419 3420 if (!pdev->is_first_wakeup_packet) 3421 return; 3422 3423 soc = pdev->soc; 3424 if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) { 3425 qdf_nbuf_mark_wakeup_frame(nbuf); 3426 dp_info("First packet after WOW Wakeup rcvd"); 3427 } 3428 } 3429 #endif 3430