1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "hal_hw_headers.h" 21 #include "dp_types.h" 22 #include "dp_rx.h" 23 #include "dp_tx.h" 24 #include "dp_peer.h" 25 #include "hal_rx.h" 26 #include "hal_api.h" 27 #include "qdf_nbuf.h" 28 #ifdef MESH_MODE_SUPPORT 29 #include "if_meta_hdr.h" 30 #endif 31 #include "dp_internal.h" 32 #include "dp_ipa.h" 33 #include "dp_hist.h" 34 #include "dp_rx_buffer_pool.h" 35 #ifdef WIFI_MONITOR_SUPPORT 36 #include "dp_htt.h" 37 #include <dp_mon.h> 38 #endif 39 #ifdef FEATURE_WDS 40 #include "dp_txrx_wds.h" 41 #endif 42 #ifdef DP_RATETABLE_SUPPORT 43 #include "dp_ratetable.h" 44 #endif 45 46 #ifdef DUP_RX_DESC_WAR 47 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 48 hal_ring_handle_t hal_ring, 49 hal_ring_desc_t ring_desc, 50 struct dp_rx_desc *rx_desc) 51 { 52 void *hal_soc = soc->hal_soc; 53 54 hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc); 55 dp_rx_desc_dump(rx_desc); 56 } 57 #else 58 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 59 hal_ring_handle_t hal_ring_hdl, 60 hal_ring_desc_t ring_desc, 61 struct dp_rx_desc *rx_desc) 62 { 63 hal_soc_handle_t hal_soc = soc->hal_soc; 64 65 dp_rx_desc_dump(rx_desc); 66 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc); 67 hal_srng_dump_ring(hal_soc, hal_ring_hdl); 68 qdf_assert_always(0); 69 } 70 #endif 71 72 #ifndef QCA_HOST_MODE_WIFI_DISABLED 73 #ifdef RX_DESC_SANITY_WAR 74 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, 75 hal_ring_handle_t hal_ring_hdl, 76 hal_ring_desc_t ring_desc, 77 struct dp_rx_desc *rx_desc) 78 { 79 uint8_t return_buffer_manager; 80 81 if (qdf_unlikely(!rx_desc)) { 82 /* 83 * This is an unlikely case where the cookie obtained 84 * from the ring_desc is invalid and hence we are not 85 * able to find the corresponding rx_desc 86 */ 87 goto fail; 88 } 89 90 return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc); 91 if (qdf_unlikely(!(return_buffer_manager == 92 HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) || 93 return_buffer_manager == 94 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) { 95 goto fail; 96 } 97 98 return QDF_STATUS_SUCCESS; 99 100 fail: 101 DP_STATS_INC(soc, rx.err.invalid_cookie, 1); 102 dp_err("Ring Desc:"); 103 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, 104 ring_desc); 105 return QDF_STATUS_E_NULL_VALUE; 106 107 } 108 #endif 109 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 110 111 /** 112 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map 113 * 114 * @dp_soc: struct dp_soc * 115 * @nbuf_frag_info_t: nbuf frag info 116 * @dp_pdev: struct dp_pdev * 117 * @rx_desc_pool: Rx desc pool 118 * 119 * Return: QDF_STATUS 120 */ 121 #ifdef DP_RX_MON_MEM_FRAG 122 static inline QDF_STATUS 123 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 124 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 125 struct dp_pdev *dp_pdev, 126 struct rx_desc_pool *rx_desc_pool) 127 { 128 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 129 130 (nbuf_frag_info_t->virt_addr).vaddr = 131 qdf_frag_alloc(NULL, rx_desc_pool->buf_size); 132 133 if (!((nbuf_frag_info_t->virt_addr).vaddr)) { 134 dp_err("Frag alloc failed"); 135 DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1); 136 return QDF_STATUS_E_NOMEM; 137 } 138 139 ret = qdf_mem_map_page(dp_soc->osdev, 140 (nbuf_frag_info_t->virt_addr).vaddr, 141 QDF_DMA_FROM_DEVICE, 142 rx_desc_pool->buf_size, 143 &nbuf_frag_info_t->paddr); 144 145 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 146 qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr); 147 dp_err("Frag map failed"); 148 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 149 return QDF_STATUS_E_FAULT; 150 } 151 152 return QDF_STATUS_SUCCESS; 153 } 154 #else 155 static inline QDF_STATUS 156 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 157 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 158 struct dp_pdev *dp_pdev, 159 struct rx_desc_pool *rx_desc_pool) 160 { 161 return QDF_STATUS_SUCCESS; 162 } 163 #endif /* DP_RX_MON_MEM_FRAG */ 164 165 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 166 /** 167 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history 168 * @soc: Datapath soc structure 169 * @ring_num: Refill ring number 170 * @hal_ring_hdl: 171 * @num_req: number of buffers requested for refill 172 * @num_refill: number of buffers refilled 173 * 174 * Return: None 175 */ 176 static inline void 177 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 178 hal_ring_handle_t hal_ring_hdl, 179 uint32_t num_req, uint32_t num_refill) 180 { 181 struct dp_refill_info_record *record; 182 uint32_t idx; 183 uint32_t tp; 184 uint32_t hp; 185 186 if (qdf_unlikely(ring_num >= MAX_PDEV_CNT || 187 !soc->rx_refill_ring_history[ring_num])) 188 return; 189 190 idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index, 191 DP_RX_REFILL_HIST_MAX); 192 193 /* No NULL check needed for record since its an array */ 194 record = &soc->rx_refill_ring_history[ring_num]->entry[idx]; 195 196 hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp); 197 record->timestamp = qdf_get_log_timestamp(); 198 record->num_req = num_req; 199 record->num_refill = num_refill; 200 record->hp = hp; 201 record->tp = tp; 202 } 203 #else 204 static inline void 205 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 206 hal_ring_handle_t hal_ring_hdl, 207 uint32_t num_req, uint32_t num_refill) 208 { 209 } 210 #endif 211 212 /** 213 * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and 214 * map 215 * @dp_soc: struct dp_soc * 216 * @mac_id: Mac id 217 * @num_entries_avail: num_entries_avail 218 * @nbuf_frag_info_t: nbuf frag info 219 * @dp_pdev: struct dp_pdev * 220 * @rx_desc_pool: Rx desc pool 221 * 222 * Return: QDF_STATUS 223 */ 224 static inline QDF_STATUS 225 dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc, 226 uint32_t mac_id, 227 uint32_t num_entries_avail, 228 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 229 struct dp_pdev *dp_pdev, 230 struct rx_desc_pool *rx_desc_pool) 231 { 232 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 233 234 (nbuf_frag_info_t->virt_addr).nbuf = 235 dp_rx_buffer_pool_nbuf_alloc(dp_soc, 236 mac_id, 237 rx_desc_pool, 238 num_entries_avail); 239 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 240 dp_err("nbuf alloc failed"); 241 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 242 return QDF_STATUS_E_NOMEM; 243 } 244 245 ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool, 246 nbuf_frag_info_t); 247 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 248 dp_rx_buffer_pool_nbuf_free(dp_soc, 249 (nbuf_frag_info_t->virt_addr).nbuf, mac_id); 250 dp_err("nbuf map failed"); 251 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 252 return QDF_STATUS_E_FAULT; 253 } 254 255 nbuf_frag_info_t->paddr = 256 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 257 dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)( 258 (nbuf_frag_info_t->virt_addr).nbuf), 259 rx_desc_pool->buf_size, 260 true, __func__, __LINE__); 261 262 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 263 &nbuf_frag_info_t->paddr, 264 rx_desc_pool); 265 if (ret == QDF_STATUS_E_FAILURE) { 266 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 267 return QDF_STATUS_E_ADDRNOTAVAIL; 268 } 269 270 return QDF_STATUS_SUCCESS; 271 } 272 273 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 274 QDF_STATUS 275 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id, 276 struct dp_srng *dp_rxdma_srng, 277 struct rx_desc_pool *rx_desc_pool) 278 { 279 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 280 uint32_t count; 281 void *rxdma_ring_entry; 282 union dp_rx_desc_list_elem_t *next = NULL; 283 void *rxdma_srng; 284 qdf_nbuf_t nbuf; 285 qdf_dma_addr_t paddr; 286 uint16_t num_entries_avail = 0; 287 uint16_t num_alloc_desc = 0; 288 union dp_rx_desc_list_elem_t *desc_list = NULL; 289 union dp_rx_desc_list_elem_t *tail = NULL; 290 int sync_hw_ptr = 0; 291 292 rxdma_srng = dp_rxdma_srng->hal_srng; 293 294 if (qdf_unlikely(!dp_pdev)) { 295 dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id); 296 return QDF_STATUS_E_FAILURE; 297 } 298 299 if (qdf_unlikely(!rxdma_srng)) { 300 dp_rx_debug("%pK: rxdma srng not initialized", soc); 301 return QDF_STATUS_E_FAILURE; 302 } 303 304 hal_srng_access_start(soc->hal_soc, rxdma_srng); 305 306 num_entries_avail = hal_srng_src_num_avail(soc->hal_soc, 307 rxdma_srng, 308 sync_hw_ptr); 309 310 dp_rx_debug("%pK: no of available entries in rxdma ring: %d", 311 soc, num_entries_avail); 312 313 if (qdf_unlikely(num_entries_avail < 314 ((dp_rxdma_srng->num_entries * 3) / 4))) { 315 hal_srng_access_end(soc->hal_soc, rxdma_srng); 316 return QDF_STATUS_E_FAILURE; 317 } 318 319 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 320 num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id, 321 rx_desc_pool, 322 num_entries_avail, 323 &desc_list, 324 &tail); 325 326 if (!num_alloc_desc) { 327 dp_rx_err("%pK: no free rx_descs in freelist", soc); 328 DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail, 329 num_entries_avail); 330 hal_srng_access_end(soc->hal_soc, rxdma_srng); 331 return QDF_STATUS_E_NOMEM; 332 } 333 334 for (count = 0; count < num_alloc_desc; count++) { 335 next = desc_list->next; 336 qdf_prefetch(next); 337 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 338 if (qdf_unlikely(!nbuf)) { 339 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 340 break; 341 } 342 343 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 344 rx_desc_pool->buf_size); 345 346 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, 347 rxdma_srng); 348 qdf_assert_always(rxdma_ring_entry); 349 350 desc_list->rx_desc.nbuf = nbuf; 351 desc_list->rx_desc.rx_buf_start = nbuf->data; 352 desc_list->rx_desc.unmapped = 0; 353 354 /* rx_desc.in_use should be zero at this time*/ 355 qdf_assert_always(desc_list->rx_desc.in_use == 0); 356 357 desc_list->rx_desc.in_use = 1; 358 desc_list->rx_desc.in_err_state = 0; 359 360 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 361 paddr, 362 desc_list->rx_desc.cookie, 363 rx_desc_pool->owner); 364 365 desc_list = next; 366 } 367 qdf_dsb(); 368 hal_srng_access_end(soc->hal_soc, rxdma_srng); 369 370 /* No need to count the number of bytes received during replenish. 371 * Therefore set replenish.pkts.bytes as 0. 372 */ 373 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 374 DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count)); 375 /* 376 * add any available free desc back to the free list 377 */ 378 if (desc_list) 379 dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail, 380 mac_id, rx_desc_pool); 381 382 return QDF_STATUS_SUCCESS; 383 } 384 385 QDF_STATUS 386 __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id, 387 struct dp_srng *dp_rxdma_srng, 388 struct rx_desc_pool *rx_desc_pool, 389 uint32_t num_req_buffers, 390 union dp_rx_desc_list_elem_t **desc_list, 391 union dp_rx_desc_list_elem_t **tail) 392 { 393 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 394 uint32_t count; 395 void *rxdma_ring_entry; 396 union dp_rx_desc_list_elem_t *next; 397 void *rxdma_srng; 398 qdf_nbuf_t nbuf; 399 qdf_nbuf_t nbuf_next; 400 qdf_nbuf_t nbuf_head = NULL; 401 qdf_nbuf_t nbuf_tail = NULL; 402 qdf_dma_addr_t paddr; 403 404 rxdma_srng = dp_rxdma_srng->hal_srng; 405 406 if (qdf_unlikely(!dp_pdev)) { 407 dp_rx_err("%pK: pdev is null for mac_id = %d", 408 soc, mac_id); 409 return QDF_STATUS_E_FAILURE; 410 } 411 412 if (qdf_unlikely(!rxdma_srng)) { 413 dp_rx_debug("%pK: rxdma srng not initialized", soc); 414 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 415 return QDF_STATUS_E_FAILURE; 416 } 417 418 /* Allocate required number of nbufs */ 419 for (count = 0; count < num_req_buffers; count++) { 420 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 421 if (qdf_unlikely(!nbuf)) { 422 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 423 /* Update num_req_buffers to nbufs allocated count */ 424 num_req_buffers = count; 425 break; 426 } 427 428 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 429 rx_desc_pool->buf_size); 430 431 QDF_NBUF_CB_PADDR(nbuf) = paddr; 432 DP_RX_LIST_APPEND(nbuf_head, 433 nbuf_tail, 434 nbuf); 435 } 436 qdf_dsb(); 437 438 nbuf = nbuf_head; 439 hal_srng_access_start(soc->hal_soc, rxdma_srng); 440 441 for (count = 0; count < num_req_buffers; count++) { 442 next = (*desc_list)->next; 443 nbuf_next = nbuf->next; 444 qdf_prefetch(next); 445 446 rxdma_ring_entry = (struct dp_buffer_addr_info *) 447 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 448 449 if (!rxdma_ring_entry) 450 break; 451 452 (*desc_list)->rx_desc.nbuf = nbuf; 453 (*desc_list)->rx_desc.rx_buf_start = nbuf->data; 454 (*desc_list)->rx_desc.unmapped = 0; 455 456 /* rx_desc.in_use should be zero at this time*/ 457 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 458 459 (*desc_list)->rx_desc.in_use = 1; 460 (*desc_list)->rx_desc.in_err_state = 0; 461 462 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 463 QDF_NBUF_CB_PADDR(nbuf), 464 (*desc_list)->rx_desc.cookie, 465 rx_desc_pool->owner); 466 467 *desc_list = next; 468 nbuf = nbuf_next; 469 } 470 hal_srng_access_end(soc->hal_soc, rxdma_srng); 471 472 /* No need to count the number of bytes received during replenish. 473 * Therefore set replenish.pkts.bytes as 0. 474 */ 475 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 476 DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count)); 477 /* 478 * add any available free desc back to the free list 479 */ 480 if (*desc_list) 481 dp_rx_add_desc_list_to_free_list(soc, desc_list, tail, 482 mac_id, rx_desc_pool); 483 while (nbuf) { 484 nbuf_next = nbuf->next; 485 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf); 486 qdf_nbuf_free(nbuf); 487 nbuf = nbuf_next; 488 } 489 490 return QDF_STATUS_SUCCESS; 491 } 492 493 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc, 494 uint32_t mac_id, 495 struct dp_srng *dp_rxdma_srng, 496 struct rx_desc_pool *rx_desc_pool, 497 uint32_t num_req_buffers) 498 { 499 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 500 uint32_t count; 501 uint32_t nr_descs = 0; 502 void *rxdma_ring_entry; 503 union dp_rx_desc_list_elem_t *next; 504 void *rxdma_srng; 505 qdf_nbuf_t nbuf; 506 qdf_dma_addr_t paddr; 507 union dp_rx_desc_list_elem_t *desc_list = NULL; 508 union dp_rx_desc_list_elem_t *tail = NULL; 509 510 rxdma_srng = dp_rxdma_srng->hal_srng; 511 512 if (qdf_unlikely(!dp_pdev)) { 513 dp_rx_err("%pK: pdev is null for mac_id = %d", 514 soc, mac_id); 515 return QDF_STATUS_E_FAILURE; 516 } 517 518 if (qdf_unlikely(!rxdma_srng)) { 519 dp_rx_debug("%pK: rxdma srng not initialized", soc); 520 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 521 return QDF_STATUS_E_FAILURE; 522 } 523 524 dp_rx_debug("%pK: requested %d buffers for replenish", 525 soc, num_req_buffers); 526 527 nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool, 528 num_req_buffers, &desc_list, &tail); 529 if (!nr_descs) { 530 dp_err("no free rx_descs in freelist"); 531 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 532 return QDF_STATUS_E_NOMEM; 533 } 534 535 dp_debug("got %u RX descs for driver attach", nr_descs); 536 537 hal_srng_access_start(soc->hal_soc, rxdma_srng); 538 539 for (count = 0; count < nr_descs; count++) { 540 next = desc_list->next; 541 qdf_prefetch(next); 542 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 543 if (qdf_unlikely(!nbuf)) { 544 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 545 break; 546 } 547 548 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 549 rx_desc_pool->buf_size); 550 rxdma_ring_entry = (struct dp_buffer_addr_info *) 551 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 552 if (!rxdma_ring_entry) 553 break; 554 555 qdf_assert_always(rxdma_ring_entry); 556 557 desc_list->rx_desc.nbuf = nbuf; 558 desc_list->rx_desc.rx_buf_start = nbuf->data; 559 desc_list->rx_desc.unmapped = 0; 560 561 /* rx_desc.in_use should be zero at this time*/ 562 qdf_assert_always(desc_list->rx_desc.in_use == 0); 563 564 desc_list->rx_desc.in_use = 1; 565 desc_list->rx_desc.in_err_state = 0; 566 567 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 568 paddr, 569 desc_list->rx_desc.cookie, 570 rx_desc_pool->owner); 571 572 desc_list = next; 573 } 574 qdf_dsb(); 575 hal_srng_access_end(soc->hal_soc, rxdma_srng); 576 577 /* No need to count the number of bytes received during replenish. 578 * Therefore set replenish.pkts.bytes as 0. 579 */ 580 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 581 582 return QDF_STATUS_SUCCESS; 583 } 584 #endif 585 586 #ifdef DP_UMAC_HW_RESET_SUPPORT 587 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 588 static inline 589 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, 590 uint32_t buf_size) 591 { 592 return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size); 593 } 594 #else 595 static inline 596 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, 597 uint32_t buf_size) 598 { 599 return qdf_nbuf_get_frag_paddr(nbuf, 0); 600 } 601 #endif 602 603 /** 604 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time 605 * @soc: core txrx main context 606 * @dp_rxdma_srng: rxdma ring 607 * @rx_desc_pool: rx descriptor pool 608 * @rx_desc:rx descriptor 609 * 610 * Return: void 611 */ 612 static inline 613 void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng, 614 struct rx_desc_pool *rx_desc_pool, 615 struct dp_rx_desc *rx_desc) 616 { 617 void *rxdma_srng; 618 void *rxdma_ring_entry; 619 qdf_dma_addr_t paddr; 620 621 rxdma_srng = dp_rxdma_srng->hal_srng; 622 623 /* No one else should be accessing the srng at this point */ 624 hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng); 625 626 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 627 628 qdf_assert_always(rxdma_ring_entry); 629 rx_desc->in_err_state = 0; 630 631 paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf, 632 rx_desc_pool->buf_size); 633 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr, 634 rx_desc->cookie, rx_desc_pool->owner); 635 636 hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng); 637 } 638 639 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list) 640 { 641 int mac_id, i, j; 642 union dp_rx_desc_list_elem_t *head = NULL; 643 union dp_rx_desc_list_elem_t *tail = NULL; 644 645 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 646 struct dp_srng *dp_rxdma_srng = 647 &soc->rx_refill_buf_ring[mac_id]; 648 struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id]; 649 uint32_t rx_sw_desc_num = rx_desc_pool->pool_size; 650 /* Only fill up 1/3 of the ring size */ 651 uint32_t num_req_decs; 652 653 if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng || 654 !rx_desc_pool->array) 655 continue; 656 657 num_req_decs = dp_rxdma_srng->num_entries / 3; 658 659 for (i = 0, j = 0; i < rx_sw_desc_num; i++) { 660 struct dp_rx_desc *rx_desc = 661 (struct dp_rx_desc *)&rx_desc_pool->array[i]; 662 663 if (rx_desc->in_use) { 664 if (j < (dp_rxdma_srng->num_entries - 1)) { 665 dp_rx_desc_replenish(soc, dp_rxdma_srng, 666 rx_desc_pool, 667 rx_desc); 668 } else { 669 dp_rx_nbuf_unmap(soc, rx_desc, 0); 670 rx_desc->unmapped = 0; 671 672 rx_desc->nbuf->next = *nbuf_list; 673 *nbuf_list = rx_desc->nbuf; 674 675 dp_rx_add_to_free_desc_list(&head, 676 &tail, 677 rx_desc); 678 } 679 j++; 680 } 681 } 682 683 if (head) 684 dp_rx_add_desc_list_to_free_list(soc, &head, &tail, 685 mac_id, rx_desc_pool); 686 687 /* If num of descs in use were less, then we need to replenish 688 * the ring with some buffers 689 */ 690 head = NULL; 691 tail = NULL; 692 693 if (j < (num_req_decs - 1)) 694 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 695 rx_desc_pool, 696 ((num_req_decs - 1) - j), 697 &head, &tail, true); 698 } 699 } 700 #endif 701 702 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 703 struct dp_srng *dp_rxdma_srng, 704 struct rx_desc_pool *rx_desc_pool, 705 uint32_t num_req_buffers, 706 union dp_rx_desc_list_elem_t **desc_list, 707 union dp_rx_desc_list_elem_t **tail, 708 bool req_only, const char *func_name) 709 { 710 uint32_t num_alloc_desc; 711 uint16_t num_desc_to_free = 0; 712 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 713 uint32_t num_entries_avail; 714 uint32_t count; 715 uint32_t extra_buffers; 716 int sync_hw_ptr = 1; 717 struct dp_rx_nbuf_frag_info nbuf_frag_info = {0}; 718 void *rxdma_ring_entry; 719 union dp_rx_desc_list_elem_t *next; 720 QDF_STATUS ret; 721 void *rxdma_srng; 722 union dp_rx_desc_list_elem_t *desc_list_append = NULL; 723 union dp_rx_desc_list_elem_t *tail_append = NULL; 724 union dp_rx_desc_list_elem_t *temp_list = NULL; 725 726 rxdma_srng = dp_rxdma_srng->hal_srng; 727 728 if (qdf_unlikely(!dp_pdev)) { 729 dp_rx_err("%pK: pdev is null for mac_id = %d", 730 dp_soc, mac_id); 731 return QDF_STATUS_E_FAILURE; 732 } 733 734 if (qdf_unlikely(!rxdma_srng)) { 735 dp_rx_debug("%pK: rxdma srng not initialized", dp_soc); 736 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 737 return QDF_STATUS_E_FAILURE; 738 } 739 740 dp_verbose_debug("%pK: requested %d buffers for replenish", 741 dp_soc, num_req_buffers); 742 743 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 744 745 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 746 rxdma_srng, 747 sync_hw_ptr); 748 749 dp_verbose_debug("%pK: no of available entries in rxdma ring: %d", 750 dp_soc, num_entries_avail); 751 752 if (!req_only && !(*desc_list) && (num_entries_avail > 753 ((dp_rxdma_srng->num_entries * 3) / 4))) { 754 num_req_buffers = num_entries_avail; 755 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 756 } else if (num_entries_avail < num_req_buffers) { 757 num_desc_to_free = num_req_buffers - num_entries_avail; 758 num_req_buffers = num_entries_avail; 759 } else if ((*desc_list) && 760 dp_rxdma_srng->num_entries - num_entries_avail < 761 CRITICAL_BUFFER_THRESHOLD) { 762 /* set extra buffers to CRITICAL_BUFFER_THRESHOLD only if 763 * total buff requested after adding extra buffers is less 764 * than or equal to num entries available, else set it to max 765 * possible additional buffers available at that moment 766 */ 767 extra_buffers = 768 ((num_req_buffers + CRITICAL_BUFFER_THRESHOLD) > num_entries_avail) ? 769 (num_entries_avail - num_req_buffers) : 770 CRITICAL_BUFFER_THRESHOLD; 771 /* Append some free descriptors to tail */ 772 num_alloc_desc = 773 dp_rx_get_free_desc_list(dp_soc, mac_id, 774 rx_desc_pool, 775 extra_buffers, 776 &desc_list_append, 777 &tail_append); 778 779 if (num_alloc_desc) { 780 temp_list = *desc_list; 781 *desc_list = desc_list_append; 782 tail_append->next = temp_list; 783 num_req_buffers += num_alloc_desc; 784 785 DP_STATS_DEC(dp_pdev, 786 replenish.free_list, 787 num_alloc_desc); 788 } else 789 dp_err_rl("%pK: no free rx_descs in freelist", dp_soc); 790 } 791 792 if (qdf_unlikely(!num_req_buffers)) { 793 num_desc_to_free = num_req_buffers; 794 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 795 goto free_descs; 796 } 797 798 /* 799 * if desc_list is NULL, allocate the descs from freelist 800 */ 801 if (!(*desc_list)) { 802 num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, 803 rx_desc_pool, 804 num_req_buffers, 805 desc_list, 806 tail); 807 808 if (!num_alloc_desc) { 809 dp_rx_err("%pK: no free rx_descs in freelist", dp_soc); 810 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, 811 num_req_buffers); 812 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 813 return QDF_STATUS_E_NOMEM; 814 } 815 816 dp_verbose_debug("%pK: %d rx desc allocated", dp_soc, 817 num_alloc_desc); 818 num_req_buffers = num_alloc_desc; 819 } 820 821 822 count = 0; 823 824 while (count < num_req_buffers) { 825 /* Flag is set while pdev rx_desc_pool initialization */ 826 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 827 ret = dp_pdev_frag_alloc_and_map(dp_soc, 828 &nbuf_frag_info, 829 dp_pdev, 830 rx_desc_pool); 831 else 832 ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc, 833 mac_id, 834 num_entries_avail, &nbuf_frag_info, 835 dp_pdev, rx_desc_pool); 836 837 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 838 if (qdf_unlikely(ret == QDF_STATUS_E_FAULT)) 839 continue; 840 break; 841 } 842 843 count++; 844 845 rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, 846 rxdma_srng); 847 qdf_assert_always(rxdma_ring_entry); 848 849 next = (*desc_list)->next; 850 851 /* Flag is set while pdev rx_desc_pool initialization */ 852 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 853 dp_rx_desc_frag_prep(&((*desc_list)->rx_desc), 854 &nbuf_frag_info); 855 else 856 dp_rx_desc_prep(&((*desc_list)->rx_desc), 857 &nbuf_frag_info); 858 859 /* rx_desc.in_use should be zero at this time*/ 860 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 861 862 (*desc_list)->rx_desc.in_use = 1; 863 (*desc_list)->rx_desc.in_err_state = 0; 864 dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc, 865 func_name, RX_DESC_REPLENISHED); 866 dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d", 867 nbuf_frag_info.virt_addr.nbuf, 868 (unsigned long long)(nbuf_frag_info.paddr), 869 (*desc_list)->rx_desc.cookie); 870 871 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, 872 nbuf_frag_info.paddr, 873 (*desc_list)->rx_desc.cookie, 874 rx_desc_pool->owner); 875 876 *desc_list = next; 877 878 } 879 880 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng, 881 num_req_buffers, count); 882 883 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 884 885 dp_rx_schedule_refill_thread(dp_soc); 886 887 dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u", 888 count, num_desc_to_free); 889 890 /* No need to count the number of bytes received during replenish. 891 * Therefore set replenish.pkts.bytes as 0. 892 */ 893 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 894 DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count); 895 896 free_descs: 897 DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free); 898 /* 899 * add any available free desc back to the free list 900 */ 901 if (*desc_list) 902 dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail, 903 mac_id, rx_desc_pool); 904 905 return QDF_STATUS_SUCCESS; 906 } 907 908 qdf_export_symbol(__dp_rx_buffers_replenish); 909 910 void 911 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, 912 struct dp_txrx_peer *txrx_peer) 913 { 914 qdf_nbuf_t deliver_list_head = NULL; 915 qdf_nbuf_t deliver_list_tail = NULL; 916 qdf_nbuf_t nbuf; 917 918 nbuf = nbuf_list; 919 while (nbuf) { 920 qdf_nbuf_t next = qdf_nbuf_next(nbuf); 921 922 DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf); 923 924 DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); 925 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1, 926 qdf_nbuf_len(nbuf)); 927 /* 928 * reset the chfrag_start and chfrag_end bits in nbuf cb 929 * as this is a non-amsdu pkt and RAW mode simulation expects 930 * these bit s to be 0 for non-amsdu pkt. 931 */ 932 if (qdf_nbuf_is_rx_chfrag_start(nbuf) && 933 qdf_nbuf_is_rx_chfrag_end(nbuf)) { 934 qdf_nbuf_set_rx_chfrag_start(nbuf, 0); 935 qdf_nbuf_set_rx_chfrag_end(nbuf, 0); 936 } 937 938 nbuf = next; 939 } 940 941 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head, 942 &deliver_list_tail); 943 944 vdev->osif_rx(vdev->osif_vdev, deliver_list_head); 945 } 946 947 #ifndef QCA_HOST_MODE_WIFI_DISABLED 948 #ifndef FEATURE_WDS 949 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr, 950 struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf) 951 { 952 } 953 #endif 954 955 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES 956 /** 957 * dp_classify_critical_pkts() - API for marking critical packets 958 * @soc: dp_soc context 959 * @vdev: vdev on which packet is to be sent 960 * @nbuf: nbuf that has to be classified 961 * 962 * The function parses the packet, identifies whether its a critical frame and 963 * marks QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the nbuf. 964 * Code for marking which frames are CRITICAL is accessed via callback. 965 * EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical frames. 966 * 967 * Return: None 968 */ 969 static 970 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev, 971 qdf_nbuf_t nbuf) 972 { 973 if (vdev->tx_classify_critical_pkt_cb) 974 vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf); 975 } 976 #else 977 static inline 978 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev, 979 qdf_nbuf_t nbuf) 980 { 981 } 982 #endif 983 984 #ifdef QCA_OL_TX_MULTIQ_SUPPORT 985 static inline 986 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) 987 { 988 qdf_nbuf_set_queue_mapping(nbuf, ring_id); 989 } 990 #else 991 static inline 992 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) 993 { 994 } 995 #endif 996 997 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, 998 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 999 struct cdp_tid_rx_stats *tid_stats) 1000 { 1001 uint16_t len; 1002 qdf_nbuf_t nbuf_copy; 1003 1004 if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr, 1005 nbuf)) 1006 return true; 1007 1008 if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf)) 1009 return false; 1010 1011 /* If the source peer in the isolation list 1012 * then dont forward instead push to bridge stack 1013 */ 1014 if (dp_get_peer_isolation(ta_peer)) 1015 return false; 1016 1017 nbuf_copy = qdf_nbuf_copy(nbuf); 1018 if (!nbuf_copy) 1019 return false; 1020 1021 len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1022 1023 qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb)); 1024 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy); 1025 1026 if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer, 1027 nbuf_copy, 1028 tid_stats)) 1029 return false; 1030 1031 /* Don't send packets if tx is paused */ 1032 if (!soc->is_tx_pause && 1033 !dp_tx_send((struct cdp_soc_t *)soc, 1034 ta_peer->vdev->vdev_id, nbuf_copy)) { 1035 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 1036 len); 1037 tid_stats->intrabss_cnt++; 1038 } else { 1039 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 1040 len); 1041 tid_stats->fail_cnt[INTRABSS_DROP]++; 1042 dp_rx_nbuf_free(nbuf_copy); 1043 } 1044 return false; 1045 } 1046 1047 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, 1048 uint8_t tx_vdev_id, 1049 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1050 struct cdp_tid_rx_stats *tid_stats) 1051 { 1052 uint16_t len; 1053 1054 len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1055 1056 /* linearize the nbuf just before we send to 1057 * dp_tx_send() 1058 */ 1059 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 1060 if (qdf_nbuf_linearize(nbuf) == -ENOMEM) 1061 return false; 1062 1063 nbuf = qdf_nbuf_unshare(nbuf); 1064 if (!nbuf) { 1065 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, 1066 rx.intra_bss.fail, 1067 1, len); 1068 /* return true even though the pkt is 1069 * not forwarded. Basically skb_unshare 1070 * failed and we want to continue with 1071 * next nbuf. 1072 */ 1073 tid_stats->fail_cnt[INTRABSS_DROP]++; 1074 return false; 1075 } 1076 } 1077 1078 qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb)); 1079 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf); 1080 1081 /* Don't send packets if tx is paused */ 1082 if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc, 1083 tx_vdev_id, nbuf)) { 1084 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 1085 len); 1086 } else { 1087 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 1088 len); 1089 tid_stats->fail_cnt[INTRABSS_DROP]++; 1090 return false; 1091 } 1092 1093 return true; 1094 } 1095 1096 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1097 1098 #ifdef MESH_MODE_SUPPORT 1099 1100 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1101 uint8_t *rx_tlv_hdr, 1102 struct dp_txrx_peer *txrx_peer) 1103 { 1104 struct mesh_recv_hdr_s *rx_info = NULL; 1105 uint32_t pkt_type; 1106 uint32_t nss; 1107 uint32_t rate_mcs; 1108 uint32_t bw; 1109 uint8_t primary_chan_num; 1110 uint32_t center_chan_freq; 1111 struct dp_soc *soc = vdev->pdev->soc; 1112 struct dp_peer *peer; 1113 struct dp_peer *primary_link_peer; 1114 struct dp_soc *link_peer_soc; 1115 cdp_peer_stats_param_t buf = {0}; 1116 1117 /* fill recv mesh stats */ 1118 rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s)); 1119 1120 /* upper layers are responsible to free this memory */ 1121 1122 if (!rx_info) { 1123 dp_rx_err("%pK: Memory allocation failed for mesh rx stats", 1124 vdev->pdev->soc); 1125 DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1); 1126 return; 1127 } 1128 1129 rx_info->rs_flags = MESH_RXHDR_VER1; 1130 if (qdf_nbuf_is_rx_chfrag_start(nbuf)) 1131 rx_info->rs_flags |= MESH_RX_FIRST_MSDU; 1132 1133 if (qdf_nbuf_is_rx_chfrag_end(nbuf)) 1134 rx_info->rs_flags |= MESH_RX_LAST_MSDU; 1135 1136 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH); 1137 if (peer) { 1138 if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) { 1139 rx_info->rs_flags |= MESH_RX_DECRYPTED; 1140 rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc, 1141 rx_tlv_hdr); 1142 if (vdev->osif_get_key) 1143 vdev->osif_get_key(vdev->osif_vdev, 1144 &rx_info->rs_decryptkey[0], 1145 &peer->mac_addr.raw[0], 1146 rx_info->rs_keyix); 1147 } 1148 1149 dp_peer_unref_delete(peer, DP_MOD_ID_MESH); 1150 } 1151 1152 primary_link_peer = dp_get_primary_link_peer_by_id(soc, 1153 txrx_peer->peer_id, 1154 DP_MOD_ID_MESH); 1155 1156 if (qdf_likely(primary_link_peer)) { 1157 link_peer_soc = primary_link_peer->vdev->pdev->soc; 1158 dp_monitor_peer_get_stats_param(link_peer_soc, 1159 primary_link_peer, 1160 cdp_peer_rx_snr, &buf); 1161 rx_info->rs_snr = buf.rx_snr; 1162 dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH); 1163 } 1164 1165 rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR; 1166 1167 soc = vdev->pdev->soc; 1168 primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr); 1169 center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16; 1170 1171 if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) { 1172 rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band( 1173 soc->ctrl_psoc, 1174 vdev->pdev->pdev_id, 1175 center_chan_freq); 1176 } 1177 rx_info->rs_channel = primary_chan_num; 1178 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 1179 rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 1180 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 1181 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 1182 rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) | 1183 (bw << 24); 1184 1185 qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO); 1186 1187 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED, 1188 FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"), 1189 rx_info->rs_flags, 1190 rx_info->rs_rssi, 1191 rx_info->rs_channel, 1192 rx_info->rs_ratephy1, 1193 rx_info->rs_keyix, 1194 rx_info->rs_snr); 1195 1196 } 1197 1198 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1199 uint8_t *rx_tlv_hdr) 1200 { 1201 union dp_align_mac_addr mac_addr; 1202 struct dp_soc *soc = vdev->pdev->soc; 1203 1204 if (qdf_unlikely(vdev->mesh_rx_filter)) { 1205 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS) 1206 if (hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1207 rx_tlv_hdr)) 1208 return QDF_STATUS_SUCCESS; 1209 1210 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS) 1211 if (hal_rx_mpdu_get_to_ds(soc->hal_soc, 1212 rx_tlv_hdr)) 1213 return QDF_STATUS_SUCCESS; 1214 1215 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS) 1216 if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1217 rx_tlv_hdr) && 1218 !hal_rx_mpdu_get_to_ds(soc->hal_soc, 1219 rx_tlv_hdr)) 1220 return QDF_STATUS_SUCCESS; 1221 1222 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) { 1223 if (hal_rx_mpdu_get_addr1(soc->hal_soc, 1224 rx_tlv_hdr, 1225 &mac_addr.raw[0])) 1226 return QDF_STATUS_E_FAILURE; 1227 1228 if (!qdf_mem_cmp(&mac_addr.raw[0], 1229 &vdev->mac_addr.raw[0], 1230 QDF_MAC_ADDR_SIZE)) 1231 return QDF_STATUS_SUCCESS; 1232 } 1233 1234 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) { 1235 if (hal_rx_mpdu_get_addr2(soc->hal_soc, 1236 rx_tlv_hdr, 1237 &mac_addr.raw[0])) 1238 return QDF_STATUS_E_FAILURE; 1239 1240 if (!qdf_mem_cmp(&mac_addr.raw[0], 1241 &vdev->mac_addr.raw[0], 1242 QDF_MAC_ADDR_SIZE)) 1243 return QDF_STATUS_SUCCESS; 1244 } 1245 } 1246 1247 return QDF_STATUS_E_FAILURE; 1248 } 1249 1250 #else 1251 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1252 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer) 1253 { 1254 } 1255 1256 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1257 uint8_t *rx_tlv_hdr) 1258 { 1259 return QDF_STATUS_E_FAILURE; 1260 } 1261 1262 #endif 1263 1264 #ifdef RX_PEER_INVALID_ENH 1265 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, 1266 uint8_t mac_id) 1267 { 1268 struct dp_invalid_peer_msg msg; 1269 struct dp_vdev *vdev = NULL; 1270 struct dp_pdev *pdev = NULL; 1271 struct ieee80211_frame *wh; 1272 qdf_nbuf_t curr_nbuf, next_nbuf; 1273 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 1274 uint8_t *rx_pkt_hdr = NULL; 1275 int i = 0; 1276 1277 if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) { 1278 dp_rx_debug("%pK: Drop decapped frames", soc); 1279 goto free; 1280 } 1281 1282 /* In RAW packet, packet header will be part of data */ 1283 rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size; 1284 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1285 1286 if (!DP_FRAME_IS_DATA(wh)) { 1287 dp_rx_debug("%pK: NAWDS valid only for data frames", soc); 1288 goto free; 1289 } 1290 1291 if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { 1292 dp_rx_err("%pK: Invalid nbuf length", soc); 1293 goto free; 1294 } 1295 1296 /* In DMAC case the rx_desc_pools are common across PDEVs 1297 * so PDEV cannot be derived from the pool_id. 1298 * 1299 * link_id need to derived from the TLV tag word which is 1300 * disabled by default. For now adding a WAR to get vdev 1301 * with brute force this need to fixed with word based subscription 1302 * support is added by enabling TLV tag word 1303 */ 1304 if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) { 1305 for (i = 0; i < MAX_PDEV_CNT; i++) { 1306 pdev = soc->pdev_list[i]; 1307 1308 if (!pdev || qdf_unlikely(pdev->is_pdev_down)) 1309 continue; 1310 1311 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 1312 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1313 QDF_MAC_ADDR_SIZE) == 0) { 1314 goto out; 1315 } 1316 } 1317 } 1318 } else { 1319 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1320 1321 if (!pdev || qdf_unlikely(pdev->is_pdev_down)) { 1322 dp_rx_err("%pK: PDEV %s", 1323 soc, !pdev ? "not found" : "down"); 1324 goto free; 1325 } 1326 1327 if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) == 1328 QDF_STATUS_SUCCESS) 1329 return 0; 1330 1331 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 1332 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1333 QDF_MAC_ADDR_SIZE) == 0) { 1334 goto out; 1335 } 1336 } 1337 } 1338 1339 if (!vdev) { 1340 dp_rx_err("%pK: VDEV not found", soc); 1341 goto free; 1342 } 1343 out: 1344 msg.wh = wh; 1345 qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size); 1346 msg.nbuf = mpdu; 1347 msg.vdev_id = vdev->vdev_id; 1348 1349 /* 1350 * NOTE: Only valid for HKv1. 1351 * If smart monitor mode is enabled on RE, we are getting invalid 1352 * peer frames with RA as STA mac of RE and the TA not matching 1353 * with any NAC list or the the BSSID.Such frames need to dropped 1354 * in order to avoid HM_WDS false addition. 1355 */ 1356 if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) { 1357 if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) { 1358 dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm", 1359 soc, wh->i_addr1); 1360 goto free; 1361 } 1362 pdev->soc->cdp_soc.ol_ops->rx_invalid_peer( 1363 (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc, 1364 pdev->pdev_id, &msg); 1365 } 1366 1367 free: 1368 /* Drop and free packet */ 1369 curr_nbuf = mpdu; 1370 while (curr_nbuf) { 1371 next_nbuf = qdf_nbuf_next(curr_nbuf); 1372 dp_rx_nbuf_free(curr_nbuf); 1373 curr_nbuf = next_nbuf; 1374 } 1375 1376 return 0; 1377 } 1378 1379 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1380 qdf_nbuf_t mpdu, bool mpdu_done, 1381 uint8_t mac_id) 1382 { 1383 /* Only trigger the process when mpdu is completed */ 1384 if (mpdu_done) 1385 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1386 } 1387 #else 1388 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, 1389 uint8_t mac_id) 1390 { 1391 qdf_nbuf_t curr_nbuf, next_nbuf; 1392 struct dp_pdev *pdev; 1393 struct dp_vdev *vdev = NULL; 1394 struct ieee80211_frame *wh; 1395 struct dp_peer *peer = NULL; 1396 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 1397 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr); 1398 1399 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1400 1401 if (!DP_FRAME_IS_DATA(wh)) { 1402 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, 1403 "only for data frames"); 1404 goto free; 1405 } 1406 1407 if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { 1408 dp_rx_info_rl("%pK: Invalid nbuf length", soc); 1409 goto free; 1410 } 1411 1412 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1413 if (!pdev) { 1414 dp_rx_info_rl("%pK: PDEV not found", soc); 1415 goto free; 1416 } 1417 1418 qdf_spin_lock_bh(&pdev->vdev_list_lock); 1419 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { 1420 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1421 QDF_MAC_ADDR_SIZE) == 0) { 1422 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1423 goto out; 1424 } 1425 } 1426 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1427 1428 if (!vdev) { 1429 dp_rx_info_rl("%pK: VDEV not found", soc); 1430 goto free; 1431 } 1432 1433 out: 1434 if (vdev->opmode == wlan_op_mode_ap) { 1435 peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0, 1436 vdev->vdev_id, 1437 DP_MOD_ID_RX_ERR); 1438 /* If SA is a valid peer in vdev, 1439 * don't send disconnect 1440 */ 1441 if (peer) { 1442 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1443 DP_STATS_INC(soc, rx.err.decrypt_err_drop, 1); 1444 dp_err_rl("invalid peer frame with correct SA/RA is freed"); 1445 goto free; 1446 } 1447 } 1448 1449 if (soc->cdp_soc.ol_ops->rx_invalid_peer) 1450 soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh); 1451 free: 1452 1453 /* Drop and free packet */ 1454 curr_nbuf = mpdu; 1455 while (curr_nbuf) { 1456 next_nbuf = qdf_nbuf_next(curr_nbuf); 1457 dp_rx_nbuf_free(curr_nbuf); 1458 curr_nbuf = next_nbuf; 1459 } 1460 1461 /* Reset the head and tail pointers */ 1462 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1463 if (pdev) { 1464 pdev->invalid_peer_head_msdu = NULL; 1465 pdev->invalid_peer_tail_msdu = NULL; 1466 } 1467 1468 return 0; 1469 } 1470 1471 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1472 qdf_nbuf_t mpdu, bool mpdu_done, 1473 uint8_t mac_id) 1474 { 1475 /* Process the nbuf */ 1476 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1477 } 1478 #endif 1479 1480 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1481 1482 #ifdef RECEIVE_OFFLOAD 1483 /** 1484 * dp_rx_print_offload_info() - Print offload info from RX TLV 1485 * @soc: dp soc handle 1486 * @msdu: MSDU for which the offload info is to be printed 1487 * 1488 * Return: None 1489 */ 1490 static void dp_rx_print_offload_info(struct dp_soc *soc, 1491 qdf_nbuf_t msdu) 1492 { 1493 dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------"); 1494 dp_verbose_debug("lro_eligible 0x%x", 1495 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu)); 1496 dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu)); 1497 dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu)); 1498 dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu)); 1499 dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu)); 1500 dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu)); 1501 dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu)); 1502 dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu)); 1503 dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu)); 1504 dp_verbose_debug("---------------------------------------------------------"); 1505 } 1506 1507 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, 1508 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) 1509 { 1510 struct hal_offload_info offload_info; 1511 1512 if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) 1513 return; 1514 1515 if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info)) 1516 return; 1517 1518 *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1; 1519 1520 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible; 1521 QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack; 1522 QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) = 1523 hal_rx_tlv_get_tcp_chksum(soc->hal_soc, 1524 rx_tlv); 1525 QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num; 1526 QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num; 1527 QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win; 1528 QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto; 1529 QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto; 1530 QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset; 1531 QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id; 1532 1533 dp_rx_print_offload_info(soc, msdu); 1534 } 1535 #endif /* RECEIVE_OFFLOAD */ 1536 1537 /** 1538 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf. 1539 * 1540 * @soc: DP soc handle 1541 * @nbuf: pointer to msdu. 1542 * @mpdu_len: mpdu length 1543 * @l3_pad_len: L3 padding length by HW 1544 * 1545 * Return: returns true if nbuf is last msdu of mpdu else returns false. 1546 */ 1547 static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc, 1548 qdf_nbuf_t nbuf, 1549 uint16_t *mpdu_len, 1550 uint32_t l3_pad_len) 1551 { 1552 bool last_nbuf; 1553 uint32_t pkt_hdr_size; 1554 1555 pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len; 1556 1557 if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) { 1558 qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE); 1559 last_nbuf = false; 1560 *mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size); 1561 } else { 1562 qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size)); 1563 last_nbuf = true; 1564 *mpdu_len = 0; 1565 } 1566 1567 return last_nbuf; 1568 } 1569 1570 /** 1571 * dp_get_l3_hdr_pad_len() - get L3 header padding length. 1572 * 1573 * @soc: DP soc handle 1574 * @nbuf: pointer to msdu. 1575 * 1576 * Return: returns padding length in bytes. 1577 */ 1578 static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc, 1579 qdf_nbuf_t nbuf) 1580 { 1581 uint32_t l3_hdr_pad = 0; 1582 uint8_t *rx_tlv_hdr; 1583 struct hal_rx_msdu_metadata msdu_metadata; 1584 1585 while (nbuf) { 1586 if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) { 1587 /* scattered msdu end with continuation is 0 */ 1588 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1589 hal_rx_msdu_metadata_get(soc->hal_soc, 1590 rx_tlv_hdr, 1591 &msdu_metadata); 1592 l3_hdr_pad = msdu_metadata.l3_hdr_pad; 1593 break; 1594 } 1595 nbuf = nbuf->next; 1596 } 1597 1598 return l3_hdr_pad; 1599 } 1600 1601 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf) 1602 { 1603 qdf_nbuf_t parent, frag_list, next = NULL; 1604 uint16_t frag_list_len = 0; 1605 uint16_t mpdu_len; 1606 bool last_nbuf; 1607 uint32_t l3_hdr_pad_offset = 0; 1608 1609 /* 1610 * Use msdu len got from REO entry descriptor instead since 1611 * there is case the RX PKT TLV is corrupted while msdu_len 1612 * from REO descriptor is right for non-raw RX scatter msdu. 1613 */ 1614 mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1615 1616 /* 1617 * this is a case where the complete msdu fits in one single nbuf. 1618 * in this case HW sets both start and end bit and we only need to 1619 * reset these bits for RAW mode simulator to decap the pkt 1620 */ 1621 if (qdf_nbuf_is_rx_chfrag_start(nbuf) && 1622 qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1623 qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size); 1624 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1625 return nbuf; 1626 } 1627 1628 l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf); 1629 /* 1630 * This is a case where we have multiple msdus (A-MSDU) spread across 1631 * multiple nbufs. here we create a fraglist out of these nbufs. 1632 * 1633 * the moment we encounter a nbuf with continuation bit set we 1634 * know for sure we have an MSDU which is spread across multiple 1635 * nbufs. We loop through and reap nbufs till we reach last nbuf. 1636 */ 1637 parent = nbuf; 1638 frag_list = nbuf->next; 1639 nbuf = nbuf->next; 1640 1641 /* 1642 * set the start bit in the first nbuf we encounter with continuation 1643 * bit set. This has the proper mpdu length set as it is the first 1644 * msdu of the mpdu. this becomes the parent nbuf and the subsequent 1645 * nbufs will form the frag_list of the parent nbuf. 1646 */ 1647 qdf_nbuf_set_rx_chfrag_start(parent, 1); 1648 /* 1649 * L3 header padding is only needed for the 1st buffer 1650 * in a scattered msdu 1651 */ 1652 last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len, 1653 l3_hdr_pad_offset); 1654 1655 /* 1656 * MSDU cont bit is set but reported MPDU length can fit 1657 * in to single buffer 1658 * 1659 * Increment error stats and avoid SG list creation 1660 */ 1661 if (last_nbuf) { 1662 DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1); 1663 qdf_nbuf_pull_head(parent, 1664 soc->rx_pkt_tlv_size + l3_hdr_pad_offset); 1665 return parent; 1666 } 1667 1668 /* 1669 * this is where we set the length of the fragments which are 1670 * associated to the parent nbuf. We iterate through the frag_list 1671 * till we hit the last_nbuf of the list. 1672 */ 1673 do { 1674 last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0); 1675 qdf_nbuf_pull_head(nbuf, 1676 soc->rx_pkt_tlv_size); 1677 frag_list_len += qdf_nbuf_len(nbuf); 1678 1679 if (last_nbuf) { 1680 next = nbuf->next; 1681 nbuf->next = NULL; 1682 break; 1683 } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1684 dp_err("Invalid packet length\n"); 1685 qdf_assert_always(0); 1686 } 1687 nbuf = nbuf->next; 1688 } while (!last_nbuf); 1689 1690 qdf_nbuf_set_rx_chfrag_start(nbuf, 0); 1691 qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len); 1692 parent->next = next; 1693 1694 qdf_nbuf_pull_head(parent, 1695 soc->rx_pkt_tlv_size + l3_hdr_pad_offset); 1696 return parent; 1697 } 1698 1699 #ifdef DP_RX_SG_FRAME_SUPPORT 1700 bool dp_rx_is_sg_supported(void) 1701 { 1702 return true; 1703 } 1704 #else 1705 bool dp_rx_is_sg_supported(void) 1706 { 1707 return false; 1708 } 1709 #endif 1710 1711 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1712 1713 #ifdef QCA_PEER_EXT_STATS 1714 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats, 1715 qdf_nbuf_t nbuf) 1716 { 1717 struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay; 1718 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); 1719 1720 dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack); 1721 } 1722 #endif /* QCA_PEER_EXT_STATS */ 1723 1724 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 1725 { 1726 uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); 1727 int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get()); 1728 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); 1729 uint8_t tid = qdf_nbuf_get_tid_val(nbuf); 1730 uint32_t interframe_delay = 1731 (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp); 1732 struct cdp_tid_rx_stats *rstats = 1733 &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1734 1735 dp_update_delay_stats(NULL, rstats, to_stack, tid, 1736 CDP_DELAY_STATS_REAP_STACK, ring_id, false); 1737 /* 1738 * Update interframe delay stats calculated at deliver_data_ol point. 1739 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so 1740 * interframe delay will not be calculate correctly for 1st frame. 1741 * On the other side, this will help in avoiding extra per packet check 1742 * of vdev->prev_rx_deliver_tstamp. 1743 */ 1744 dp_update_delay_stats(NULL, rstats, interframe_delay, tid, 1745 CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false); 1746 vdev->prev_rx_deliver_tstamp = current_ts; 1747 } 1748 1749 /** 1750 * dp_rx_drop_nbuf_list() - drop an nbuf list 1751 * @pdev: dp pdev reference 1752 * @buf_list: buffer list to be dropepd 1753 * 1754 * Return: int (number of bufs dropped) 1755 */ 1756 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev, 1757 qdf_nbuf_t buf_list) 1758 { 1759 struct cdp_tid_rx_stats *stats = NULL; 1760 uint8_t tid = 0, ring_id = 0; 1761 int num_dropped = 0; 1762 qdf_nbuf_t buf, next_buf; 1763 1764 buf = buf_list; 1765 while (buf) { 1766 ring_id = QDF_NBUF_CB_RX_CTX_ID(buf); 1767 next_buf = qdf_nbuf_queue_next(buf); 1768 tid = qdf_nbuf_get_tid_val(buf); 1769 if (qdf_likely(pdev)) { 1770 stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1771 stats->fail_cnt[INVALID_PEER_VDEV]++; 1772 stats->delivered_to_stack--; 1773 } 1774 dp_rx_nbuf_free(buf); 1775 buf = next_buf; 1776 num_dropped++; 1777 } 1778 1779 return num_dropped; 1780 } 1781 1782 #ifdef QCA_SUPPORT_WDS_EXTENDED 1783 /** 1784 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta 1785 * @soc: core txrx main context 1786 * @vdev: vdev 1787 * @txrx_peer: txrx peer 1788 * @nbuf_head: skb list head 1789 * 1790 * Return: true if packet is delivered to netdev per STA. 1791 */ 1792 static inline bool 1793 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 1794 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 1795 { 1796 /* 1797 * When extended WDS is disabled, frames are sent to AP netdevice. 1798 */ 1799 if (qdf_likely(!vdev->wds_ext_enabled)) 1800 return false; 1801 1802 /* 1803 * There can be 2 cases: 1804 * 1. Send frame to parent netdev if its not for netdev per STA 1805 * 2. If frame is meant for netdev per STA: 1806 * a. Send frame to appropriate netdev using registered fp. 1807 * b. If fp is NULL, drop the frames. 1808 */ 1809 if (!txrx_peer->wds_ext.init) 1810 return false; 1811 1812 if (txrx_peer->osif_rx) 1813 txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head); 1814 else 1815 dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 1816 1817 return true; 1818 } 1819 1820 #else 1821 static inline bool 1822 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 1823 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 1824 { 1825 return false; 1826 } 1827 #endif 1828 1829 #ifdef PEER_CACHE_RX_PKTS 1830 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) 1831 { 1832 struct dp_peer_cached_bufq *bufqi; 1833 struct dp_rx_cached_buf *cache_buf = NULL; 1834 ol_txrx_rx_fp data_rx = NULL; 1835 int num_buff_elem; 1836 QDF_STATUS status; 1837 1838 /* 1839 * Flush dp cached frames only for mld peers and legacy peers, as 1840 * link peers don't store cached frames 1841 */ 1842 if (IS_MLO_DP_LINK_PEER(peer)) 1843 return; 1844 1845 if (!peer->txrx_peer) { 1846 dp_err("txrx_peer NULL!! peer mac_addr("QDF_MAC_ADDR_FMT")", 1847 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 1848 return; 1849 } 1850 1851 if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) { 1852 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 1853 return; 1854 } 1855 1856 qdf_spin_lock_bh(&peer->peer_info_lock); 1857 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) 1858 data_rx = peer->vdev->osif_rx; 1859 else 1860 drop = true; 1861 qdf_spin_unlock_bh(&peer->peer_info_lock); 1862 1863 bufqi = &peer->txrx_peer->bufq_info; 1864 1865 qdf_spin_lock_bh(&bufqi->bufq_lock); 1866 qdf_list_remove_front(&bufqi->cached_bufq, 1867 (qdf_list_node_t **)&cache_buf); 1868 while (cache_buf) { 1869 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( 1870 cache_buf->buf); 1871 bufqi->entries -= num_buff_elem; 1872 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1873 if (drop) { 1874 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, 1875 cache_buf->buf); 1876 } else { 1877 /* Flush the cached frames to OSIF DEV */ 1878 status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); 1879 if (status != QDF_STATUS_SUCCESS) 1880 bufqi->dropped = dp_rx_drop_nbuf_list( 1881 peer->vdev->pdev, 1882 cache_buf->buf); 1883 } 1884 qdf_mem_free(cache_buf); 1885 cache_buf = NULL; 1886 qdf_spin_lock_bh(&bufqi->bufq_lock); 1887 qdf_list_remove_front(&bufqi->cached_bufq, 1888 (qdf_list_node_t **)&cache_buf); 1889 } 1890 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1891 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 1892 } 1893 1894 /** 1895 * dp_rx_enqueue_rx() - cache rx frames 1896 * @peer: peer 1897 * @txrx_peer: DP txrx_peer 1898 * @rx_buf_list: cache buffer list 1899 * 1900 * Return: None 1901 */ 1902 static QDF_STATUS 1903 dp_rx_enqueue_rx(struct dp_peer *peer, 1904 struct dp_txrx_peer *txrx_peer, 1905 qdf_nbuf_t rx_buf_list) 1906 { 1907 struct dp_rx_cached_buf *cache_buf; 1908 struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info; 1909 int num_buff_elem; 1910 QDF_STATUS ret = QDF_STATUS_SUCCESS; 1911 struct dp_soc *soc = txrx_peer->vdev->pdev->soc; 1912 struct dp_peer *ta_peer = NULL; 1913 1914 /* 1915 * If peer id is invalid which likely peer map has not completed, 1916 * then need caller provide dp_peer pointer, else it's ok to use 1917 * txrx_peer->peer_id to get dp_peer. 1918 */ 1919 if (peer) { 1920 if (QDF_STATUS_SUCCESS == 1921 dp_peer_get_ref(soc, peer, DP_MOD_ID_RX)) 1922 ta_peer = peer; 1923 } else { 1924 ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 1925 DP_MOD_ID_RX); 1926 } 1927 1928 if (!ta_peer) { 1929 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 1930 rx_buf_list); 1931 return QDF_STATUS_E_INVAL; 1932 } 1933 1934 dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, 1935 bufqi->dropped); 1936 if (!ta_peer->valid) { 1937 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 1938 rx_buf_list); 1939 ret = QDF_STATUS_E_INVAL; 1940 goto fail; 1941 } 1942 1943 qdf_spin_lock_bh(&bufqi->bufq_lock); 1944 if (bufqi->entries >= bufqi->thresh) { 1945 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 1946 rx_buf_list); 1947 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1948 ret = QDF_STATUS_E_RESOURCES; 1949 goto fail; 1950 } 1951 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1952 1953 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); 1954 1955 cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); 1956 if (!cache_buf) { 1957 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1958 "Failed to allocate buf to cache rx frames"); 1959 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 1960 rx_buf_list); 1961 ret = QDF_STATUS_E_NOMEM; 1962 goto fail; 1963 } 1964 1965 cache_buf->buf = rx_buf_list; 1966 1967 qdf_spin_lock_bh(&bufqi->bufq_lock); 1968 qdf_list_insert_back(&bufqi->cached_bufq, 1969 &cache_buf->node); 1970 bufqi->entries += num_buff_elem; 1971 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1972 1973 fail: 1974 dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX); 1975 return ret; 1976 } 1977 1978 static inline 1979 bool dp_rx_is_peer_cache_bufq_supported(void) 1980 { 1981 return true; 1982 } 1983 #else 1984 static inline 1985 bool dp_rx_is_peer_cache_bufq_supported(void) 1986 { 1987 return false; 1988 } 1989 1990 static inline QDF_STATUS 1991 dp_rx_enqueue_rx(struct dp_peer *peer, 1992 struct dp_txrx_peer *txrx_peer, 1993 qdf_nbuf_t rx_buf_list) 1994 { 1995 return QDF_STATUS_SUCCESS; 1996 } 1997 #endif 1998 1999 #ifndef DELIVERY_TO_STACK_STATUS_CHECK 2000 /** 2001 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2002 * using the appropriate call back functions. 2003 * @soc: soc 2004 * @vdev: vdev 2005 * @txrx_peer: peer 2006 * @nbuf_head: skb list head 2007 * 2008 * Return: None 2009 */ 2010 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2011 struct dp_vdev *vdev, 2012 struct dp_txrx_peer *txrx_peer, 2013 qdf_nbuf_t nbuf_head) 2014 { 2015 if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev, 2016 txrx_peer, nbuf_head))) 2017 return; 2018 2019 /* Function pointer initialized only when FISA is enabled */ 2020 if (vdev->osif_fisa_rx) 2021 /* on failure send it via regular path */ 2022 vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2023 else 2024 vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2025 } 2026 2027 #else 2028 /** 2029 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2030 * using the appropriate call back functions. 2031 * @soc: soc 2032 * @vdev: vdev 2033 * @txrx_peer: txrx peer 2034 * @nbuf_head: skb list head 2035 * 2036 * Check the return status of the call back function and drop 2037 * the packets if the return status indicates a failure. 2038 * 2039 * Return: None 2040 */ 2041 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2042 struct dp_vdev *vdev, 2043 struct dp_txrx_peer *txrx_peer, 2044 qdf_nbuf_t nbuf_head) 2045 { 2046 int num_nbuf = 0; 2047 QDF_STATUS ret_val = QDF_STATUS_E_FAILURE; 2048 2049 /* Function pointer initialized only when FISA is enabled */ 2050 if (vdev->osif_fisa_rx) 2051 /* on failure send it via regular path */ 2052 ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2053 else if (vdev->osif_rx) 2054 ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2055 2056 if (!QDF_IS_STATUS_SUCCESS(ret_val)) { 2057 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 2058 DP_STATS_INC(soc, rx.err.rejected, num_nbuf); 2059 if (txrx_peer) 2060 DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num, 2061 num_nbuf); 2062 } 2063 } 2064 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ 2065 2066 /** 2067 * dp_rx_validate_rx_callbacks() - validate rx callbacks 2068 * @soc: DP soc 2069 * @vdev: DP vdev handle 2070 * @txrx_peer: pointer to the txrx peer object 2071 * @nbuf_head: skb list head 2072 * 2073 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 2074 * QDF_STATUS_E_FAILURE 2075 */ 2076 static inline QDF_STATUS 2077 dp_rx_validate_rx_callbacks(struct dp_soc *soc, 2078 struct dp_vdev *vdev, 2079 struct dp_txrx_peer *txrx_peer, 2080 qdf_nbuf_t nbuf_head) 2081 { 2082 int num_nbuf; 2083 2084 if (qdf_unlikely(!vdev || vdev->delete.pending)) { 2085 num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head); 2086 /* 2087 * This is a special case where vdev is invalid, 2088 * so we cannot know the pdev to which this packet 2089 * belonged. Hence we update the soc rx error stats. 2090 */ 2091 DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf); 2092 return QDF_STATUS_E_FAILURE; 2093 } 2094 2095 /* 2096 * highly unlikely to have a vdev without a registered rx 2097 * callback function. if so let us free the nbuf_list. 2098 */ 2099 if (qdf_unlikely(!vdev->osif_rx)) { 2100 if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) { 2101 dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head); 2102 } else { 2103 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, 2104 nbuf_head); 2105 DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf, 2106 vdev->pdev->enhanced_stats_en); 2107 } 2108 return QDF_STATUS_E_FAILURE; 2109 } 2110 2111 return QDF_STATUS_SUCCESS; 2112 } 2113 2114 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 2115 struct dp_vdev *vdev, 2116 struct dp_txrx_peer *txrx_peer, 2117 qdf_nbuf_t nbuf_head, 2118 qdf_nbuf_t nbuf_tail) 2119 { 2120 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2121 QDF_STATUS_SUCCESS) 2122 return QDF_STATUS_E_FAILURE; 2123 2124 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || 2125 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { 2126 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, 2127 &nbuf_tail); 2128 } 2129 2130 dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head); 2131 2132 return QDF_STATUS_SUCCESS; 2133 } 2134 2135 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 2136 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 2137 struct dp_vdev *vdev, 2138 struct dp_txrx_peer *txrx_peer, 2139 qdf_nbuf_t nbuf_head, 2140 qdf_nbuf_t nbuf_tail) 2141 { 2142 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2143 QDF_STATUS_SUCCESS) 2144 return QDF_STATUS_E_FAILURE; 2145 2146 vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head); 2147 2148 return QDF_STATUS_SUCCESS; 2149 } 2150 #endif 2151 2152 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2153 #ifdef VDEV_PEER_PROTOCOL_COUNT 2154 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \ 2155 { \ 2156 qdf_nbuf_t nbuf_local; \ 2157 struct dp_txrx_peer *txrx_peer_local; \ 2158 struct dp_vdev *vdev_local = vdev_hdl; \ 2159 do { \ 2160 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ 2161 break; \ 2162 nbuf_local = nbuf; \ 2163 txrx_peer_local = txrx_peer; \ 2164 if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ 2165 break; \ 2166 else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ 2167 break; \ 2168 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ 2169 (nbuf_local), \ 2170 (txrx_peer_local), 0, 1); \ 2171 } while (0); \ 2172 } 2173 #else 2174 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) 2175 #endif 2176 2177 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER 2178 /** 2179 * dp_rx_rates_stats_update() - update rate stats 2180 * from rx msdu. 2181 * @soc: datapath soc handle 2182 * @nbuf: received msdu buffer 2183 * @rx_tlv_hdr: rx tlv header 2184 * @txrx_peer: datapath txrx_peer handle 2185 * @sgi: Short Guard Interval 2186 * @mcs: Modulation and Coding Set 2187 * @nss: Number of Spatial Streams 2188 * @bw: BandWidth 2189 * @pkt_type: Corresponds to preamble 2190 * 2191 * To be precisely record rates, following factors are considered: 2192 * Exclude specific frames, ARP, DHCP, ssdp, etc. 2193 * Make sure to affect rx throughput as least as possible. 2194 * 2195 * Return: void 2196 */ 2197 static void 2198 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2199 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2200 uint32_t sgi, uint32_t mcs, 2201 uint32_t nss, uint32_t bw, uint32_t pkt_type) 2202 { 2203 uint32_t rix; 2204 uint16_t ratecode; 2205 uint32_t avg_rx_rate; 2206 uint32_t ratekbps; 2207 enum cdp_punctured_modes punc_mode = NO_PUNCTURE; 2208 2209 if (soc->high_throughput || 2210 dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) { 2211 return; 2212 } 2213 2214 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs); 2215 2216 /* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */ 2217 if (qdf_unlikely(pkt_type == DOT11_B)) 2218 nss = 1; 2219 2220 /* here pkt_type corresponds to preamble */ 2221 ratekbps = dp_getrateindex(sgi, 2222 mcs, 2223 nss - 1, 2224 pkt_type, 2225 bw, 2226 punc_mode, 2227 &rix, 2228 &ratecode); 2229 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps); 2230 avg_rx_rate = 2231 dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate, 2232 ratekbps); 2233 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate); 2234 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss); 2235 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs); 2236 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw); 2237 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi); 2238 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type); 2239 } 2240 #else 2241 static inline void 2242 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2243 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2244 uint32_t sgi, uint32_t mcs, 2245 uint32_t nss, uint32_t bw, uint32_t pkt_type) 2246 { 2247 } 2248 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */ 2249 2250 #ifndef QCA_ENHANCED_STATS_SUPPORT 2251 /** 2252 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer 2253 * 2254 * @soc: datapath soc handle 2255 * @nbuf: received msdu buffer 2256 * @rx_tlv_hdr: rx tlv header 2257 * @txrx_peer: datapath txrx_peer handle 2258 * 2259 * Return: void 2260 */ 2261 static inline 2262 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2263 uint8_t *rx_tlv_hdr, 2264 struct dp_txrx_peer *txrx_peer) 2265 { 2266 bool is_ampdu; 2267 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; 2268 uint8_t dst_mcs_idx; 2269 2270 /* 2271 * TODO - For KIWI this field is present in ring_desc 2272 * Try to use ring desc instead of tlv. 2273 */ 2274 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr); 2275 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu); 2276 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); 2277 2278 sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr); 2279 mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 2280 tid = qdf_nbuf_get_tid_val(nbuf); 2281 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 2282 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, 2283 rx_tlv_hdr); 2284 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 2285 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 2286 /* do HW to SW pkt type conversion */ 2287 pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX : 2288 hal_2_dp_pkt_type_map[pkt_type]); 2289 2290 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1, 2291 ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); 2292 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, 2293 ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); 2294 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1); 2295 /* 2296 * only if nss > 0 and pkt_type is 11N/AC/AX, 2297 * then increase index [nss - 1] in array counter. 2298 */ 2299 if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type)) 2300 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1); 2301 2302 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1); 2303 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1, 2304 hal_rx_tlv_mic_err_get(soc->hal_soc, 2305 rx_tlv_hdr)); 2306 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1, 2307 hal_rx_tlv_decrypt_err_get(soc->hal_soc, 2308 rx_tlv_hdr)); 2309 2310 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1); 2311 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1); 2312 2313 dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs); 2314 if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx) 2315 DP_PEER_EXTD_STATS_INC(txrx_peer, 2316 rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx], 2317 1); 2318 2319 dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, 2320 sgi, mcs, nss, bw, pkt_type); 2321 } 2322 #else 2323 static inline 2324 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2325 uint8_t *rx_tlv_hdr, 2326 struct dp_txrx_peer *txrx_peer) 2327 { 2328 } 2329 #endif 2330 2331 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO) 2332 static inline void 2333 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2334 qdf_nbuf_t nbuf) 2335 { 2336 uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf); 2337 2338 if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) { 2339 dp_err_rl("Invalid lmac_id: %u vdev_id: %u", 2340 lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf)); 2341 2342 if (qdf_likely(txrx_peer)) 2343 dp_err_rl("peer_id: %u", txrx_peer->peer_id); 2344 2345 return; 2346 } 2347 2348 /* only count stats per lmac for MLO connection*/ 2349 DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1, 2350 QDF_NBUF_CB_RX_PKT_LEN(nbuf), 2351 txrx_peer->mld_peer); 2352 } 2353 #else 2354 static inline void 2355 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2356 qdf_nbuf_t nbuf) 2357 { 2358 } 2359 #endif 2360 2361 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2362 uint8_t *rx_tlv_hdr, 2363 struct dp_txrx_peer *txrx_peer, 2364 uint8_t ring_id, 2365 struct cdp_tid_rx_stats *tid_stats) 2366 { 2367 bool is_not_amsdu; 2368 struct dp_vdev *vdev = txrx_peer->vdev; 2369 bool enh_flag; 2370 qdf_ether_header_t *eh; 2371 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2372 2373 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); 2374 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 2375 qdf_nbuf_is_rx_chfrag_end(nbuf); 2376 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, 2377 msdu_len); 2378 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, 2379 is_not_amsdu); 2380 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu); 2381 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1, 2382 qdf_nbuf_is_rx_retry_flag(nbuf)); 2383 dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf); 2384 tid_stats->msdu_cnt++; 2385 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && 2386 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 2387 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2388 enh_flag = vdev->pdev->enhanced_stats_en; 2389 DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag); 2390 tid_stats->mcast_msdu_cnt++; 2391 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 2392 DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag); 2393 tid_stats->bcast_msdu_cnt++; 2394 } 2395 } 2396 2397 txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks(); 2398 2399 dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer); 2400 } 2401 2402 #ifndef WDS_VENDOR_EXTENSION 2403 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, 2404 struct dp_vdev *vdev, 2405 struct dp_txrx_peer *txrx_peer) 2406 { 2407 return 1; 2408 } 2409 #endif 2410 2411 #ifdef RX_DESC_DEBUG_CHECK 2412 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, 2413 hal_ring_desc_t ring_desc, 2414 struct dp_rx_desc *rx_desc) 2415 { 2416 struct hal_buf_info hbi; 2417 2418 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2419 /* Sanity check for possible buffer paddr corruption */ 2420 if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr)) 2421 return QDF_STATUS_SUCCESS; 2422 2423 return QDF_STATUS_E_FAILURE; 2424 } 2425 2426 /** 2427 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer 2428 * out of bound access from H.W 2429 * 2430 * @soc: DP soc 2431 * @pkt_len: Packet length received from H.W 2432 * 2433 * Return: NONE 2434 */ 2435 static inline void 2436 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, 2437 uint32_t pkt_len) 2438 { 2439 struct rx_desc_pool *rx_desc_pool; 2440 2441 rx_desc_pool = &soc->rx_desc_buf[0]; 2442 qdf_assert_always(pkt_len <= rx_desc_pool->buf_size); 2443 } 2444 #else 2445 static inline void 2446 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { } 2447 #endif 2448 2449 #ifdef DP_RX_PKT_NO_PEER_DELIVER 2450 #ifdef DP_RX_UDP_OVER_PEER_ROAM 2451 /** 2452 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received 2453 * during roaming 2454 * @vdev: dp_vdev pointer 2455 * @rx_tlv_hdr: rx tlv header 2456 * @nbuf: pkt skb pointer 2457 * 2458 * This function will check if rx udp data is received from authorised 2459 * roamed peer before peer map indication is received from FW after 2460 * roaming. This is needed for VoIP scenarios in which packet loss 2461 * expected during roaming is minimal. 2462 * 2463 * Return: bool 2464 */ 2465 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2466 uint8_t *rx_tlv_hdr, 2467 qdf_nbuf_t nbuf) 2468 { 2469 char *hdr_desc; 2470 struct ieee80211_frame *wh = NULL; 2471 2472 hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc, 2473 rx_tlv_hdr); 2474 wh = (struct ieee80211_frame *)hdr_desc; 2475 2476 if (vdev->roaming_peer_status == 2477 WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED && 2478 !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2, 2479 QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) || 2480 qdf_nbuf_is_ipv6_udp_pkt(nbuf))) 2481 return true; 2482 2483 return false; 2484 } 2485 #else 2486 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2487 uint8_t *rx_tlv_hdr, 2488 qdf_nbuf_t nbuf) 2489 { 2490 return false; 2491 } 2492 #endif 2493 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2494 { 2495 uint16_t peer_id; 2496 uint8_t vdev_id; 2497 struct dp_vdev *vdev = NULL; 2498 uint32_t l2_hdr_offset = 0; 2499 uint16_t msdu_len = 0; 2500 uint32_t pkt_len = 0; 2501 uint8_t *rx_tlv_hdr; 2502 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 2503 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; 2504 bool is_special_frame = false; 2505 struct dp_peer *peer = NULL; 2506 2507 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 2508 if (peer_id > soc->max_peer_id) 2509 goto deliver_fail; 2510 2511 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); 2512 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX); 2513 if (!vdev || vdev->delete.pending) 2514 goto deliver_fail; 2515 2516 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) 2517 goto deliver_fail; 2518 2519 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2520 l2_hdr_offset = 2521 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 2522 2523 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2524 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 2525 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 2526 2527 qdf_nbuf_set_pktlen(nbuf, pkt_len); 2528 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset); 2529 2530 is_special_frame = dp_rx_is_special_frame(nbuf, frame_mask); 2531 if (qdf_likely(vdev->osif_rx)) { 2532 if (is_special_frame || 2533 dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, 2534 nbuf)) { 2535 qdf_nbuf_set_exc_frame(nbuf, 1); 2536 if (QDF_STATUS_SUCCESS != 2537 vdev->osif_rx(vdev->osif_vdev, nbuf)) 2538 goto deliver_fail; 2539 2540 DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1); 2541 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2542 return; 2543 } 2544 } else if (is_special_frame) { 2545 /* 2546 * If MLO connection, txrx_peer for link peer does not exist, 2547 * try to store these RX packets to txrx_peer's bufq of MLD 2548 * peer until vdev->osif_rx is registered from CP and flush 2549 * them to stack. 2550 */ 2551 peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, 2552 DP_MOD_ID_RX); 2553 if (!peer) 2554 goto deliver_fail; 2555 2556 /* only check for MLO connection */ 2557 if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer && 2558 dp_rx_is_peer_cache_bufq_supported()) { 2559 qdf_nbuf_set_exc_frame(nbuf, 1); 2560 2561 if (QDF_STATUS_SUCCESS == 2562 dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) { 2563 DP_STATS_INC(soc, 2564 rx.err.pkt_delivered_no_peer, 2565 1); 2566 } else { 2567 DP_STATS_INC(soc, 2568 rx.err.rx_invalid_peer.num, 2569 1); 2570 } 2571 2572 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2573 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2574 return; 2575 } 2576 2577 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2578 } 2579 2580 deliver_fail: 2581 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2582 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2583 dp_rx_nbuf_free(nbuf); 2584 if (vdev) 2585 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2586 } 2587 #else 2588 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2589 { 2590 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2591 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2592 dp_rx_nbuf_free(nbuf); 2593 } 2594 #endif 2595 2596 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, 2597 hal_ring_handle_t hal_ring_hdl, 2598 uint32_t num_entries, 2599 bool *near_full) 2600 { 2601 uint32_t num_pending = 0; 2602 2603 num_pending = hal_srng_dst_num_valid_locked(hal_soc, 2604 hal_ring_hdl, 2605 true); 2606 2607 if (num_entries && (num_pending >= num_entries >> 1)) 2608 *near_full = true; 2609 else 2610 *near_full = false; 2611 2612 return num_pending; 2613 } 2614 2615 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2616 2617 #ifdef WLAN_SUPPORT_RX_FISA 2618 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2619 { 2620 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; 2621 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2622 } 2623 #else 2624 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2625 { 2626 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2627 } 2628 #endif 2629 2630 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2631 2632 #ifdef DP_RX_DROP_RAW_FRM 2633 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) 2634 { 2635 if (qdf_nbuf_is_raw_frame(nbuf)) { 2636 dp_rx_nbuf_free(nbuf); 2637 return true; 2638 } 2639 2640 return false; 2641 } 2642 #endif 2643 2644 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 2645 void 2646 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 2647 hal_ring_desc_t ring_desc) 2648 { 2649 struct dp_buf_info_record *record; 2650 struct hal_buf_info hbi; 2651 uint32_t idx; 2652 2653 if (qdf_unlikely(!soc->rx_ring_history[ring_num])) 2654 return; 2655 2656 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2657 2658 /* buffer_addr_info is the first element of ring_desc */ 2659 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc, 2660 &hbi); 2661 2662 idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index, 2663 DP_RX_HIST_MAX); 2664 2665 /* No NULL check needed for record since its an array */ 2666 record = &soc->rx_ring_history[ring_num]->entry[idx]; 2667 2668 record->timestamp = qdf_get_log_timestamp(); 2669 record->hbi.paddr = hbi.paddr; 2670 record->hbi.sw_cookie = hbi.sw_cookie; 2671 record->hbi.rbm = hbi.rbm; 2672 } 2673 #endif 2674 2675 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 2676 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) 2677 { 2678 DP_STATS_INC_PKT(soc, rx.ingress, 1, 2679 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2680 } 2681 #endif 2682 2683 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 2684 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2685 uint16_t peer_id, uint32_t is_offload, 2686 qdf_nbuf_t netbuf) 2687 { 2688 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2689 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf, 2690 peer_id, is_offload, pdev->pdev_id); 2691 } 2692 2693 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2694 uint32_t is_offload) 2695 { 2696 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2697 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER, 2698 soc, nbuf, HTT_INVALID_VDEV, 2699 is_offload, 0); 2700 } 2701 #endif 2702 2703 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2704 2705 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) 2706 { 2707 QDF_STATUS ret; 2708 2709 if (vdev->osif_rx_flush) { 2710 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); 2711 if (!QDF_IS_STATUS_SUCCESS(ret)) { 2712 dp_err("Failed to flush rx pkts for vdev %d\n", 2713 vdev->vdev_id); 2714 return ret; 2715 } 2716 } 2717 2718 return QDF_STATUS_SUCCESS; 2719 } 2720 2721 static QDF_STATUS 2722 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, 2723 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 2724 struct dp_pdev *dp_pdev, 2725 struct rx_desc_pool *rx_desc_pool) 2726 { 2727 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 2728 2729 (nbuf_frag_info_t->virt_addr).nbuf = 2730 qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, 2731 RX_BUFFER_RESERVATION, 2732 rx_desc_pool->buf_alignment, FALSE); 2733 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 2734 dp_err("nbuf alloc failed"); 2735 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 2736 return ret; 2737 } 2738 2739 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, 2740 (nbuf_frag_info_t->virt_addr).nbuf, 2741 QDF_DMA_FROM_DEVICE, 2742 rx_desc_pool->buf_size); 2743 2744 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 2745 qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf); 2746 dp_err("nbuf map failed"); 2747 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 2748 return ret; 2749 } 2750 2751 nbuf_frag_info_t->paddr = 2752 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 2753 2754 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 2755 &nbuf_frag_info_t->paddr, 2756 rx_desc_pool); 2757 if (ret == QDF_STATUS_E_FAILURE) { 2758 dp_err("nbuf check x86 failed"); 2759 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 2760 return ret; 2761 } 2762 2763 return QDF_STATUS_SUCCESS; 2764 } 2765 2766 QDF_STATUS 2767 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 2768 struct dp_srng *dp_rxdma_srng, 2769 struct rx_desc_pool *rx_desc_pool, 2770 uint32_t num_req_buffers) 2771 { 2772 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 2773 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; 2774 union dp_rx_desc_list_elem_t *next; 2775 void *rxdma_ring_entry; 2776 qdf_dma_addr_t paddr; 2777 struct dp_rx_nbuf_frag_info *nf_info; 2778 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; 2779 uint32_t buffer_index, nbuf_ptrs_per_page; 2780 qdf_nbuf_t nbuf; 2781 QDF_STATUS ret; 2782 int page_idx, total_pages; 2783 union dp_rx_desc_list_elem_t *desc_list = NULL; 2784 union dp_rx_desc_list_elem_t *tail = NULL; 2785 int sync_hw_ptr = 1; 2786 uint32_t num_entries_avail; 2787 2788 if (qdf_unlikely(!dp_pdev)) { 2789 dp_rx_err("%pK: pdev is null for mac_id = %d", 2790 dp_soc, mac_id); 2791 return QDF_STATUS_E_FAILURE; 2792 } 2793 2794 if (qdf_unlikely(!rxdma_srng)) { 2795 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 2796 return QDF_STATUS_E_FAILURE; 2797 } 2798 2799 dp_debug("requested %u RX buffers for driver attach", num_req_buffers); 2800 2801 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 2802 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 2803 rxdma_srng, 2804 sync_hw_ptr); 2805 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 2806 2807 if (!num_entries_avail) { 2808 dp_err("Num of available entries is zero, nothing to do"); 2809 return QDF_STATUS_E_NOMEM; 2810 } 2811 2812 if (num_entries_avail < num_req_buffers) 2813 num_req_buffers = num_entries_avail; 2814 2815 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, 2816 num_req_buffers, &desc_list, &tail); 2817 if (!nr_descs) { 2818 dp_err("no free rx_descs in freelist"); 2819 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 2820 return QDF_STATUS_E_NOMEM; 2821 } 2822 2823 dp_debug("got %u RX descs for driver attach", nr_descs); 2824 2825 /* 2826 * Try to allocate pointers to the nbuf one page at a time. 2827 * Take pointers that can fit in one page of memory and 2828 * iterate through the total descriptors that need to be 2829 * allocated in order of pages. Reuse the pointers that 2830 * have been allocated to fit in one page across each 2831 * iteration to index into the nbuf. 2832 */ 2833 total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE; 2834 2835 /* 2836 * Add an extra page to store the remainder if any 2837 */ 2838 if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE) 2839 total_pages++; 2840 nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE); 2841 if (!nf_info) { 2842 dp_err("failed to allocate nbuf array"); 2843 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 2844 QDF_BUG(0); 2845 return QDF_STATUS_E_NOMEM; 2846 } 2847 nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info); 2848 2849 for (page_idx = 0; page_idx < total_pages; page_idx++) { 2850 qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE); 2851 2852 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { 2853 /* 2854 * The last page of buffer pointers may not be required 2855 * completely based on the number of descriptors. Below 2856 * check will ensure we are allocating only the 2857 * required number of descriptors. 2858 */ 2859 if (nr_nbuf_total >= nr_descs) 2860 break; 2861 /* Flag is set while pdev rx_desc_pool initialization */ 2862 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 2863 ret = dp_pdev_frag_alloc_and_map(dp_soc, 2864 &nf_info[nr_nbuf], dp_pdev, 2865 rx_desc_pool); 2866 else 2867 ret = dp_pdev_nbuf_alloc_and_map(dp_soc, 2868 &nf_info[nr_nbuf], dp_pdev, 2869 rx_desc_pool); 2870 if (QDF_IS_STATUS_ERROR(ret)) 2871 break; 2872 2873 nr_nbuf_total++; 2874 } 2875 2876 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 2877 2878 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { 2879 rxdma_ring_entry = 2880 hal_srng_src_get_next(dp_soc->hal_soc, 2881 rxdma_srng); 2882 qdf_assert_always(rxdma_ring_entry); 2883 2884 next = desc_list->next; 2885 paddr = nf_info[buffer_index].paddr; 2886 nbuf = nf_info[buffer_index].virt_addr.nbuf; 2887 2888 /* Flag is set while pdev rx_desc_pool initialization */ 2889 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 2890 dp_rx_desc_frag_prep(&desc_list->rx_desc, 2891 &nf_info[buffer_index]); 2892 else 2893 dp_rx_desc_prep(&desc_list->rx_desc, 2894 &nf_info[buffer_index]); 2895 desc_list->rx_desc.in_use = 1; 2896 dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); 2897 dp_rx_desc_update_dbg_info(&desc_list->rx_desc, 2898 __func__, 2899 RX_DESC_REPLENISHED); 2900 2901 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc ,rxdma_ring_entry, paddr, 2902 desc_list->rx_desc.cookie, 2903 rx_desc_pool->owner); 2904 2905 dp_ipa_handle_rx_buf_smmu_mapping( 2906 dp_soc, nbuf, 2907 rx_desc_pool->buf_size, true, 2908 __func__, __LINE__); 2909 2910 dp_audio_smmu_map(dp_soc->osdev, 2911 qdf_mem_paddr_from_dmaaddr(dp_soc->osdev, 2912 QDF_NBUF_CB_PADDR(nbuf)), 2913 QDF_NBUF_CB_PADDR(nbuf), 2914 rx_desc_pool->buf_size); 2915 2916 desc_list = next; 2917 } 2918 2919 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, 2920 rxdma_srng, nr_nbuf, nr_nbuf); 2921 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 2922 } 2923 2924 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); 2925 qdf_mem_free(nf_info); 2926 2927 if (!nr_nbuf_total) { 2928 dp_err("No nbuf's allocated"); 2929 QDF_BUG(0); 2930 return QDF_STATUS_E_RESOURCES; 2931 } 2932 2933 /* No need to count the number of bytes received during replenish. 2934 * Therefore set replenish.pkts.bytes as 0. 2935 */ 2936 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); 2937 2938 return QDF_STATUS_SUCCESS; 2939 } 2940 2941 qdf_export_symbol(dp_pdev_rx_buffers_attach); 2942 2943 #ifdef DP_RX_MON_MEM_FRAG 2944 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 2945 bool is_mon_dest_desc) 2946 { 2947 rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc; 2948 if (is_mon_dest_desc) 2949 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled"); 2950 } 2951 #else 2952 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 2953 bool is_mon_dest_desc) 2954 { 2955 rx_desc_pool->rx_mon_dest_frag_enable = false; 2956 if (is_mon_dest_desc) 2957 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled"); 2958 } 2959 #endif 2960 2961 qdf_export_symbol(dp_rx_enable_mon_dest_frag); 2962 2963 QDF_STATUS 2964 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) 2965 { 2966 struct dp_soc *soc = pdev->soc; 2967 uint32_t rxdma_entries; 2968 uint32_t rx_sw_desc_num; 2969 struct dp_srng *dp_rxdma_srng; 2970 struct rx_desc_pool *rx_desc_pool; 2971 uint32_t status = QDF_STATUS_SUCCESS; 2972 int mac_for_pdev; 2973 2974 mac_for_pdev = pdev->lmac_id; 2975 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 2976 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 2977 soc, mac_for_pdev); 2978 return status; 2979 } 2980 2981 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 2982 rxdma_entries = dp_rxdma_srng->num_entries; 2983 2984 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 2985 rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 2986 2987 rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE; 2988 status = dp_rx_desc_pool_alloc(soc, 2989 rx_sw_desc_num, 2990 rx_desc_pool); 2991 if (status != QDF_STATUS_SUCCESS) 2992 return status; 2993 2994 return status; 2995 } 2996 2997 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) 2998 { 2999 int mac_for_pdev = pdev->lmac_id; 3000 struct dp_soc *soc = pdev->soc; 3001 struct rx_desc_pool *rx_desc_pool; 3002 3003 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3004 3005 dp_rx_desc_pool_free(soc, rx_desc_pool); 3006 } 3007 3008 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) 3009 { 3010 int mac_for_pdev = pdev->lmac_id; 3011 struct dp_soc *soc = pdev->soc; 3012 uint32_t rxdma_entries; 3013 uint32_t rx_sw_desc_num; 3014 struct dp_srng *dp_rxdma_srng; 3015 struct rx_desc_pool *rx_desc_pool; 3016 3017 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3018 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3019 /* 3020 * If NSS is enabled, rx_desc_pool is already filled. 3021 * Hence, just disable desc_pool frag flag. 3022 */ 3023 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3024 3025 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3026 soc, mac_for_pdev); 3027 return QDF_STATUS_SUCCESS; 3028 } 3029 3030 if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM) 3031 return QDF_STATUS_E_NOMEM; 3032 3033 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3034 rxdma_entries = dp_rxdma_srng->num_entries; 3035 3036 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 3037 3038 rx_sw_desc_num = 3039 wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3040 3041 rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc); 3042 rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; 3043 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; 3044 /* Disable monitor dest processing via frag */ 3045 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3046 3047 dp_rx_desc_pool_init(soc, mac_for_pdev, 3048 rx_sw_desc_num, rx_desc_pool); 3049 return QDF_STATUS_SUCCESS; 3050 } 3051 3052 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) 3053 { 3054 int mac_for_pdev = pdev->lmac_id; 3055 struct dp_soc *soc = pdev->soc; 3056 struct rx_desc_pool *rx_desc_pool; 3057 3058 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3059 3060 dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev); 3061 } 3062 3063 QDF_STATUS 3064 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) 3065 { 3066 int mac_for_pdev = pdev->lmac_id; 3067 struct dp_soc *soc = pdev->soc; 3068 struct dp_srng *dp_rxdma_srng; 3069 struct rx_desc_pool *rx_desc_pool; 3070 uint32_t rxdma_entries; 3071 3072 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3073 rxdma_entries = dp_rxdma_srng->num_entries; 3074 3075 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3076 3077 /* Initialize RX buffer pool which will be 3078 * used during low memory conditions 3079 */ 3080 dp_rx_buffer_pool_init(soc, mac_for_pdev); 3081 3082 return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev, 3083 dp_rxdma_srng, 3084 rx_desc_pool, 3085 rxdma_entries - 1); 3086 } 3087 3088 void 3089 dp_rx_pdev_buffers_free(struct dp_pdev *pdev) 3090 { 3091 int mac_for_pdev = pdev->lmac_id; 3092 struct dp_soc *soc = pdev->soc; 3093 struct rx_desc_pool *rx_desc_pool; 3094 3095 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3096 3097 dp_rx_desc_nbuf_free(soc, rx_desc_pool, false); 3098 dp_rx_buffer_pool_deinit(soc, mac_for_pdev); 3099 } 3100 3101 #ifdef DP_RX_SPECIAL_FRAME_NEED 3102 bool dp_rx_deliver_special_frame(struct dp_soc *soc, 3103 struct dp_txrx_peer *txrx_peer, 3104 qdf_nbuf_t nbuf, uint32_t frame_mask, 3105 uint8_t *rx_tlv_hdr) 3106 { 3107 uint32_t l2_hdr_offset = 0; 3108 uint16_t msdu_len = 0; 3109 uint32_t skip_len; 3110 3111 l2_hdr_offset = 3112 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 3113 3114 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 3115 skip_len = l2_hdr_offset; 3116 } else { 3117 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 3118 skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size; 3119 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 3120 } 3121 3122 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 3123 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 3124 qdf_nbuf_pull_head(nbuf, skip_len); 3125 3126 if (txrx_peer->vdev) { 3127 dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf, 3128 QDF_TX_RX_STATUS_OK); 3129 } 3130 3131 if (dp_rx_is_special_frame(nbuf, frame_mask)) { 3132 dp_info("special frame, mpdu sn 0x%x", 3133 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr)); 3134 qdf_nbuf_set_exc_frame(nbuf, 1); 3135 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, 3136 nbuf, NULL); 3137 return true; 3138 } 3139 3140 return false; 3141 } 3142 #endif 3143 3144 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET 3145 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev, 3146 uint8_t *rx_tlv, 3147 qdf_nbuf_t nbuf) 3148 { 3149 struct dp_soc *soc; 3150 3151 if (!pdev->is_first_wakeup_packet) 3152 return; 3153 3154 soc = pdev->soc; 3155 if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) { 3156 qdf_nbuf_mark_wakeup_frame(nbuf); 3157 dp_info("First packet after WOW Wakeup rcvd"); 3158 } 3159 } 3160 #endif 3161