1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "hal_hw_headers.h" 21 #include "dp_types.h" 22 #include "dp_rx.h" 23 #include "dp_tx.h" 24 #include "dp_peer.h" 25 #include "hal_rx.h" 26 #include "hal_api.h" 27 #include "qdf_nbuf.h" 28 #ifdef MESH_MODE_SUPPORT 29 #include "if_meta_hdr.h" 30 #endif 31 #include "dp_internal.h" 32 #include "dp_ipa.h" 33 #include "dp_hist.h" 34 #include "dp_rx_buffer_pool.h" 35 #ifdef WIFI_MONITOR_SUPPORT 36 #include "dp_htt.h" 37 #include <dp_mon.h> 38 #endif 39 #ifdef FEATURE_WDS 40 #include "dp_txrx_wds.h" 41 #endif 42 #ifdef DP_RATETABLE_SUPPORT 43 #include "dp_ratetable.h" 44 #endif 45 46 #ifdef DUP_RX_DESC_WAR 47 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 48 hal_ring_handle_t hal_ring, 49 hal_ring_desc_t ring_desc, 50 struct dp_rx_desc *rx_desc) 51 { 52 void *hal_soc = soc->hal_soc; 53 54 hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc); 55 dp_rx_desc_dump(rx_desc); 56 } 57 #else 58 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 59 hal_ring_handle_t hal_ring_hdl, 60 hal_ring_desc_t ring_desc, 61 struct dp_rx_desc *rx_desc) 62 { 63 hal_soc_handle_t hal_soc = soc->hal_soc; 64 65 dp_rx_desc_dump(rx_desc); 66 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc); 67 hal_srng_dump_ring(hal_soc, hal_ring_hdl); 68 qdf_assert_always(0); 69 } 70 #endif 71 72 #ifndef QCA_HOST_MODE_WIFI_DISABLED 73 #ifdef RX_DESC_SANITY_WAR 74 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, 75 hal_ring_handle_t hal_ring_hdl, 76 hal_ring_desc_t ring_desc, 77 struct dp_rx_desc *rx_desc) 78 { 79 uint8_t return_buffer_manager; 80 81 if (qdf_unlikely(!rx_desc)) { 82 /* 83 * This is an unlikely case where the cookie obtained 84 * from the ring_desc is invalid and hence we are not 85 * able to find the corresponding rx_desc 86 */ 87 goto fail; 88 } 89 90 return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc); 91 if (qdf_unlikely(!(return_buffer_manager == 92 HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) || 93 return_buffer_manager == 94 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) { 95 goto fail; 96 } 97 98 return QDF_STATUS_SUCCESS; 99 100 fail: 101 DP_STATS_INC(soc, rx.err.invalid_cookie, 1); 102 dp_err("Ring Desc:"); 103 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, 104 ring_desc); 105 return QDF_STATUS_E_NULL_VALUE; 106 107 } 108 #endif 109 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 110 111 /** 112 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map 113 * 114 * @dp_soc: struct dp_soc * 115 * @nbuf_frag_info_t: nbuf frag info 116 * @dp_pdev: struct dp_pdev * 117 * @rx_desc_pool: Rx desc pool 118 * 119 * Return: QDF_STATUS 120 */ 121 
#ifdef DP_RX_MON_MEM_FRAG 122 static inline QDF_STATUS 123 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 124 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 125 struct dp_pdev *dp_pdev, 126 struct rx_desc_pool *rx_desc_pool) 127 { 128 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 129 130 (nbuf_frag_info_t->virt_addr).vaddr = 131 qdf_frag_alloc(NULL, rx_desc_pool->buf_size); 132 133 if (!((nbuf_frag_info_t->virt_addr).vaddr)) { 134 dp_err("Frag alloc failed"); 135 DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1); 136 return QDF_STATUS_E_NOMEM; 137 } 138 139 ret = qdf_mem_map_page(dp_soc->osdev, 140 (nbuf_frag_info_t->virt_addr).vaddr, 141 QDF_DMA_FROM_DEVICE, 142 rx_desc_pool->buf_size, 143 &nbuf_frag_info_t->paddr); 144 145 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 146 qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr); 147 dp_err("Frag map failed"); 148 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 149 return QDF_STATUS_E_FAULT; 150 } 151 152 return QDF_STATUS_SUCCESS; 153 } 154 #else 155 static inline QDF_STATUS 156 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 157 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 158 struct dp_pdev *dp_pdev, 159 struct rx_desc_pool *rx_desc_pool) 160 { 161 return QDF_STATUS_SUCCESS; 162 } 163 #endif /* DP_RX_MON_MEM_FRAG */ 164 165 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 166 /** 167 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history 168 * @soc: Datapath soc structure 169 * @ring_num: Refill ring number 170 * @num_req: number of buffers requested for refill 171 * @num_refill: number of buffers refilled 172 * 173 * Returns: None 174 */ 175 static inline void 176 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 177 hal_ring_handle_t hal_ring_hdl, 178 uint32_t num_req, uint32_t num_refill) 179 { 180 struct dp_refill_info_record *record; 181 uint32_t idx; 182 uint32_t tp; 183 uint32_t hp; 184 185 if (qdf_unlikely(ring_num >= MAX_PDEV_CNT || 186 !soc->rx_refill_ring_history[ring_num])) 187 return; 188 189 idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index, 190 DP_RX_REFILL_HIST_MAX); 191 192 /* No NULL check needed for record since its an array */ 193 record = &soc->rx_refill_ring_history[ring_num]->entry[idx]; 194 195 hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp); 196 record->timestamp = qdf_get_log_timestamp(); 197 record->num_req = num_req; 198 record->num_refill = num_refill; 199 record->hp = hp; 200 record->tp = tp; 201 } 202 #else 203 static inline void 204 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 205 hal_ring_handle_t hal_ring_hdl, 206 uint32_t num_req, uint32_t num_refill) 207 { 208 } 209 #endif 210 211 /** 212 * dp_pdev_nbuf_alloc_and_map() - Allocate nbuf for desc buffer and map 213 * 214 * @dp_soc: struct dp_soc * 215 * @mac_id: Mac id 216 * @num_entries_avail: num_entries_avail 217 * @nbuf_frag_info_t: nbuf frag info 218 * @dp_pdev: struct dp_pdev * 219 * @rx_desc_pool: Rx desc pool 220 * 221 * Return: QDF_STATUS 222 */ 223 static inline QDF_STATUS 224 dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc, 225 uint32_t mac_id, 226 uint32_t num_entries_avail, 227 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 228 struct dp_pdev *dp_pdev, 229 struct rx_desc_pool *rx_desc_pool) 230 { 231 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 232 233 (nbuf_frag_info_t->virt_addr).nbuf = 234 dp_rx_buffer_pool_nbuf_alloc(dp_soc, 235 mac_id, 236 rx_desc_pool, 237 num_entries_avail); 238 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 239 dp_err("nbuf alloc 
failed"); 240 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 241 return QDF_STATUS_E_NOMEM; 242 } 243 244 ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool, 245 nbuf_frag_info_t); 246 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 247 dp_rx_buffer_pool_nbuf_free(dp_soc, 248 (nbuf_frag_info_t->virt_addr).nbuf, mac_id); 249 dp_err("nbuf map failed"); 250 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 251 return QDF_STATUS_E_FAULT; 252 } 253 254 nbuf_frag_info_t->paddr = 255 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 256 dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)( 257 (nbuf_frag_info_t->virt_addr).nbuf), 258 rx_desc_pool->buf_size, 259 true, __func__, __LINE__); 260 261 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 262 &nbuf_frag_info_t->paddr, 263 rx_desc_pool); 264 if (ret == QDF_STATUS_E_FAILURE) { 265 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 266 return QDF_STATUS_E_ADDRNOTAVAIL; 267 } 268 269 return QDF_STATUS_SUCCESS; 270 } 271 272 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 273 QDF_STATUS 274 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id, 275 struct dp_srng *dp_rxdma_srng, 276 struct rx_desc_pool *rx_desc_pool) 277 { 278 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 279 uint32_t count; 280 void *rxdma_ring_entry; 281 union dp_rx_desc_list_elem_t *next = NULL; 282 void *rxdma_srng; 283 qdf_nbuf_t nbuf; 284 qdf_dma_addr_t paddr; 285 uint16_t num_entries_avail = 0; 286 uint16_t num_alloc_desc = 0; 287 union dp_rx_desc_list_elem_t *desc_list = NULL; 288 union dp_rx_desc_list_elem_t *tail = NULL; 289 int sync_hw_ptr = 0; 290 291 rxdma_srng = dp_rxdma_srng->hal_srng; 292 293 if (qdf_unlikely(!dp_pdev)) { 294 dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id); 295 return QDF_STATUS_E_FAILURE; 296 } 297 298 if (qdf_unlikely(!rxdma_srng)) { 299 dp_rx_debug("%pK: rxdma srng not initialized", soc); 300 return QDF_STATUS_E_FAILURE; 301 } 302 303 hal_srng_access_start(soc->hal_soc, rxdma_srng); 304 305 num_entries_avail = hal_srng_src_num_avail(soc->hal_soc, 306 rxdma_srng, 307 sync_hw_ptr); 308 309 dp_rx_debug("%pK: no of available entries in rxdma ring: %d", 310 soc, num_entries_avail); 311 312 if (qdf_unlikely(num_entries_avail < 313 ((dp_rxdma_srng->num_entries * 3) / 4))) { 314 hal_srng_access_end(soc->hal_soc, rxdma_srng); 315 return QDF_STATUS_E_FAILURE; 316 } 317 318 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 319 num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id, 320 rx_desc_pool, 321 num_entries_avail, 322 &desc_list, 323 &tail); 324 325 if (!num_alloc_desc) { 326 dp_rx_err("%pK: no free rx_descs in freelist", soc); 327 DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail, 328 num_entries_avail); 329 hal_srng_access_end(soc->hal_soc, rxdma_srng); 330 return QDF_STATUS_E_NOMEM; 331 } 332 333 for (count = 0; count < num_alloc_desc; count++) { 334 next = desc_list->next; 335 qdf_prefetch(next); 336 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 337 if (qdf_unlikely(!nbuf)) { 338 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 339 break; 340 } 341 342 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 343 rx_desc_pool->buf_size); 344 345 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, 346 rxdma_srng); 347 qdf_assert_always(rxdma_ring_entry); 348 349 desc_list->rx_desc.nbuf = nbuf; 350 desc_list->rx_desc.rx_buf_start = nbuf->data; 351 desc_list->rx_desc.unmapped = 0; 352 353 /* rx_desc.in_use should be zero at this time*/ 354 
qdf_assert_always(desc_list->rx_desc.in_use == 0); 355 356 desc_list->rx_desc.in_use = 1; 357 desc_list->rx_desc.in_err_state = 0; 358 359 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 360 paddr, 361 desc_list->rx_desc.cookie, 362 rx_desc_pool->owner); 363 364 desc_list = next; 365 } 366 qdf_dsb(); 367 hal_srng_access_end(soc->hal_soc, rxdma_srng); 368 369 /* No need to count the number of bytes received during replenish. 370 * Therefore set replenish.pkts.bytes as 0. 371 */ 372 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 373 DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count)); 374 /* 375 * add any available free desc back to the free list 376 */ 377 if (desc_list) 378 dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail, 379 mac_id, rx_desc_pool); 380 381 return QDF_STATUS_SUCCESS; 382 } 383 384 QDF_STATUS 385 __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id, 386 struct dp_srng *dp_rxdma_srng, 387 struct rx_desc_pool *rx_desc_pool, 388 uint32_t num_req_buffers, 389 union dp_rx_desc_list_elem_t **desc_list, 390 union dp_rx_desc_list_elem_t **tail) 391 { 392 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 393 uint32_t count; 394 void *rxdma_ring_entry; 395 union dp_rx_desc_list_elem_t *next; 396 void *rxdma_srng; 397 qdf_nbuf_t nbuf; 398 qdf_nbuf_t nbuf_next; 399 qdf_nbuf_t nbuf_head = NULL; 400 qdf_nbuf_t nbuf_tail = NULL; 401 qdf_dma_addr_t paddr; 402 403 rxdma_srng = dp_rxdma_srng->hal_srng; 404 405 if (qdf_unlikely(!dp_pdev)) { 406 dp_rx_err("%pK: pdev is null for mac_id = %d", 407 soc, mac_id); 408 return QDF_STATUS_E_FAILURE; 409 } 410 411 if (qdf_unlikely(!rxdma_srng)) { 412 dp_rx_debug("%pK: rxdma srng not initialized", soc); 413 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 414 return QDF_STATUS_E_FAILURE; 415 } 416 417 /* Allocate required number of nbufs */ 418 for (count = 0; count < num_req_buffers; count++) { 419 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 420 if (qdf_unlikely(!nbuf)) { 421 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 422 /* Update num_req_buffers to nbufs allocated count */ 423 num_req_buffers = count; 424 break; 425 } 426 427 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 428 rx_desc_pool->buf_size); 429 430 QDF_NBUF_CB_PADDR(nbuf) = paddr; 431 DP_RX_LIST_APPEND(nbuf_head, 432 nbuf_tail, 433 nbuf); 434 } 435 qdf_dsb(); 436 437 nbuf = nbuf_head; 438 hal_srng_access_start(soc->hal_soc, rxdma_srng); 439 440 for (count = 0; count < num_req_buffers; count++) { 441 next = (*desc_list)->next; 442 nbuf_next = nbuf->next; 443 qdf_prefetch(next); 444 445 rxdma_ring_entry = (struct dp_buffer_addr_info *) 446 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 447 448 if (!rxdma_ring_entry) 449 break; 450 451 (*desc_list)->rx_desc.nbuf = nbuf; 452 (*desc_list)->rx_desc.rx_buf_start = nbuf->data; 453 (*desc_list)->rx_desc.unmapped = 0; 454 455 /* rx_desc.in_use should be zero at this time*/ 456 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 457 458 (*desc_list)->rx_desc.in_use = 1; 459 (*desc_list)->rx_desc.in_err_state = 0; 460 461 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 462 QDF_NBUF_CB_PADDR(nbuf), 463 (*desc_list)->rx_desc.cookie, 464 rx_desc_pool->owner); 465 466 *desc_list = next; 467 nbuf = nbuf_next; 468 } 469 hal_srng_access_end(soc->hal_soc, rxdma_srng); 470 471 /* No need to count the number of bytes received during replenish. 472 * Therefore set replenish.pkts.bytes as 0. 
473 */ 474 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 475 DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count)); 476 /* 477 * add any available free desc back to the free list 478 */ 479 if (*desc_list) 480 dp_rx_add_desc_list_to_free_list(soc, desc_list, tail, 481 mac_id, rx_desc_pool); 482 while (nbuf) { 483 nbuf_next = nbuf->next; 484 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf); 485 qdf_nbuf_free(nbuf); 486 nbuf = nbuf_next; 487 } 488 489 return QDF_STATUS_SUCCESS; 490 } 491 492 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc, 493 uint32_t mac_id, 494 struct dp_srng *dp_rxdma_srng, 495 struct rx_desc_pool *rx_desc_pool, 496 uint32_t num_req_buffers) 497 { 498 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 499 uint32_t count; 500 uint32_t nr_descs = 0; 501 void *rxdma_ring_entry; 502 union dp_rx_desc_list_elem_t *next; 503 void *rxdma_srng; 504 qdf_nbuf_t nbuf; 505 qdf_dma_addr_t paddr; 506 union dp_rx_desc_list_elem_t *desc_list = NULL; 507 union dp_rx_desc_list_elem_t *tail = NULL; 508 509 rxdma_srng = dp_rxdma_srng->hal_srng; 510 511 if (qdf_unlikely(!dp_pdev)) { 512 dp_rx_err("%pK: pdev is null for mac_id = %d", 513 soc, mac_id); 514 return QDF_STATUS_E_FAILURE; 515 } 516 517 if (qdf_unlikely(!rxdma_srng)) { 518 dp_rx_debug("%pK: rxdma srng not initialized", soc); 519 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 520 return QDF_STATUS_E_FAILURE; 521 } 522 523 dp_rx_debug("%pK: requested %d buffers for replenish", 524 soc, num_req_buffers); 525 526 nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool, 527 num_req_buffers, &desc_list, &tail); 528 if (!nr_descs) { 529 dp_err("no free rx_descs in freelist"); 530 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 531 return QDF_STATUS_E_NOMEM; 532 } 533 534 dp_debug("got %u RX descs for driver attach", nr_descs); 535 536 hal_srng_access_start(soc->hal_soc, rxdma_srng); 537 538 for (count = 0; count < nr_descs; count++) { 539 next = desc_list->next; 540 qdf_prefetch(next); 541 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 542 if (qdf_unlikely(!nbuf)) { 543 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 544 break; 545 } 546 547 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 548 rx_desc_pool->buf_size); 549 rxdma_ring_entry = (struct dp_buffer_addr_info *) 550 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 551 if (!rxdma_ring_entry) 552 break; 553 554 qdf_assert_always(rxdma_ring_entry); 555 556 desc_list->rx_desc.nbuf = nbuf; 557 desc_list->rx_desc.rx_buf_start = nbuf->data; 558 desc_list->rx_desc.unmapped = 0; 559 560 /* rx_desc.in_use should be zero at this time*/ 561 qdf_assert_always(desc_list->rx_desc.in_use == 0); 562 563 desc_list->rx_desc.in_use = 1; 564 desc_list->rx_desc.in_err_state = 0; 565 566 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 567 paddr, 568 desc_list->rx_desc.cookie, 569 rx_desc_pool->owner); 570 571 desc_list = next; 572 } 573 qdf_dsb(); 574 hal_srng_access_end(soc->hal_soc, rxdma_srng); 575 576 /* No need to count the number of bytes received during replenish. 577 * Therefore set replenish.pkts.bytes as 0. 
578 */ 579 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 580 581 return QDF_STATUS_SUCCESS; 582 } 583 #endif 584 585 #ifdef DP_UMAC_HW_RESET_SUPPORT 586 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 587 static inline 588 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, 589 uint32_t buf_size) 590 { 591 return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size); 592 } 593 #else 594 static inline 595 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, 596 uint32_t buf_size) 597 { 598 return qdf_nbuf_get_frag_paddr(nbuf, 0); 599 } 600 #endif 601 602 /* 603 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time 604 * 605 * @soc: core txrx main context 606 * @dp_rxdma_srng: rxdma ring 607 * @rx_desc_pool: rx descriptor pool 608 * @rx_desc:rx descriptor 609 * 610 * Return: void 611 */ 612 static inline 613 void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng, 614 struct rx_desc_pool *rx_desc_pool, 615 struct dp_rx_desc *rx_desc) 616 { 617 void *rxdma_srng; 618 void *rxdma_ring_entry; 619 qdf_dma_addr_t paddr; 620 621 rxdma_srng = dp_rxdma_srng->hal_srng; 622 623 /* No one else should be accessing the srng at this point */ 624 hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng); 625 626 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 627 628 qdf_assert_always(rxdma_ring_entry); 629 rx_desc->in_err_state = 0; 630 631 paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf, 632 rx_desc_pool->buf_size); 633 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr, 634 rx_desc->cookie, rx_desc_pool->owner); 635 636 hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng); 637 } 638 639 /* 640 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring 641 * 642 * @soc: core txrx main context 643 * @nbuf_list: nbuf list for delayed free 644 * 645 * Return: void 646 */ 647 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list) 648 { 649 int mac_id, i, j; 650 union dp_rx_desc_list_elem_t *head = NULL; 651 union dp_rx_desc_list_elem_t *tail = NULL; 652 653 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 654 struct dp_srng *dp_rxdma_srng = 655 &soc->rx_refill_buf_ring[mac_id]; 656 struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id]; 657 uint32_t rx_sw_desc_num = rx_desc_pool->pool_size; 658 /* Only fill up 1/3 of the ring size */ 659 uint32_t num_req_decs; 660 661 if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng || 662 !rx_desc_pool->array) 663 continue; 664 665 num_req_decs = dp_rxdma_srng->num_entries / 3; 666 667 for (i = 0, j = 0; i < rx_sw_desc_num; i++) { 668 struct dp_rx_desc *rx_desc = 669 (struct dp_rx_desc *)&rx_desc_pool->array[i]; 670 671 if (rx_desc->in_use) { 672 if (j < dp_rxdma_srng->num_entries) { 673 dp_rx_desc_replenish(soc, dp_rxdma_srng, 674 rx_desc_pool, 675 rx_desc); 676 } else { 677 dp_rx_nbuf_unmap(soc, rx_desc, 0); 678 rx_desc->unmapped = 0; 679 680 rx_desc->nbuf->next = *nbuf_list; 681 *nbuf_list = rx_desc->nbuf; 682 683 dp_rx_add_to_free_desc_list(&head, 684 &tail, 685 rx_desc); 686 } 687 j++; 688 } 689 } 690 691 if (head) 692 dp_rx_add_desc_list_to_free_list(soc, &head, &tail, 693 mac_id, rx_desc_pool); 694 695 /* If num of descs in use were less, then we need to replenish 696 * the ring with some buffers 697 */ 698 head = NULL; 699 tail = NULL; 700 701 if (j < (num_req_decs - 1)) 702 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 703 rx_desc_pool, 704 ((num_req_decs - 1) - j), 705 &head, &tail, 
true); 706 } 707 } 708 #endif 709 710 /* 711 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs 712 * called during dp rx initialization 713 * and at the end of dp_rx_process. 714 * 715 * @soc: core txrx main context 716 * @mac_id: mac_id which is one of 3 mac_ids 717 * @dp_rxdma_srng: dp rxdma circular ring 718 * @rx_desc_pool: Pointer to free Rx descriptor pool 719 * @num_req_buffers: number of buffer to be replenished 720 * @desc_list: list of descs if called from dp_rx_process 721 * or NULL during dp rx initialization or out of buffer 722 * interrupt. 723 * @tail: tail of descs list 724 * @req_only: If true don't replenish more than req buffers 725 * @func_name: name of the caller function 726 * Return: return success or failure 727 */ 728 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 729 struct dp_srng *dp_rxdma_srng, 730 struct rx_desc_pool *rx_desc_pool, 731 uint32_t num_req_buffers, 732 union dp_rx_desc_list_elem_t **desc_list, 733 union dp_rx_desc_list_elem_t **tail, 734 bool req_only, const char *func_name) 735 { 736 uint32_t num_alloc_desc; 737 uint16_t num_desc_to_free = 0; 738 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 739 uint32_t num_entries_avail; 740 uint32_t count; 741 uint32_t extra_buffers; 742 int sync_hw_ptr = 1; 743 struct dp_rx_nbuf_frag_info nbuf_frag_info = {0}; 744 void *rxdma_ring_entry; 745 union dp_rx_desc_list_elem_t *next; 746 QDF_STATUS ret; 747 void *rxdma_srng; 748 union dp_rx_desc_list_elem_t *desc_list_append = NULL; 749 union dp_rx_desc_list_elem_t *tail_append = NULL; 750 union dp_rx_desc_list_elem_t *temp_list = NULL; 751 752 rxdma_srng = dp_rxdma_srng->hal_srng; 753 754 if (qdf_unlikely(!dp_pdev)) { 755 dp_rx_err("%pK: pdev is null for mac_id = %d", 756 dp_soc, mac_id); 757 return QDF_STATUS_E_FAILURE; 758 } 759 760 if (qdf_unlikely(!rxdma_srng)) { 761 dp_rx_debug("%pK: rxdma srng not initialized", dp_soc); 762 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 763 return QDF_STATUS_E_FAILURE; 764 } 765 766 dp_verbose_debug("%pK: requested %d buffers for replenish", 767 dp_soc, num_req_buffers); 768 769 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 770 771 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 772 rxdma_srng, 773 sync_hw_ptr); 774 775 dp_verbose_debug("%pK: no of available entries in rxdma ring: %d", 776 dp_soc, num_entries_avail); 777 778 if (!req_only && !(*desc_list) && (num_entries_avail > 779 ((dp_rxdma_srng->num_entries * 3) / 4))) { 780 num_req_buffers = num_entries_avail; 781 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 782 } else if (num_entries_avail < num_req_buffers) { 783 num_desc_to_free = num_req_buffers - num_entries_avail; 784 num_req_buffers = num_entries_avail; 785 } else if ((*desc_list) && 786 dp_rxdma_srng->num_entries - num_entries_avail < 787 CRITICAL_BUFFER_THRESHOLD) { 788 /* set extra buffers to CRITICAL_BUFFER_THRESHOLD only if 789 * total buff requested after adding extra buffers is less 790 * than or equal to num entries available, else set it to max 791 * possible additional buffers available at that moment 792 */ 793 extra_buffers = 794 ((num_req_buffers + CRITICAL_BUFFER_THRESHOLD) > num_entries_avail) ? 
795 (num_entries_avail - num_req_buffers) : 796 CRITICAL_BUFFER_THRESHOLD; 797 /* Append some free descriptors to tail */ 798 num_alloc_desc = 799 dp_rx_get_free_desc_list(dp_soc, mac_id, 800 rx_desc_pool, 801 extra_buffers, 802 &desc_list_append, 803 &tail_append); 804 805 if (num_alloc_desc) { 806 temp_list = *desc_list; 807 *desc_list = desc_list_append; 808 tail_append->next = temp_list; 809 num_req_buffers += num_alloc_desc; 810 811 DP_STATS_DEC(dp_pdev, 812 replenish.free_list, 813 num_alloc_desc); 814 } else 815 dp_err_rl("%pK: no free rx_descs in freelist", dp_soc); 816 } 817 818 if (qdf_unlikely(!num_req_buffers)) { 819 num_desc_to_free = num_req_buffers; 820 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 821 goto free_descs; 822 } 823 824 /* 825 * if desc_list is NULL, allocate the descs from freelist 826 */ 827 if (!(*desc_list)) { 828 num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, 829 rx_desc_pool, 830 num_req_buffers, 831 desc_list, 832 tail); 833 834 if (!num_alloc_desc) { 835 dp_rx_err("%pK: no free rx_descs in freelist", dp_soc); 836 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, 837 num_req_buffers); 838 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 839 return QDF_STATUS_E_NOMEM; 840 } 841 842 dp_verbose_debug("%pK: %d rx desc allocated", dp_soc, 843 num_alloc_desc); 844 num_req_buffers = num_alloc_desc; 845 } 846 847 848 count = 0; 849 850 while (count < num_req_buffers) { 851 /* Flag is set while pdev rx_desc_pool initialization */ 852 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 853 ret = dp_pdev_frag_alloc_and_map(dp_soc, 854 &nbuf_frag_info, 855 dp_pdev, 856 rx_desc_pool); 857 else 858 ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc, 859 mac_id, 860 num_entries_avail, &nbuf_frag_info, 861 dp_pdev, rx_desc_pool); 862 863 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 864 if (qdf_unlikely(ret == QDF_STATUS_E_FAULT)) 865 continue; 866 break; 867 } 868 869 count++; 870 871 rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, 872 rxdma_srng); 873 qdf_assert_always(rxdma_ring_entry); 874 875 next = (*desc_list)->next; 876 877 /* Flag is set while pdev rx_desc_pool initialization */ 878 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 879 dp_rx_desc_frag_prep(&((*desc_list)->rx_desc), 880 &nbuf_frag_info); 881 else 882 dp_rx_desc_prep(&((*desc_list)->rx_desc), 883 &nbuf_frag_info); 884 885 /* rx_desc.in_use should be zero at this time*/ 886 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 887 888 (*desc_list)->rx_desc.in_use = 1; 889 (*desc_list)->rx_desc.in_err_state = 0; 890 dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc, 891 func_name, RX_DESC_REPLENISHED); 892 dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d", 893 nbuf_frag_info.virt_addr.nbuf, 894 (unsigned long long)(nbuf_frag_info.paddr), 895 (*desc_list)->rx_desc.cookie); 896 897 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, 898 nbuf_frag_info.paddr, 899 (*desc_list)->rx_desc.cookie, 900 rx_desc_pool->owner); 901 902 *desc_list = next; 903 904 } 905 906 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng, 907 num_req_buffers, count); 908 909 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 910 911 dp_rx_schedule_refill_thread(dp_soc); 912 913 dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u", 914 count, num_desc_to_free); 915 916 /* No need to count the number of bytes received during replenish. 917 * Therefore set replenish.pkts.bytes as 0. 
 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(__dp_rx_buffers_replenish);

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @txrx_peer: peer object from which the pkt is received
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_txrx_peer *txrx_peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
					      qdf_nbuf_len(nbuf));
		/*
		 * Reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for a non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/*
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical frame
 * and marks the QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the
 * nbuf. Code for marking which frames are CRITICAL is accessed via a callback.
 * EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical frames.
1003 * 1004 * Return: None 1005 */ 1006 static 1007 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev, 1008 qdf_nbuf_t nbuf) 1009 { 1010 if (vdev->tx_classify_critical_pkt_cb) 1011 vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf); 1012 } 1013 #else 1014 static inline 1015 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev, 1016 qdf_nbuf_t nbuf) 1017 { 1018 } 1019 #endif 1020 1021 #ifdef QCA_OL_TX_MULTIQ_SUPPORT 1022 static inline 1023 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) 1024 { 1025 qdf_nbuf_set_queue_mapping(nbuf, ring_id); 1026 } 1027 #else 1028 static inline 1029 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) 1030 { 1031 } 1032 #endif 1033 1034 /* 1035 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets 1036 * 1037 * @soc: core txrx main context 1038 * @ta_peer : source peer entry 1039 * @rx_tlv_hdr : start address of rx tlvs 1040 * @nbuf : nbuf that has to be intrabss forwarded 1041 * @tid_stats : tid stats pointer 1042 * 1043 * Return: bool: true if it is forwarded else false 1044 */ 1045 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, 1046 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1047 struct cdp_tid_rx_stats *tid_stats) 1048 { 1049 uint16_t len; 1050 qdf_nbuf_t nbuf_copy; 1051 1052 if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr, 1053 nbuf)) 1054 return true; 1055 1056 if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf)) 1057 return false; 1058 1059 /* If the source peer in the isolation list 1060 * then dont forward instead push to bridge stack 1061 */ 1062 if (dp_get_peer_isolation(ta_peer)) 1063 return false; 1064 1065 nbuf_copy = qdf_nbuf_copy(nbuf); 1066 if (!nbuf_copy) 1067 return false; 1068 1069 len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1070 1071 qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb)); 1072 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy); 1073 1074 if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer, 1075 nbuf_copy, 1076 tid_stats)) 1077 return false; 1078 1079 /* Don't send packets if tx is paused */ 1080 if (!soc->is_tx_pause && 1081 !dp_tx_send((struct cdp_soc_t *)soc, 1082 ta_peer->vdev->vdev_id, nbuf_copy)) { 1083 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 1084 len); 1085 tid_stats->intrabss_cnt++; 1086 } else { 1087 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 1088 len); 1089 tid_stats->fail_cnt[INTRABSS_DROP]++; 1090 dp_rx_nbuf_free(nbuf_copy); 1091 } 1092 return false; 1093 } 1094 1095 /* 1096 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets 1097 * 1098 * @soc: core txrx main context 1099 * @ta_peer: source peer entry 1100 * @tx_vdev_id: VDEV ID for Intra-BSS TX 1101 * @rx_tlv_hdr: start address of rx tlvs 1102 * @nbuf: nbuf that has to be intrabss forwarded 1103 * @tid_stats: tid stats pointer 1104 * 1105 * Return: bool: true if it is forwarded else false 1106 */ 1107 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, 1108 uint8_t tx_vdev_id, 1109 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1110 struct cdp_tid_rx_stats *tid_stats) 1111 { 1112 uint16_t len; 1113 1114 len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1115 1116 /* linearize the nbuf just before we send to 1117 * dp_tx_send() 1118 */ 1119 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 1120 if (qdf_nbuf_linearize(nbuf) == -ENOMEM) 1121 return false; 1122 1123 nbuf = qdf_nbuf_unshare(nbuf); 1124 if (!nbuf) { 1125 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, 1126 
rx.intra_bss.fail, 1127 1, len); 1128 /* return true even though the pkt is 1129 * not forwarded. Basically skb_unshare 1130 * failed and we want to continue with 1131 * next nbuf. 1132 */ 1133 tid_stats->fail_cnt[INTRABSS_DROP]++; 1134 return false; 1135 } 1136 } 1137 1138 qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb)); 1139 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf); 1140 1141 /* Don't send packets if tx is paused */ 1142 if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc, 1143 tx_vdev_id, nbuf)) { 1144 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 1145 len); 1146 } else { 1147 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 1148 len); 1149 tid_stats->fail_cnt[INTRABSS_DROP]++; 1150 return false; 1151 } 1152 1153 return true; 1154 } 1155 1156 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1157 1158 #ifdef MESH_MODE_SUPPORT 1159 1160 /** 1161 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats 1162 * 1163 * @vdev: DP Virtual device handle 1164 * @nbuf: Buffer pointer 1165 * @rx_tlv_hdr: start of rx tlv header 1166 * @txrx_peer: pointer to peer 1167 * 1168 * This function allocated memory for mesh receive stats and fill the 1169 * required stats. Stores the memory address in skb cb. 1170 * 1171 * Return: void 1172 */ 1173 1174 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1175 uint8_t *rx_tlv_hdr, 1176 struct dp_txrx_peer *txrx_peer) 1177 { 1178 struct mesh_recv_hdr_s *rx_info = NULL; 1179 uint32_t pkt_type; 1180 uint32_t nss; 1181 uint32_t rate_mcs; 1182 uint32_t bw; 1183 uint8_t primary_chan_num; 1184 uint32_t center_chan_freq; 1185 struct dp_soc *soc = vdev->pdev->soc; 1186 struct dp_peer *peer; 1187 struct dp_peer *primary_link_peer; 1188 struct dp_soc *link_peer_soc; 1189 cdp_peer_stats_param_t buf = {0}; 1190 1191 /* fill recv mesh stats */ 1192 rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s)); 1193 1194 /* upper layers are responsible to free this memory */ 1195 1196 if (!rx_info) { 1197 dp_rx_err("%pK: Memory allocation failed for mesh rx stats", 1198 vdev->pdev->soc); 1199 DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1); 1200 return; 1201 } 1202 1203 rx_info->rs_flags = MESH_RXHDR_VER1; 1204 if (qdf_nbuf_is_rx_chfrag_start(nbuf)) 1205 rx_info->rs_flags |= MESH_RX_FIRST_MSDU; 1206 1207 if (qdf_nbuf_is_rx_chfrag_end(nbuf)) 1208 rx_info->rs_flags |= MESH_RX_LAST_MSDU; 1209 1210 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH); 1211 if (peer) { 1212 if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) { 1213 rx_info->rs_flags |= MESH_RX_DECRYPTED; 1214 rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc, 1215 rx_tlv_hdr); 1216 if (vdev->osif_get_key) 1217 vdev->osif_get_key(vdev->osif_vdev, 1218 &rx_info->rs_decryptkey[0], 1219 &peer->mac_addr.raw[0], 1220 rx_info->rs_keyix); 1221 } 1222 1223 dp_peer_unref_delete(peer, DP_MOD_ID_MESH); 1224 } 1225 1226 primary_link_peer = dp_get_primary_link_peer_by_id(soc, 1227 txrx_peer->peer_id, 1228 DP_MOD_ID_MESH); 1229 1230 if (qdf_likely(primary_link_peer)) { 1231 link_peer_soc = primary_link_peer->vdev->pdev->soc; 1232 dp_monitor_peer_get_stats_param(link_peer_soc, 1233 primary_link_peer, 1234 cdp_peer_rx_snr, &buf); 1235 rx_info->rs_snr = buf.rx_snr; 1236 dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH); 1237 } 1238 1239 rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR; 1240 1241 soc = vdev->pdev->soc; 1242 primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr); 1243 center_chan_freq = 
hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16; 1244 1245 if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) { 1246 rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band( 1247 soc->ctrl_psoc, 1248 vdev->pdev->pdev_id, 1249 center_chan_freq); 1250 } 1251 rx_info->rs_channel = primary_chan_num; 1252 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 1253 rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 1254 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 1255 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 1256 rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) | 1257 (bw << 24); 1258 1259 qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO); 1260 1261 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED, 1262 FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"), 1263 rx_info->rs_flags, 1264 rx_info->rs_rssi, 1265 rx_info->rs_channel, 1266 rx_info->rs_ratephy1, 1267 rx_info->rs_keyix, 1268 rx_info->rs_snr); 1269 1270 } 1271 1272 /** 1273 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets 1274 * 1275 * @vdev: DP Virtual device handle 1276 * @nbuf: Buffer pointer 1277 * @rx_tlv_hdr: start of rx tlv header 1278 * 1279 * This checks if the received packet is matching any filter out 1280 * catogery and and drop the packet if it matches. 1281 * 1282 * Return: status(0 indicates drop, 1 indicate to no drop) 1283 */ 1284 1285 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1286 uint8_t *rx_tlv_hdr) 1287 { 1288 union dp_align_mac_addr mac_addr; 1289 struct dp_soc *soc = vdev->pdev->soc; 1290 1291 if (qdf_unlikely(vdev->mesh_rx_filter)) { 1292 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS) 1293 if (hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1294 rx_tlv_hdr)) 1295 return QDF_STATUS_SUCCESS; 1296 1297 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS) 1298 if (hal_rx_mpdu_get_to_ds(soc->hal_soc, 1299 rx_tlv_hdr)) 1300 return QDF_STATUS_SUCCESS; 1301 1302 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS) 1303 if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1304 rx_tlv_hdr) && 1305 !hal_rx_mpdu_get_to_ds(soc->hal_soc, 1306 rx_tlv_hdr)) 1307 return QDF_STATUS_SUCCESS; 1308 1309 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) { 1310 if (hal_rx_mpdu_get_addr1(soc->hal_soc, 1311 rx_tlv_hdr, 1312 &mac_addr.raw[0])) 1313 return QDF_STATUS_E_FAILURE; 1314 1315 if (!qdf_mem_cmp(&mac_addr.raw[0], 1316 &vdev->mac_addr.raw[0], 1317 QDF_MAC_ADDR_SIZE)) 1318 return QDF_STATUS_SUCCESS; 1319 } 1320 1321 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) { 1322 if (hal_rx_mpdu_get_addr2(soc->hal_soc, 1323 rx_tlv_hdr, 1324 &mac_addr.raw[0])) 1325 return QDF_STATUS_E_FAILURE; 1326 1327 if (!qdf_mem_cmp(&mac_addr.raw[0], 1328 &vdev->mac_addr.raw[0], 1329 QDF_MAC_ADDR_SIZE)) 1330 return QDF_STATUS_SUCCESS; 1331 } 1332 } 1333 1334 return QDF_STATUS_E_FAILURE; 1335 } 1336 1337 #else 1338 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1339 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer) 1340 { 1341 } 1342 1343 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1344 uint8_t *rx_tlv_hdr) 1345 { 1346 return QDF_STATUS_E_FAILURE; 1347 } 1348 1349 #endif 1350 1351 #ifdef RX_PEER_INVALID_ENH 1352 /** 1353 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac 1354 * @soc: DP SOC handle 1355 * @mpdu: mpdu for which peer is invalid 1356 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1357 * pool_id has same mapping) 1358 * 1359 * 
Return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = NULL;
	int i = 0;

	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
		dp_rx_debug("%pK: Drop decapped frames", soc);
		goto free;
	}

	/* In a RAW packet, the packet header is part of the data */
	rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		dp_rx_err("%pK: Invalid nbuf length", soc);
		goto free;
	}

	/* In the DMAC case the rx_desc_pools are common across PDEVs,
	 * so the PDEV cannot be derived from the pool_id.
	 *
	 * link_id needs to be derived from the TLV tag word, which is
	 * disabled by default. For now, add a WAR to find the vdev by
	 * brute force; this needs to be fixed once word based subscription
	 * support is added by enabling the TLV tag word.
	 */
	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		for (i = 0; i < MAX_PDEV_CNT; i++) {
			pdev = soc->pdev_list[i];

			if (!pdev || qdf_unlikely(pdev->is_pdev_down))
				continue;

			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
				if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
						QDF_MAC_ADDR_SIZE) == 0) {
					goto out;
				}
			}
		}
	} else {
		pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

		if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
			dp_rx_err("%pK: PDEV %s",
				  soc, !pdev ? "not found" : "down");
			goto free;
		}

		if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
		    QDF_STATUS_SUCCESS)
			return 0;

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		dp_rx_err("%pK: VDEV not found", soc);
		goto free;
	}
out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;

	/*
	 * NOTE: Only valid for HKv1.
	 * If smart monitor mode is enabled on the RE, we receive invalid
	 * peer frames with the RA set to the STA mac of the RE and the TA
	 * not matching any NAC list entry or the BSSID. Such frames need to
	 * be dropped in order to avoid HM_WDS false addition.
1451 */ 1452 if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) { 1453 if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) { 1454 dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm", 1455 soc, wh->i_addr1); 1456 goto free; 1457 } 1458 pdev->soc->cdp_soc.ol_ops->rx_invalid_peer( 1459 (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc, 1460 pdev->pdev_id, &msg); 1461 } 1462 1463 free: 1464 /* Drop and free packet */ 1465 curr_nbuf = mpdu; 1466 while (curr_nbuf) { 1467 next_nbuf = qdf_nbuf_next(curr_nbuf); 1468 dp_rx_nbuf_free(curr_nbuf); 1469 curr_nbuf = next_nbuf; 1470 } 1471 1472 return 0; 1473 } 1474 1475 /** 1476 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler 1477 * @soc: DP SOC handle 1478 * @mpdu: mpdu for which peer is invalid 1479 * @mpdu_done: if an mpdu is completed 1480 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1481 * pool_id has same mapping) 1482 * 1483 * return: integer type 1484 */ 1485 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1486 qdf_nbuf_t mpdu, bool mpdu_done, 1487 uint8_t mac_id) 1488 { 1489 /* Only trigger the process when mpdu is completed */ 1490 if (mpdu_done) 1491 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1492 } 1493 #else 1494 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, 1495 uint8_t mac_id) 1496 { 1497 qdf_nbuf_t curr_nbuf, next_nbuf; 1498 struct dp_pdev *pdev; 1499 struct dp_vdev *vdev = NULL; 1500 struct ieee80211_frame *wh; 1501 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 1502 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr); 1503 1504 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1505 1506 if (!DP_FRAME_IS_DATA(wh)) { 1507 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, 1508 "only for data frames"); 1509 goto free; 1510 } 1511 1512 if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { 1513 dp_rx_info_rl("%pK: Invalid nbuf length", soc); 1514 goto free; 1515 } 1516 1517 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1518 if (!pdev) { 1519 dp_rx_info_rl("%pK: PDEV not found", soc); 1520 goto free; 1521 } 1522 1523 qdf_spin_lock_bh(&pdev->vdev_list_lock); 1524 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { 1525 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1526 QDF_MAC_ADDR_SIZE) == 0) { 1527 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1528 goto out; 1529 } 1530 } 1531 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1532 1533 if (!vdev) { 1534 dp_rx_info_rl("%pK: VDEV not found", soc); 1535 goto free; 1536 } 1537 1538 out: 1539 if (soc->cdp_soc.ol_ops->rx_invalid_peer) 1540 soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh); 1541 free: 1542 1543 /* Drop and free packet */ 1544 curr_nbuf = mpdu; 1545 while (curr_nbuf) { 1546 next_nbuf = qdf_nbuf_next(curr_nbuf); 1547 dp_rx_nbuf_free(curr_nbuf); 1548 curr_nbuf = next_nbuf; 1549 } 1550 1551 /* Reset the head and tail pointers */ 1552 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1553 if (pdev) { 1554 pdev->invalid_peer_head_msdu = NULL; 1555 pdev->invalid_peer_tail_msdu = NULL; 1556 } 1557 1558 return 0; 1559 } 1560 1561 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1562 qdf_nbuf_t mpdu, bool mpdu_done, 1563 uint8_t mac_id) 1564 { 1565 /* Process the nbuf */ 1566 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1567 } 1568 #endif 1569 1570 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1571 1572 #ifdef RECEIVE_OFFLOAD 1573 /** 1574 * dp_rx_print_offload_info() - Print offload info from RX TLV 1575 * @soc: dp soc handle 1576 * @msdu: MSDU for which the offload info is to be printed 1577 * 1578 * Return: None 
1579 */ 1580 static void dp_rx_print_offload_info(struct dp_soc *soc, 1581 qdf_nbuf_t msdu) 1582 { 1583 dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------"); 1584 dp_verbose_debug("lro_eligible 0x%x", 1585 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu)); 1586 dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu)); 1587 dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu)); 1588 dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu)); 1589 dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu)); 1590 dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu)); 1591 dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu)); 1592 dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu)); 1593 dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu)); 1594 dp_verbose_debug("---------------------------------------------------------"); 1595 } 1596 1597 /** 1598 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb 1599 * @soc: DP SOC handle 1600 * @rx_tlv: RX TLV received for the msdu 1601 * @msdu: msdu for which GRO info needs to be filled 1602 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets 1603 * 1604 * Return: None 1605 */ 1606 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, 1607 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) 1608 { 1609 struct hal_offload_info offload_info; 1610 1611 if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) 1612 return; 1613 1614 if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info)) 1615 return; 1616 1617 *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1; 1618 1619 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible; 1620 QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack; 1621 QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) = 1622 hal_rx_tlv_get_tcp_chksum(soc->hal_soc, 1623 rx_tlv); 1624 QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num; 1625 QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num; 1626 QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win; 1627 QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto; 1628 QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto; 1629 QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset; 1630 QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id; 1631 1632 dp_rx_print_offload_info(soc, msdu); 1633 } 1634 #endif /* RECEIVE_OFFLOAD */ 1635 1636 /** 1637 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf. 1638 * 1639 * @soc: DP soc handle 1640 * @nbuf: pointer to msdu. 1641 * @mpdu_len: mpdu length 1642 * @l3_pad_len: L3 padding length by HW 1643 * 1644 * Return: returns true if nbuf is last msdu of mpdu else returns false. 1645 */ 1646 static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc, 1647 qdf_nbuf_t nbuf, 1648 uint16_t *mpdu_len, 1649 uint32_t l3_pad_len) 1650 { 1651 bool last_nbuf; 1652 uint32_t pkt_hdr_size; 1653 1654 pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len; 1655 1656 if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) { 1657 qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE); 1658 last_nbuf = false; 1659 *mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size); 1660 } else { 1661 qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size)); 1662 last_nbuf = true; 1663 *mpdu_len = 0; 1664 } 1665 1666 return last_nbuf; 1667 } 1668 1669 /** 1670 * dp_get_l3_hdr_pad_len() - get L3 header padding length. 1671 * 1672 * @soc: DP soc handle 1673 * @nbuf: pointer to msdu. 1674 * 1675 * Return: returns padding length in bytes. 
1676 */ 1677 static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc, 1678 qdf_nbuf_t nbuf) 1679 { 1680 uint32_t l3_hdr_pad = 0; 1681 uint8_t *rx_tlv_hdr; 1682 struct hal_rx_msdu_metadata msdu_metadata; 1683 1684 while (nbuf) { 1685 if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) { 1686 /* scattered msdu end with continuation is 0 */ 1687 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1688 hal_rx_msdu_metadata_get(soc->hal_soc, 1689 rx_tlv_hdr, 1690 &msdu_metadata); 1691 l3_hdr_pad = msdu_metadata.l3_hdr_pad; 1692 break; 1693 } 1694 nbuf = nbuf->next; 1695 } 1696 1697 return l3_hdr_pad; 1698 } 1699 1700 /** 1701 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across 1702 * multiple nbufs. 1703 * @soc: DP SOC handle 1704 * @nbuf: pointer to the first msdu of an amsdu. 1705 * 1706 * This function implements the creation of RX frag_list for cases 1707 * where an MSDU is spread across multiple nbufs. 1708 * 1709 * Return: returns the head nbuf which contains complete frag_list. 1710 */ 1711 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf) 1712 { 1713 qdf_nbuf_t parent, frag_list, next = NULL; 1714 uint16_t frag_list_len = 0; 1715 uint16_t mpdu_len; 1716 bool last_nbuf; 1717 uint32_t l3_hdr_pad_offset = 0; 1718 1719 /* 1720 * Use msdu len got from REO entry descriptor instead since 1721 * there is case the RX PKT TLV is corrupted while msdu_len 1722 * from REO descriptor is right for non-raw RX scatter msdu. 1723 */ 1724 mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1725 1726 /* 1727 * this is a case where the complete msdu fits in one single nbuf. 1728 * in this case HW sets both start and end bit and we only need to 1729 * reset these bits for RAW mode simulator to decap the pkt 1730 */ 1731 if (qdf_nbuf_is_rx_chfrag_start(nbuf) && 1732 qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1733 qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size); 1734 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1735 return nbuf; 1736 } 1737 1738 l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf); 1739 /* 1740 * This is a case where we have multiple msdus (A-MSDU) spread across 1741 * multiple nbufs. here we create a fraglist out of these nbufs. 1742 * 1743 * the moment we encounter a nbuf with continuation bit set we 1744 * know for sure we have an MSDU which is spread across multiple 1745 * nbufs. We loop through and reap nbufs till we reach last nbuf. 1746 */ 1747 parent = nbuf; 1748 frag_list = nbuf->next; 1749 nbuf = nbuf->next; 1750 1751 /* 1752 * set the start bit in the first nbuf we encounter with continuation 1753 * bit set. This has the proper mpdu length set as it is the first 1754 * msdu of the mpdu. this becomes the parent nbuf and the subsequent 1755 * nbufs will form the frag_list of the parent nbuf. 1756 */ 1757 qdf_nbuf_set_rx_chfrag_start(parent, 1); 1758 /* 1759 * L3 header padding is only needed for the 1st buffer 1760 * in a scattered msdu 1761 */ 1762 last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len, 1763 l3_hdr_pad_offset); 1764 1765 /* 1766 * MSDU cont bit is set but reported MPDU length can fit 1767 * in to single buffer 1768 * 1769 * Increment error stats and avoid SG list creation 1770 */ 1771 if (last_nbuf) { 1772 DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1); 1773 qdf_nbuf_pull_head(parent, 1774 soc->rx_pkt_tlv_size + l3_hdr_pad_offset); 1775 return parent; 1776 } 1777 1778 /* 1779 * this is where we set the length of the fragments which are 1780 * associated to the parent nbuf. We iterate through the frag_list 1781 * till we hit the last_nbuf of the list. 
 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
		qdf_nbuf_pull_head(nbuf,
				   soc->rx_pkt_tlv_size);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		} else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			dp_err("Invalid packet length\n");
			qdf_assert_always(0);
		}
		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent,
			   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
	return parent;
}

#ifdef DP_RX_SG_FRAME_SUPPORT
/**
 * dp_rx_is_sg_supported() - Check whether SG (scatter-gather) packet
 *			     processing is supported.
 *
 * Return: returns true when processing is supported else false.
 */
bool dp_rx_is_sg_supported(void)
{
	return true;
}
#else
bool dp_rx_is_sg_supported(void)
{
	return false;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef QCA_PEER_EXT_STATS
/*
 * dp_rx_compute_tid_delay - Compute per TID delay stats
 * @stats: per TID delay stats of the peer
 * @nbuf: NBuffer
 *
 * Return: Void
 */
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf)
{
	struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);

	dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
}
#endif /* QCA_PEER_EXT_STATS */

/**
 * dp_rx_compute_delay() - Compute and fill in all timestamps
 *			   to pass in correct fields
 * @vdev: DP vdev handle
 * @nbuf: buffer for which the delay is computed
 *
 * Return: none
 */
void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint32_t interframe_delay =
		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
	struct cdp_tid_rx_stats *rstats =
		&vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];

	dp_update_delay_stats(NULL, rstats, to_stack, tid,
			      CDP_DELAY_STATS_REAP_STACK, ring_id, false);
	/*
	 * Update interframe delay stats calculated at deliver_data_ol point.
	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame,
	 * so the interframe delay will not be calculated correctly for the
	 * 1st frame. On the other hand, this helps avoid an extra per packet
	 * check of vdev->prev_rx_deliver_tstamp.
1874 */ 1875 dp_update_delay_stats(NULL, rstats, interframe_delay, tid, 1876 CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false); 1877 vdev->prev_rx_deliver_tstamp = current_ts; 1878 } 1879 1880 /** 1881 * dp_rx_drop_nbuf_list() - drop an nbuf list 1882 * @pdev: dp pdev reference 1883 * @buf_list: buffer list to be dropepd 1884 * 1885 * Return: int (number of bufs dropped) 1886 */ 1887 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev, 1888 qdf_nbuf_t buf_list) 1889 { 1890 struct cdp_tid_rx_stats *stats = NULL; 1891 uint8_t tid = 0, ring_id = 0; 1892 int num_dropped = 0; 1893 qdf_nbuf_t buf, next_buf; 1894 1895 buf = buf_list; 1896 while (buf) { 1897 ring_id = QDF_NBUF_CB_RX_CTX_ID(buf); 1898 next_buf = qdf_nbuf_queue_next(buf); 1899 tid = qdf_nbuf_get_tid_val(buf); 1900 if (qdf_likely(pdev)) { 1901 stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1902 stats->fail_cnt[INVALID_PEER_VDEV]++; 1903 stats->delivered_to_stack--; 1904 } 1905 dp_rx_nbuf_free(buf); 1906 buf = next_buf; 1907 num_dropped++; 1908 } 1909 1910 return num_dropped; 1911 } 1912 1913 #ifdef QCA_SUPPORT_WDS_EXTENDED 1914 /** 1915 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta 1916 * @soc: core txrx main context 1917 * @vdev: vdev 1918 * @txrx_peer: txrx peer 1919 * @nbuf_head: skb list head 1920 * 1921 * Return: true if packet is delivered to netdev per STA. 1922 */ 1923 static inline bool 1924 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 1925 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 1926 { 1927 /* 1928 * When extended WDS is disabled, frames are sent to AP netdevice. 1929 */ 1930 if (qdf_likely(!vdev->wds_ext_enabled)) 1931 return false; 1932 1933 /* 1934 * There can be 2 cases: 1935 * 1. Send frame to parent netdev if its not for netdev per STA 1936 * 2. If frame is meant for netdev per STA: 1937 * a. Send frame to appropriate netdev using registered fp. 1938 * b. If fp is NULL, drop the frames. 1939 */ 1940 if (!txrx_peer->wds_ext.init) 1941 return false; 1942 1943 if (txrx_peer->osif_rx) 1944 txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head); 1945 else 1946 dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 1947 1948 return true; 1949 } 1950 1951 #else 1952 static inline bool 1953 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 1954 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 1955 { 1956 return false; 1957 } 1958 #endif 1959 1960 #ifdef PEER_CACHE_RX_PKTS 1961 /** 1962 * dp_rx_flush_rx_cached() - flush cached rx frames 1963 * @peer: peer 1964 * @drop: flag to drop frames or forward to net stack 1965 * 1966 * Return: None 1967 */ 1968 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) 1969 { 1970 struct dp_peer_cached_bufq *bufqi; 1971 struct dp_rx_cached_buf *cache_buf = NULL; 1972 ol_txrx_rx_fp data_rx = NULL; 1973 int num_buff_elem; 1974 QDF_STATUS status; 1975 1976 /* 1977 * Flush dp cached frames only for mld peers and legacy peers, as 1978 * link peers don't store cached frames 1979 */ 1980 if (IS_MLO_DP_LINK_PEER(peer)) 1981 return; 1982 1983 if (!peer->txrx_peer) { 1984 dp_err("txrx_peer NULL!! 
peer mac_addr("QDF_MAC_ADDR_FMT")", 1985 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 1986 return; 1987 } 1988 1989 if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) { 1990 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 1991 return; 1992 } 1993 1994 qdf_spin_lock_bh(&peer->peer_info_lock); 1995 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) 1996 data_rx = peer->vdev->osif_rx; 1997 else 1998 drop = true; 1999 qdf_spin_unlock_bh(&peer->peer_info_lock); 2000 2001 bufqi = &peer->txrx_peer->bufq_info; 2002 2003 qdf_spin_lock_bh(&bufqi->bufq_lock); 2004 qdf_list_remove_front(&bufqi->cached_bufq, 2005 (qdf_list_node_t **)&cache_buf); 2006 while (cache_buf) { 2007 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( 2008 cache_buf->buf); 2009 bufqi->entries -= num_buff_elem; 2010 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2011 if (drop) { 2012 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, 2013 cache_buf->buf); 2014 } else { 2015 /* Flush the cached frames to OSIF DEV */ 2016 status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); 2017 if (status != QDF_STATUS_SUCCESS) 2018 bufqi->dropped = dp_rx_drop_nbuf_list( 2019 peer->vdev->pdev, 2020 cache_buf->buf); 2021 } 2022 qdf_mem_free(cache_buf); 2023 cache_buf = NULL; 2024 qdf_spin_lock_bh(&bufqi->bufq_lock); 2025 qdf_list_remove_front(&bufqi->cached_bufq, 2026 (qdf_list_node_t **)&cache_buf); 2027 } 2028 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2029 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 2030 } 2031 2032 /** 2033 * dp_rx_enqueue_rx() - cache rx frames 2034 * @peer: peer 2035 * @txrx_peer: DP txrx_peer 2036 * @rx_buf_list: cache buffer list 2037 * 2038 * Return: None 2039 */ 2040 static QDF_STATUS 2041 dp_rx_enqueue_rx(struct dp_peer *peer, 2042 struct dp_txrx_peer *txrx_peer, 2043 qdf_nbuf_t rx_buf_list) 2044 { 2045 struct dp_rx_cached_buf *cache_buf; 2046 struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info; 2047 int num_buff_elem; 2048 QDF_STATUS ret = QDF_STATUS_SUCCESS; 2049 struct dp_soc *soc = txrx_peer->vdev->pdev->soc; 2050 struct dp_peer *ta_peer = NULL; 2051 2052 /* 2053 * If peer id is invalid which likely peer map has not completed, 2054 * then need caller provide dp_peer pointer, else it's ok to use 2055 * txrx_peer->peer_id to get dp_peer. 
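	 *
	 * In other words (illustrative, matching the checks below): a caller
	 * that already holds a dp_peer (e.g. the no-peer delivery path that
	 * runs before peer map completes) passes it in and only a reference
	 * is taken on it, while a caller that passes peer as NULL relies on
	 * dp_peer_get_ref_by_id() with txrx_peer->peer_id to look the peer
	 * up.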
2056 */ 2057 if (peer) { 2058 if (QDF_STATUS_SUCCESS == 2059 dp_peer_get_ref(soc, peer, DP_MOD_ID_RX)) 2060 ta_peer = peer; 2061 } else { 2062 ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2063 DP_MOD_ID_RX); 2064 } 2065 2066 if (!ta_peer) { 2067 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2068 rx_buf_list); 2069 return QDF_STATUS_E_INVAL; 2070 } 2071 2072 dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, 2073 bufqi->dropped); 2074 if (!ta_peer->valid) { 2075 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2076 rx_buf_list); 2077 ret = QDF_STATUS_E_INVAL; 2078 goto fail; 2079 } 2080 2081 qdf_spin_lock_bh(&bufqi->bufq_lock); 2082 if (bufqi->entries >= bufqi->thresh) { 2083 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2084 rx_buf_list); 2085 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2086 ret = QDF_STATUS_E_RESOURCES; 2087 goto fail; 2088 } 2089 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2090 2091 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); 2092 2093 cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); 2094 if (!cache_buf) { 2095 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2096 "Failed to allocate buf to cache rx frames"); 2097 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2098 rx_buf_list); 2099 ret = QDF_STATUS_E_NOMEM; 2100 goto fail; 2101 } 2102 2103 cache_buf->buf = rx_buf_list; 2104 2105 qdf_spin_lock_bh(&bufqi->bufq_lock); 2106 qdf_list_insert_back(&bufqi->cached_bufq, 2107 &cache_buf->node); 2108 bufqi->entries += num_buff_elem; 2109 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2110 2111 fail: 2112 dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX); 2113 return ret; 2114 } 2115 2116 static inline 2117 bool dp_rx_is_peer_cache_bufq_supported(void) 2118 { 2119 return true; 2120 } 2121 #else 2122 static inline 2123 bool dp_rx_is_peer_cache_bufq_supported(void) 2124 { 2125 return false; 2126 } 2127 2128 static inline QDF_STATUS 2129 dp_rx_enqueue_rx(struct dp_peer *peer, 2130 struct dp_txrx_peer *txrx_peer, 2131 qdf_nbuf_t rx_buf_list) 2132 { 2133 return QDF_STATUS_SUCCESS; 2134 } 2135 #endif 2136 2137 #ifndef DELIVERY_TO_STACK_STATUS_CHECK 2138 /** 2139 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2140 * using the appropriate call back functions. 2141 * @soc: soc 2142 * @vdev: vdev 2143 * @peer: peer 2144 * @nbuf_head: skb list head 2145 * @nbuf_tail: skb list tail 2146 * 2147 * Return: None 2148 */ 2149 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2150 struct dp_vdev *vdev, 2151 struct dp_txrx_peer *txrx_peer, 2152 qdf_nbuf_t nbuf_head) 2153 { 2154 if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev, 2155 txrx_peer, nbuf_head))) 2156 return; 2157 2158 /* Function pointer initialized only when FISA is enabled */ 2159 if (vdev->osif_fisa_rx) 2160 /* on failure send it via regular path */ 2161 vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2162 else 2163 vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2164 } 2165 2166 #else 2167 /** 2168 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2169 * using the appropriate call back functions. 2170 * @soc: soc 2171 * @vdev: vdev 2172 * @txrx_peer: txrx peer 2173 * @nbuf_head: skb list head 2174 * @nbuf_tail: skb list tail 2175 * 2176 * Check the return status of the call back function and drop 2177 * the packets if the return status indicates a failure. 
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
					  struct dp_vdev *vdev,
					  struct dp_txrx_peer *txrx_peer,
					  qdf_nbuf_t nbuf_head)
{
	int num_nbuf = 0;
	QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;

	/* Function pointer initialized only when FISA is enabled */
	if (vdev->osif_fisa_rx)
		/* on failure send it via regular path */
		ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
	else if (vdev->osif_rx)
		ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);

	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
		if (txrx_peer)
			DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num,
					       num_nbuf);
	}
}
#endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */

/**
 * dp_rx_validate_rx_callbacks() - validate rx callbacks
 * @soc: DP soc
 * @vdev: DP vdev handle
 * @txrx_peer: pointer to the txrx peer object
 * @nbuf_head: skb list head
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_FAILURE
 */
static inline QDF_STATUS
dp_rx_validate_rx_callbacks(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf_head)
{
	int num_nbuf;

	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
		/*
		 * This is a special case where vdev is invalid,
		 * so we cannot know the pdev to which this packet
		 * belonged. Hence we update the soc rx error stats.
		 */
		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
		return QDF_STATUS_E_FAILURE;
	}

	/*
	 * It is highly unlikely to have a vdev without a registered rx
	 * callback function. If so, let us free the nbuf_list.
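	 *
	 * When a txrx_peer is available and peer bufq caching is supported,
	 * the frames are queued through dp_rx_enqueue_rx() below instead of
	 * being dropped, so that dp_rx_flush_rx_cached() can hand them to
	 * the stack once the callback gets registered.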
2238 */ 2239 if (qdf_unlikely(!vdev->osif_rx)) { 2240 if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) { 2241 dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head); 2242 } else { 2243 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, 2244 nbuf_head); 2245 DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf, 2246 vdev->pdev->enhanced_stats_en); 2247 } 2248 return QDF_STATUS_E_FAILURE; 2249 } 2250 2251 return QDF_STATUS_SUCCESS; 2252 } 2253 2254 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 2255 struct dp_vdev *vdev, 2256 struct dp_txrx_peer *txrx_peer, 2257 qdf_nbuf_t nbuf_head, 2258 qdf_nbuf_t nbuf_tail) 2259 { 2260 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2261 QDF_STATUS_SUCCESS) 2262 return QDF_STATUS_E_FAILURE; 2263 2264 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || 2265 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { 2266 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, 2267 &nbuf_tail); 2268 } 2269 2270 dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head); 2271 2272 return QDF_STATUS_SUCCESS; 2273 } 2274 2275 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 2276 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 2277 struct dp_vdev *vdev, 2278 struct dp_txrx_peer *txrx_peer, 2279 qdf_nbuf_t nbuf_head, 2280 qdf_nbuf_t nbuf_tail) 2281 { 2282 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2283 QDF_STATUS_SUCCESS) 2284 return QDF_STATUS_E_FAILURE; 2285 2286 vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head); 2287 2288 return QDF_STATUS_SUCCESS; 2289 } 2290 #endif 2291 2292 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2293 #ifdef VDEV_PEER_PROTOCOL_COUNT 2294 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \ 2295 { \ 2296 qdf_nbuf_t nbuf_local; \ 2297 struct dp_txrx_peer *txrx_peer_local; \ 2298 struct dp_vdev *vdev_local = vdev_hdl; \ 2299 do { \ 2300 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ 2301 break; \ 2302 nbuf_local = nbuf; \ 2303 txrx_peer_local = txrx_peer; \ 2304 if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ 2305 break; \ 2306 else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ 2307 break; \ 2308 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ 2309 (nbuf_local), \ 2310 (txrx_peer_local), 0, 1); \ 2311 } while (0); \ 2312 } 2313 #else 2314 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) 2315 #endif 2316 2317 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER 2318 /** 2319 * dp_rx_rates_stats_update() - update rate stats 2320 * from rx msdu. 2321 * @soc: datapath soc handle 2322 * @nbuf: received msdu buffer 2323 * @rx_tlv_hdr: rx tlv header 2324 * @txrx_peer: datapath txrx_peer handle 2325 * @sgi: Short Guard Interval 2326 * @mcs: Modulation and Coding Set 2327 * @nss: Number of Spatial Streams 2328 * @bw: BandWidth 2329 * @pkt_type: Corresponds to preamble 2330 * 2331 * To be precisely record rates, following factors are considered: 2332 * Exclude specific frames, ARP, DHCP, ssdp, etc. 2333 * Make sure to affect rx throughput as least as possible. 
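 *
 * Rough usage sketch (values are hypothetical): for a data msdu received
 * with sgi = 0, mcs = 7, nss = 2, bw = 1 and an HT/VHT/HE preamble, the
 * function maps (sgi, mcs, nss - 1, pkt_type, bw) to a rate in kbps via
 * dp_getrateindex() and folds it into rx.avg_rx_rate with the
 * dp_ath_rate_lpf() low-pass filter.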
2334 * 2335 * Return: void 2336 */ 2337 static void 2338 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2339 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2340 uint32_t sgi, uint32_t mcs, 2341 uint32_t nss, uint32_t bw, uint32_t pkt_type) 2342 { 2343 uint32_t rix; 2344 uint16_t ratecode; 2345 uint32_t avg_rx_rate; 2346 uint32_t ratekbps; 2347 enum cdp_punctured_modes punc_mode = NO_PUNCTURE; 2348 2349 if (soc->high_throughput || 2350 dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) { 2351 return; 2352 } 2353 2354 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs); 2355 2356 /* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */ 2357 if (qdf_unlikely(pkt_type == DOT11_B)) 2358 nss = 1; 2359 2360 /* here pkt_type corresponds to preamble */ 2361 ratekbps = dp_getrateindex(sgi, 2362 mcs, 2363 nss - 1, 2364 pkt_type, 2365 bw, 2366 punc_mode, 2367 &rix, 2368 &ratecode); 2369 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps); 2370 avg_rx_rate = 2371 dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate, 2372 ratekbps); 2373 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate); 2374 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss); 2375 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs); 2376 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw); 2377 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi); 2378 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type); 2379 } 2380 #else 2381 static inline void 2382 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2383 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2384 uint32_t sgi, uint32_t mcs, 2385 uint32_t nss, uint32_t bw, uint32_t pkt_type) 2386 { 2387 } 2388 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */ 2389 2390 #ifndef QCA_ENHANCED_STATS_SUPPORT 2391 /** 2392 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer 2393 * 2394 * @soc: datapath soc handle 2395 * @nbuf: received msdu buffer 2396 * @rx_tlv_hdr: rx tlv header 2397 * @txrx_peer: datapath txrx_peer handle 2398 * 2399 * Return: void 2400 */ 2401 static inline 2402 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2403 uint8_t *rx_tlv_hdr, 2404 struct dp_txrx_peer *txrx_peer) 2405 { 2406 bool is_ampdu; 2407 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; 2408 uint8_t dst_mcs_idx; 2409 2410 /* 2411 * TODO - For KIWI this field is present in ring_desc 2412 * Try to use ring desc instead of tlv. 2413 */ 2414 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr); 2415 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu); 2416 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); 2417 2418 sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr); 2419 mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 2420 tid = qdf_nbuf_get_tid_val(nbuf); 2421 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 2422 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, 2423 rx_tlv_hdr); 2424 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 2425 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 2426 /* do HW to SW pkt type conversion */ 2427 pkt_type = (pkt_type >= HAL_DOT11_MAX ? 
DOT11_MAX : 2428 hal_2_dp_pkt_type_map[pkt_type]); 2429 2430 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1, 2431 ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); 2432 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, 2433 ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); 2434 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1); 2435 /* 2436 * only if nss > 0 and pkt_type is 11N/AC/AX, 2437 * then increase index [nss - 1] in array counter. 2438 */ 2439 if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type)) 2440 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1); 2441 2442 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1); 2443 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1, 2444 hal_rx_tlv_mic_err_get(soc->hal_soc, 2445 rx_tlv_hdr)); 2446 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1, 2447 hal_rx_tlv_decrypt_err_get(soc->hal_soc, 2448 rx_tlv_hdr)); 2449 2450 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1); 2451 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1); 2452 2453 dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs); 2454 if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx) 2455 DP_PEER_EXTD_STATS_INC(txrx_peer, 2456 rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx], 2457 1); 2458 2459 dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, 2460 sgi, mcs, nss, bw, pkt_type); 2461 } 2462 #else 2463 static inline 2464 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2465 uint8_t *rx_tlv_hdr, 2466 struct dp_txrx_peer *txrx_peer) 2467 { 2468 } 2469 #endif 2470 2471 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO) 2472 static inline void 2473 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2474 qdf_nbuf_t nbuf) 2475 { 2476 uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf); 2477 2478 if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) { 2479 dp_err_rl("Invalid lmac_id: %u vdev_id: %u", 2480 lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf)); 2481 2482 if (qdf_likely(txrx_peer)) 2483 dp_err_rl("peer_id: %u", txrx_peer->peer_id); 2484 2485 return; 2486 } 2487 2488 /* only count stats per lmac for MLO connection*/ 2489 DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1, 2490 QDF_NBUF_CB_RX_PKT_LEN(nbuf), 2491 txrx_peer->mld_peer); 2492 } 2493 #else 2494 static inline void 2495 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2496 qdf_nbuf_t nbuf) 2497 { 2498 } 2499 #endif 2500 2501 /** 2502 * dp_rx_msdu_stats_update() - update per msdu stats. 2503 * @soc: core txrx main context 2504 * @nbuf: pointer to the first msdu of an amsdu. 2505 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 2506 * @txrx_peer: pointer to the txrx peer object. 2507 * @ring_id: reo dest ring number on which pkt is reaped. 2508 * @tid_stats: per tid rx stats. 2509 * 2510 * update all the per msdu stats for that nbuf. 
2511 * Return: void 2512 */ 2513 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2514 uint8_t *rx_tlv_hdr, 2515 struct dp_txrx_peer *txrx_peer, 2516 uint8_t ring_id, 2517 struct cdp_tid_rx_stats *tid_stats) 2518 { 2519 bool is_not_amsdu; 2520 struct dp_vdev *vdev = txrx_peer->vdev; 2521 bool enh_flag; 2522 qdf_ether_header_t *eh; 2523 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2524 2525 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); 2526 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 2527 qdf_nbuf_is_rx_chfrag_end(nbuf); 2528 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, 2529 msdu_len); 2530 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, 2531 is_not_amsdu); 2532 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu); 2533 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1, 2534 qdf_nbuf_is_rx_retry_flag(nbuf)); 2535 dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf); 2536 tid_stats->msdu_cnt++; 2537 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && 2538 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 2539 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2540 enh_flag = vdev->pdev->enhanced_stats_en; 2541 DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag); 2542 tid_stats->mcast_msdu_cnt++; 2543 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 2544 DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag); 2545 tid_stats->bcast_msdu_cnt++; 2546 } 2547 } 2548 2549 txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks(); 2550 2551 dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer); 2552 } 2553 2554 #ifndef WDS_VENDOR_EXTENSION 2555 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, 2556 struct dp_vdev *vdev, 2557 struct dp_txrx_peer *txrx_peer) 2558 { 2559 return 1; 2560 } 2561 #endif 2562 2563 #ifdef RX_DESC_DEBUG_CHECK 2564 /** 2565 * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr 2566 * corruption 2567 * 2568 * @ring_desc: REO ring descriptor 2569 * @rx_desc: Rx descriptor 2570 * 2571 * Return: NONE 2572 */ 2573 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, 2574 hal_ring_desc_t ring_desc, 2575 struct dp_rx_desc *rx_desc) 2576 { 2577 struct hal_buf_info hbi; 2578 2579 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2580 /* Sanity check for possible buffer paddr corruption */ 2581 if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr)) 2582 return QDF_STATUS_SUCCESS; 2583 2584 return QDF_STATUS_E_FAILURE; 2585 } 2586 2587 /** 2588 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer 2589 * out of bound access from H.W 2590 * 2591 * @soc: DP soc 2592 * @pkt_len: Packet length received from H.W 2593 * 2594 * Return: NONE 2595 */ 2596 static inline void 2597 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, 2598 uint32_t pkt_len) 2599 { 2600 struct rx_desc_pool *rx_desc_pool; 2601 2602 rx_desc_pool = &soc->rx_desc_buf[0]; 2603 qdf_assert_always(pkt_len <= rx_desc_pool->buf_size); 2604 } 2605 #else 2606 static inline void 2607 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { } 2608 #endif 2609 2610 #ifdef DP_RX_PKT_NO_PEER_DELIVER 2611 #ifdef DP_RX_UDP_OVER_PEER_ROAM 2612 /** 2613 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received 2614 * during roaming 2615 * @vdev: dp_vdev pointer 2616 * @rx_tlv_hdr: rx tlv header 2617 * @nbuf: pkt skb pointer 2618 * 2619 * This function will check if rx udp data is received from authorised 2620 * roamed peer before peer map indication is 
received from FW after
 * roaming. This is needed for VoIP scenarios, where packet loss during
 * roaming is expected to be minimal.
 *
 * Return: bool
 */
static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
						uint8_t *rx_tlv_hdr,
						qdf_nbuf_t nbuf)
{
	char *hdr_desc;
	struct ieee80211_frame *wh = NULL;

	hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc,
					     rx_tlv_hdr);
	wh = (struct ieee80211_frame *)hdr_desc;

	if (vdev->roaming_peer_status ==
	    WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED &&
	    !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2,
			 QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
	    qdf_nbuf_is_ipv6_udp_pkt(nbuf)))
		return true;

	return false;
}
#else
static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
						uint8_t *rx_tlv_hdr,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif
/**
 * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
 *				      no corresponding peer is found
 * @soc: core txrx main context
 * @nbuf: pkt skb pointer
 *
 * This function will try to deliver some RX special frames to the stack
 * even when no matching peer is found. For instance, in the LFR case some
 * EAPOL data is sent to the host before peer_map is done.
 *
 * Return: None
 */
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_vdev *vdev = NULL;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t pkt_len = 0;
	uint8_t *rx_tlv_hdr;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
			      FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
	bool is_special_frame = false;
	struct dp_peer *peer = NULL;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peer_id)
		goto deliver_fail;

	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
	if (!vdev || vdev->delete.pending)
		goto deliver_fail;

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
		goto deliver_fail;

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;

	qdf_nbuf_set_pktlen(nbuf, pkt_len);
	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);

	is_special_frame = dp_rx_is_special_frame(nbuf, frame_mask);
	if (qdf_likely(vdev->osif_rx)) {
		if (is_special_frame ||
		    dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr,
							nbuf)) {
			qdf_nbuf_set_exc_frame(nbuf, 1);
			if (QDF_STATUS_SUCCESS !=
			    vdev->osif_rx(vdev->osif_vdev, nbuf))
				goto deliver_fail;

			DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
			return;
		}
	} else if (is_special_frame) {
		/*
		 * If MLO connection, txrx_peer for link peer does not exist,
		 * try to store these RX packets to txrx_peer's bufq of MLD
		 * peer until vdev->osif_rx is registered from CP and flush
		 * them to stack.
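		 *
		 * For example (illustrative): an EAPOL frame that arrives on
		 * an MLO link before the link peer's txrx_peer exists is
		 * queued to the MLD peer's bufq via dp_rx_enqueue_rx() below
		 * and is handed to the stack later by dp_rx_flush_rx_cached()
		 * once osif_rx is registered.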
		 */
		peer = dp_peer_get_tgt_peer_by_id(soc, peer_id,
						  DP_MOD_ID_RX);
		if (!peer)
			goto deliver_fail;

		/* only check for MLO connection */
		if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer &&
		    dp_rx_is_peer_cache_bufq_supported()) {
			qdf_nbuf_set_exc_frame(nbuf, 1);

			if (QDF_STATUS_SUCCESS ==
			    dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) {
				DP_STATS_INC(soc,
					     rx.err.pkt_delivered_no_peer,
					     1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.rx_invalid_peer.num,
					     1);
			}

			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
			dp_peer_unref_delete(peer, DP_MOD_ID_RX);
			return;
		}

		dp_peer_unref_delete(peer, DP_MOD_ID_RX);
	}

deliver_fail:
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	dp_rx_nbuf_free(nbuf);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
}
#else
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	dp_rx_nbuf_free(nbuf);
}
#endif

/**
 * dp_rx_srng_get_num_pending() - get number of pending entries
 * @hal_soc: hal soc opaque pointer
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
 * @num_entries: number of entries in the hal_ring.
 * @near_full: pointer to a boolean. This is set if ring is near full.
 *
 * The function returns the number of entries in a destination ring which are
 * yet to be reaped. The function also checks if the ring is near full.
 * If more than half of the ring needs to be reaped, the ring is considered
 * approaching full.
 * The function uses hal_srng_dst_num_valid_locked to get the number of valid
 * entries. It should not be called within a SRNG lock. HW pointer value is
 * synced into cached_hp.
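 *
 * Illustrative example (hypothetical numbers): with num_entries = 4096 and
 * 2100 descriptors still to be reaped, num_pending (2100) is at least
 * num_entries >> 1 (2048), so *near_full is set to true.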
2783 * 2784 * Return: Number of pending entries if any 2785 */ 2786 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, 2787 hal_ring_handle_t hal_ring_hdl, 2788 uint32_t num_entries, 2789 bool *near_full) 2790 { 2791 uint32_t num_pending = 0; 2792 2793 num_pending = hal_srng_dst_num_valid_locked(hal_soc, 2794 hal_ring_hdl, 2795 true); 2796 2797 if (num_entries && (num_pending >= num_entries >> 1)) 2798 *near_full = true; 2799 else 2800 *near_full = false; 2801 2802 return num_pending; 2803 } 2804 2805 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2806 2807 #ifdef WLAN_SUPPORT_RX_FISA 2808 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2809 { 2810 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; 2811 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2812 } 2813 #else 2814 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2815 { 2816 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2817 } 2818 #endif 2819 2820 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2821 2822 #ifdef DP_RX_DROP_RAW_FRM 2823 /** 2824 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop 2825 * @nbuf: pkt skb pointer 2826 * 2827 * Return: true - raw frame, dropped 2828 * false - not raw frame, do nothing 2829 */ 2830 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) 2831 { 2832 if (qdf_nbuf_is_raw_frame(nbuf)) { 2833 dp_rx_nbuf_free(nbuf); 2834 return true; 2835 } 2836 2837 return false; 2838 } 2839 #endif 2840 2841 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 2842 /** 2843 * dp_rx_ring_record_entry() - Record an entry into the rx ring history. 2844 * @soc: Datapath soc structure 2845 * @ring_num: REO ring number 2846 * @ring_desc: REO ring descriptor 2847 * 2848 * Returns: None 2849 */ 2850 void 2851 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 2852 hal_ring_desc_t ring_desc) 2853 { 2854 struct dp_buf_info_record *record; 2855 struct hal_buf_info hbi; 2856 uint32_t idx; 2857 2858 if (qdf_unlikely(!soc->rx_ring_history[ring_num])) 2859 return; 2860 2861 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2862 2863 /* buffer_addr_info is the first element of ring_desc */ 2864 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc, 2865 &hbi); 2866 2867 idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index, 2868 DP_RX_HIST_MAX); 2869 2870 /* No NULL check needed for record since its an array */ 2871 record = &soc->rx_ring_history[ring_num]->entry[idx]; 2872 2873 record->timestamp = qdf_get_log_timestamp(); 2874 record->hbi.paddr = hbi.paddr; 2875 record->hbi.sw_cookie = hbi.sw_cookie; 2876 record->hbi.rbm = hbi.rbm; 2877 } 2878 #endif 2879 2880 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 2881 /** 2882 * dp_rx_update_stats() - Update soc level rx packet count 2883 * @soc: DP soc handle 2884 * @nbuf: nbuf received 2885 * 2886 * Returns: none 2887 */ 2888 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) 2889 { 2890 DP_STATS_INC_PKT(soc, rx.ingress, 1, 2891 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2892 } 2893 #endif 2894 2895 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 2896 /** 2897 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture 2898 * @soc : dp_soc handle 2899 * @pdev: dp_pdev handle 2900 * @peer_id: peer_id of the peer for which completion came 2901 * @ppdu_id: ppdu_id 2902 * @netbuf: Buffer pointer 2903 * 2904 * This function is used to deliver rx packet to packet capture 2905 */ 2906 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2907 uint16_t peer_id, 
uint32_t is_offload, 2908 qdf_nbuf_t netbuf) 2909 { 2910 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2911 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf, 2912 peer_id, is_offload, pdev->pdev_id); 2913 } 2914 2915 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2916 uint32_t is_offload) 2917 { 2918 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2919 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER, 2920 soc, nbuf, HTT_INVALID_VDEV, 2921 is_offload, 0); 2922 } 2923 #endif 2924 2925 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2926 2927 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) 2928 { 2929 QDF_STATUS ret; 2930 2931 if (vdev->osif_rx_flush) { 2932 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); 2933 if (!QDF_IS_STATUS_SUCCESS(ret)) { 2934 dp_err("Failed to flush rx pkts for vdev %d\n", 2935 vdev->vdev_id); 2936 return ret; 2937 } 2938 } 2939 2940 return QDF_STATUS_SUCCESS; 2941 } 2942 2943 static QDF_STATUS 2944 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, 2945 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 2946 struct dp_pdev *dp_pdev, 2947 struct rx_desc_pool *rx_desc_pool) 2948 { 2949 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 2950 2951 (nbuf_frag_info_t->virt_addr).nbuf = 2952 qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, 2953 RX_BUFFER_RESERVATION, 2954 rx_desc_pool->buf_alignment, FALSE); 2955 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 2956 dp_err("nbuf alloc failed"); 2957 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 2958 return ret; 2959 } 2960 2961 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, 2962 (nbuf_frag_info_t->virt_addr).nbuf, 2963 QDF_DMA_FROM_DEVICE, 2964 rx_desc_pool->buf_size); 2965 2966 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 2967 qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf); 2968 dp_err("nbuf map failed"); 2969 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 2970 return ret; 2971 } 2972 2973 nbuf_frag_info_t->paddr = 2974 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 2975 2976 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 2977 &nbuf_frag_info_t->paddr, 2978 rx_desc_pool); 2979 if (ret == QDF_STATUS_E_FAILURE) { 2980 dp_err("nbuf check x86 failed"); 2981 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 2982 return ret; 2983 } 2984 2985 return QDF_STATUS_SUCCESS; 2986 } 2987 2988 QDF_STATUS 2989 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 2990 struct dp_srng *dp_rxdma_srng, 2991 struct rx_desc_pool *rx_desc_pool, 2992 uint32_t num_req_buffers) 2993 { 2994 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 2995 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; 2996 union dp_rx_desc_list_elem_t *next; 2997 void *rxdma_ring_entry; 2998 qdf_dma_addr_t paddr; 2999 struct dp_rx_nbuf_frag_info *nf_info; 3000 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; 3001 uint32_t buffer_index, nbuf_ptrs_per_page; 3002 qdf_nbuf_t nbuf; 3003 QDF_STATUS ret; 3004 int page_idx, total_pages; 3005 union dp_rx_desc_list_elem_t *desc_list = NULL; 3006 union dp_rx_desc_list_elem_t *tail = NULL; 3007 int sync_hw_ptr = 1; 3008 uint32_t num_entries_avail; 3009 3010 if (qdf_unlikely(!dp_pdev)) { 3011 dp_rx_err("%pK: pdev is null for mac_id = %d", 3012 dp_soc, mac_id); 3013 return QDF_STATUS_E_FAILURE; 3014 } 3015 3016 if (qdf_unlikely(!rxdma_srng)) { 3017 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3018 return QDF_STATUS_E_FAILURE; 3019 } 3020 3021 dp_debug("requested %u RX buffers for 
driver attach", num_req_buffers); 3022 3023 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3024 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 3025 rxdma_srng, 3026 sync_hw_ptr); 3027 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3028 3029 if (!num_entries_avail) { 3030 dp_err("Num of available entries is zero, nothing to do"); 3031 return QDF_STATUS_E_NOMEM; 3032 } 3033 3034 if (num_entries_avail < num_req_buffers) 3035 num_req_buffers = num_entries_avail; 3036 3037 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, 3038 num_req_buffers, &desc_list, &tail); 3039 if (!nr_descs) { 3040 dp_err("no free rx_descs in freelist"); 3041 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 3042 return QDF_STATUS_E_NOMEM; 3043 } 3044 3045 dp_debug("got %u RX descs for driver attach", nr_descs); 3046 3047 /* 3048 * Try to allocate pointers to the nbuf one page at a time. 3049 * Take pointers that can fit in one page of memory and 3050 * iterate through the total descriptors that need to be 3051 * allocated in order of pages. Reuse the pointers that 3052 * have been allocated to fit in one page across each 3053 * iteration to index into the nbuf. 3054 */ 3055 total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE; 3056 3057 /* 3058 * Add an extra page to store the remainder if any 3059 */ 3060 if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE) 3061 total_pages++; 3062 nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE); 3063 if (!nf_info) { 3064 dp_err("failed to allocate nbuf array"); 3065 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3066 QDF_BUG(0); 3067 return QDF_STATUS_E_NOMEM; 3068 } 3069 nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info); 3070 3071 for (page_idx = 0; page_idx < total_pages; page_idx++) { 3072 qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE); 3073 3074 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { 3075 /* 3076 * The last page of buffer pointers may not be required 3077 * completely based on the number of descriptors. Below 3078 * check will ensure we are allocating only the 3079 * required number of descriptors. 
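			 *
			 * Illustrative example (hypothetical sizes): if
			 * nr_descs is 1024 and one DP_BLOCKMEM_SIZE page
			 * holds 400 dp_rx_nbuf_frag_info entries, three
			 * pages are used and only 224 entries of the last
			 * page are filled before this check breaks out of
			 * the loop.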
3080 */ 3081 if (nr_nbuf_total >= nr_descs) 3082 break; 3083 /* Flag is set while pdev rx_desc_pool initialization */ 3084 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3085 ret = dp_pdev_frag_alloc_and_map(dp_soc, 3086 &nf_info[nr_nbuf], dp_pdev, 3087 rx_desc_pool); 3088 else 3089 ret = dp_pdev_nbuf_alloc_and_map(dp_soc, 3090 &nf_info[nr_nbuf], dp_pdev, 3091 rx_desc_pool); 3092 if (QDF_IS_STATUS_ERROR(ret)) 3093 break; 3094 3095 nr_nbuf_total++; 3096 } 3097 3098 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3099 3100 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { 3101 rxdma_ring_entry = 3102 hal_srng_src_get_next(dp_soc->hal_soc, 3103 rxdma_srng); 3104 qdf_assert_always(rxdma_ring_entry); 3105 3106 next = desc_list->next; 3107 paddr = nf_info[buffer_index].paddr; 3108 nbuf = nf_info[buffer_index].virt_addr.nbuf; 3109 3110 /* Flag is set while pdev rx_desc_pool initialization */ 3111 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3112 dp_rx_desc_frag_prep(&desc_list->rx_desc, 3113 &nf_info[buffer_index]); 3114 else 3115 dp_rx_desc_prep(&desc_list->rx_desc, 3116 &nf_info[buffer_index]); 3117 desc_list->rx_desc.in_use = 1; 3118 dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); 3119 dp_rx_desc_update_dbg_info(&desc_list->rx_desc, 3120 __func__, 3121 RX_DESC_REPLENISHED); 3122 3123 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc ,rxdma_ring_entry, paddr, 3124 desc_list->rx_desc.cookie, 3125 rx_desc_pool->owner); 3126 3127 dp_ipa_handle_rx_buf_smmu_mapping( 3128 dp_soc, nbuf, 3129 rx_desc_pool->buf_size, true, 3130 __func__, __LINE__); 3131 3132 dp_audio_smmu_map(dp_soc->osdev, 3133 qdf_mem_paddr_from_dmaaddr(dp_soc->osdev, 3134 QDF_NBUF_CB_PADDR(nbuf)), 3135 QDF_NBUF_CB_PADDR(nbuf), 3136 rx_desc_pool->buf_size); 3137 3138 desc_list = next; 3139 } 3140 3141 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, 3142 rxdma_srng, nr_nbuf, nr_nbuf); 3143 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3144 } 3145 3146 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); 3147 qdf_mem_free(nf_info); 3148 3149 if (!nr_nbuf_total) { 3150 dp_err("No nbuf's allocated"); 3151 QDF_BUG(0); 3152 return QDF_STATUS_E_RESOURCES; 3153 } 3154 3155 /* No need to count the number of bytes received during replenish. 3156 * Therefore set replenish.pkts.bytes as 0. 3157 */ 3158 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); 3159 3160 return QDF_STATUS_SUCCESS; 3161 } 3162 3163 qdf_export_symbol(dp_pdev_rx_buffers_attach); 3164 3165 /** 3166 * dp_rx_enable_mon_dest_frag() - Enable frag processing for 3167 * monitor destination ring via frag. 3168 * 3169 * Enable this flag only for monitor destination buffer processing 3170 * if DP_RX_MON_MEM_FRAG feature is enabled. 3171 * If flag is set then frag based function will be called for alloc, 3172 * map, prep desc and free ops for desc buffer else normal nbuf based 3173 * function will be called. 
3174 * 3175 * @rx_desc_pool: Rx desc pool 3176 * @is_mon_dest_desc: Is it for monitor dest buffer 3177 * 3178 * Return: None 3179 */ 3180 #ifdef DP_RX_MON_MEM_FRAG 3181 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3182 bool is_mon_dest_desc) 3183 { 3184 rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc; 3185 if (is_mon_dest_desc) 3186 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled"); 3187 } 3188 #else 3189 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3190 bool is_mon_dest_desc) 3191 { 3192 rx_desc_pool->rx_mon_dest_frag_enable = false; 3193 if (is_mon_dest_desc) 3194 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled"); 3195 } 3196 #endif 3197 3198 qdf_export_symbol(dp_rx_enable_mon_dest_frag); 3199 3200 /* 3201 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor 3202 * pool 3203 * 3204 * @pdev: core txrx pdev context 3205 * 3206 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3207 * QDF_STATUS_E_NOMEM 3208 */ 3209 QDF_STATUS 3210 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) 3211 { 3212 struct dp_soc *soc = pdev->soc; 3213 uint32_t rxdma_entries; 3214 uint32_t rx_sw_desc_num; 3215 struct dp_srng *dp_rxdma_srng; 3216 struct rx_desc_pool *rx_desc_pool; 3217 uint32_t status = QDF_STATUS_SUCCESS; 3218 int mac_for_pdev; 3219 3220 mac_for_pdev = pdev->lmac_id; 3221 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3222 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3223 soc, mac_for_pdev); 3224 return status; 3225 } 3226 3227 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3228 rxdma_entries = dp_rxdma_srng->num_entries; 3229 3230 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3231 rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3232 3233 rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE; 3234 status = dp_rx_desc_pool_alloc(soc, 3235 rx_sw_desc_num, 3236 rx_desc_pool); 3237 if (status != QDF_STATUS_SUCCESS) 3238 return status; 3239 3240 return status; 3241 } 3242 3243 /* 3244 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool 3245 * 3246 * @pdev: core txrx pdev context 3247 */ 3248 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) 3249 { 3250 int mac_for_pdev = pdev->lmac_id; 3251 struct dp_soc *soc = pdev->soc; 3252 struct rx_desc_pool *rx_desc_pool; 3253 3254 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3255 3256 dp_rx_desc_pool_free(soc, rx_desc_pool); 3257 } 3258 3259 /* 3260 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors 3261 * 3262 * @pdev: core txrx pdev context 3263 * 3264 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3265 * QDF_STATUS_E_NOMEM 3266 */ 3267 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) 3268 { 3269 int mac_for_pdev = pdev->lmac_id; 3270 struct dp_soc *soc = pdev->soc; 3271 uint32_t rxdma_entries; 3272 uint32_t rx_sw_desc_num; 3273 struct dp_srng *dp_rxdma_srng; 3274 struct rx_desc_pool *rx_desc_pool; 3275 3276 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3277 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3278 /** 3279 * If NSS is enabled, rx_desc_pool is already filled. 3280 * Hence, just disable desc_pool frag flag. 
3281 */ 3282 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3283 3284 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3285 soc, mac_for_pdev); 3286 return QDF_STATUS_SUCCESS; 3287 } 3288 3289 if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM) 3290 return QDF_STATUS_E_NOMEM; 3291 3292 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3293 rxdma_entries = dp_rxdma_srng->num_entries; 3294 3295 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 3296 3297 rx_sw_desc_num = 3298 wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3299 3300 rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc); 3301 rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; 3302 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; 3303 /* Disable monitor dest processing via frag */ 3304 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3305 3306 dp_rx_desc_pool_init(soc, mac_for_pdev, 3307 rx_sw_desc_num, rx_desc_pool); 3308 return QDF_STATUS_SUCCESS; 3309 } 3310 3311 /* 3312 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools 3313 * @pdev: core txrx pdev context 3314 * 3315 * This function resets the freelist of rx descriptors and destroys locks 3316 * associated with this list of descriptors. 3317 */ 3318 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) 3319 { 3320 int mac_for_pdev = pdev->lmac_id; 3321 struct dp_soc *soc = pdev->soc; 3322 struct rx_desc_pool *rx_desc_pool; 3323 3324 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3325 3326 dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev); 3327 } 3328 3329 /* 3330 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring 3331 * 3332 * @pdev: core txrx pdev context 3333 * 3334 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3335 * QDF_STATUS_E_NOMEM 3336 */ 3337 QDF_STATUS 3338 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) 3339 { 3340 int mac_for_pdev = pdev->lmac_id; 3341 struct dp_soc *soc = pdev->soc; 3342 struct dp_srng *dp_rxdma_srng; 3343 struct rx_desc_pool *rx_desc_pool; 3344 uint32_t rxdma_entries; 3345 3346 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3347 rxdma_entries = dp_rxdma_srng->num_entries; 3348 3349 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3350 3351 /* Initialize RX buffer pool which will be 3352 * used during low memory conditions 3353 */ 3354 dp_rx_buffer_pool_init(soc, mac_for_pdev); 3355 3356 return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev, 3357 dp_rxdma_srng, 3358 rx_desc_pool, 3359 rxdma_entries - 1); 3360 } 3361 3362 /* 3363 * dp_rx_pdev_buffers_free - Free nbufs (skbs) 3364 * 3365 * @pdev: core txrx pdev context 3366 */ 3367 void 3368 dp_rx_pdev_buffers_free(struct dp_pdev *pdev) 3369 { 3370 int mac_for_pdev = pdev->lmac_id; 3371 struct dp_soc *soc = pdev->soc; 3372 struct rx_desc_pool *rx_desc_pool; 3373 3374 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3375 3376 dp_rx_desc_nbuf_free(soc, rx_desc_pool, false); 3377 dp_rx_buffer_pool_deinit(soc, mac_for_pdev); 3378 } 3379 3380 #ifdef DP_RX_SPECIAL_FRAME_NEED 3381 bool dp_rx_deliver_special_frame(struct dp_soc *soc, 3382 struct dp_txrx_peer *txrx_peer, 3383 qdf_nbuf_t nbuf, uint32_t frame_mask, 3384 uint8_t *rx_tlv_hdr) 3385 { 3386 uint32_t l2_hdr_offset = 0; 3387 uint16_t msdu_len = 0; 3388 uint32_t skip_len; 3389 3390 l2_hdr_offset = 3391 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 3392 3393 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 3394 skip_len = l2_hdr_offset; 3395 } else { 3396 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 3397 skip_len = l2_hdr_offset + 
soc->rx_pkt_tlv_size; 3398 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 3399 } 3400 3401 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 3402 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 3403 qdf_nbuf_pull_head(nbuf, skip_len); 3404 3405 if (txrx_peer->vdev) { 3406 dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf, 3407 QDF_TX_RX_STATUS_OK); 3408 } 3409 3410 if (dp_rx_is_special_frame(nbuf, frame_mask)) { 3411 dp_info("special frame, mpdu sn 0x%x", 3412 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr)); 3413 qdf_nbuf_set_exc_frame(nbuf, 1); 3414 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, 3415 nbuf, NULL); 3416 return true; 3417 } 3418 3419 return false; 3420 } 3421 #endif 3422 3423 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET 3424 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev, 3425 uint8_t *rx_tlv, 3426 qdf_nbuf_t nbuf) 3427 { 3428 struct dp_soc *soc; 3429 3430 if (!pdev->is_first_wakeup_packet) 3431 return; 3432 3433 soc = pdev->soc; 3434 if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) { 3435 qdf_nbuf_mark_wakeup_frame(nbuf); 3436 dp_info("First packet after WOW Wakeup rcvd"); 3437 } 3438 } 3439 #endif 3440
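
/*
 * Usage sketch (illustrative, not called from this file): the pdev rx
 * descriptor pool APIs above are expected to be used in alloc/init order
 * during pdev bring-up and in the reverse order during teardown, roughly:
 *
 *	if (dp_rx_pdev_desc_pool_alloc(pdev) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	dp_rx_pdev_desc_pool_init(pdev);
 *	dp_rx_pdev_buffers_alloc(pdev);
 *
 *	...
 *
 *	dp_rx_pdev_buffers_free(pdev);
 *	dp_rx_pdev_desc_pool_deinit(pdev);
 *	dp_rx_pdev_desc_pool_free(pdev);
 */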