1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "hal_hw_headers.h" 21 #include "dp_types.h" 22 #include "dp_rx.h" 23 #include "dp_tx.h" 24 #include "dp_peer.h" 25 #include "hal_rx.h" 26 #include "hal_api.h" 27 #include "qdf_nbuf.h" 28 #ifdef MESH_MODE_SUPPORT 29 #include "if_meta_hdr.h" 30 #endif 31 #include "dp_internal.h" 32 #include "dp_ipa.h" 33 #include "dp_hist.h" 34 #include "dp_rx_buffer_pool.h" 35 #ifdef WIFI_MONITOR_SUPPORT 36 #include "dp_htt.h" 37 #include <dp_mon.h> 38 #endif 39 #ifdef FEATURE_WDS 40 #include "dp_txrx_wds.h" 41 #endif 42 #ifdef DP_RATETABLE_SUPPORT 43 #include "dp_ratetable.h" 44 #endif 45 46 #ifdef DUP_RX_DESC_WAR 47 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 48 hal_ring_handle_t hal_ring, 49 hal_ring_desc_t ring_desc, 50 struct dp_rx_desc *rx_desc) 51 { 52 void *hal_soc = soc->hal_soc; 53 54 hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc); 55 dp_rx_desc_dump(rx_desc); 56 } 57 #else 58 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 59 hal_ring_handle_t hal_ring_hdl, 60 hal_ring_desc_t ring_desc, 61 struct dp_rx_desc *rx_desc) 62 { 63 hal_soc_handle_t hal_soc = soc->hal_soc; 64 65 dp_rx_desc_dump(rx_desc); 66 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc); 67 hal_srng_dump_ring(hal_soc, hal_ring_hdl); 68 qdf_assert_always(0); 69 } 70 #endif 71 72 #ifndef QCA_HOST_MODE_WIFI_DISABLED 73 #ifdef RX_DESC_SANITY_WAR 74 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, 75 hal_ring_handle_t hal_ring_hdl, 76 hal_ring_desc_t ring_desc, 77 struct dp_rx_desc *rx_desc) 78 { 79 uint8_t return_buffer_manager; 80 81 if (qdf_unlikely(!rx_desc)) { 82 /* 83 * This is an unlikely case where the cookie obtained 84 * from the ring_desc is invalid and hence we are not 85 * able to find the corresponding rx_desc 86 */ 87 goto fail; 88 } 89 90 return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc); 91 if (qdf_unlikely(!(return_buffer_manager == 92 HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) || 93 return_buffer_manager == 94 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) { 95 goto fail; 96 } 97 98 return QDF_STATUS_SUCCESS; 99 100 fail: 101 DP_STATS_INC(soc, rx.err.invalid_cookie, 1); 102 dp_err("Ring Desc:"); 103 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, 104 ring_desc); 105 return QDF_STATUS_E_NULL_VALUE; 106 107 } 108 #endif 109 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 110 111 /** 112 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map 113 * 114 * @dp_soc: struct dp_soc * 115 * @nbuf_frag_info_t: nbuf frag info 116 * @dp_pdev: struct dp_pdev * 117 * @rx_desc_pool: Rx desc pool 118 * 119 * Return: QDF_STATUS 120 */ 121 
#ifdef DP_RX_MON_MEM_FRAG 122 static inline QDF_STATUS 123 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 124 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 125 struct dp_pdev *dp_pdev, 126 struct rx_desc_pool *rx_desc_pool) 127 { 128 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 129 130 (nbuf_frag_info_t->virt_addr).vaddr = 131 qdf_frag_alloc(NULL, rx_desc_pool->buf_size); 132 133 if (!((nbuf_frag_info_t->virt_addr).vaddr)) { 134 dp_err("Frag alloc failed"); 135 DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1); 136 return QDF_STATUS_E_NOMEM; 137 } 138 139 ret = qdf_mem_map_page(dp_soc->osdev, 140 (nbuf_frag_info_t->virt_addr).vaddr, 141 QDF_DMA_FROM_DEVICE, 142 rx_desc_pool->buf_size, 143 &nbuf_frag_info_t->paddr); 144 145 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 146 qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr); 147 dp_err("Frag map failed"); 148 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 149 return QDF_STATUS_E_FAULT; 150 } 151 152 return QDF_STATUS_SUCCESS; 153 } 154 #else 155 static inline QDF_STATUS 156 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 157 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 158 struct dp_pdev *dp_pdev, 159 struct rx_desc_pool *rx_desc_pool) 160 { 161 return QDF_STATUS_SUCCESS; 162 } 163 #endif /* DP_RX_MON_MEM_FRAG */ 164 165 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 166 /** 167 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history 168 * @soc: Datapath soc structure 169 * @ring_num: Refill ring number 170 * @num_req: number of buffers requested for refill 171 * @num_refill: number of buffers refilled 172 * 173 * Returns: None 174 */ 175 static inline void 176 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 177 hal_ring_handle_t hal_ring_hdl, 178 uint32_t num_req, uint32_t num_refill) 179 { 180 struct dp_refill_info_record *record; 181 uint32_t idx; 182 uint32_t tp; 183 uint32_t hp; 184 185 if (qdf_unlikely(ring_num >= MAX_PDEV_CNT || 186 !soc->rx_refill_ring_history[ring_num])) 187 return; 188 189 idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index, 190 DP_RX_REFILL_HIST_MAX); 191 192 /* No NULL check needed for record since its an array */ 193 record = &soc->rx_refill_ring_history[ring_num]->entry[idx]; 194 195 hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp); 196 record->timestamp = qdf_get_log_timestamp(); 197 record->num_req = num_req; 198 record->num_refill = num_refill; 199 record->hp = hp; 200 record->tp = tp; 201 } 202 #else 203 static inline void 204 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 205 hal_ring_handle_t hal_ring_hdl, 206 uint32_t num_req, uint32_t num_refill) 207 { 208 } 209 #endif 210 211 /** 212 * dp_pdev_nbuf_alloc_and_map() - Allocate nbuf for desc buffer and map 213 * 214 * @dp_soc: struct dp_soc * 215 * @mac_id: Mac id 216 * @num_entries_avail: num_entries_avail 217 * @nbuf_frag_info_t: nbuf frag info 218 * @dp_pdev: struct dp_pdev * 219 * @rx_desc_pool: Rx desc pool 220 * 221 * Return: QDF_STATUS 222 */ 223 static inline QDF_STATUS 224 dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc, 225 uint32_t mac_id, 226 uint32_t num_entries_avail, 227 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 228 struct dp_pdev *dp_pdev, 229 struct rx_desc_pool *rx_desc_pool) 230 { 231 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 232 233 (nbuf_frag_info_t->virt_addr).nbuf = 234 dp_rx_buffer_pool_nbuf_alloc(dp_soc, 235 mac_id, 236 rx_desc_pool, 237 num_entries_avail); 238 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 239 dp_err("nbuf alloc 
failed"); 240 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 241 return QDF_STATUS_E_NOMEM; 242 } 243 244 ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool, 245 nbuf_frag_info_t); 246 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 247 dp_rx_buffer_pool_nbuf_free(dp_soc, 248 (nbuf_frag_info_t->virt_addr).nbuf, mac_id); 249 dp_err("nbuf map failed"); 250 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 251 return QDF_STATUS_E_FAULT; 252 } 253 254 nbuf_frag_info_t->paddr = 255 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 256 257 dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, 258 (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf), 259 rx_desc_pool->buf_size, 260 true); 261 262 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 263 &nbuf_frag_info_t->paddr, 264 rx_desc_pool); 265 if (ret == QDF_STATUS_E_FAILURE) { 266 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 267 return QDF_STATUS_E_ADDRNOTAVAIL; 268 } 269 270 return QDF_STATUS_SUCCESS; 271 } 272 273 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 274 QDF_STATUS 275 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id, 276 struct dp_srng *dp_rxdma_srng, 277 struct rx_desc_pool *rx_desc_pool) 278 { 279 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 280 uint32_t count; 281 void *rxdma_ring_entry; 282 union dp_rx_desc_list_elem_t *next = NULL; 283 void *rxdma_srng; 284 qdf_nbuf_t nbuf; 285 qdf_dma_addr_t paddr; 286 uint16_t num_entries_avail = 0; 287 uint16_t num_alloc_desc = 0; 288 union dp_rx_desc_list_elem_t *desc_list = NULL; 289 union dp_rx_desc_list_elem_t *tail = NULL; 290 int sync_hw_ptr = 0; 291 292 rxdma_srng = dp_rxdma_srng->hal_srng; 293 294 if (qdf_unlikely(!dp_pdev)) { 295 dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id); 296 return QDF_STATUS_E_FAILURE; 297 } 298 299 if (qdf_unlikely(!rxdma_srng)) { 300 dp_rx_debug("%pK: rxdma srng not initialized", soc); 301 return QDF_STATUS_E_FAILURE; 302 } 303 304 hal_srng_access_start(soc->hal_soc, rxdma_srng); 305 306 num_entries_avail = hal_srng_src_num_avail(soc->hal_soc, 307 rxdma_srng, 308 sync_hw_ptr); 309 310 dp_rx_debug("%pK: no of available entries in rxdma ring: %d", 311 soc, num_entries_avail); 312 313 if (qdf_unlikely(num_entries_avail < 314 ((dp_rxdma_srng->num_entries * 3) / 4))) { 315 hal_srng_access_end(soc->hal_soc, rxdma_srng); 316 return QDF_STATUS_E_FAILURE; 317 } 318 319 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 320 321 num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id, 322 rx_desc_pool, 323 num_entries_avail, 324 &desc_list, 325 &tail); 326 327 if (!num_alloc_desc) { 328 dp_rx_err("%pK: no free rx_descs in freelist", soc); 329 DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail, 330 num_entries_avail); 331 hal_srng_access_end(soc->hal_soc, rxdma_srng); 332 return QDF_STATUS_E_NOMEM; 333 } 334 335 for (count = 0; count < num_alloc_desc; count++) { 336 next = desc_list->next; 337 qdf_prefetch(next); 338 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 339 if (qdf_unlikely(!nbuf)) { 340 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 341 break; 342 } 343 344 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 345 rx_desc_pool->buf_size); 346 347 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, 348 rxdma_srng); 349 qdf_assert_always(rxdma_ring_entry); 350 351 desc_list->rx_desc.nbuf = nbuf; 352 desc_list->rx_desc.rx_buf_start = nbuf->data; 353 desc_list->rx_desc.unmapped = 0; 354 355 /* rx_desc.in_use should be zero at this time*/ 356 
qdf_assert_always(desc_list->rx_desc.in_use == 0); 357 358 desc_list->rx_desc.in_use = 1; 359 desc_list->rx_desc.in_err_state = 0; 360 361 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 362 paddr, 363 desc_list->rx_desc.cookie, 364 rx_desc_pool->owner); 365 366 desc_list = next; 367 } 368 qdf_dsb(); 369 hal_srng_access_end(soc->hal_soc, rxdma_srng); 370 371 /* No need to count the number of bytes received during replenish. 372 * Therefore set replenish.pkts.bytes as 0. 373 */ 374 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 375 DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count)); 376 /* 377 * add any available free desc back to the free list 378 */ 379 if (desc_list) 380 dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail, 381 mac_id, rx_desc_pool); 382 383 return QDF_STATUS_SUCCESS; 384 } 385 386 QDF_STATUS 387 __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id, 388 struct dp_srng *dp_rxdma_srng, 389 struct rx_desc_pool *rx_desc_pool, 390 uint32_t num_req_buffers, 391 union dp_rx_desc_list_elem_t **desc_list, 392 union dp_rx_desc_list_elem_t **tail) 393 { 394 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 395 uint32_t count; 396 void *rxdma_ring_entry; 397 union dp_rx_desc_list_elem_t *next; 398 void *rxdma_srng; 399 qdf_nbuf_t nbuf; 400 qdf_dma_addr_t paddr; 401 402 rxdma_srng = dp_rxdma_srng->hal_srng; 403 404 if (qdf_unlikely(!dp_pdev)) { 405 dp_rx_err("%pK: pdev is null for mac_id = %d", 406 soc, mac_id); 407 return QDF_STATUS_E_FAILURE; 408 } 409 410 if (qdf_unlikely(!rxdma_srng)) { 411 dp_rx_debug("%pK: rxdma srng not initialized", soc); 412 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 413 return QDF_STATUS_E_FAILURE; 414 } 415 416 dp_rx_debug("%pK: requested %d buffers for replenish", 417 soc, num_req_buffers); 418 419 hal_srng_access_start(soc->hal_soc, rxdma_srng); 420 421 for (count = 0; count < num_req_buffers; count++) { 422 next = (*desc_list)->next; 423 qdf_prefetch(next); 424 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 425 if (qdf_unlikely(!nbuf)) { 426 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 427 break; 428 } 429 430 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 431 rx_desc_pool->buf_size); 432 rxdma_ring_entry = (struct dp_buffer_addr_info *) 433 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 434 if (!rxdma_ring_entry) 435 break; 436 437 qdf_assert_always(rxdma_ring_entry); 438 439 (*desc_list)->rx_desc.nbuf = nbuf; 440 (*desc_list)->rx_desc.rx_buf_start = nbuf->data; 441 (*desc_list)->rx_desc.unmapped = 0; 442 443 /* rx_desc.in_use should be zero at this time*/ 444 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 445 446 (*desc_list)->rx_desc.in_use = 1; 447 (*desc_list)->rx_desc.in_err_state = 0; 448 449 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 450 paddr, 451 (*desc_list)->rx_desc.cookie, 452 rx_desc_pool->owner); 453 454 *desc_list = next; 455 } 456 qdf_dsb(); 457 hal_srng_access_end(soc->hal_soc, rxdma_srng); 458 459 /* No need to count the number of bytes received during replenish. 460 * Therefore set replenish.pkts.bytes as 0. 
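         * Only 'count' buffers were actually programmed into the ring; any
         * descriptors that were reserved but not consumed are handed back
         * to the free list below.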
461 */ 462 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 463 DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count)); 464 /* 465 * add any available free desc back to the free list 466 */ 467 if (*desc_list) 468 dp_rx_add_desc_list_to_free_list(soc, desc_list, tail, 469 mac_id, rx_desc_pool); 470 471 return QDF_STATUS_SUCCESS; 472 } 473 474 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc, 475 uint32_t mac_id, 476 struct dp_srng *dp_rxdma_srng, 477 struct rx_desc_pool *rx_desc_pool, 478 uint32_t num_req_buffers) 479 { 480 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 481 uint32_t count; 482 uint32_t nr_descs = 0; 483 void *rxdma_ring_entry; 484 union dp_rx_desc_list_elem_t *next; 485 void *rxdma_srng; 486 qdf_nbuf_t nbuf; 487 qdf_dma_addr_t paddr; 488 union dp_rx_desc_list_elem_t *desc_list = NULL; 489 union dp_rx_desc_list_elem_t *tail = NULL; 490 491 rxdma_srng = dp_rxdma_srng->hal_srng; 492 493 if (qdf_unlikely(!dp_pdev)) { 494 dp_rx_err("%pK: pdev is null for mac_id = %d", 495 soc, mac_id); 496 return QDF_STATUS_E_FAILURE; 497 } 498 499 if (qdf_unlikely(!rxdma_srng)) { 500 dp_rx_debug("%pK: rxdma srng not initialized", soc); 501 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 502 return QDF_STATUS_E_FAILURE; 503 } 504 505 dp_rx_debug("%pK: requested %d buffers for replenish", 506 soc, num_req_buffers); 507 508 nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool, 509 num_req_buffers, &desc_list, &tail); 510 if (!nr_descs) { 511 dp_err("no free rx_descs in freelist"); 512 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 513 return QDF_STATUS_E_NOMEM; 514 } 515 516 dp_debug("got %u RX descs for driver attach", nr_descs); 517 518 hal_srng_access_start(soc->hal_soc, rxdma_srng); 519 520 for (count = 0; count < nr_descs; count++) { 521 next = desc_list->next; 522 qdf_prefetch(next); 523 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 524 if (qdf_unlikely(!nbuf)) { 525 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 526 break; 527 } 528 529 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 530 rx_desc_pool->buf_size); 531 rxdma_ring_entry = (struct dp_buffer_addr_info *) 532 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 533 if (!rxdma_ring_entry) 534 break; 535 536 qdf_assert_always(rxdma_ring_entry); 537 538 desc_list->rx_desc.nbuf = nbuf; 539 desc_list->rx_desc.rx_buf_start = nbuf->data; 540 desc_list->rx_desc.unmapped = 0; 541 542 /* rx_desc.in_use should be zero at this time*/ 543 qdf_assert_always(desc_list->rx_desc.in_use == 0); 544 545 desc_list->rx_desc.in_use = 1; 546 desc_list->rx_desc.in_err_state = 0; 547 548 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 549 paddr, 550 desc_list->rx_desc.cookie, 551 rx_desc_pool->owner); 552 553 desc_list = next; 554 } 555 qdf_dsb(); 556 hal_srng_access_end(soc->hal_soc, rxdma_srng); 557 558 /* No need to count the number of bytes received during replenish. 559 * Therefore set replenish.pkts.bytes as 0. 
 */
        DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

        return QDF_STATUS_SUCCESS;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
                                        uint32_t buf_size)
{
        return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size);
}
#else
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
                                        uint32_t buf_size)
{
        return qdf_nbuf_get_frag_paddr(nbuf, 0);
}
#endif

/*
 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time
 *
 * @soc: core txrx main context
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @rx_desc: rx descriptor
 *
 * Return: void
 */
static inline
void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
                          struct rx_desc_pool *rx_desc_pool,
                          struct dp_rx_desc *rx_desc)
{
        void *rxdma_srng;
        void *rxdma_ring_entry;
        qdf_dma_addr_t paddr;

        rxdma_srng = dp_rxdma_srng->hal_srng;

        /* No one else should be accessing the srng at this point */
        hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng);

        rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

        qdf_assert_always(rxdma_ring_entry);
        rx_desc->in_err_state = 0;

        paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf,
                                         rx_desc_pool->buf_size);
        hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr,
                                     rx_desc->cookie, rx_desc_pool->owner);

        hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng);
}

/*
 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
 *
 * @soc: core txrx main context
 * @nbuf_list: nbuf list for delayed free
 *
 * Return: void
 */
void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
        int mac_id, i, j;
        union dp_rx_desc_list_elem_t *head = NULL;
        union dp_rx_desc_list_elem_t *tail = NULL;

        for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
                struct dp_srng *dp_rxdma_srng =
                                        &soc->rx_refill_buf_ring[mac_id];
                struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
                uint32_t rx_sw_desc_num = rx_desc_pool->pool_size;
                /* Only fill up 1/3 of the ring size */
                uint32_t num_req_decs;

                if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng ||
                    !rx_desc_pool->array)
                        continue;

                num_req_decs = dp_rxdma_srng->num_entries / 3;

                for (i = 0, j = 0; i < rx_sw_desc_num; i++) {
                        struct dp_rx_desc *rx_desc =
                                (struct dp_rx_desc *)&rx_desc_pool->array[i];

                        if (rx_desc->in_use) {
                                if (j < dp_rxdma_srng->num_entries) {
                                        dp_rx_desc_replenish(soc, dp_rxdma_srng,
                                                             rx_desc_pool,
                                                             rx_desc);
                                } else {
                                        dp_rx_nbuf_unmap(soc, rx_desc, 0);
                                        rx_desc->unmapped = 0;

                                        rx_desc->nbuf->next = *nbuf_list;
                                        *nbuf_list = rx_desc->nbuf;

                                        dp_rx_add_to_free_desc_list(&head,
                                                                    &tail,
                                                                    rx_desc);
                                }
                                j++;
                        }
                }

                if (head)
                        dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
                                                         mac_id, rx_desc_pool);

                /* If num of descs in use were less, then we need to replenish
                 * the ring with some buffers
                 */
                head = NULL;
                tail = NULL;

                if (j < (num_req_decs - 1))
                        dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                                rx_desc_pool,
                                                ((num_req_decs - 1) - j),
&head, &tail, true); 688 } 689 } 690 #endif 691 692 /* 693 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs 694 * called during dp rx initialization 695 * and at the end of dp_rx_process. 696 * 697 * @soc: core txrx main context 698 * @mac_id: mac_id which is one of 3 mac_ids 699 * @dp_rxdma_srng: dp rxdma circular ring 700 * @rx_desc_pool: Pointer to free Rx descriptor pool 701 * @num_req_buffers: number of buffer to be replenished 702 * @desc_list: list of descs if called from dp_rx_process 703 * or NULL during dp rx initialization or out of buffer 704 * interrupt. 705 * @tail: tail of descs list 706 * @req_only: If true don't replenish more than req buffers 707 * @func_name: name of the caller function 708 * Return: return success or failure 709 */ 710 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 711 struct dp_srng *dp_rxdma_srng, 712 struct rx_desc_pool *rx_desc_pool, 713 uint32_t num_req_buffers, 714 union dp_rx_desc_list_elem_t **desc_list, 715 union dp_rx_desc_list_elem_t **tail, 716 bool req_only, const char *func_name) 717 { 718 uint32_t num_alloc_desc; 719 uint16_t num_desc_to_free = 0; 720 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 721 uint32_t num_entries_avail; 722 uint32_t count; 723 int sync_hw_ptr = 1; 724 struct dp_rx_nbuf_frag_info nbuf_frag_info = {0}; 725 void *rxdma_ring_entry; 726 union dp_rx_desc_list_elem_t *next; 727 QDF_STATUS ret; 728 void *rxdma_srng; 729 union dp_rx_desc_list_elem_t *desc_list_append = NULL; 730 union dp_rx_desc_list_elem_t *tail_append = NULL; 731 union dp_rx_desc_list_elem_t *temp_list = NULL; 732 733 rxdma_srng = dp_rxdma_srng->hal_srng; 734 735 if (qdf_unlikely(!dp_pdev)) { 736 dp_rx_err("%pK: pdev is null for mac_id = %d", 737 dp_soc, mac_id); 738 return QDF_STATUS_E_FAILURE; 739 } 740 741 if (qdf_unlikely(!rxdma_srng)) { 742 dp_rx_debug("%pK: rxdma srng not initialized", dp_soc); 743 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 744 return QDF_STATUS_E_FAILURE; 745 } 746 747 dp_rx_debug("%pK: requested %d buffers for replenish", 748 dp_soc, num_req_buffers); 749 750 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 751 752 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 753 rxdma_srng, 754 sync_hw_ptr); 755 756 dp_rx_debug("%pK: no of available entries in rxdma ring: %d", 757 dp_soc, num_entries_avail); 758 759 if (!req_only && !(*desc_list) && (num_entries_avail > 760 ((dp_rxdma_srng->num_entries * 3) / 4))) { 761 num_req_buffers = num_entries_avail; 762 } else if (num_entries_avail < num_req_buffers) { 763 num_desc_to_free = num_req_buffers - num_entries_avail; 764 num_req_buffers = num_entries_avail; 765 } else if ((*desc_list) && 766 dp_rxdma_srng->num_entries - num_entries_avail < 767 CRITICAL_BUFFER_THRESHOLD) { 768 /* Append some free descriptors to tail */ 769 num_alloc_desc = 770 dp_rx_get_free_desc_list(dp_soc, mac_id, 771 rx_desc_pool, 772 CRITICAL_BUFFER_THRESHOLD, 773 &desc_list_append, 774 &tail_append); 775 776 if (num_alloc_desc) { 777 temp_list = *desc_list; 778 *desc_list = desc_list_append; 779 tail_append->next = temp_list; 780 num_req_buffers += num_alloc_desc; 781 782 DP_STATS_DEC(dp_pdev, 783 replenish.free_list, 784 num_alloc_desc); 785 } else 786 dp_err_rl("%pK: no free rx_descs in freelist", dp_soc); 787 } 788 789 if (qdf_unlikely(!num_req_buffers)) { 790 num_desc_to_free = num_req_buffers; 791 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 792 goto free_descs; 793 } 794 795 /* 796 * if desc_list is NULL, 
allocate the descs from freelist 797 */ 798 if (!(*desc_list)) { 799 num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, 800 rx_desc_pool, 801 num_req_buffers, 802 desc_list, 803 tail); 804 805 if (!num_alloc_desc) { 806 dp_rx_err("%pK: no free rx_descs in freelist", dp_soc); 807 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, 808 num_req_buffers); 809 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 810 return QDF_STATUS_E_NOMEM; 811 } 812 813 dp_rx_debug("%pK: %d rx desc allocated", dp_soc, num_alloc_desc); 814 num_req_buffers = num_alloc_desc; 815 } 816 817 818 count = 0; 819 820 while (count < num_req_buffers) { 821 /* Flag is set while pdev rx_desc_pool initialization */ 822 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 823 ret = dp_pdev_frag_alloc_and_map(dp_soc, 824 &nbuf_frag_info, 825 dp_pdev, 826 rx_desc_pool); 827 else 828 ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc, 829 mac_id, 830 num_entries_avail, &nbuf_frag_info, 831 dp_pdev, rx_desc_pool); 832 833 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 834 if (qdf_unlikely(ret == QDF_STATUS_E_FAULT)) 835 continue; 836 break; 837 } 838 839 count++; 840 841 rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, 842 rxdma_srng); 843 qdf_assert_always(rxdma_ring_entry); 844 845 next = (*desc_list)->next; 846 847 /* Flag is set while pdev rx_desc_pool initialization */ 848 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 849 dp_rx_desc_frag_prep(&((*desc_list)->rx_desc), 850 &nbuf_frag_info); 851 else 852 dp_rx_desc_prep(&((*desc_list)->rx_desc), 853 &nbuf_frag_info); 854 855 /* rx_desc.in_use should be zero at this time*/ 856 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 857 858 (*desc_list)->rx_desc.in_use = 1; 859 (*desc_list)->rx_desc.in_err_state = 0; 860 dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc, 861 func_name, RX_DESC_REPLENISHED); 862 dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d", 863 nbuf_frag_info.virt_addr.nbuf, 864 (unsigned long long)(nbuf_frag_info.paddr), 865 (*desc_list)->rx_desc.cookie); 866 867 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, 868 nbuf_frag_info.paddr, 869 (*desc_list)->rx_desc.cookie, 870 rx_desc_pool->owner); 871 872 *desc_list = next; 873 874 } 875 876 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng, 877 num_req_buffers, count); 878 879 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 880 881 dp_rx_schedule_refill_thread(dp_soc); 882 883 dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u", 884 count, num_desc_to_free); 885 886 /* No need to count the number of bytes received during replenish. 887 * Therefore set replenish.pkts.bytes as 0. 888 */ 889 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 890 DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count); 891 892 free_descs: 893 DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free); 894 /* 895 * add any available free desc back to the free list 896 */ 897 if (*desc_list) 898 dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail, 899 mac_id, rx_desc_pool); 900 901 return QDF_STATUS_SUCCESS; 902 } 903 904 qdf_export_symbol(__dp_rx_buffers_replenish); 905 906 /* 907 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the 908 * pkts to RAW mode simulation to 909 * decapsulate the pkt. 
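 * The MSDUs are first chained into a local delivery list, handed to the OS
 * shim's RAW mode decap handler (osif_rsim_rx_decap) and then delivered
 * through the regular osif_rx callback.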
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @txrx_peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
                  struct dp_txrx_peer *txrx_peer)
{
        qdf_nbuf_t deliver_list_head = NULL;
        qdf_nbuf_t deliver_list_tail = NULL;
        qdf_nbuf_t nbuf;

        nbuf = nbuf_list;
        while (nbuf) {
                qdf_nbuf_t next = qdf_nbuf_next(nbuf);

                DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

                DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
                DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
                                              qdf_nbuf_len(nbuf));
                /*
                 * reset the chfrag_start and chfrag_end bits in nbuf cb
                 * as this is a non-amsdu pkt and RAW mode simulation expects
                 * these bits to be 0 for non-amsdu pkts.
                 */
                if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
                    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
                        qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
                        qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
                }

                nbuf = next;
        }

        vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
                                 &deliver_list_tail);

        vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
                    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/*
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical frame
 * and marks the QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the
 * nbuf. The code for marking which frames are CRITICAL is accessed via a
 * callback. EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical
 * frames.
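 * For example, an EAPOL or DHCP frame that is copied for intra-BSS
 * forwarding is tagged here so that the TX path can prioritize it; the
 * actual classification policy lives entirely in the registered callback.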
973 * 974 * Return: None 975 */ 976 static 977 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev, 978 qdf_nbuf_t nbuf) 979 { 980 if (vdev->tx_classify_critical_pkt_cb) 981 vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf); 982 } 983 #else 984 static inline 985 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev, 986 qdf_nbuf_t nbuf) 987 { 988 } 989 #endif 990 991 #ifdef QCA_OL_TX_MULTIQ_SUPPORT 992 static inline 993 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) 994 { 995 qdf_nbuf_set_queue_mapping(nbuf, ring_id); 996 } 997 #else 998 static inline 999 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) 1000 { 1001 } 1002 #endif 1003 1004 /* 1005 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets 1006 * 1007 * @soc: core txrx main context 1008 * @ta_peer : source peer entry 1009 * @rx_tlv_hdr : start address of rx tlvs 1010 * @nbuf : nbuf that has to be intrabss forwarded 1011 * @tid_stats : tid stats pointer 1012 * 1013 * Return: bool: true if it is forwarded else false 1014 */ 1015 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, 1016 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1017 struct cdp_tid_rx_stats *tid_stats) 1018 { 1019 uint16_t len; 1020 qdf_nbuf_t nbuf_copy; 1021 1022 if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr, 1023 nbuf)) 1024 return true; 1025 1026 if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf)) 1027 return false; 1028 1029 /* If the source peer in the isolation list 1030 * then dont forward instead push to bridge stack 1031 */ 1032 if (dp_get_peer_isolation(ta_peer)) 1033 return false; 1034 1035 nbuf_copy = qdf_nbuf_copy(nbuf); 1036 if (!nbuf_copy) 1037 return false; 1038 1039 len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1040 1041 qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb)); 1042 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy); 1043 1044 if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy, 1045 tid_stats)) 1046 return false; 1047 1048 if (dp_tx_send((struct cdp_soc_t *)soc, 1049 ta_peer->vdev->vdev_id, nbuf_copy)) { 1050 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 1051 len); 1052 tid_stats->fail_cnt[INTRABSS_DROP]++; 1053 dp_rx_nbuf_free(nbuf_copy); 1054 } else { 1055 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 1056 len); 1057 tid_stats->intrabss_cnt++; 1058 } 1059 return false; 1060 } 1061 1062 /* 1063 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets 1064 * 1065 * @soc: core txrx main context 1066 * @ta_peer: source peer entry 1067 * @tx_vdev_id: VDEV ID for Intra-BSS TX 1068 * @rx_tlv_hdr: start address of rx tlvs 1069 * @nbuf: nbuf that has to be intrabss forwarded 1070 * @tid_stats: tid stats pointer 1071 * 1072 * Return: bool: true if it is forwarded else false 1073 */ 1074 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, 1075 uint8_t tx_vdev_id, 1076 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1077 struct cdp_tid_rx_stats *tid_stats) 1078 { 1079 uint16_t len; 1080 1081 len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1082 1083 /* linearize the nbuf just before we send to 1084 * dp_tx_send() 1085 */ 1086 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 1087 if (qdf_nbuf_linearize(nbuf) == -ENOMEM) 1088 return false; 1089 1090 nbuf = qdf_nbuf_unshare(nbuf); 1091 if (!nbuf) { 1092 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, 1093 rx.intra_bss.fail, 1094 1, len); 1095 /* return true even though the pkt is 1096 * not forwarded. 
Basically skb_unshare 1097 * failed and we want to continue with 1098 * next nbuf. 1099 */ 1100 tid_stats->fail_cnt[INTRABSS_DROP]++; 1101 return false; 1102 } 1103 } 1104 1105 qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb)); 1106 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf); 1107 1108 if (!dp_tx_send((struct cdp_soc_t *)soc, 1109 tx_vdev_id, nbuf)) { 1110 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 1111 len); 1112 } else { 1113 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 1114 len); 1115 tid_stats->fail_cnt[INTRABSS_DROP]++; 1116 return false; 1117 } 1118 1119 return true; 1120 } 1121 1122 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1123 1124 #ifdef MESH_MODE_SUPPORT 1125 1126 /** 1127 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats 1128 * 1129 * @vdev: DP Virtual device handle 1130 * @nbuf: Buffer pointer 1131 * @rx_tlv_hdr: start of rx tlv header 1132 * @txrx_peer: pointer to peer 1133 * 1134 * This function allocated memory for mesh receive stats and fill the 1135 * required stats. Stores the memory address in skb cb. 1136 * 1137 * Return: void 1138 */ 1139 1140 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1141 uint8_t *rx_tlv_hdr, 1142 struct dp_txrx_peer *txrx_peer) 1143 { 1144 struct mesh_recv_hdr_s *rx_info = NULL; 1145 uint32_t pkt_type; 1146 uint32_t nss; 1147 uint32_t rate_mcs; 1148 uint32_t bw; 1149 uint8_t primary_chan_num; 1150 uint32_t center_chan_freq; 1151 struct dp_soc *soc = vdev->pdev->soc; 1152 struct dp_peer *peer; 1153 struct dp_peer *primary_link_peer; 1154 struct dp_soc *link_peer_soc; 1155 cdp_peer_stats_param_t buf = {0}; 1156 1157 /* fill recv mesh stats */ 1158 rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s)); 1159 1160 /* upper layers are resposible to free this memory */ 1161 1162 if (!rx_info) { 1163 dp_rx_err("%pK: Memory allocation failed for mesh rx stats", 1164 vdev->pdev->soc); 1165 DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1); 1166 return; 1167 } 1168 1169 rx_info->rs_flags = MESH_RXHDR_VER1; 1170 if (qdf_nbuf_is_rx_chfrag_start(nbuf)) 1171 rx_info->rs_flags |= MESH_RX_FIRST_MSDU; 1172 1173 if (qdf_nbuf_is_rx_chfrag_end(nbuf)) 1174 rx_info->rs_flags |= MESH_RX_LAST_MSDU; 1175 1176 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH); 1177 if (peer) { 1178 if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) { 1179 rx_info->rs_flags |= MESH_RX_DECRYPTED; 1180 rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc, 1181 rx_tlv_hdr); 1182 if (vdev->osif_get_key) 1183 vdev->osif_get_key(vdev->osif_vdev, 1184 &rx_info->rs_decryptkey[0], 1185 &peer->mac_addr.raw[0], 1186 rx_info->rs_keyix); 1187 } 1188 1189 dp_peer_unref_delete(peer, DP_MOD_ID_MESH); 1190 } 1191 1192 primary_link_peer = dp_get_primary_link_peer_by_id(soc, 1193 txrx_peer->peer_id, 1194 DP_MOD_ID_MESH); 1195 1196 if (qdf_likely(primary_link_peer)) { 1197 link_peer_soc = primary_link_peer->vdev->pdev->soc; 1198 dp_monitor_peer_get_stats_param(link_peer_soc, 1199 primary_link_peer, 1200 cdp_peer_rx_snr, &buf); 1201 rx_info->rs_snr = buf.rx_snr; 1202 dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH); 1203 } 1204 1205 rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR; 1206 1207 soc = vdev->pdev->soc; 1208 primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr); 1209 center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16; 1210 1211 if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) { 1212 rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band( 
1213 soc->ctrl_psoc, 1214 vdev->pdev->pdev_id, 1215 center_chan_freq); 1216 } 1217 rx_info->rs_channel = primary_chan_num; 1218 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 1219 rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 1220 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 1221 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 1222 rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) | 1223 (bw << 24); 1224 1225 qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO); 1226 1227 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED, 1228 FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"), 1229 rx_info->rs_flags, 1230 rx_info->rs_rssi, 1231 rx_info->rs_channel, 1232 rx_info->rs_ratephy1, 1233 rx_info->rs_keyix, 1234 rx_info->rs_snr); 1235 1236 } 1237 1238 /** 1239 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets 1240 * 1241 * @vdev: DP Virtual device handle 1242 * @nbuf: Buffer pointer 1243 * @rx_tlv_hdr: start of rx tlv header 1244 * 1245 * This checks if the received packet is matching any filter out 1246 * catogery and and drop the packet if it matches. 1247 * 1248 * Return: status(0 indicates drop, 1 indicate to no drop) 1249 */ 1250 1251 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1252 uint8_t *rx_tlv_hdr) 1253 { 1254 union dp_align_mac_addr mac_addr; 1255 struct dp_soc *soc = vdev->pdev->soc; 1256 1257 if (qdf_unlikely(vdev->mesh_rx_filter)) { 1258 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS) 1259 if (hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1260 rx_tlv_hdr)) 1261 return QDF_STATUS_SUCCESS; 1262 1263 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS) 1264 if (hal_rx_mpdu_get_to_ds(soc->hal_soc, 1265 rx_tlv_hdr)) 1266 return QDF_STATUS_SUCCESS; 1267 1268 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS) 1269 if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1270 rx_tlv_hdr) && 1271 !hal_rx_mpdu_get_to_ds(soc->hal_soc, 1272 rx_tlv_hdr)) 1273 return QDF_STATUS_SUCCESS; 1274 1275 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) { 1276 if (hal_rx_mpdu_get_addr1(soc->hal_soc, 1277 rx_tlv_hdr, 1278 &mac_addr.raw[0])) 1279 return QDF_STATUS_E_FAILURE; 1280 1281 if (!qdf_mem_cmp(&mac_addr.raw[0], 1282 &vdev->mac_addr.raw[0], 1283 QDF_MAC_ADDR_SIZE)) 1284 return QDF_STATUS_SUCCESS; 1285 } 1286 1287 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) { 1288 if (hal_rx_mpdu_get_addr2(soc->hal_soc, 1289 rx_tlv_hdr, 1290 &mac_addr.raw[0])) 1291 return QDF_STATUS_E_FAILURE; 1292 1293 if (!qdf_mem_cmp(&mac_addr.raw[0], 1294 &vdev->mac_addr.raw[0], 1295 QDF_MAC_ADDR_SIZE)) 1296 return QDF_STATUS_SUCCESS; 1297 } 1298 } 1299 1300 return QDF_STATUS_E_FAILURE; 1301 } 1302 1303 #else 1304 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1305 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer) 1306 { 1307 } 1308 1309 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1310 uint8_t *rx_tlv_hdr) 1311 { 1312 return QDF_STATUS_E_FAILURE; 1313 } 1314 1315 #endif 1316 1317 #ifdef FEATURE_NAC_RSSI 1318 /** 1319 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac 1320 * @soc: DP SOC handle 1321 * @mpdu: mpdu for which peer is invalid 1322 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1323 * pool_id has same mapping) 1324 * 1325 * return: integer type 1326 */ 1327 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, 1328 uint8_t mac_id) 1329 { 1330 struct dp_invalid_peer_msg msg; 1331 struct dp_vdev 
*vdev = NULL; 1332 struct dp_pdev *pdev = NULL; 1333 struct ieee80211_frame *wh; 1334 qdf_nbuf_t curr_nbuf, next_nbuf; 1335 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 1336 uint8_t *rx_pkt_hdr = NULL; 1337 int i = 0; 1338 1339 if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) { 1340 dp_rx_debug("%pK: Drop decapped frames", soc); 1341 goto free; 1342 } 1343 1344 /* In RAW packet, packet header will be part of data */ 1345 rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size; 1346 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1347 1348 if (!DP_FRAME_IS_DATA(wh)) { 1349 dp_rx_debug("%pK: NAWDS valid only for data frames", soc); 1350 goto free; 1351 } 1352 1353 if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { 1354 dp_rx_err("%pK: Invalid nbuf length", soc); 1355 goto free; 1356 } 1357 1358 /* In DMAC case the rx_desc_pools are common across PDEVs 1359 * so PDEV cannot be derived from the pool_id. 1360 * 1361 * link_id need to derived from the TLV tag word which is 1362 * disabled by default. For now adding a WAR to get vdev 1363 * with brute force this need to fixed with word based subscription 1364 * support is added by enabling TLV tag word 1365 */ 1366 if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) { 1367 for (i = 0; i < MAX_PDEV_CNT; i++) { 1368 pdev = soc->pdev_list[i]; 1369 1370 if (!pdev || qdf_unlikely(pdev->is_pdev_down)) 1371 continue; 1372 1373 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 1374 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1375 QDF_MAC_ADDR_SIZE) == 0) { 1376 goto out; 1377 } 1378 } 1379 } 1380 } else { 1381 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1382 1383 if (!pdev || qdf_unlikely(pdev->is_pdev_down)) { 1384 dp_rx_err("%pK: PDEV %s", 1385 soc, !pdev ? "not found" : "down"); 1386 goto free; 1387 } 1388 1389 if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) == 1390 QDF_STATUS_SUCCESS) 1391 return 0; 1392 1393 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 1394 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1395 QDF_MAC_ADDR_SIZE) == 0) { 1396 goto out; 1397 } 1398 } 1399 } 1400 1401 if (!vdev) { 1402 dp_rx_err("%pK: VDEV not found", soc); 1403 goto free; 1404 } 1405 out: 1406 msg.wh = wh; 1407 qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size); 1408 msg.nbuf = mpdu; 1409 msg.vdev_id = vdev->vdev_id; 1410 1411 /* 1412 * NOTE: Only valid for HKv1. 1413 * If smart monitor mode is enabled on RE, we are getting invalid 1414 * peer frames with RA as STA mac of RE and the TA not matching 1415 * with any NAC list or the the BSSID.Such frames need to dropped 1416 * in order to avoid HM_WDS false addition. 
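         * Otherwise the 802.11 header, the mpdu and the vdev id are reported
         * to the control plane through the rx_invalid_peer ol_ops callback;
         * the mpdu chain itself is always freed at the 'free' label below.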
1417 */ 1418 if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) { 1419 if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) { 1420 dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm", 1421 soc, wh->i_addr1); 1422 goto free; 1423 } 1424 pdev->soc->cdp_soc.ol_ops->rx_invalid_peer( 1425 (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc, 1426 pdev->pdev_id, &msg); 1427 } 1428 1429 free: 1430 /* Drop and free packet */ 1431 curr_nbuf = mpdu; 1432 while (curr_nbuf) { 1433 next_nbuf = qdf_nbuf_next(curr_nbuf); 1434 dp_rx_nbuf_free(curr_nbuf); 1435 curr_nbuf = next_nbuf; 1436 } 1437 1438 return 0; 1439 } 1440 1441 /** 1442 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler 1443 * @soc: DP SOC handle 1444 * @mpdu: mpdu for which peer is invalid 1445 * @mpdu_done: if an mpdu is completed 1446 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1447 * pool_id has same mapping) 1448 * 1449 * return: integer type 1450 */ 1451 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1452 qdf_nbuf_t mpdu, bool mpdu_done, 1453 uint8_t mac_id) 1454 { 1455 /* Only trigger the process when mpdu is completed */ 1456 if (mpdu_done) 1457 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1458 } 1459 #else 1460 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, 1461 uint8_t mac_id) 1462 { 1463 qdf_nbuf_t curr_nbuf, next_nbuf; 1464 struct dp_pdev *pdev; 1465 struct dp_vdev *vdev = NULL; 1466 struct ieee80211_frame *wh; 1467 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 1468 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr); 1469 1470 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1471 1472 if (!DP_FRAME_IS_DATA(wh)) { 1473 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, 1474 "only for data frames"); 1475 goto free; 1476 } 1477 1478 if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { 1479 dp_rx_info_rl("%pK: Invalid nbuf length", soc); 1480 goto free; 1481 } 1482 1483 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1484 if (!pdev) { 1485 dp_rx_info_rl("%pK: PDEV not found", soc); 1486 goto free; 1487 } 1488 1489 qdf_spin_lock_bh(&pdev->vdev_list_lock); 1490 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { 1491 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1492 QDF_MAC_ADDR_SIZE) == 0) { 1493 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1494 goto out; 1495 } 1496 } 1497 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1498 1499 if (!vdev) { 1500 dp_rx_info_rl("%pK: VDEV not found", soc); 1501 goto free; 1502 } 1503 1504 out: 1505 if (soc->cdp_soc.ol_ops->rx_invalid_peer) 1506 soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh); 1507 free: 1508 /* reset the head and tail pointers */ 1509 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1510 if (pdev) { 1511 pdev->invalid_peer_head_msdu = NULL; 1512 pdev->invalid_peer_tail_msdu = NULL; 1513 } 1514 1515 /* Drop and free packet */ 1516 curr_nbuf = mpdu; 1517 while (curr_nbuf) { 1518 next_nbuf = qdf_nbuf_next(curr_nbuf); 1519 dp_rx_nbuf_free(curr_nbuf); 1520 curr_nbuf = next_nbuf; 1521 } 1522 1523 /* Reset the head and tail pointers */ 1524 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1525 if (pdev) { 1526 pdev->invalid_peer_head_msdu = NULL; 1527 pdev->invalid_peer_tail_msdu = NULL; 1528 } 1529 1530 return 0; 1531 } 1532 1533 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1534 qdf_nbuf_t mpdu, bool mpdu_done, 1535 uint8_t mac_id) 1536 { 1537 /* Process the nbuf */ 1538 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1539 } 1540 #endif 1541 1542 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1543 1544 #ifdef 
RECEIVE_OFFLOAD 1545 /** 1546 * dp_rx_print_offload_info() - Print offload info from RX TLV 1547 * @soc: dp soc handle 1548 * @msdu: MSDU for which the offload info is to be printed 1549 * 1550 * Return: None 1551 */ 1552 static void dp_rx_print_offload_info(struct dp_soc *soc, 1553 qdf_nbuf_t msdu) 1554 { 1555 dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------"); 1556 dp_verbose_debug("lro_eligible 0x%x", 1557 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu)); 1558 dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu)); 1559 dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu)); 1560 dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu)); 1561 dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu)); 1562 dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu)); 1563 dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu)); 1564 dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu)); 1565 dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu)); 1566 dp_verbose_debug("---------------------------------------------------------"); 1567 } 1568 1569 /** 1570 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb 1571 * @soc: DP SOC handle 1572 * @rx_tlv: RX TLV received for the msdu 1573 * @msdu: msdu for which GRO info needs to be filled 1574 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets 1575 * 1576 * Return: None 1577 */ 1578 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, 1579 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) 1580 { 1581 struct hal_offload_info offload_info; 1582 1583 if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) 1584 return; 1585 1586 if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info)) 1587 return; 1588 1589 *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1; 1590 1591 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible; 1592 QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack; 1593 QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) = 1594 hal_rx_tlv_get_tcp_chksum(soc->hal_soc, 1595 rx_tlv); 1596 QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num; 1597 QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num; 1598 QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win; 1599 QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto; 1600 QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto; 1601 QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset; 1602 QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id; 1603 1604 dp_rx_print_offload_info(soc, msdu); 1605 } 1606 #endif /* RECEIVE_OFFLOAD */ 1607 1608 /** 1609 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf. 1610 * 1611 * @soc: DP soc handle 1612 * @nbuf: pointer to msdu. 1613 * @mpdu_len: mpdu length 1614 * @l3_pad_len: L3 padding length by HW 1615 * 1616 * Return: returns true if nbuf is last msdu of mpdu else retuns false. 
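 *
 * Worked example (assuming, for illustration only, a 2048 byte RX data
 * buffer and a 384 byte rx_pkt_tlv_size, ignoring L3 padding): a 4000 byte
 * MPDU is reaped as three nbufs whose pktlens are set to 2048, 2048 and
 * 1056, with *mpdu_len shrinking by 1664 per full buffer until it reaches
 * 0 on the last one.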
1617 */ 1618 static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc, 1619 qdf_nbuf_t nbuf, 1620 uint16_t *mpdu_len, 1621 uint32_t l3_pad_len) 1622 { 1623 bool last_nbuf; 1624 uint32_t pkt_hdr_size; 1625 1626 pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len; 1627 1628 if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) { 1629 qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE); 1630 last_nbuf = false; 1631 *mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size); 1632 } else { 1633 qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size)); 1634 last_nbuf = true; 1635 *mpdu_len = 0; 1636 } 1637 1638 return last_nbuf; 1639 } 1640 1641 /** 1642 * dp_get_l3_hdr_pad_len() - get L3 header padding length. 1643 * 1644 * @soc: DP soc handle 1645 * @nbuf: pointer to msdu. 1646 * 1647 * Return: returns padding length in bytes. 1648 */ 1649 static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc, 1650 qdf_nbuf_t nbuf) 1651 { 1652 uint32_t l3_hdr_pad = 0; 1653 uint8_t *rx_tlv_hdr; 1654 struct hal_rx_msdu_metadata msdu_metadata; 1655 1656 while (nbuf) { 1657 if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) { 1658 /* scattered msdu end with continuation is 0 */ 1659 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1660 hal_rx_msdu_metadata_get(soc->hal_soc, 1661 rx_tlv_hdr, 1662 &msdu_metadata); 1663 l3_hdr_pad = msdu_metadata.l3_hdr_pad; 1664 break; 1665 } 1666 nbuf = nbuf->next; 1667 } 1668 1669 return l3_hdr_pad; 1670 } 1671 1672 /** 1673 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across 1674 * multiple nbufs. 1675 * @soc: DP SOC handle 1676 * @nbuf: pointer to the first msdu of an amsdu. 1677 * 1678 * This function implements the creation of RX frag_list for cases 1679 * where an MSDU is spread across multiple nbufs. 1680 * 1681 * Return: returns the head nbuf which contains complete frag_list. 1682 */ 1683 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf) 1684 { 1685 qdf_nbuf_t parent, frag_list, next = NULL; 1686 uint16_t frag_list_len = 0; 1687 uint16_t mpdu_len; 1688 bool last_nbuf; 1689 uint32_t l3_hdr_pad_offset = 0; 1690 1691 /* 1692 * Use msdu len got from REO entry descriptor instead since 1693 * there is case the RX PKT TLV is corrupted while msdu_len 1694 * from REO descriptor is right for non-raw RX scatter msdu. 1695 */ 1696 mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 1697 1698 /* 1699 * this is a case where the complete msdu fits in one single nbuf. 1700 * in this case HW sets both start and end bit and we only need to 1701 * reset these bits for RAW mode simulator to decap the pkt 1702 */ 1703 if (qdf_nbuf_is_rx_chfrag_start(nbuf) && 1704 qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1705 qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size); 1706 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1707 return nbuf; 1708 } 1709 1710 l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf); 1711 /* 1712 * This is a case where we have multiple msdus (A-MSDU) spread across 1713 * multiple nbufs. here we create a fraglist out of these nbufs. 1714 * 1715 * the moment we encounter a nbuf with continuation bit set we 1716 * know for sure we have an MSDU which is spread across multiple 1717 * nbufs. We loop through and reap nbufs till we reach last nbuf. 1718 */ 1719 parent = nbuf; 1720 frag_list = nbuf->next; 1721 nbuf = nbuf->next; 1722 1723 /* 1724 * set the start bit in the first nbuf we encounter with continuation 1725 * bit set. This has the proper mpdu length set as it is the first 1726 * msdu of the mpdu. 
this becomes the parent nbuf and the subsequent
 * nbufs will form the frag_list of the parent nbuf.
 */
        qdf_nbuf_set_rx_chfrag_start(parent, 1);
        /*
         * L3 header padding is only needed for the 1st buffer
         * in a scattered msdu
         */
        last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
                                          l3_hdr_pad_offset);

        /*
         * MSDU cont bit is set but the reported MPDU length can fit
         * into a single buffer.
         *
         * Increment error stats and avoid SG list creation
         */
        if (last_nbuf) {
                DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
                qdf_nbuf_pull_head(parent,
                                   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
                return parent;
        }

        /*
         * this is where we set the length of the fragments which are
         * associated to the parent nbuf. We iterate through the frag_list
         * till we hit the last_nbuf of the list.
         */
        do {
                last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
                qdf_nbuf_pull_head(nbuf,
                                   soc->rx_pkt_tlv_size);
                frag_list_len += qdf_nbuf_len(nbuf);

                if (last_nbuf) {
                        next = nbuf->next;
                        nbuf->next = NULL;
                        break;
                } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
                        dp_err("Invalid packet length\n");
                        qdf_assert_always(0);
                }
                nbuf = nbuf->next;
        } while (!last_nbuf);

        qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
        qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
        parent->next = next;

        qdf_nbuf_pull_head(parent,
                           soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
        return parent;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef QCA_PEER_EXT_STATS
/*
 * dp_rx_compute_tid_delay - Compute per TID delay stats
 * @stats: cdp_delay_tid_stats for the TID to be updated
 * @nbuf: NBuffer
 *
 * Return: Void
 */
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
                             qdf_nbuf_t nbuf)
{
        struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
        uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);

        dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
}
#endif /* QCA_PEER_EXT_STATS */

/**
 * dp_rx_compute_delay() - Compute reap-to-stack and interframe delay stats
 *
 * @vdev: DP vdev handle
 * @nbuf: rx buffer for which the delays are computed
 *
 * Return: none
 */
void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
        uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
        int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
        uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
        uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
        uint32_t interframe_delay =
                (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
        struct cdp_tid_rx_stats *rstats =
                &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];

        dp_update_delay_stats(NULL, rstats, to_stack, tid,
                              CDP_DELAY_STATS_REAP_STACK, ring_id, false);
        /*
         * Update interframe delay stats calculated at deliver_data_ol point.
         * Value of vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame,
         * so the interframe delay will not be calculated correctly for the
         * 1st frame. On the other hand, this helps avoid an extra per packet
         * check of vdev->prev_rx_deliver_tstamp.
1829 */ 1830 dp_update_delay_stats(NULL, rstats, interframe_delay, tid, 1831 CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false); 1832 vdev->prev_rx_deliver_tstamp = current_ts; 1833 } 1834 1835 /** 1836 * dp_rx_drop_nbuf_list() - drop an nbuf list 1837 * @pdev: dp pdev reference 1838 * @buf_list: buffer list to be dropepd 1839 * 1840 * Return: int (number of bufs dropped) 1841 */ 1842 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev, 1843 qdf_nbuf_t buf_list) 1844 { 1845 struct cdp_tid_rx_stats *stats = NULL; 1846 uint8_t tid = 0, ring_id = 0; 1847 int num_dropped = 0; 1848 qdf_nbuf_t buf, next_buf; 1849 1850 buf = buf_list; 1851 while (buf) { 1852 ring_id = QDF_NBUF_CB_RX_CTX_ID(buf); 1853 next_buf = qdf_nbuf_queue_next(buf); 1854 tid = qdf_nbuf_get_tid_val(buf); 1855 if (qdf_likely(pdev)) { 1856 stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1857 stats->fail_cnt[INVALID_PEER_VDEV]++; 1858 stats->delivered_to_stack--; 1859 } 1860 dp_rx_nbuf_free(buf); 1861 buf = next_buf; 1862 num_dropped++; 1863 } 1864 1865 return num_dropped; 1866 } 1867 1868 #ifdef QCA_SUPPORT_WDS_EXTENDED 1869 /** 1870 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta 1871 * @soc: core txrx main context 1872 * @vdev: vdev 1873 * @txrx_peer: txrx peer 1874 * @nbuf_head: skb list head 1875 * 1876 * Return: true if packet is delivered to netdev per STA. 1877 */ 1878 static inline bool 1879 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 1880 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 1881 { 1882 /* 1883 * When extended WDS is disabled, frames are sent to AP netdevice. 1884 */ 1885 if (qdf_likely(!vdev->wds_ext_enabled)) 1886 return false; 1887 1888 /* 1889 * There can be 2 cases: 1890 * 1. Send frame to parent netdev if its not for netdev per STA 1891 * 2. If frame is meant for netdev per STA: 1892 * a. Send frame to appropriate netdev using registered fp. 1893 * b. If fp is NULL, drop the frames. 1894 */ 1895 if (!txrx_peer->wds_ext.init) 1896 return false; 1897 1898 if (txrx_peer->osif_rx) 1899 txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head); 1900 else 1901 dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 1902 1903 return true; 1904 } 1905 1906 #else 1907 static inline bool 1908 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 1909 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 1910 { 1911 return false; 1912 } 1913 #endif 1914 1915 #ifdef PEER_CACHE_RX_PKTS 1916 /** 1917 * dp_rx_flush_rx_cached() - flush cached rx frames 1918 * @peer: peer 1919 * @drop: flag to drop frames or forward to net stack 1920 * 1921 * Return: None 1922 */ 1923 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) 1924 { 1925 struct dp_peer_cached_bufq *bufqi; 1926 struct dp_rx_cached_buf *cache_buf = NULL; 1927 ol_txrx_rx_fp data_rx = NULL; 1928 int num_buff_elem; 1929 QDF_STATUS status; 1930 1931 /* 1932 * Flush dp cached frames only for mld peers and legacy peers, as 1933 * link peers don't store cached frames 1934 */ 1935 if (IS_MLO_DP_LINK_PEER(peer)) 1936 return; 1937 1938 if (!peer->txrx_peer) { 1939 dp_err("txrx_peer NULL!! 
peer mac_addr("QDF_MAC_ADDR_FMT")", 1940 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 1941 return; 1942 } 1943 1944 if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) { 1945 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 1946 return; 1947 } 1948 1949 qdf_spin_lock_bh(&peer->peer_info_lock); 1950 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) 1951 data_rx = peer->vdev->osif_rx; 1952 else 1953 drop = true; 1954 qdf_spin_unlock_bh(&peer->peer_info_lock); 1955 1956 bufqi = &peer->txrx_peer->bufq_info; 1957 1958 qdf_spin_lock_bh(&bufqi->bufq_lock); 1959 qdf_list_remove_front(&bufqi->cached_bufq, 1960 (qdf_list_node_t **)&cache_buf); 1961 while (cache_buf) { 1962 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( 1963 cache_buf->buf); 1964 bufqi->entries -= num_buff_elem; 1965 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1966 if (drop) { 1967 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, 1968 cache_buf->buf); 1969 } else { 1970 /* Flush the cached frames to OSIF DEV */ 1971 status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); 1972 if (status != QDF_STATUS_SUCCESS) 1973 bufqi->dropped = dp_rx_drop_nbuf_list( 1974 peer->vdev->pdev, 1975 cache_buf->buf); 1976 } 1977 qdf_mem_free(cache_buf); 1978 cache_buf = NULL; 1979 qdf_spin_lock_bh(&bufqi->bufq_lock); 1980 qdf_list_remove_front(&bufqi->cached_bufq, 1981 (qdf_list_node_t **)&cache_buf); 1982 } 1983 qdf_spin_unlock_bh(&bufqi->bufq_lock); 1984 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 1985 } 1986 1987 /** 1988 * dp_rx_enqueue_rx() - cache rx frames 1989 * @peer: peer 1990 * @rx_buf_list: cache buffer list 1991 * 1992 * Return: None 1993 */ 1994 static QDF_STATUS 1995 dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list) 1996 { 1997 struct dp_rx_cached_buf *cache_buf; 1998 struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info; 1999 int num_buff_elem; 2000 QDF_STATUS ret = QDF_STATUS_SUCCESS; 2001 struct dp_soc *soc = txrx_peer->vdev->pdev->soc; 2002 struct dp_peer *peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2003 DP_MOD_ID_RX); 2004 2005 if (!peer) { 2006 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2007 rx_buf_list); 2008 return QDF_STATUS_E_INVAL; 2009 } 2010 2011 dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, 2012 bufqi->dropped); 2013 if (!peer->valid) { 2014 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2015 rx_buf_list); 2016 ret = QDF_STATUS_E_INVAL; 2017 goto fail; 2018 } 2019 2020 qdf_spin_lock_bh(&bufqi->bufq_lock); 2021 if (bufqi->entries >= bufqi->thresh) { 2022 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2023 rx_buf_list); 2024 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2025 ret = QDF_STATUS_E_RESOURCES; 2026 goto fail; 2027 } 2028 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2029 2030 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); 2031 2032 cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); 2033 if (!cache_buf) { 2034 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2035 "Failed to allocate buf to cache rx frames"); 2036 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2037 rx_buf_list); 2038 ret = QDF_STATUS_E_NOMEM; 2039 goto fail; 2040 } 2041 2042 cache_buf->buf = rx_buf_list; 2043 2044 qdf_spin_lock_bh(&bufqi->bufq_lock); 2045 qdf_list_insert_back(&bufqi->cached_bufq, 2046 &cache_buf->node); 2047 bufqi->entries += num_buff_elem; 2048 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2049 2050 fail: 2051 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2052 return ret; 
2053 } 2054 2055 static inline 2056 bool dp_rx_is_peer_cache_bufq_supported(void) 2057 { 2058 return true; 2059 } 2060 #else 2061 static inline 2062 bool dp_rx_is_peer_cache_bufq_supported(void) 2063 { 2064 return false; 2065 } 2066 2067 static inline QDF_STATUS 2068 dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list) 2069 { 2070 return QDF_STATUS_SUCCESS; 2071 } 2072 #endif 2073 2074 #ifndef DELIVERY_TO_STACK_STATUS_CHECK 2075 /** 2076 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2077 * using the appropriate call back functions. 2078 * @soc: soc 2079 * @vdev: vdev 2080 * @peer: peer 2081 * @nbuf_head: skb list head 2082 * @nbuf_tail: skb list tail 2083 * 2084 * Return: None 2085 */ 2086 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2087 struct dp_vdev *vdev, 2088 struct dp_txrx_peer *txrx_peer, 2089 qdf_nbuf_t nbuf_head) 2090 { 2091 if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev, 2092 txrx_peer, nbuf_head))) 2093 return; 2094 2095 /* Function pointer initialized only when FISA is enabled */ 2096 if (vdev->osif_fisa_rx) 2097 /* on failure send it via regular path */ 2098 vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2099 else 2100 vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2101 } 2102 2103 #else 2104 /** 2105 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2106 * using the appropriate call back functions. 2107 * @soc: soc 2108 * @vdev: vdev 2109 * @txrx_peer: txrx peer 2110 * @nbuf_head: skb list head 2111 * @nbuf_tail: skb list tail 2112 * 2113 * Check the return status of the call back function and drop 2114 * the packets if the return status indicates a failure. 2115 * 2116 * Return: None 2117 */ 2118 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2119 struct dp_vdev *vdev, 2120 struct dp_txrx_peer *txrx_peer, 2121 qdf_nbuf_t nbuf_head) 2122 { 2123 int num_nbuf = 0; 2124 QDF_STATUS ret_val = QDF_STATUS_E_FAILURE; 2125 2126 /* Function pointer initialized only when FISA is enabled */ 2127 if (vdev->osif_fisa_rx) 2128 /* on failure send it via regular path */ 2129 ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2130 else if (vdev->osif_rx) 2131 ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2132 2133 if (!QDF_IS_STATUS_SUCCESS(ret_val)) { 2134 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 2135 DP_STATS_INC(soc, rx.err.rejected, num_nbuf); 2136 if (txrx_peer) 2137 DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num, 2138 num_nbuf); 2139 } 2140 } 2141 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ 2142 2143 /* 2144 * dp_rx_validate_rx_callbacks() - validate rx callbacks 2145 * @soc DP soc 2146 * @vdev: DP vdev handle 2147 * @txrx_peer: pointer to the txrx peer object 2148 * nbuf_head: skb list head 2149 * 2150 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 2151 * QDF_STATUS_E_FAILURE 2152 */ 2153 static inline QDF_STATUS 2154 dp_rx_validate_rx_callbacks(struct dp_soc *soc, 2155 struct dp_vdev *vdev, 2156 struct dp_txrx_peer *txrx_peer, 2157 qdf_nbuf_t nbuf_head) 2158 { 2159 int num_nbuf; 2160 2161 if (qdf_unlikely(!vdev || vdev->delete.pending)) { 2162 num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head); 2163 /* 2164 * This is a special case where vdev is invalid, 2165 * so we cannot know the pdev to which this packet 2166 * belonged. Hence we update the soc rx error stats. 2167 */ 2168 DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf); 2169 return QDF_STATUS_E_FAILURE; 2170 } 2171 2172 /* 2173 * highly unlikely to have a vdev without a registered rx 2174 * callback function. 
if so let us free the nbuf_list. 2175 */ 2176 if (qdf_unlikely(!vdev->osif_rx)) { 2177 if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) { 2178 dp_rx_enqueue_rx(txrx_peer, nbuf_head); 2179 } else { 2180 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, 2181 nbuf_head); 2182 DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf, 2183 vdev->pdev->enhanced_stats_en); 2184 } 2185 return QDF_STATUS_E_FAILURE; 2186 } 2187 2188 return QDF_STATUS_SUCCESS; 2189 } 2190 2191 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 2192 struct dp_vdev *vdev, 2193 struct dp_txrx_peer *txrx_peer, 2194 qdf_nbuf_t nbuf_head, 2195 qdf_nbuf_t nbuf_tail) 2196 { 2197 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2198 QDF_STATUS_SUCCESS) 2199 return QDF_STATUS_E_FAILURE; 2200 2201 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || 2202 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { 2203 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, 2204 &nbuf_tail); 2205 } 2206 2207 dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head); 2208 2209 return QDF_STATUS_SUCCESS; 2210 } 2211 2212 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 2213 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 2214 struct dp_vdev *vdev, 2215 struct dp_txrx_peer *txrx_peer, 2216 qdf_nbuf_t nbuf_head, 2217 qdf_nbuf_t nbuf_tail) 2218 { 2219 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2220 QDF_STATUS_SUCCESS) 2221 return QDF_STATUS_E_FAILURE; 2222 2223 vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head); 2224 2225 return QDF_STATUS_SUCCESS; 2226 } 2227 #endif 2228 2229 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2230 #ifdef VDEV_PEER_PROTOCOL_COUNT 2231 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \ 2232 { \ 2233 qdf_nbuf_t nbuf_local; \ 2234 struct dp_txrx_peer *txrx_peer_local; \ 2235 struct dp_vdev *vdev_local = vdev_hdl; \ 2236 do { \ 2237 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ 2238 break; \ 2239 nbuf_local = nbuf; \ 2240 txrx_peer_local = txrx_peer; \ 2241 if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ 2242 break; \ 2243 else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ 2244 break; \ 2245 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ 2246 (nbuf_local), \ 2247 (txrx_peer_local), 0, 1); \ 2248 } while (0); \ 2249 } 2250 #else 2251 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) 2252 #endif 2253 2254 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER 2255 /** 2256 * dp_rx_rates_stats_update() - update rate stats 2257 * from rx msdu. 2258 * @soc: datapath soc handle 2259 * @nbuf: received msdu buffer 2260 * @rx_tlv_hdr: rx tlv header 2261 * @txrx_peer: datapath txrx_peer handle 2262 * @sgi: Short Guard Interval 2263 * @mcs: Modulation and Coding Set 2264 * @nss: Number of Spatial Streams 2265 * @bw: BandWidth 2266 * @pkt_type: Corresponds to preamble 2267 * 2268 * To be precisely record rates, following factors are considered: 2269 * Exclude specific frames, ARP, DHCP, ssdp, etc. 2270 * Make sure to affect rx throughput as least as possible. 
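 * A typical call site (see dp_rx_msdu_extd_stats_update() below) passes the
 * values already parsed from the rx TLVs, for example:
 *   dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
 *                            sgi, mcs, nss, bw, pkt_type);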
2271 * 2272 * Return: void 2273 */ 2274 static void 2275 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2276 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2277 uint32_t sgi, uint32_t mcs, 2278 uint32_t nss, uint32_t bw, uint32_t pkt_type) 2279 { 2280 uint32_t rix; 2281 uint16_t ratecode; 2282 uint32_t avg_rx_rate; 2283 uint32_t ratekbps; 2284 enum cdp_punctured_modes punc_mode = NO_PUNCTURE; 2285 2286 if (soc->high_throughput || 2287 dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) { 2288 return; 2289 } 2290 2291 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs); 2292 2293 /* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */ 2294 if (pkt_type == DOT11_B) 2295 nss = 1; 2296 2297 /* here pkt_type corresponds to preamble */ 2298 ratekbps = dp_getrateindex(sgi, 2299 mcs, 2300 nss - 1, 2301 pkt_type, 2302 bw, 2303 punc_mode, 2304 &rix, 2305 &ratecode); 2306 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps); 2307 avg_rx_rate = 2308 dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate, 2309 ratekbps); 2310 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate); 2311 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss); 2312 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs); 2313 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw); 2314 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi); 2315 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type); 2316 } 2317 #else 2318 static inline void 2319 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2320 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2321 uint32_t sgi, uint32_t mcs, 2322 uint32_t nss, uint32_t bw, uint32_t pkt_type) 2323 { 2324 } 2325 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */ 2326 2327 #ifndef QCA_ENHANCED_STATS_SUPPORT 2328 /** 2329 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer 2330 * 2331 * @soc: datapath soc handle 2332 * @nbuf: received msdu buffer 2333 * @rx_tlv_hdr: rx tlv header 2334 * @txrx_peer: datapath txrx_peer handle 2335 * 2336 * Return: void 2337 */ 2338 static inline 2339 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2340 uint8_t *rx_tlv_hdr, 2341 struct dp_txrx_peer *txrx_peer) 2342 { 2343 bool is_ampdu; 2344 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; 2345 uint8_t dst_mcs_idx; 2346 2347 /* 2348 * TODO - For KIWI this field is present in ring_desc 2349 * Try to use ring desc instead of tlv. 2350 */ 2351 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr); 2352 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu); 2353 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); 2354 2355 sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr); 2356 mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 2357 tid = qdf_nbuf_get_tid_val(nbuf); 2358 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 2359 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, 2360 rx_tlv_hdr); 2361 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 2362 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 2363 /* do HW to SW pkt type conversion */ 2364 pkt_type = (pkt_type >= HAL_DOT11_MAX ? 
DOT11_MAX : 2365 hal_2_dp_pkt_type_map[pkt_type]); 2366 2367 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1, 2368 ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); 2369 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, 2370 ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); 2371 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1); 2372 /* 2373 * only if nss > 0 and pkt_type is 11N/AC/AX, 2374 * then increase index [nss - 1] in array counter. 2375 */ 2376 if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type)) 2377 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1); 2378 2379 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1); 2380 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1, 2381 hal_rx_tlv_mic_err_get(soc->hal_soc, 2382 rx_tlv_hdr)); 2383 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1, 2384 hal_rx_tlv_decrypt_err_get(soc->hal_soc, 2385 rx_tlv_hdr)); 2386 2387 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1); 2388 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1); 2389 2390 dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs); 2391 if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx) 2392 DP_PEER_EXTD_STATS_INC(txrx_peer, 2393 rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx], 2394 1); 2395 2396 dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, 2397 sgi, mcs, nss, bw, pkt_type); 2398 } 2399 #else 2400 static inline 2401 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2402 uint8_t *rx_tlv_hdr, 2403 struct dp_txrx_peer *txrx_peer) 2404 { 2405 } 2406 #endif 2407 2408 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO) 2409 static inline void 2410 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2411 qdf_nbuf_t nbuf) 2412 { 2413 uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf); 2414 2415 /* only count stats per lmac for MLO connection*/ 2416 DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1, 2417 QDF_NBUF_CB_RX_PKT_LEN(nbuf), 2418 txrx_peer->mld_peer); 2419 } 2420 #else 2421 static inline void 2422 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2423 qdf_nbuf_t nbuf) 2424 { 2425 } 2426 #endif 2427 2428 /** 2429 * dp_rx_msdu_stats_update() - update per msdu stats. 2430 * @soc: core txrx main context 2431 * @nbuf: pointer to the first msdu of an amsdu. 2432 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 2433 * @txrx_peer: pointer to the txrx peer object. 2434 * @ring_id: reo dest ring number on which pkt is reaped. 2435 * @tid_stats: per tid rx stats. 2436 * 2437 * update all the per msdu stats for that nbuf. 
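 * Note: an nbuf counts as a non-A-MSDU only when both the chfrag start and
 * chfrag end flags are set on it; otherwise it is accounted as part of an
 * A-MSDU.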
2438 * Return: void 2439 */ 2440 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2441 uint8_t *rx_tlv_hdr, 2442 struct dp_txrx_peer *txrx_peer, 2443 uint8_t ring_id, 2444 struct cdp_tid_rx_stats *tid_stats) 2445 { 2446 bool is_not_amsdu; 2447 struct dp_vdev *vdev = txrx_peer->vdev; 2448 bool enh_flag; 2449 qdf_ether_header_t *eh; 2450 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2451 2452 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); 2453 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 2454 qdf_nbuf_is_rx_chfrag_end(nbuf); 2455 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, 2456 msdu_len); 2457 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, 2458 is_not_amsdu); 2459 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu); 2460 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1, 2461 qdf_nbuf_is_rx_retry_flag(nbuf)); 2462 dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf); 2463 tid_stats->msdu_cnt++; 2464 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && 2465 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 2466 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2467 enh_flag = vdev->pdev->enhanced_stats_en; 2468 DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag); 2469 tid_stats->mcast_msdu_cnt++; 2470 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 2471 DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag); 2472 tid_stats->bcast_msdu_cnt++; 2473 } 2474 } 2475 2476 txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks(); 2477 2478 dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer); 2479 } 2480 2481 #ifndef WDS_VENDOR_EXTENSION 2482 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, 2483 struct dp_vdev *vdev, 2484 struct dp_txrx_peer *txrx_peer) 2485 { 2486 return 1; 2487 } 2488 #endif 2489 2490 #ifdef RX_DESC_DEBUG_CHECK 2491 /** 2492 * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr 2493 * corruption 2494 * 2495 * @ring_desc: REO ring descriptor 2496 * @rx_desc: Rx descriptor 2497 * 2498 * Return: NONE 2499 */ 2500 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, 2501 hal_ring_desc_t ring_desc, 2502 struct dp_rx_desc *rx_desc) 2503 { 2504 struct hal_buf_info hbi; 2505 2506 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2507 /* Sanity check for possible buffer paddr corruption */ 2508 if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr)) 2509 return QDF_STATUS_SUCCESS; 2510 2511 return QDF_STATUS_E_FAILURE; 2512 } 2513 2514 /** 2515 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer 2516 * out of bound access from H.W 2517 * 2518 * @soc: DP soc 2519 * @pkt_len: Packet length received from H.W 2520 * 2521 * Return: NONE 2522 */ 2523 static inline void 2524 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, 2525 uint32_t pkt_len) 2526 { 2527 struct rx_desc_pool *rx_desc_pool; 2528 2529 rx_desc_pool = &soc->rx_desc_buf[0]; 2530 qdf_assert_always(pkt_len <= rx_desc_pool->buf_size); 2531 } 2532 #else 2533 static inline void 2534 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { } 2535 #endif 2536 2537 #ifdef DP_RX_PKT_NO_PEER_DELIVER 2538 #ifdef DP_RX_UDP_OVER_PEER_ROAM 2539 /** 2540 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received 2541 * during roaming 2542 * @vdev: dp_vdev pointer 2543 * @rx_tlv_hdr: rx tlv header 2544 * @nbuf: pkt skb pointer 2545 * 2546 * This function will check if rx udp data is received from authorised 2547 * roamed peer before peer map indication is 
received from FW after 2548 * roaming. This is needed for VoIP scenarios in which packet loss 2549 * expected during roaming is minimal. 2550 * 2551 * Return: bool 2552 */ 2553 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2554 uint8_t *rx_tlv_hdr, 2555 qdf_nbuf_t nbuf) 2556 { 2557 char *hdr_desc; 2558 struct ieee80211_frame *wh = NULL; 2559 2560 hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc, 2561 rx_tlv_hdr); 2562 wh = (struct ieee80211_frame *)hdr_desc; 2563 2564 if (vdev->roaming_peer_status == 2565 WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED && 2566 !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2, 2567 QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) || 2568 qdf_nbuf_is_ipv6_udp_pkt(nbuf))) 2569 return true; 2570 2571 return false; 2572 } 2573 #else 2574 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2575 uint8_t *rx_tlv_hdr, 2576 qdf_nbuf_t nbuf) 2577 { 2578 return false; 2579 } 2580 #endif 2581 /** 2582 * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if 2583 * no corresbonding peer found 2584 * @soc: core txrx main context 2585 * @nbuf: pkt skb pointer 2586 * 2587 * This function will try to deliver some RX special frames to stack 2588 * even there is no peer matched found. for instance, LFR case, some 2589 * eapol data will be sent to host before peer_map done. 2590 * 2591 * Return: None 2592 */ 2593 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2594 { 2595 uint16_t peer_id; 2596 uint8_t vdev_id; 2597 struct dp_vdev *vdev = NULL; 2598 uint32_t l2_hdr_offset = 0; 2599 uint16_t msdu_len = 0; 2600 uint32_t pkt_len = 0; 2601 uint8_t *rx_tlv_hdr; 2602 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 2603 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; 2604 2605 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 2606 if (peer_id > soc->max_peer_id) 2607 goto deliver_fail; 2608 2609 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); 2610 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX); 2611 if (!vdev || vdev->delete.pending || !vdev->osif_rx) 2612 goto deliver_fail; 2613 2614 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) 2615 goto deliver_fail; 2616 2617 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2618 l2_hdr_offset = 2619 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 2620 2621 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2622 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 2623 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 2624 2625 qdf_nbuf_set_pktlen(nbuf, pkt_len); 2626 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset); 2627 2628 if (dp_rx_is_special_frame(nbuf, frame_mask) || 2629 dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, nbuf)) { 2630 qdf_nbuf_set_exc_frame(nbuf, 1); 2631 if (QDF_STATUS_SUCCESS != 2632 vdev->osif_rx(vdev->osif_vdev, nbuf)) 2633 goto deliver_fail; 2634 DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1); 2635 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2636 return; 2637 } 2638 2639 deliver_fail: 2640 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2641 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2642 dp_rx_nbuf_free(nbuf); 2643 if (vdev) 2644 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2645 } 2646 #else 2647 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2648 { 2649 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2650 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2651 dp_rx_nbuf_free(nbuf); 2652 } 2653 #endif 2654 2655 /** 2656 * dp_rx_srng_get_num_pending() - get number of pending entries 2657 * @hal_soc: hal soc 
opaque pointer 2658 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring 2659 * @num_entries: number of entries in the hal_ring. 2660 * @near_full: pointer to a boolean. This is set if ring is near full. 2661 * 2662 * The function returns the number of entries in a destination ring which are 2663 * yet to be reaped. The function also checks if the ring is near full. 2664 * If more than half of the ring needs to be reaped, the ring is considered 2665 * approaching full. 2666 * The function uses hal_srng_dst_num_valid_locked to get the number of valid 2667 * entries. It should not be called within a SRNG lock. HW pointer value is 2668 * synced into cached_hp. 2669 * 2670 * Return: Number of pending entries if any 2671 */ 2672 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, 2673 hal_ring_handle_t hal_ring_hdl, 2674 uint32_t num_entries, 2675 bool *near_full) 2676 { 2677 uint32_t num_pending = 0; 2678 2679 num_pending = hal_srng_dst_num_valid_locked(hal_soc, 2680 hal_ring_hdl, 2681 true); 2682 2683 if (num_entries && (num_pending >= num_entries >> 1)) 2684 *near_full = true; 2685 else 2686 *near_full = false; 2687 2688 return num_pending; 2689 } 2690 2691 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2692 2693 #ifdef WLAN_SUPPORT_RX_FISA 2694 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2695 { 2696 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; 2697 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2698 } 2699 #else 2700 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2701 { 2702 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2703 } 2704 #endif 2705 2706 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2707 2708 #ifdef DP_RX_DROP_RAW_FRM 2709 /** 2710 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop 2711 * @nbuf: pkt skb pointer 2712 * 2713 * Return: true - raw frame, dropped 2714 * false - not raw frame, do nothing 2715 */ 2716 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) 2717 { 2718 if (qdf_nbuf_is_raw_frame(nbuf)) { 2719 dp_rx_nbuf_free(nbuf); 2720 return true; 2721 } 2722 2723 return false; 2724 } 2725 #endif 2726 2727 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 2728 /** 2729 * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
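 * Each entry stores a timestamp together with the descriptor's buffer
 * address info (paddr, sw_cookie, rbm), so stale or corrupted REO
 * descriptors can be debugged after the fact.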
2730 * @soc: Datapath soc structure 2731 * @ring_num: REO ring number 2732 * @ring_desc: REO ring descriptor 2733 * 2734 * Returns: None 2735 */ 2736 void 2737 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 2738 hal_ring_desc_t ring_desc) 2739 { 2740 struct dp_buf_info_record *record; 2741 struct hal_buf_info hbi; 2742 uint32_t idx; 2743 2744 if (qdf_unlikely(!soc->rx_ring_history[ring_num])) 2745 return; 2746 2747 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2748 2749 /* buffer_addr_info is the first element of ring_desc */ 2750 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc, 2751 &hbi); 2752 2753 idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index, 2754 DP_RX_HIST_MAX); 2755 2756 /* No NULL check needed for record since its an array */ 2757 record = &soc->rx_ring_history[ring_num]->entry[idx]; 2758 2759 record->timestamp = qdf_get_log_timestamp(); 2760 record->hbi.paddr = hbi.paddr; 2761 record->hbi.sw_cookie = hbi.sw_cookie; 2762 record->hbi.rbm = hbi.rbm; 2763 } 2764 #endif 2765 2766 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 2767 /** 2768 * dp_rx_update_stats() - Update soc level rx packet count 2769 * @soc: DP soc handle 2770 * @nbuf: nbuf received 2771 * 2772 * Returns: none 2773 */ 2774 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) 2775 { 2776 DP_STATS_INC_PKT(soc, rx.ingress, 1, 2777 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2778 } 2779 #endif 2780 2781 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 2782 /** 2783 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture 2784 * @soc : dp_soc handle 2785 * @pdev: dp_pdev handle 2786 * @peer_id: peer_id of the peer for which completion came 2787 * @ppdu_id: ppdu_id 2788 * @netbuf: Buffer pointer 2789 * 2790 * This function is used to deliver rx packet to packet capture 2791 */ 2792 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2793 uint16_t peer_id, uint32_t is_offload, 2794 qdf_nbuf_t netbuf) 2795 { 2796 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2797 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf, 2798 peer_id, is_offload, pdev->pdev_id); 2799 } 2800 2801 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2802 uint32_t is_offload) 2803 { 2804 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2805 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER, 2806 soc, nbuf, HTT_INVALID_VDEV, 2807 is_offload, 0); 2808 } 2809 #endif 2810 2811 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2812 2813 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) 2814 { 2815 QDF_STATUS ret; 2816 2817 if (vdev->osif_rx_flush) { 2818 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); 2819 if (!QDF_IS_STATUS_SUCCESS(ret)) { 2820 dp_err("Failed to flush rx pkts for vdev %d\n", 2821 vdev->vdev_id); 2822 return ret; 2823 } 2824 } 2825 2826 return QDF_STATUS_SUCCESS; 2827 } 2828 2829 static QDF_STATUS 2830 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, 2831 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 2832 struct dp_pdev *dp_pdev, 2833 struct rx_desc_pool *rx_desc_pool) 2834 { 2835 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 2836 2837 (nbuf_frag_info_t->virt_addr).nbuf = 2838 qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, 2839 RX_BUFFER_RESERVATION, 2840 rx_desc_pool->buf_alignment, FALSE); 2841 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 2842 dp_err("nbuf alloc failed"); 2843 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 2844 return ret; 2845 } 2846 2847 ret = 
qdf_nbuf_map_nbytes_single(dp_soc->osdev, 2848 (nbuf_frag_info_t->virt_addr).nbuf, 2849 QDF_DMA_FROM_DEVICE, 2850 rx_desc_pool->buf_size); 2851 2852 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 2853 qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf); 2854 dp_err("nbuf map failed"); 2855 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 2856 return ret; 2857 } 2858 2859 nbuf_frag_info_t->paddr = 2860 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 2861 2862 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 2863 &nbuf_frag_info_t->paddr, 2864 rx_desc_pool); 2865 if (ret == QDF_STATUS_E_FAILURE) { 2866 dp_err("nbuf check x86 failed"); 2867 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 2868 return ret; 2869 } 2870 2871 return QDF_STATUS_SUCCESS; 2872 } 2873 2874 QDF_STATUS 2875 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 2876 struct dp_srng *dp_rxdma_srng, 2877 struct rx_desc_pool *rx_desc_pool, 2878 uint32_t num_req_buffers) 2879 { 2880 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 2881 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; 2882 union dp_rx_desc_list_elem_t *next; 2883 void *rxdma_ring_entry; 2884 qdf_dma_addr_t paddr; 2885 struct dp_rx_nbuf_frag_info *nf_info; 2886 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; 2887 uint32_t buffer_index, nbuf_ptrs_per_page; 2888 qdf_nbuf_t nbuf; 2889 QDF_STATUS ret; 2890 int page_idx, total_pages; 2891 union dp_rx_desc_list_elem_t *desc_list = NULL; 2892 union dp_rx_desc_list_elem_t *tail = NULL; 2893 int sync_hw_ptr = 1; 2894 uint32_t num_entries_avail; 2895 2896 if (qdf_unlikely(!dp_pdev)) { 2897 dp_rx_err("%pK: pdev is null for mac_id = %d", 2898 dp_soc, mac_id); 2899 return QDF_STATUS_E_FAILURE; 2900 } 2901 2902 if (qdf_unlikely(!rxdma_srng)) { 2903 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 2904 return QDF_STATUS_E_FAILURE; 2905 } 2906 2907 dp_debug("requested %u RX buffers for driver attach", num_req_buffers); 2908 2909 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 2910 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 2911 rxdma_srng, 2912 sync_hw_ptr); 2913 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 2914 2915 if (!num_entries_avail) { 2916 dp_err("Num of available entries is zero, nothing to do"); 2917 return QDF_STATUS_E_NOMEM; 2918 } 2919 2920 if (num_entries_avail < num_req_buffers) 2921 num_req_buffers = num_entries_avail; 2922 2923 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, 2924 num_req_buffers, &desc_list, &tail); 2925 if (!nr_descs) { 2926 dp_err("no free rx_descs in freelist"); 2927 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 2928 return QDF_STATUS_E_NOMEM; 2929 } 2930 2931 dp_debug("got %u RX descs for driver attach", nr_descs); 2932 2933 /* 2934 * Try to allocate pointers to the nbuf one page at a time. 2935 * Take pointers that can fit in one page of memory and 2936 * iterate through the total descriptors that need to be 2937 * allocated in order of pages. Reuse the pointers that 2938 * have been allocated to fit in one page across each 2939 * iteration to index into the nbuf. 
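 * For example, with purely illustrative sizes of DP_BLOCKMEM_SIZE = 4096
 * bytes and sizeof(*nf_info) = 16 bytes, one page holds 256 pointers; 1000
 * descriptors would then be filled in 4 passes (three full pages of 256
 * plus a final page of 232).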
2940 */ 2941 total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE; 2942 2943 /* 2944 * Add an extra page to store the remainder if any 2945 */ 2946 if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE) 2947 total_pages++; 2948 nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE); 2949 if (!nf_info) { 2950 dp_err("failed to allocate nbuf array"); 2951 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 2952 QDF_BUG(0); 2953 return QDF_STATUS_E_NOMEM; 2954 } 2955 nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info); 2956 2957 for (page_idx = 0; page_idx < total_pages; page_idx++) { 2958 qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE); 2959 2960 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { 2961 /* 2962 * The last page of buffer pointers may not be required 2963 * completely based on the number of descriptors. Below 2964 * check will ensure we are allocating only the 2965 * required number of descriptors. 2966 */ 2967 if (nr_nbuf_total >= nr_descs) 2968 break; 2969 /* Flag is set while pdev rx_desc_pool initialization */ 2970 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 2971 ret = dp_pdev_frag_alloc_and_map(dp_soc, 2972 &nf_info[nr_nbuf], dp_pdev, 2973 rx_desc_pool); 2974 else 2975 ret = dp_pdev_nbuf_alloc_and_map(dp_soc, 2976 &nf_info[nr_nbuf], dp_pdev, 2977 rx_desc_pool); 2978 if (QDF_IS_STATUS_ERROR(ret)) 2979 break; 2980 2981 nr_nbuf_total++; 2982 } 2983 2984 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 2985 2986 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { 2987 rxdma_ring_entry = 2988 hal_srng_src_get_next(dp_soc->hal_soc, 2989 rxdma_srng); 2990 qdf_assert_always(rxdma_ring_entry); 2991 2992 next = desc_list->next; 2993 paddr = nf_info[buffer_index].paddr; 2994 nbuf = nf_info[buffer_index].virt_addr.nbuf; 2995 2996 /* Flag is set while pdev rx_desc_pool initialization */ 2997 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 2998 dp_rx_desc_frag_prep(&desc_list->rx_desc, 2999 &nf_info[buffer_index]); 3000 else 3001 dp_rx_desc_prep(&desc_list->rx_desc, 3002 &nf_info[buffer_index]); 3003 desc_list->rx_desc.in_use = 1; 3004 dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); 3005 dp_rx_desc_update_dbg_info(&desc_list->rx_desc, 3006 __func__, 3007 RX_DESC_REPLENISHED); 3008 3009 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc ,rxdma_ring_entry, paddr, 3010 desc_list->rx_desc.cookie, 3011 rx_desc_pool->owner); 3012 dp_ipa_handle_rx_buf_smmu_mapping( 3013 dp_soc, nbuf, 3014 rx_desc_pool->buf_size, 3015 true); 3016 3017 desc_list = next; 3018 } 3019 3020 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, 3021 rxdma_srng, nr_nbuf, nr_nbuf); 3022 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3023 } 3024 3025 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); 3026 qdf_mem_free(nf_info); 3027 3028 if (!nr_nbuf_total) { 3029 dp_err("No nbuf's allocated"); 3030 QDF_BUG(0); 3031 return QDF_STATUS_E_RESOURCES; 3032 } 3033 3034 /* No need to count the number of bytes received during replenish. 3035 * Therefore set replenish.pkts.bytes as 0. 3036 */ 3037 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); 3038 3039 return QDF_STATUS_SUCCESS; 3040 } 3041 3042 qdf_export_symbol(dp_pdev_rx_buffers_attach); 3043 3044 /** 3045 * dp_rx_enable_mon_dest_frag() - Enable frag processing for 3046 * monitor destination ring via frag. 3047 * 3048 * Enable this flag only for monitor destination buffer processing 3049 * if DP_RX_MON_MEM_FRAG feature is enabled. 
3050 * If flag is set then frag based function will be called for alloc, 3051 * map, prep desc and free ops for desc buffer else normal nbuf based 3052 * function will be called. 3053 * 3054 * @rx_desc_pool: Rx desc pool 3055 * @is_mon_dest_desc: Is it for monitor dest buffer 3056 * 3057 * Return: None 3058 */ 3059 #ifdef DP_RX_MON_MEM_FRAG 3060 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3061 bool is_mon_dest_desc) 3062 { 3063 rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc; 3064 if (is_mon_dest_desc) 3065 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled"); 3066 } 3067 #else 3068 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3069 bool is_mon_dest_desc) 3070 { 3071 rx_desc_pool->rx_mon_dest_frag_enable = false; 3072 if (is_mon_dest_desc) 3073 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled"); 3074 } 3075 #endif 3076 3077 qdf_export_symbol(dp_rx_enable_mon_dest_frag); 3078 3079 /* 3080 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor 3081 * pool 3082 * 3083 * @pdev: core txrx pdev context 3084 * 3085 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3086 * QDF_STATUS_E_NOMEM 3087 */ 3088 QDF_STATUS 3089 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) 3090 { 3091 struct dp_soc *soc = pdev->soc; 3092 uint32_t rxdma_entries; 3093 uint32_t rx_sw_desc_num; 3094 struct dp_srng *dp_rxdma_srng; 3095 struct rx_desc_pool *rx_desc_pool; 3096 uint32_t status = QDF_STATUS_SUCCESS; 3097 int mac_for_pdev; 3098 3099 mac_for_pdev = pdev->lmac_id; 3100 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3101 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3102 soc, mac_for_pdev); 3103 return status; 3104 } 3105 3106 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3107 rxdma_entries = dp_rxdma_srng->num_entries; 3108 3109 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3110 rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3111 3112 rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE; 3113 status = dp_rx_desc_pool_alloc(soc, 3114 rx_sw_desc_num, 3115 rx_desc_pool); 3116 if (status != QDF_STATUS_SUCCESS) 3117 return status; 3118 3119 return status; 3120 } 3121 3122 /* 3123 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool 3124 * 3125 * @pdev: core txrx pdev context 3126 */ 3127 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) 3128 { 3129 int mac_for_pdev = pdev->lmac_id; 3130 struct dp_soc *soc = pdev->soc; 3131 struct rx_desc_pool *rx_desc_pool; 3132 3133 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3134 3135 dp_rx_desc_pool_free(soc, rx_desc_pool); 3136 } 3137 3138 /* 3139 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors 3140 * 3141 * @pdev: core txrx pdev context 3142 * 3143 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3144 * QDF_STATUS_E_NOMEM 3145 */ 3146 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) 3147 { 3148 int mac_for_pdev = pdev->lmac_id; 3149 struct dp_soc *soc = pdev->soc; 3150 uint32_t rxdma_entries; 3151 uint32_t rx_sw_desc_num; 3152 struct dp_srng *dp_rxdma_srng; 3153 struct rx_desc_pool *rx_desc_pool; 3154 3155 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3156 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3157 /** 3158 * If NSS is enabled, rx_desc_pool is already filled. 3159 * Hence, just disable desc_pool frag flag. 
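 * (The frag flag only matters for monitor-destination descriptor pools;
 * see dp_rx_enable_mon_dest_frag() above.)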
3160 */ 3161 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3162 3163 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3164 soc, mac_for_pdev); 3165 return QDF_STATUS_SUCCESS; 3166 } 3167 3168 if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM) 3169 return QDF_STATUS_E_NOMEM; 3170 3171 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3172 rxdma_entries = dp_rxdma_srng->num_entries; 3173 3174 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 3175 3176 rx_sw_desc_num = 3177 wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3178 3179 rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc); 3180 rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; 3181 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; 3182 /* Disable monitor dest processing via frag */ 3183 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3184 3185 dp_rx_desc_pool_init(soc, mac_for_pdev, 3186 rx_sw_desc_num, rx_desc_pool); 3187 return QDF_STATUS_SUCCESS; 3188 } 3189 3190 /* 3191 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools 3192 * @pdev: core txrx pdev context 3193 * 3194 * This function resets the freelist of rx descriptors and destroys locks 3195 * associated with this list of descriptors. 3196 */ 3197 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) 3198 { 3199 int mac_for_pdev = pdev->lmac_id; 3200 struct dp_soc *soc = pdev->soc; 3201 struct rx_desc_pool *rx_desc_pool; 3202 3203 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3204 3205 dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev); 3206 } 3207 3208 /* 3209 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring 3210 * 3211 * @pdev: core txrx pdev context 3212 * 3213 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3214 * QDF_STATUS_E_NOMEM 3215 */ 3216 QDF_STATUS 3217 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) 3218 { 3219 int mac_for_pdev = pdev->lmac_id; 3220 struct dp_soc *soc = pdev->soc; 3221 struct dp_srng *dp_rxdma_srng; 3222 struct rx_desc_pool *rx_desc_pool; 3223 uint32_t rxdma_entries; 3224 3225 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3226 rxdma_entries = dp_rxdma_srng->num_entries; 3227 3228 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3229 3230 /* Initialize RX buffer pool which will be 3231 * used during low memory conditions 3232 */ 3233 dp_rx_buffer_pool_init(soc, mac_for_pdev); 3234 3235 return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev, 3236 dp_rxdma_srng, 3237 rx_desc_pool, 3238 rxdma_entries - 1); 3239 } 3240 3241 /* 3242 * dp_rx_pdev_buffers_free - Free nbufs (skbs) 3243 * 3244 * @pdev: core txrx pdev context 3245 */ 3246 void 3247 dp_rx_pdev_buffers_free(struct dp_pdev *pdev) 3248 { 3249 int mac_for_pdev = pdev->lmac_id; 3250 struct dp_soc *soc = pdev->soc; 3251 struct rx_desc_pool *rx_desc_pool; 3252 3253 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3254 3255 dp_rx_desc_nbuf_free(soc, rx_desc_pool); 3256 dp_rx_buffer_pool_deinit(soc, mac_for_pdev); 3257 } 3258 3259 #ifdef DP_RX_SPECIAL_FRAME_NEED 3260 bool dp_rx_deliver_special_frame(struct dp_soc *soc, 3261 struct dp_txrx_peer *txrx_peer, 3262 qdf_nbuf_t nbuf, uint32_t frame_mask, 3263 uint8_t *rx_tlv_hdr) 3264 { 3265 uint32_t l2_hdr_offset = 0; 3266 uint16_t msdu_len = 0; 3267 uint32_t skip_len; 3268 3269 l2_hdr_offset = 3270 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 3271 3272 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 3273 skip_len = l2_hdr_offset; 3274 } else { 3275 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 3276 skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size; 
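/*
 * For a linear (non-frag) nbuf the rx TLVs are still at the head, so the
 * packet length set here must cover both the TLVs and the L3 header padding
 * that are pulled off below.
 */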
3277 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 3278 } 3279 3280 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 3281 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 3282 qdf_nbuf_pull_head(nbuf, skip_len); 3283 3284 if (txrx_peer->vdev) { 3285 dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf, 3286 QDF_TX_RX_STATUS_OK); 3287 } 3288 3289 if (dp_rx_is_special_frame(nbuf, frame_mask)) { 3290 dp_info("special frame, mpdu sn 0x%x", 3291 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr)); 3292 qdf_nbuf_set_exc_frame(nbuf, 1); 3293 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, 3294 nbuf, NULL); 3295 return true; 3296 } 3297 3298 return false; 3299 } 3300 #endif 3301 3302 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET 3303 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev, 3304 uint8_t *rx_tlv, 3305 qdf_nbuf_t nbuf) 3306 { 3307 struct dp_soc *soc; 3308 3309 if (!pdev->is_first_wakeup_packet) 3310 return; 3311 3312 soc = pdev->soc; 3313 if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) { 3314 qdf_nbuf_mark_wakeup_frame(nbuf); 3315 dp_info("First packet after WOW Wakeup rcvd"); 3316 } 3317 } 3318 #endif 3319