/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif

#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
	qdf_assert_always(0);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	uint8_t return_buffer_manager;

	if (qdf_unlikely(!rx_desc)) {
		/*
		 * This is an unlikely case where the cookie obtained
		 * from the ring_desc is invalid and hence we are not
		 * able to find the corresponding rx_desc
		 */
		goto fail;
	}

	return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
	if (qdf_unlikely(!(return_buffer_manager ==
			   HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
			   return_buffer_manager ==
			   HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
		goto fail;
	}

	return QDF_STATUS_SUCCESS;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	dp_err("Ring Desc:");
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	return QDF_STATUS_E_NULL_VALUE;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

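/*
 * Illustrative sketch (not part of the driver): dp_rx_desc_sanity() is
 * intended to be invoked from the REO reap loop right after the rx_desc
 * has been looked up from the ring descriptor cookie. A hypothetical
 * caller would look roughly like:
 *
 *	if (dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
 *			      ring_desc, rx_desc) != QDF_STATUS_SUCCESS)
 *		continue;	(skip this entry, stats already updated)
 *
 * The surrounding loop shape is an assumption for illustration only.
 */
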
/**
 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
 *
 * @dp_soc: struct dp_soc *
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).vaddr =
		qdf_frag_alloc(NULL, rx_desc_pool->buf_size);

	if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
		dp_err("Frag alloc failed");
		DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
		return QDF_STATUS_E_NOMEM;
	}

	ret = qdf_mem_map_page(dp_soc->osdev,
			       (nbuf_frag_info_t->virt_addr).vaddr,
			       QDF_DMA_FROM_DEVICE,
			       rx_desc_pool->buf_size,
			       &nbuf_frag_info_t->paddr);

	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
		dp_err("Frag map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return QDF_STATUS_E_FAULT;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* DP_RX_MON_MEM_FRAG */

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
 * @soc: Datapath soc structure
 * @ring_num: Refill ring number
 * @hal_ring_hdl: HAL handle of the refill ring
 * @num_req: number of buffers requested for refill
 * @num_refill: number of buffers refilled
 *
 * Return: None
 */
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
	struct dp_refill_info_record *record;
	uint32_t idx;
	uint32_t tp;
	uint32_t hp;

	if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
			 !soc->rx_refill_ring_history[ring_num]))
		return;

	idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
					DP_RX_REFILL_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_refill_ring_history[ring_num]->entry[idx];

	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);
	record->timestamp = qdf_get_log_timestamp();
	record->num_req = num_req;
	record->num_refill = num_refill;
	record->hp = hp;
	record->tp = tp;
}
#else
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
}
#endif

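/*
 * Note: with WLAN_FEATURE_DP_RX_RING_HISTORY enabled, the entries
 * recorded above form a per-ring circular log of DP_RX_REFILL_HIST_MAX
 * records; each record snapshots the SW head/tail pointers together
 * with the requested vs. actually refilled buffer counts, which makes
 * it possible to reconstruct recent refill activity from a ramdump.
 */
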
failed"); 240 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 241 return QDF_STATUS_E_NOMEM; 242 } 243 244 ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool, 245 nbuf_frag_info_t); 246 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 247 dp_rx_buffer_pool_nbuf_free(dp_soc, 248 (nbuf_frag_info_t->virt_addr).nbuf, mac_id); 249 dp_err("nbuf map failed"); 250 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 251 return QDF_STATUS_E_FAULT; 252 } 253 254 nbuf_frag_info_t->paddr = 255 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 256 dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)( 257 (nbuf_frag_info_t->virt_addr).nbuf), 258 rx_desc_pool->buf_size, 259 true, __func__, __LINE__); 260 261 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 262 &nbuf_frag_info_t->paddr, 263 rx_desc_pool); 264 if (ret == QDF_STATUS_E_FAILURE) { 265 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 266 return QDF_STATUS_E_ADDRNOTAVAIL; 267 } 268 269 return QDF_STATUS_SUCCESS; 270 } 271 272 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 273 QDF_STATUS 274 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id, 275 struct dp_srng *dp_rxdma_srng, 276 struct rx_desc_pool *rx_desc_pool) 277 { 278 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 279 uint32_t count; 280 void *rxdma_ring_entry; 281 union dp_rx_desc_list_elem_t *next = NULL; 282 void *rxdma_srng; 283 qdf_nbuf_t nbuf; 284 qdf_dma_addr_t paddr; 285 uint16_t num_entries_avail = 0; 286 uint16_t num_alloc_desc = 0; 287 union dp_rx_desc_list_elem_t *desc_list = NULL; 288 union dp_rx_desc_list_elem_t *tail = NULL; 289 int sync_hw_ptr = 0; 290 291 rxdma_srng = dp_rxdma_srng->hal_srng; 292 293 if (qdf_unlikely(!dp_pdev)) { 294 dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id); 295 return QDF_STATUS_E_FAILURE; 296 } 297 298 if (qdf_unlikely(!rxdma_srng)) { 299 dp_rx_debug("%pK: rxdma srng not initialized", soc); 300 return QDF_STATUS_E_FAILURE; 301 } 302 303 hal_srng_access_start(soc->hal_soc, rxdma_srng); 304 305 num_entries_avail = hal_srng_src_num_avail(soc->hal_soc, 306 rxdma_srng, 307 sync_hw_ptr); 308 309 dp_rx_debug("%pK: no of available entries in rxdma ring: %d", 310 soc, num_entries_avail); 311 312 if (qdf_unlikely(num_entries_avail < 313 ((dp_rxdma_srng->num_entries * 3) / 4))) { 314 hal_srng_access_end(soc->hal_soc, rxdma_srng); 315 return QDF_STATUS_E_FAILURE; 316 } 317 318 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 319 num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id, 320 rx_desc_pool, 321 num_entries_avail, 322 &desc_list, 323 &tail); 324 325 if (!num_alloc_desc) { 326 dp_rx_err("%pK: no free rx_descs in freelist", soc); 327 DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail, 328 num_entries_avail); 329 hal_srng_access_end(soc->hal_soc, rxdma_srng); 330 return QDF_STATUS_E_NOMEM; 331 } 332 333 for (count = 0; count < num_alloc_desc; count++) { 334 next = desc_list->next; 335 qdf_prefetch(next); 336 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 337 if (qdf_unlikely(!nbuf)) { 338 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 339 break; 340 } 341 342 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 343 rx_desc_pool->buf_size); 344 345 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, 346 rxdma_srng); 347 qdf_assert_always(rxdma_ring_entry); 348 349 desc_list->rx_desc.nbuf = nbuf; 350 desc_list->rx_desc.rx_buf_start = nbuf->data; 351 desc_list->rx_desc.unmapped = 0; 352 353 /* rx_desc.in_use should be zero at this time*/ 354 
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *dp_rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next = NULL;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	uint16_t num_entries_avail = 0;
	uint16_t num_alloc_desc = 0;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 0;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		return QDF_STATUS_E_FAILURE;
	}

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
		    soc, num_entries_avail);

	if (qdf_unlikely(num_entries_avail <
			 ((dp_rxdma_srng->num_entries * 3) / 4))) {
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_FAILURE;
	}

	DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
	num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
						  rx_desc_pool,
						  num_entries_avail,
						  &desc_list,
						  &tail);

	if (!num_alloc_desc) {
		dp_rx_err("%pK: no free rx_descs in freelist", soc);
		DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
			     num_entries_avail);
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_NOMEM;
	}

	for (count = 0; count < num_alloc_desc; count++) {
		next = desc_list->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		desc_list->rx_desc.nbuf = nbuf;
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));
	/*
	 * add any available free desc back to the free list
	 */
	if (desc_list)
		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

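/*
 * The low-threshold check above only lets the routine proceed when at
 * least 3/4 of the refill ring is empty: e.g. for a ring with 4096
 * entries (an assumed size, for illustration), replenish happens only
 * when num_entries_avail >= 3072; otherwise the ring is considered
 * healthy enough and the function returns without consuming descriptors.
 */
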
QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_debug("%pK: requested %d buffers for replenish",
		    soc, num_req_buffers);

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < num_req_buffers; count++) {
		next = (*desc_list)->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);
		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
		if (!rxdma_ring_entry)
			break;

		qdf_assert_always(rxdma_ring_entry);

		(*desc_list)->rx_desc.nbuf = nbuf;
		(*desc_list)->rx_desc.rx_buf_start = nbuf->data;
		(*desc_list)->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

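/*
 * Hedged usage sketch (the caller-side variable names are assumptions):
 * a reap loop that has collected freed descriptors in desc_list/tail
 * would re-post them with
 *
 *	__dp_rx_buffers_no_map_replenish(soc, mac_id,
 *					 &soc->rx_refill_buf_ring[mac_id],
 *					 rx_desc_pool, num_reaped,
 *					 &desc_list, &tail);
 */
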
QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	uint32_t nr_descs = 0;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_debug("%pK: requested %d buffers for replenish",
		    soc, num_req_buffers);

	nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list,
					    &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < nr_descs; count++) {
		next = desc_list->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);
		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
		if (!rxdma_ring_entry)
			break;

		qdf_assert_always(rxdma_ring_entry);

		desc_list->rx_desc.nbuf = nbuf;
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

	return QDF_STATUS_SUCCESS;
}
#endif

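/*
 * All three no-map routines above issue a qdf_dsb() before
 * hal_srng_access_end(); the barrier is there so that the buffer address
 * writes into the ring entries are visible to the device before the head
 * pointer update exposes those entries to hardware.
 */
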
#ifdef DP_UMAC_HW_RESET_SUPPORT
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
					uint32_t buf_size)
{
	return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size);
}
#else
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
					uint32_t buf_size)
{
	return qdf_nbuf_get_frag_paddr(nbuf, 0);
}
#endif

/*
 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time
 *
 * @soc: core txrx main context
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @rx_desc: rx descriptor
 *
 * Return: void
 */
static inline
void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  struct dp_rx_desc *rx_desc)
{
	void *rxdma_srng;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	/* No one else should be accessing the srng at this point */
	hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng);

	rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

	qdf_assert_always(rxdma_ring_entry);
	rx_desc->in_err_state = 0;

	paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf,
					 rx_desc_pool->buf_size);
	hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr,
				     rx_desc->cookie, rx_desc_pool->owner);

	hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng);
}

/*
 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
 *
 * @soc: core txrx main context
 * @nbuf_list: nbuf list for delayed free
 *
 * Return: void
 */
void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int mac_id, i, j;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct dp_srng *dp_rxdma_srng =
					&soc->rx_refill_buf_ring[mac_id];
		struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
		uint32_t rx_sw_desc_num = rx_desc_pool->pool_size;
		/* Only fill up 1/3 of the ring size */
		uint32_t num_req_decs;

		if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng ||
		    !rx_desc_pool->array)
			continue;

		num_req_decs = dp_rxdma_srng->num_entries / 3;

		for (i = 0, j = 0; i < rx_sw_desc_num; i++) {
			struct dp_rx_desc *rx_desc =
				(struct dp_rx_desc *)&rx_desc_pool->array[i];

			if (rx_desc->in_use) {
				if (j < dp_rxdma_srng->num_entries) {
					dp_rx_desc_replenish(soc, dp_rxdma_srng,
							     rx_desc_pool,
							     rx_desc);
				} else {
					dp_rx_nbuf_unmap(soc, rx_desc, 0);
					rx_desc->unmapped = 0;

					rx_desc->nbuf->next = *nbuf_list;
					*nbuf_list = rx_desc->nbuf;

					dp_rx_add_to_free_desc_list(&head,
								    &tail,
								    rx_desc);
				}
				j++;
			}
		}

		if (head)
			dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
							 mac_id, rx_desc_pool);

		/* If num of descs in use were less, then we need to replenish
		 * the ring with some buffers
		 */
		head = NULL;
		tail = NULL;

		if (j < (num_req_decs - 1))
			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						((num_req_decs - 1) - j),
						&head, &tail, true);
	}
}
#endif

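/*
 * Summary of the UMAC reset path above: dp_rx_desc_reuse() re-posts up
 * to ring-size in-use descriptors back into the refill ring, moves any
 * overflow onto the delayed-free nbuf list, and finally tops the ring up
 * towards roughly 1/3 occupancy via dp_rx_buffers_replenish() when fewer
 * descriptors were in use.
 */
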
/*
 * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *				 called during dp rx initialization
 *				 and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @req_only: If true don't replenish more than req buffers
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				     struct dp_srng *dp_rxdma_srng,
				     struct rx_desc_pool *rx_desc_pool,
				     uint32_t num_req_buffers,
				     union dp_rx_desc_list_elem_t **desc_list,
				     union dp_rx_desc_list_elem_t **tail,
				     bool req_only, const char *func_name)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	void *rxdma_srng;
	union dp_rx_desc_list_elem_t *desc_list_append = NULL;
	union dp_rx_desc_list_elem_t *tail_append = NULL;
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_verbose_debug("%pK: requested %d buffers for replenish",
			 dp_soc, num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_verbose_debug("%pK: no of available entries in rxdma ring: %d",
			 dp_soc, num_entries_avail);

	if (!req_only && !(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
		DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	} else if ((*desc_list) &&
		   dp_rxdma_srng->num_entries - num_entries_avail <
		   CRITICAL_BUFFER_THRESHOLD) {
		/* Append some free descriptors to tail */
		num_alloc_desc =
			dp_rx_get_free_desc_list(dp_soc, mac_id,
						 rx_desc_pool,
						 CRITICAL_BUFFER_THRESHOLD,
						 &desc_list_append,
						 &tail_append);

		if (num_alloc_desc) {
			temp_list = *desc_list;
			*desc_list = desc_list_append;
			tail_append->next = temp_list;
			num_req_buffers += num_alloc_desc;

			DP_STATS_DEC(dp_pdev,
				     replenish.free_list,
				     num_alloc_desc);
		} else
			dp_err_rl("%pK: no free rx_descs in freelist", dp_soc);
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		dp_verbose_debug("%pK: %d rx desc allocated", dp_soc,
				 num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			ret = dp_pdev_frag_alloc_and_map(dp_soc,
							 &nbuf_frag_info,
							 dp_pdev,
							 rx_desc_pool);
		else
			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
					mac_id,
					num_entries_avail, &nbuf_frag_info,
					dp_pdev, rx_desc_pool);

		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
				continue;
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
					     &nbuf_frag_info);
		else
			dp_rx_desc_prep(&((*desc_list)->rx_desc),
					&nbuf_frag_info);

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;
		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
					   func_name, RX_DESC_REPLENISHED);
		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
				 nbuf_frag_info.virt_addr.nbuf,
				 (unsigned long long)(nbuf_frag_info.paddr),
				 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
					     nbuf_frag_info.paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
				       num_req_buffers, count);

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_schedule_refill_thread(dp_soc);

	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
			 count, num_desc_to_free);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(__dp_rx_buffers_replenish);

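/*
 * Illustrative call (mirroring the req_only=true usage seen in
 * dp_rx_desc_reuse() above; the dp_rx_buffers_replenish() wrapper is
 * assumed to forward __func__ as func_name):
 *
 *	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *				quota, &desc_list, &tail, false);
 */
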
/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @txrx_peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_txrx_peer *txrx_peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
					      qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

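/*
 * Note on the RAW path above: the whole chain is first linked into a
 * local delivery list, handed to osif_rsim_rx_decap() for 802.11 decap
 * by the RAW mode simulation, and only then passed to the regular
 * osif_rx() callback as a single list.
 */
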
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/*
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical
 * frame and marks the QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb
 * for the nbuf. Code for marking which frames are CRITICAL is accessed
 * via callback. EAPOL, ARP, DHCP, DHCPv6 and ICMPv6 NS/NA are the typical
 * critical frames.
 *
 * Return: None
 */
static
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
	if (vdev->tx_classify_critical_pkt_cb)
		vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
}
#else
static inline
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
	qdf_nbuf_set_queue_mapping(nbuf, ring_id);
}
#else
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
}
#endif

/*
 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @tid_stats: tid stats pointer
 *
 * Return: bool: true if it is forwarded else false
 */
bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			     struct cdp_tid_rx_stats *tid_stats)
{
	uint16_t len;
	qdf_nbuf_t nbuf_copy;

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
		return false;

	/* If the source peer is in the isolation list, then don't
	 * forward; instead push to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer))
		return false;

	nbuf_copy = qdf_nbuf_copy(nbuf);
	if (!nbuf_copy)
		return false;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);

	if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy,
						      tid_stats))
		return false;

	if (dp_tx_send((struct cdp_soc_t *)soc,
		       ta_peer->vdev->vdev_id, nbuf_copy)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		dp_rx_nbuf_free(nbuf_copy);
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len);
		tid_stats->intrabss_cnt++;
	}
	return false;
}

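/*
 * dp_rx_intrabss_mcbc_fwd() intentionally returns false on the normal
 * path even when the copy was transmitted: for multicast/broadcast the
 * original nbuf must still travel up the stack, so only the EAPOL drop
 * check returns true (meaning "consumed, stop processing").
 */
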
/*
 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @tx_vdev_id: VDEV ID for Intra-BSS TX
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @tid_stats: tid stats pointer
 *
 * Return: bool: true if it is forwarded else false
 */
bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			      uint8_t tx_vdev_id,
			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			      struct cdp_tid_rx_stats *tid_stats)
{
	uint16_t len;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/* linearize the nbuf just before we send to
	 * dp_tx_send()
	 */
	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
			return false;

		nbuf = qdf_nbuf_unshare(nbuf);
		if (!nbuf) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
						      rx.intra_bss.fail,
						      1, len);
			/* skb_unshare failed - the pkt cannot be
			 * forwarded; count the drop and let the
			 * caller continue with the next nbuf.
			 */
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			return false;
		}
	}

	qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);

	if (!dp_tx_send((struct cdp_soc_t *)soc,
			tx_vdev_id, nbuf)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len);
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		return false;
	}

	return true;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

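/*
 * By contrast with the mcbc variant above, dp_rx_intrabss_ucast_fwd()
 * returns true only when the frame was actually handed off to
 * dp_tx_send(); a false return tells the caller that the frame was not
 * consumed by the intra-BSS TX path.
 */
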
#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: pointer to peer
 *
 * This function allocates memory for the mesh receive stats, fills the
 * required stats and stores the memory address in the skb cb.
 *
 * Return: void
 */
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr,
			   struct dp_txrx_peer *txrx_peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;
	uint8_t primary_chan_num;
	uint32_t center_chan_freq;
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_peer *peer;
	struct dp_peer *primary_link_peer;
	struct dp_soc *link_peer_soc;
	cdp_peer_stats_param_t buf = {0};

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */

	if (!rx_info) {
		dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
			  vdev->pdev->soc);
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
	if (peer) {
		if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
			rx_info->rs_flags |= MESH_RX_DECRYPTED;
			rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
								  rx_tlv_hdr);
			if (vdev->osif_get_key)
				vdev->osif_get_key(vdev->osif_vdev,
						   &rx_info->rs_decryptkey[0],
						   &peer->mac_addr.raw[0],
						   rx_info->rs_keyix);
		}

		dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
	}

	primary_link_peer = dp_get_primary_link_peer_by_id(soc,
							   txrx_peer->peer_id,
							   DP_MOD_ID_MESH);

	if (qdf_likely(primary_link_peer)) {
		link_peer_soc = primary_link_peer->vdev->pdev->soc;
		dp_monitor_peer_get_stats_param(link_peer_soc,
						primary_link_peer,
						cdp_peer_rx_snr, &buf);
		rx_info->rs_snr = buf.rx_snr;
		dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
	}

	rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;

	soc = vdev->pdev->soc;
	primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
	center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;

	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
							soc->ctrl_psoc,
							vdev->pdev->pdev_id,
							center_chan_freq);
	}
	rx_info->rs_channel = primary_chan_num;
	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
	rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix,
		  rx_info->rs_snr);
}

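/*
 * The rs_ratephy1 word above packs four fields as
 * rate_mcs | (nss << 8) | (pkt_type << 16) | (bw << 24).
 * For example (illustrative values only): MCS 7, 2 spatial streams,
 * pkt_type 3 and bw code 2 would encode as
 * 0x7 | (2 << 8) | (3 << 16) | (2 << 24) = 0x02030207.
 */
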
/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any filter-out category
 * and drops the packet if it matches.
 *
 * Return: status(0 indicates drop, 1 indicates no drop)
 */
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;
	struct dp_soc *soc = vdev->pdev->soc;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						   rx_tlv_hdr) &&
			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
						   rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mac_id: mac_id which is one of 3 mac_ids (Assuming mac_id and
 *	    pool_id has same mapping)
 *
 * Return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = NULL;
	int i = 0;

	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
		dp_rx_debug("%pK: Drop decapped frames", soc);
		goto free;
	}

	/* In RAW packet, packet header will be part of data */
	rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		dp_rx_err("%pK: Invalid nbuf length", soc);
		goto free;
	}

	/* In DMAC case the rx_desc_pools are common across PDEVs
	 * so PDEV cannot be derived from the pool_id.
	 *
	 * link_id needs to be derived from the TLV tag word, which is
	 * disabled by default. For now, add a WAR to get the vdev by
	 * brute force; this needs to be fixed once word based
	 * subscription support is added by enabling the TLV tag word.
	 */
	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		for (i = 0; i < MAX_PDEV_CNT; i++) {
			pdev = soc->pdev_list[i];

			if (!pdev || qdf_unlikely(pdev->is_pdev_down))
				continue;

			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
				if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
						QDF_MAC_ADDR_SIZE) == 0) {
					goto out;
				}
			}
		}
	} else {
		pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

		if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
			dp_rx_err("%pK: PDEV %s",
				  soc, !pdev ? "not found" : "down");
			goto free;
		}

		if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
		    QDF_STATUS_SUCCESS)
			return 0;

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		dp_rx_err("%pK: VDEV not found", soc);
		goto free;
	}
out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;

	/*
	 * NOTE: Only valid for HKv1.
	 * If smart monitor mode is enabled on RE, we are getting invalid
	 * peer frames with RA as STA mac of RE and the TA not matching
	 * with any NAC list or the BSSID. Such frames need to be dropped
	 * in order to avoid HM_WDS false addition.
	 */
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
		if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
			dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
				   soc, wh->i_addr1);
			goto free;
		}
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
				pdev->pdev_id, &msg);
	}

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 * @mac_id: mac_id which is one of 3 mac_ids (Assuming mac_id and
 *	    pool_id has same mapping)
 *
 * Return: None
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		dp_rx_info_rl("%pK: Invalid nbuf length", soc);
		goto free;
	}

	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (!pdev) {
		dp_rx_info_rl("%pK: PDEV not found", soc);
		goto free;
	}

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
				QDF_MAC_ADDR_SIZE) == 0) {
			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
			goto out;
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	if (!vdev) {
		dp_rx_info_rl("%pK: VDEV not found", soc);
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	/* Reset the head and tail pointers */
	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (pdev) {
		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#endif

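/*
 * Both dp_rx_process_invalid_peer() variants above end by freeing the
 * whole mpdu chain at the free label: whichever branch is compiled in,
 * ownership of the nbufs stays with the datapath, and the umac/ol_ops
 * callback only gets to inspect the 802.11 header (or the pulled mpdu)
 * before the buffers are released.
 */
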
#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @soc: dp soc handle
 * @msdu: MSDU for which the offload info is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(struct dp_soc *soc,
				     qdf_nbuf_t msdu)
{
	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_verbose_debug("lro_eligible 0x%x",
			 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
	dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
	dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
	dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu));
	dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu));
	dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
	dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
	dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
	dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
	dp_verbose_debug("---------------------------------------------------------");
}

/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
 *
 * Return: None
 */
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
	struct hal_offload_info offload_info;

	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
		return;

	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
					  rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num;
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num;
	QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
	QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;

	dp_rx_print_offload_info(soc, msdu);
}
#endif /* RECEIVE_OFFLOAD */

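/*
 * The QDF_NBUF_CB_RX_* fields populated by dp_rx_fill_gro_info() live in
 * skb->cb and are consumed later, typically by the GRO/LRO aggregation
 * logic in the OS shim; nothing here touches the packet payload, only
 * per-packet metadata is cached for the aggregation decision.
 */
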
/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 * @l3_pad_len: L3 padding length by HW
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
					 qdf_nbuf_t nbuf,
					 uint16_t *mpdu_len,
					 uint32_t l3_pad_len)
{
	bool last_nbuf;
	uint32_t pkt_hdr_size;

	pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len;

	if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) {
		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
		last_nbuf = false;
		*mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size);
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size));
		last_nbuf = true;
		*mpdu_len = 0;
	}

	return last_nbuf;
}

/**
 * dp_get_l3_hdr_pad_len() - get L3 header padding length.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 *
 * Return: returns padding length in bytes.
 */
static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
					     qdf_nbuf_t nbuf)
{
	uint32_t l3_hdr_pad = 0;
	uint8_t *rx_tlv_hdr;
	struct hal_rx_msdu_metadata msdu_metadata;

	while (nbuf) {
		if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			/* scattered msdu end with continuation is 0 */
			rx_tlv_hdr = qdf_nbuf_data(nbuf);
			hal_rx_msdu_metadata_get(soc->hal_soc,
						 rx_tlv_hdr,
						 &msdu_metadata);
			l3_hdr_pad = msdu_metadata.l3_hdr_pad;
			break;
		}
		nbuf = nbuf->next;
	}

	return l3_hdr_pad;
}

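/*
 * Worked example for dp_rx_adjust_nbuf_len() (all sizes assumed for
 * illustration): with RX_DATA_BUFFER_SIZE = 2048, rx_pkt_tlv_size = 128
 * and l3_pad_len = 2, an mpdu_len of 5000 is trimmed as
 *	1st buf: pktlen 2048, mpdu_len -> 5000 - (2048 - 130) = 3082
 * and subsequent buffers (called with l3_pad_len = 0) each consume
 * 2048 - 128 bytes until the remainder fits and last_nbuf becomes true.
 */
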
/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @soc: DP SOC handle
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	qdf_nbuf_t parent, frag_list, next = NULL;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;
	uint32_t l3_hdr_pad_offset = 0;

	/*
	 * Use msdu len got from REO entry descriptor instead since
	 * there is case the RX PKT TLV is corrupted while msdu_len
	 * from REO descriptor is right for non-raw RX scatter msdu.
	 */
	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		return nbuf;
	}

	l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);
	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	/*
	 * L3 header padding is only needed for the 1st buffer
	 * in a scattered msdu
	 */
	last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
					  l3_hdr_pad_offset);

	/*
	 * MSDU cont bit is set but reported MPDU length can fit
	 * in to single buffer
	 *
	 * Increment error stats and avoid SG list creation
	 */
	if (last_nbuf) {
		DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
		qdf_nbuf_pull_head(parent,
				   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
		return parent;
	}

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
		qdf_nbuf_pull_head(nbuf,
				   soc->rx_pkt_tlv_size);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		} else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			dp_err("Invalid packet length\n");
			qdf_assert_always(0);
		}
		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent,
			   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
	return parent;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

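/*
 * After dp_rx_sg_create() succeeds, the parent nbuf carries the first
 * buffer's payload (TLVs and L3 pad already pulled) and the remaining
 * buffers hang off it as an ext/frag list with their TLV headers
 * stripped, so the stack sees one logical MSDU of the full REO length.
 */
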
#ifdef QCA_PEER_EXT_STATS
/*
 * dp_rx_compute_tid_delay - Compute per TID delay stats
 * @stats: delay tid stats to be updated
 * @nbuf: NBuffer
 *
 * Return: Void
 */
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf)
{
	struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);

	dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
}
#endif /* QCA_PEER_EXT_STATS */

/**
 * dp_rx_compute_delay() - Compute and fill in all timestamps
 *			   to pass in correct fields
 *
 * @vdev: vdev handle
 * @nbuf: rx nbuf
 *
 * Return: none
 */
void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint32_t interframe_delay =
		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
	struct cdp_tid_rx_stats *rstats =
		&vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];

	dp_update_delay_stats(NULL, rstats, to_stack, tid,
			      CDP_DELAY_STATS_REAP_STACK, ring_id, false);
	/*
	 * Update interframe delay stats calculated at deliver_data_ol point.
	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
	 * interframe delay will not be calculated correctly for 1st frame.
	 * On the other side, this will help in avoiding extra per packet check
	 * of vdev->prev_rx_deliver_tstamp.
	 */
	dp_update_delay_stats(NULL, rstats, interframe_delay, tid,
			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false);
	vdev->prev_rx_deliver_tstamp = current_ts;
}

/**
 * dp_rx_drop_nbuf_list() - drop an nbuf list
 * @pdev: dp pdev reference
 * @buf_list: buffer list to be dropped
 *
 * Return: int (number of bufs dropped)
 */
static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
				       qdf_nbuf_t buf_list)
{
	struct cdp_tid_rx_stats *stats = NULL;
	uint8_t tid = 0, ring_id = 0;
	int num_dropped = 0;
	qdf_nbuf_t buf, next_buf;

	buf = buf_list;
	while (buf) {
		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
		next_buf = qdf_nbuf_queue_next(buf);
		tid = qdf_nbuf_get_tid_val(buf);
		if (qdf_likely(pdev)) {
			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
			stats->fail_cnt[INVALID_PEER_VDEV]++;
			stats->delivered_to_stack--;
		}
		dp_rx_nbuf_free(buf);
		buf = next_buf;
		num_dropped++;
	}

	return num_dropped;
}

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
 * @soc: core txrx main context
 * @vdev: vdev
 * @txrx_peer: txrx peer
 * @nbuf_head: skb list head
 *
 * Return: true if packet is delivered to netdev per STA.
 */
static inline bool
dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
{
	/*
	 * When extended WDS is disabled, frames are sent to AP netdevice.
	 */
	if (qdf_likely(!vdev->wds_ext_enabled))
		return false;

	/*
	 * There can be 2 cases:
	 * 1. Send frame to parent netdev if it's not for netdev per STA
	 * 2. If frame is meant for netdev per STA:
	 *    a. Send frame to appropriate netdev using registered fp.
	 *    b. If fp is NULL, drop the frames.
	 */
	if (!txrx_peer->wds_ext.init)
		return false;

	if (txrx_peer->osif_rx)
		txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head);
	else
		dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);

	return true;
}

#else
static inline bool
dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
{
	return false;
}
#endif

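/*
 * Delivery decision summary for dp_rx_deliver_to_stack_ext(): a true
 * return means the list was fully consumed here (either handed to the
 * per-STA netdev via txrx_peer->osif_rx or dropped), while false tells
 * the caller to continue with the regular vdev-level delivery path.
 */
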
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: flag to drop frames or forward them to the net stack
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
	struct dp_peer_cached_bufq *bufqi;
	struct dp_rx_cached_buf *cache_buf = NULL;
	ol_txrx_rx_fp data_rx = NULL;
	int num_buff_elem;
	QDF_STATUS status;

	/*
	 * Flush dp cached frames only for mld peers and legacy peers, as
	 * link peers don't store cached frames
	 */
	if (IS_MLO_DP_LINK_PEER(peer))
		return;

	if (!peer->txrx_peer) {
		dp_err("txrx_peer NULL!! peer mac_addr("QDF_MAC_ADDR_FMT")",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}

	if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) {
		qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
		return;
	}

	qdf_spin_lock_bh(&peer->peer_info_lock);
	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
		data_rx = peer->vdev->osif_rx;
	else
		drop = true;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	bufqi = &peer->txrx_peer->bufq_info;

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_remove_front(&bufqi->cached_bufq,
			      (qdf_list_node_t **)&cache_buf);
	while (cache_buf) {
		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
								cache_buf->buf);
		bufqi->entries -= num_buff_elem;
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		if (drop) {
			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
							      cache_buf->buf);
		} else {
			/* Flush the cached frames to OSIF DEV */
			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
			if (status != QDF_STATUS_SUCCESS)
				bufqi->dropped = dp_rx_drop_nbuf_list(
							peer->vdev->pdev,
							cache_buf->buf);
		}
		qdf_mem_free(cache_buf);
		cache_buf = NULL;
		qdf_spin_lock_bh(&bufqi->bufq_lock);
		qdf_list_remove_front(&bufqi->cached_bufq,
				      (qdf_list_node_t **)&cache_buf);
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);
	qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
}

/**
 * dp_rx_enqueue_rx() - cache rx frames
 * @txrx_peer: txrx peer
 * @rx_buf_list: cache buffer list
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
{
	struct dp_rx_cached_buf *cache_buf;
	struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info;
	int num_buff_elem;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
						     DP_MOD_ID_RX);

	if (!peer) {
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		return QDF_STATUS_E_INVAL;
	}

	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
		    bufqi->dropped);
	if (!peer->valid) {
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		ret = QDF_STATUS_E_INVAL;
		goto fail;
	}

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	if (bufqi->entries >= bufqi->thresh) {
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		ret = QDF_STATUS_E_RESOURCES;
		goto fail;
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);

	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
	if (!cache_buf) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to allocate buf to cache rx frames");
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		ret = QDF_STATUS_E_NOMEM;
		goto fail;
	}

	cache_buf->buf = rx_buf_list;

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_insert_back(&bufqi->cached_bufq,
			     &cache_buf->node);
	bufqi->entries += num_buff_elem;
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_RX);
	return ret;
}
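/*
 * Caller-side sketch for the PEER_CACHE_RX_PKTS pair above (illustrative
 * only, compiled out; the 'example_*' names are hypothetical). Frames that
 * arrive before the OS rx callback is registered are parked with
 * dp_rx_enqueue_rx() and later replayed, or dropped, by
 * dp_rx_flush_rx_cached():
 */
#if 0
static void example_rx_no_cb_yet(struct dp_txrx_peer *txrx_peer,
				 qdf_nbuf_t nbuf_list)
{
	/* Park the list against the peer, bounded by bufqi->thresh;
	 * on failure the list has already been dropped and accounted.
	 */
	dp_rx_enqueue_rx(txrx_peer, nbuf_list);
}

static void example_peer_state_change(struct dp_peer *peer)
{
	/* Once the peer is connected, replay the cached frames */
	dp_rx_flush_rx_cached(peer, false);
}
#endif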
static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return true;
}
#else
static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return false;
}

static inline QDF_STATUS
dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifndef DELIVERY_TO_STACK_STATUS_CHECK
/**
 * dp_rx_check_delivery_to_stack() - Deliver pkts to network
 *				     using the appropriate call back functions.
 * @soc: soc
 * @vdev: vdev
 * @txrx_peer: txrx peer
 * @nbuf_head: skb list head
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
					  struct dp_vdev *vdev,
					  struct dp_txrx_peer *txrx_peer,
					  qdf_nbuf_t nbuf_head)
{
	if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
						    txrx_peer, nbuf_head)))
		return;

	/* Function pointer initialized only when FISA is enabled */
	if (vdev->osif_fisa_rx)
		/* on failure send it via regular path */
		vdev->osif_fisa_rx(soc, vdev, nbuf_head);
	else
		vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}

#else
/**
 * dp_rx_check_delivery_to_stack() - Deliver pkts to network
 *				     using the appropriate call back functions.
 * @soc: soc
 * @vdev: vdev
 * @txrx_peer: txrx peer
 * @nbuf_head: skb list head
 *
 * Check the return status of the call back function and drop
 * the packets if the return status indicates a failure.
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
					  struct dp_vdev *vdev,
					  struct dp_txrx_peer *txrx_peer,
					  qdf_nbuf_t nbuf_head)
{
	int num_nbuf = 0;
	QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;

	/* Function pointer initialized only when FISA is enabled */
	if (vdev->osif_fisa_rx)
		/* on failure send it via regular path */
		ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
	else if (vdev->osif_rx)
		ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);

	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
		if (txrx_peer)
			DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num,
					       num_nbuf);
	}
}
#endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
/**
 * dp_rx_validate_rx_callbacks() - validate rx callbacks
 * @soc: DP soc
 * @vdev: DP vdev handle
 * @txrx_peer: pointer to the txrx peer object
 * @nbuf_head: skb list head
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_FAILURE
 */
static inline QDF_STATUS
dp_rx_validate_rx_callbacks(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf_head)
{
	int num_nbuf;

	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
		/*
		 * This is a special case where vdev is invalid,
		 * so we cannot know the pdev to which this packet
		 * belonged. Hence we update the soc rx error stats.
		 */
		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
		return QDF_STATUS_E_FAILURE;
	}

	/*
	 * It is highly unlikely to have a vdev without a registered rx
	 * callback function. If so, let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) {
			dp_rx_enqueue_rx(txrx_peer, nbuf_head);
		} else {
			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
							nbuf_head);
			DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf,
					      vdev->pdev->enhanced_stats_en);
		}
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_txrx_peer *txrx_peer,
				  qdf_nbuf_t nbuf_head,
				  qdf_nbuf_t nbuf_tail)
{
	if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
					QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail);
	}

	dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head);

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
					struct dp_vdev *vdev,
					struct dp_txrx_peer *txrx_peer,
					qdf_nbuf_t nbuf_head,
					qdf_nbuf_t nbuf_tail)
{
	if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
					QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head);

	return QDF_STATUS_SUCCESS;
}
#endif
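/*
 * Delivery call-flow sketch for dp_rx_deliver_to_stack() above
 * (illustrative, compiled out): the caller hands over a frame list reaped
 * from a REO ring; validation may already have cached or dropped the list.
 */
#if 0
	if (dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
				   nbuf_head, nbuf_tail) !=
	    QDF_STATUS_SUCCESS) {
		/*
		 * Callbacks were invalid: dp_rx_validate_rx_callbacks()
		 * has already dropped the list or cached it on the peer,
		 * so there is nothing left for the caller to free.
		 */
	}
#endif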
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef VDEV_PEER_PROTOCOL_COUNT
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \
{ \
	qdf_nbuf_t nbuf_local; \
	struct dp_txrx_peer *txrx_peer_local; \
	struct dp_vdev *vdev_local = vdev_hdl; \
	do { \
		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
			break; \
		nbuf_local = nbuf; \
		txrx_peer_local = txrx_peer; \
		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
			break; \
		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
			break; \
		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
						       (nbuf_local), \
						       (txrx_peer_local), \
						       0, 1); \
	} while (0); \
}
#else
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer)
#endif

#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_rx_rates_stats_update() - update rate stats
 *				from rx msdu.
 * @soc: datapath soc handle
 * @nbuf: received msdu buffer
 * @rx_tlv_hdr: rx tlv header
 * @txrx_peer: datapath txrx_peer handle
 * @sgi: Short Guard Interval
 * @mcs: Modulation and Coding Set
 * @nss: Number of Spatial Streams
 * @bw: BandWidth
 * @pkt_type: Corresponds to preamble
 *
 * To record rates precisely, the following factors are considered:
 * - Exclude specific frames: ARP, DHCP, ssdp, etc.
 * - Affect rx throughput as little as possible.
 *
 * Return: void
 */
static void
dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			 uint32_t sgi, uint32_t mcs,
			 uint32_t nss, uint32_t bw, uint32_t pkt_type)
{
	uint32_t rix;
	uint16_t ratecode;
	uint32_t avg_rx_rate;
	uint32_t ratekbps;
	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;

	if (soc->high_throughput ||
	    dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) {
		return;
	}

	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs);

	/* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */
	if (qdf_unlikely(pkt_type == DOT11_B))
		nss = 1;

	/* here pkt_type corresponds to preamble */
	ratekbps = dp_getrateindex(sgi,
				   mcs,
				   nss - 1,
				   pkt_type,
				   bw,
				   punc_mode,
				   &rix,
				   &ratecode);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps);
	avg_rx_rate =
		dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate,
				ratekbps);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type);
}
#else
static inline void
dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			 uint32_t sgi, uint32_t mcs,
			 uint32_t nss, uint32_t bw, uint32_t pkt_type)
{
}
#endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
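/*
 * The running average above comes from dp_ath_rate_lpf(), whose window is
 * defined elsewhere. A minimal sketch of such a low-pass filter, assuming
 * a window of 16 samples (the constant here is an assumption for
 * illustration, not the driver's actual value):
 */
#if 0
static inline uint32_t example_rate_lpf(uint32_t avg, uint32_t sample)
{
	/* avg' = (15 * avg + sample) / 16; seed with the first sample */
	return avg ? ((avg * 15) + sample) / 16 : sample;
}
#endif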
#ifndef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer
 *
 * @soc: datapath soc handle
 * @nbuf: received msdu buffer
 * @rx_tlv_hdr: rx tlv header
 * @txrx_peer: datapath txrx_peer handle
 *
 * Return: void
 */
static inline
void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  uint8_t *rx_tlv_hdr,
				  struct dp_txrx_peer *txrx_peer)
{
	bool is_ampdu;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	uint8_t dst_mcs_idx;

	/*
	 * TODO - For KIWI this field is present in ring_desc
	 * Try to use ring desc instead of tlv.
	 */
	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
	mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
	tid = qdf_nbuf_get_tid_val(nbuf);
	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
	/* do HW to SW pkt type conversion */
	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
		    hal_2_dp_pkt_type_map[pkt_type]);

	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1,
				((mcs < MAX_MCS) &&
				 QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
				((mcs >= MAX_MCS) &&
				 QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1);
	/*
	 * only if nss > 0 and pkt_type is 11N/AC/AX,
	 * then increase index [nss - 1] in array counter.
	 */
	if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type))
		DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1);

	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1,
				   hal_rx_tlv_mic_err_get(soc->hal_soc,
							  rx_tlv_hdr));
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1,
				   hal_rx_tlv_decrypt_err_get(soc->hal_soc,
							      rx_tlv_hdr));

	DP_PEER_EXTD_STATS_INC(txrx_peer,
			       rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_PEER_EXTD_STATS_INC(txrx_peer,
			       rx.reception_type[reception_type], 1);

	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
		DP_PEER_EXTD_STATS_INC(txrx_peer,
				       rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
				       1);

	dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
				 sgi, mcs, nss, bw, pkt_type);
}
#else
static inline
void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  uint8_t *rx_tlv_hdr,
				  struct dp_txrx_peer *txrx_peer)
{
}
#endif

#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
static inline void
dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
			       qdf_nbuf_t nbuf)
{
	uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf);

	if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) {
		dp_err_rl("Invalid lmac_id: %u vdev_id: %u",
			  lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf));

		if (qdf_likely(txrx_peer))
			dp_err_rl("peer_id: %u", txrx_peer->peer_id);

		return;
	}

	/* only count stats per lmac for MLO connection */
	DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1,
				       QDF_NBUF_CB_RX_PKT_LEN(nbuf),
				       txrx_peer->mld_peer);
}
#else
static inline void
dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
			       qdf_nbuf_t nbuf)
{
}
#endif
/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @txrx_peer: pointer to the txrx peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 * @tid_stats: per tid rx stats.
 *
 * Update all the per-msdu stats for that nbuf.
 *
 * Return: void
 */
void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr,
			     struct dp_txrx_peer *txrx_peer,
			     uint8_t ring_id,
			     struct cdp_tid_rx_stats *tid_stats)
{
	bool is_not_amsdu;
	struct dp_vdev *vdev = txrx_peer->vdev;
	bool enh_flag;
	qdf_ether_header_t *eh;
	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer);
	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
		       qdf_nbuf_is_rx_chfrag_end(nbuf);
	DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1,
				      msdu_len);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1,
				   is_not_amsdu);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1,
				   qdf_nbuf_is_rx_retry_flag(nbuf));
	dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf);
	tid_stats->msdu_cnt++;
	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		enh_flag = vdev->pdev->enhanced_stats_en;
		DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
		tid_stats->mcast_msdu_cnt++;
		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
			DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
			tid_stats->bcast_msdu_cnt++;
		}
	}

	txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks();

	dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer);
}

#ifndef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer)
{
	return 1;
}
#endif

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_nbuf_sanity_check() - Add sanity check to catch REO rx_desc
 *				    paddr corruption
 * @soc: DP soc
 * @ring_desc: REO ring descriptor
 * @rx_desc: Rx descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_desc_nbuf_len_sanity_check() - Add sanity check to catch Rx buffer
 *					out-of-bound access from H.W
 * @soc: DP soc
 * @pkt_len: Packet length received from H.W
 *
 * Return: None
 */
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
				 uint32_t pkt_len)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[0];
	qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
}
#else
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
#endif
#ifdef DP_RX_PKT_NO_PEER_DELIVER
#ifdef DP_RX_UDP_OVER_PEER_ROAM
/**
 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received
 *					   during roaming
 * @vdev: dp_vdev pointer
 * @rx_tlv_hdr: rx tlv header
 * @nbuf: pkt skb pointer
 *
 * This function checks whether rx udp data is received from an authorised
 * roamed peer before the peer map indication is received from FW after
 * roaming. This is needed for VoIP scenarios in which the expected packet
 * loss during roaming is minimal.
 *
 * Return: bool
 */
static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
						uint8_t *rx_tlv_hdr,
						qdf_nbuf_t nbuf)
{
	char *hdr_desc;
	struct ieee80211_frame *wh = NULL;

	hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc,
					     rx_tlv_hdr);
	wh = (struct ieee80211_frame *)hdr_desc;

	if (vdev->roaming_peer_status ==
	    WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED &&
	    !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2,
			 QDF_MAC_ADDR_SIZE) &&
	    (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
	     qdf_nbuf_is_ipv6_udp_pkt(nbuf)))
		return true;

	return false;
}
#else
static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
						uint8_t *rx_tlv_hdr,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif
/**
 * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
 *				      no corresponding peer is found
 * @soc: core txrx main context
 * @nbuf: pkt skb pointer
 *
 * This function tries to deliver some RX special frames to the stack
 * even when no matching peer is found. For instance, in the LFR case,
 * some EAPOL data will be sent to the host before peer_map is done.
 *
 * Return: None
 */
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_vdev *vdev = NULL;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t pkt_len = 0;
	uint8_t *rx_tlv_hdr;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
			      FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peer_id)
		goto deliver_fail;

	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
		goto deliver_fail;

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
		goto deliver_fail;

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;

	qdf_nbuf_set_pktlen(nbuf, pkt_len);
	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);

	if (dp_rx_is_special_frame(nbuf, frame_mask) ||
	    dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, nbuf)) {
		qdf_nbuf_set_exc_frame(nbuf, 1);
		if (QDF_STATUS_SUCCESS !=
		    vdev->osif_rx(vdev->osif_vdev, nbuf))
			goto deliver_fail;
		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
		return;
	}

deliver_fail:
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	dp_rx_nbuf_free(nbuf);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
}
#else
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	dp_rx_nbuf_free(nbuf);
}
#endif
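/*
 * Worked example of the header stripping above (illustrative numbers; the
 * actual rx_pkt_tlv_size is chip specific): with rx_pkt_tlv_size = 128,
 * l2_hdr_offset = 2 and msdu_len = 66, the nbuf is first sized to
 * pkt_len = 66 + 2 + 128 = 196 bytes, then qdf_nbuf_pull_head() discards
 * the leading 130 bytes of TLVs plus pad so the frame handed to osif_rx
 * starts at the MAC/ethernet header the stack expects.
 */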
/**
 * dp_rx_srng_get_num_pending() - get number of pending entries
 * @hal_soc: hal soc opaque pointer
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
 * @num_entries: number of entries in the hal_ring.
 * @near_full: pointer to a boolean. This is set if ring is near full.
 *
 * The function returns the number of entries in a destination ring which are
 * yet to be reaped. The function also checks if the ring is near full.
 * If more than half of the ring needs to be reaped, the ring is considered
 * approaching full.
 * The function uses hal_srng_dst_num_valid_locked() to get the number of
 * valid entries. It should not be called within a SRNG lock. The HW pointer
 * value is synced into cached_hp.
 *
 * Return: Number of pending entries if any
 */
uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full)
{
	uint32_t num_pending = 0;

	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
						    hal_ring_hdl,
						    true);

	if (num_entries && (num_pending >= num_entries >> 1))
		*near_full = true;
	else
		*near_full = false;

	return num_pending;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef WLAN_SUPPORT_RX_FISA
void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}
#else
void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef DP_RX_DROP_RAW_FRM
/**
 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
 * @nbuf: pkt skb pointer
 *
 * Return: true - raw frame, dropped
 *	   false - not raw frame, do nothing
 */
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
{
	if (qdf_nbuf_is_raw_frame(nbuf)) {
		dp_rx_nbuf_free(nbuf);
		return true;
	}

	return false;
}
#endif
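/*
 * Near-full worked example for dp_rx_srng_get_num_pending() above: with
 * num_entries = 1024, *near_full is set once num_pending >= 512
 * (num_entries >> 1), i.e. once at least half the ring is still waiting
 * to be reaped; callers can use this to switch to a larger reap budget.
 */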
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
 * @soc: Datapath soc structure
 * @ring_num: REO ring number
 * @ring_desc: REO ring descriptor
 *
 * Return: None
 */
void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
	struct dp_buf_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
		return;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
				  &hbi);

	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
					DP_RX_HIST_MAX);

	/* No NULL check needed for record since its an array */
	record = &soc->rx_ring_history[ring_num]->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = hbi.paddr;
	record->hbi.sw_cookie = hbi.sw_cookie;
	record->hbi.rbm = hbi.rbm;
}
#endif

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_rx_update_stats() - Update soc level rx packet count
 * @soc: DP soc handle
 * @nbuf: nbuf received
 *
 * Return: none
 */
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.ingress, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
}
#endif

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @is_offload: offload indication
 * @netbuf: Buffer pointer
 *
 * This function is used to deliver rx packet to packet capture
 */
void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
				  uint16_t peer_id, uint32_t is_offload,
				  qdf_nbuf_t netbuf)
{
	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
				     peer_id, is_offload, pdev->pdev_id);
}

void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint32_t is_offload)
{
	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER,
				     soc, nbuf, HTT_INVALID_VDEV,
				     is_offload, 0);
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
{
	QDF_STATUS ret;

	if (vdev->osif_rx_flush) {
		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
		if (!QDF_IS_STATUS_SUCCESS(ret)) {
			dp_err("Failed to flush rx pkts for vdev %d\n",
			       vdev->vdev_id);
			return ret;
		}
	}

	return QDF_STATUS_SUCCESS;
}
static QDF_STATUS
dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).nbuf =
		qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
			       RX_BUFFER_RESERVATION,
			       rx_desc_pool->buf_alignment, FALSE);
	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
		dp_err("nbuf alloc failed");
		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
		return ret;
	}

	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
					 (nbuf_frag_info_t->virt_addr).nbuf,
					 QDF_DMA_FROM_DEVICE,
					 rx_desc_pool->buf_size);

	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
		dp_err("nbuf map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return ret;
	}

	nbuf_frag_info_t->paddr =
		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);

	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
			     &nbuf_frag_info_t->paddr,
			     rx_desc_pool);
	if (ret == QDF_STATUS_E_FAILURE) {
		dp_err("nbuf check x86 failed");
		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
		return ret;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;
	struct dp_rx_nbuf_frag_info *nf_info;
	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
	uint32_t buffer_index, nbuf_ptrs_per_page;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int page_idx, total_pages;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 1;
	uint32_t num_entries_avail;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);
	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	if (!num_entries_avail) {
		dp_err("Num of available entries is zero, nothing to do");
		return QDF_STATUS_E_NOMEM;
	}

	if (num_entries_avail < num_req_buffers)
		num_req_buffers = num_entries_avail;

	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list,
					    &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);

	/*
	 * Try to allocate pointers to the nbuf one page at a time.
	 * Take pointers that can fit in one page of memory and
	 * iterate through the total descriptors that need to be
	 * allocated in order of pages. Reuse the pointers that
	 * have been allocated to fit in one page across each
	 * iteration to index into the nbuf.
	 */
	total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE;

	/*
	 * Add an extra page to store the remainder if any
	 */
	if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE)
		total_pages++;
	nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE);
	if (!nf_info) {
		dp_err("failed to allocate nbuf array");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}
	nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info);

	for (page_idx = 0; page_idx < total_pages; page_idx++) {
		qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE);

		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
			/*
			 * The last page of buffer pointers may not be required
			 * completely based on the number of descriptors. Below
			 * check will ensure we are allocating only the
			 * required number of descriptors.
			 */
			if (nr_nbuf_total >= nr_descs)
				break;
			/* Flag is set while pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				ret = dp_pdev_frag_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			else
				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			if (QDF_IS_STATUS_ERROR(ret))
				break;

			nr_nbuf_total++;
		}

		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
			rxdma_ring_entry =
				hal_srng_src_get_next(dp_soc->hal_soc,
						      rxdma_srng);
			qdf_assert_always(rxdma_ring_entry);

			next = desc_list->next;
			paddr = nf_info[buffer_index].paddr;
			nbuf = nf_info[buffer_index].virt_addr.nbuf;

			/* Flag is set while pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				dp_rx_desc_frag_prep(&desc_list->rx_desc,
						     &nf_info[buffer_index]);
			else
				dp_rx_desc_prep(&desc_list->rx_desc,
						&nf_info[buffer_index]);
			desc_list->rx_desc.in_use = 1;
			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
						   __func__,
						   RX_DESC_REPLENISHED);

			hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
						     rxdma_ring_entry, paddr,
						     desc_list->rx_desc.cookie,
						     rx_desc_pool->owner);

			dp_ipa_handle_rx_buf_smmu_mapping(
					dp_soc, nbuf,
					rx_desc_pool->buf_size, true,
					__func__, __LINE__);

			desc_list = next;
		}

		dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id,
					       rxdma_srng, nr_nbuf, nr_nbuf);
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
	}

	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
	qdf_mem_free(nf_info);

	if (!nr_nbuf_total) {
		dp_err("No nbufs allocated");
		QDF_BUG(0);
		return QDF_STATUS_E_RESOURCES;
	}

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_pdev_rx_buffers_attach);
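/*
 * Paging math above, with illustrative numbers: assuming
 * sizeof(*nf_info) = 16 and DP_BLOCKMEM_SIZE = 4096 (both values are
 * platform dependent), nr_descs = 1024 needs 16384 bytes of pointers,
 * i.e. total_pages = 4 with no remainder, and nbuf_ptrs_per_page =
 * 4096 / 16 = 256, so the single scratch page is reused across 4
 * iterations to cover all 1024 descriptors.
 */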
/**
 * dp_rx_enable_mon_dest_frag() - Enable frag processing for
 *				  monitor destination ring via frag.
 * @rx_desc_pool: Rx desc pool
 * @is_mon_dest_desc: Is it for monitor dest buffer
 *
 * Enable this flag only for monitor destination buffer processing
 * if the DP_RX_MON_MEM_FRAG feature is enabled.
 * If the flag is set, the frag based functions will be called for alloc,
 * map, prep desc and free ops for the desc buffer; otherwise the normal
 * nbuf based functions will be called.
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
}
#else
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
}
#endif

qdf_export_symbol(dp_rx_enable_mon_dest_frag);

/*
 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor
 *				  pool
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;

	mac_for_pdev = pdev->lmac_id;
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return status;
	}

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
	status = dp_rx_desc_pool_alloc(soc,
				       rx_sw_desc_num,
				       rx_desc_pool);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	return status;
}

/*
 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
 * @pdev: core txrx pdev context
 */
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}
/*
 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		/*
		 * If NSS is enabled, rx_desc_pool is already filled.
		 * Hence, just disable desc_pool frag flag.
		 */
		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return QDF_STATUS_SUCCESS;
	}

	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
		return QDF_STATUS_E_NOMEM;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	rx_sw_desc_num =
		wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc);
	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/* Disable monitor dest processing via frag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_for_pdev,
			     rx_sw_desc_num, rx_desc_pool);
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
 * @pdev: core txrx pdev context
 *
 * This function resets the freelist of rx descriptors and destroys locks
 * associated with this list of descriptors.
 */
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev);
}

/*
 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rxdma_entries;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	/* Initialize RX buffer pool which will be
	 * used during low memory conditions
	 */
	dp_rx_buffer_pool_init(soc, mac_for_pdev);

	return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev,
						dp_rxdma_srng,
						rx_desc_pool,
						rxdma_entries - 1);
}

/*
 * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
 * @pdev: core txrx pdev context
 */
void
dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
}
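/*
 * Note on the (rxdma_entries - 1) request above: this follows the usual
 * circular-ring convention where head == tail reads as empty, so at most
 * num_entries - 1 slots may ever be filled; replenishing one less than
 * the ring size keeps the full and empty states distinguishable. (Stated
 * here as the conventional rationale; the SRNG layer owns the details.)
 */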
#ifdef DP_RX_SPECIAL_FRAME_NEED
bool dp_rx_deliver_special_frame(struct dp_soc *soc,
				 struct dp_txrx_peer *txrx_peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t skip_len;

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		skip_len = l2_hdr_offset;
	} else {
		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
	}

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
	qdf_nbuf_pull_head(nbuf, skip_len);

	if (txrx_peer->vdev) {
		dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf,
				  QDF_TX_RX_STATUS_OK);
	}

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		dp_info("special frame, mpdu sn 0x%x",
			hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer,
				       nbuf, NULL);
		return true;
	}

	return false;
}
#endif

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					      uint8_t *rx_tlv,
					      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc;

	if (!pdev->is_first_wakeup_packet)
		return;

	soc = pdev->soc;
	if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) {
		qdf_nbuf_mark_wakeup_frame(nbuf);
		dp_info("First packet after WOW Wakeup rcvd");
	}
}
#endif
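/*
 * Caller-side sketch for dp_rx_deliver_special_frame() above (illustrative,
 * compiled out): an error-path handler that still lets EAPOL/DHCP/ARP
 * through to the stack when the regular data path has rejected the frame.
 */
#if 0
	uint32_t frame_mask = FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV4_DHCP |
			      FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV6_DHCP;

	if (!dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
					 rx_tlv_hdr))
		dp_rx_nbuf_free(nbuf);	/* not a special frame: drop it */
#endif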