1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "hal_hw_headers.h" 21 #include "dp_types.h" 22 #include "dp_rx.h" 23 #include "dp_tx.h" 24 #include "dp_peer.h" 25 #include "hal_rx.h" 26 #include "hal_api.h" 27 #include "qdf_nbuf.h" 28 #ifdef MESH_MODE_SUPPORT 29 #include "if_meta_hdr.h" 30 #endif 31 #include "dp_internal.h" 32 #include "dp_ipa.h" 33 #include "dp_hist.h" 34 #include "dp_rx_buffer_pool.h" 35 #ifdef WIFI_MONITOR_SUPPORT 36 #include "dp_htt.h" 37 #include <dp_mon.h> 38 #endif 39 #ifdef FEATURE_WDS 40 #include "dp_txrx_wds.h" 41 #endif 42 #ifdef DP_RATETABLE_SUPPORT 43 #include "dp_ratetable.h" 44 #endif 45 #include "enet.h" 46 47 #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */ 48 49 #ifdef DUP_RX_DESC_WAR 50 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 51 hal_ring_handle_t hal_ring, 52 hal_ring_desc_t ring_desc, 53 struct dp_rx_desc *rx_desc) 54 { 55 void *hal_soc = soc->hal_soc; 56 57 hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc); 58 dp_rx_desc_dump(rx_desc); 59 } 60 #else 61 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 62 hal_ring_handle_t hal_ring_hdl, 63 hal_ring_desc_t ring_desc, 64 struct dp_rx_desc *rx_desc) 65 { 66 hal_soc_handle_t hal_soc = soc->hal_soc; 67 68 dp_rx_desc_dump(rx_desc); 69 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc); 70 hal_srng_dump_ring(hal_soc, hal_ring_hdl); 71 qdf_assert_always(0); 72 } 73 #endif 74 75 #ifndef QCA_HOST_MODE_WIFI_DISABLED 76 #ifdef RX_DESC_SANITY_WAR 77 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, 78 hal_ring_handle_t hal_ring_hdl, 79 hal_ring_desc_t ring_desc, 80 struct dp_rx_desc *rx_desc) 81 { 82 uint8_t return_buffer_manager; 83 84 if (qdf_unlikely(!rx_desc)) { 85 /* 86 * This is an unlikely case where the cookie obtained 87 * from the ring_desc is invalid and hence we are not 88 * able to find the corresponding rx_desc 89 */ 90 goto fail; 91 } 92 93 return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc); 94 if (qdf_unlikely(!(return_buffer_manager == 95 HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) || 96 return_buffer_manager == 97 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) { 98 goto fail; 99 } 100 101 return QDF_STATUS_SUCCESS; 102 103 fail: 104 DP_STATS_INC(soc, rx.err.invalid_cookie, 1); 105 dp_err("Ring Desc:"); 106 hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, 107 ring_desc); 108 return QDF_STATUS_E_NULL_VALUE; 109 110 } 111 #endif 112 113 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, 114 hal_ring_handle_t hal_ring_hdl, 115 uint32_t num_entries, 116 bool *near_full) 117 { 118 uint32_t num_pending = 0; 119 120 num_pending = 
hal_srng_dst_num_valid_locked(hal_soc, 121 hal_ring_hdl, 122 true); 123 124 if (num_entries && (num_pending >= num_entries >> 1)) 125 *near_full = true; 126 else 127 *near_full = false; 128 129 return num_pending; 130 } 131 132 #ifdef RX_DESC_DEBUG_CHECK 133 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, 134 hal_ring_desc_t ring_desc, 135 struct dp_rx_desc *rx_desc) 136 { 137 struct hal_buf_info hbi; 138 139 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 140 /* Sanity check for possible buffer paddr corruption */ 141 if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr)) 142 return QDF_STATUS_SUCCESS; 143 144 return QDF_STATUS_E_FAILURE; 145 } 146 147 /** 148 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer 149 * out of bound access from H.W 150 * 151 * @soc: DP soc 152 * @pkt_len: Packet length received from H.W 153 * 154 * Return: NONE 155 */ 156 static inline void 157 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, 158 uint32_t pkt_len) 159 { 160 struct rx_desc_pool *rx_desc_pool; 161 162 rx_desc_pool = &soc->rx_desc_buf[0]; 163 qdf_assert_always(pkt_len <= rx_desc_pool->buf_size); 164 } 165 #else 166 static inline void 167 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { } 168 #endif 169 170 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 171 void 172 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 173 hal_ring_desc_t ring_desc) 174 { 175 struct dp_buf_info_record *record; 176 struct hal_buf_info hbi; 177 uint32_t idx; 178 179 if (qdf_unlikely(!soc->rx_ring_history[ring_num])) 180 return; 181 182 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 183 184 /* buffer_addr_info is the first element of ring_desc */ 185 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc, 186 &hbi); 187 188 idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index, 189 DP_RX_HIST_MAX); 190 191 /* No NULL check needed for record since its an array */ 192 record = &soc->rx_ring_history[ring_num]->entry[idx]; 193 194 record->timestamp = qdf_get_log_timestamp(); 195 record->hbi.paddr = hbi.paddr; 196 record->hbi.sw_cookie = hbi.sw_cookie; 197 record->hbi.rbm = hbi.rbm; 198 } 199 #endif 200 201 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET 202 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev, 203 uint8_t *rx_tlv, 204 qdf_nbuf_t nbuf) 205 { 206 struct dp_soc *soc; 207 208 if (!pdev->is_first_wakeup_packet) 209 return; 210 211 soc = pdev->soc; 212 if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) { 213 qdf_nbuf_mark_wakeup_frame(nbuf); 214 dp_info("First packet after WOW Wakeup rcvd"); 215 } 216 } 217 #endif 218 219 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 220 #endif /* WLAN_SOFTUMAC_SUPPORT */ 221 222 /** 223 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map 224 * 225 * @dp_soc: struct dp_soc * 226 * @nbuf_frag_info_t: nbuf frag info 227 * @dp_pdev: struct dp_pdev * 228 * @rx_desc_pool: Rx desc pool 229 * 230 * Return: QDF_STATUS 231 */ 232 #ifdef DP_RX_MON_MEM_FRAG 233 static inline QDF_STATUS 234 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 235 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 236 struct dp_pdev *dp_pdev, 237 struct rx_desc_pool *rx_desc_pool) 238 { 239 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 240 241 (nbuf_frag_info_t->virt_addr).vaddr = 242 qdf_frag_alloc(&rx_desc_pool->pf_cache, rx_desc_pool->buf_size); 243 244 if (!((nbuf_frag_info_t->virt_addr).vaddr)) { 245 dp_err("Frag alloc failed"); 246 DP_STATS_INC(dp_pdev, 
replenish.frag_alloc_fail, 1); 247 return QDF_STATUS_E_NOMEM; 248 } 249 250 ret = qdf_mem_map_page(dp_soc->osdev, 251 (nbuf_frag_info_t->virt_addr).vaddr, 252 QDF_DMA_FROM_DEVICE, 253 rx_desc_pool->buf_size, 254 &nbuf_frag_info_t->paddr); 255 256 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 257 qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr); 258 dp_err("Frag map failed"); 259 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 260 return QDF_STATUS_E_FAULT; 261 } 262 263 return QDF_STATUS_SUCCESS; 264 } 265 #else 266 static inline QDF_STATUS 267 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, 268 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 269 struct dp_pdev *dp_pdev, 270 struct rx_desc_pool *rx_desc_pool) 271 { 272 return QDF_STATUS_SUCCESS; 273 } 274 #endif /* DP_RX_MON_MEM_FRAG */ 275 276 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 277 /** 278 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history 279 * @soc: Datapath soc structure 280 * @ring_num: Refill ring number 281 * @hal_ring_hdl: 282 * @num_req: number of buffers requested for refill 283 * @num_refill: number of buffers refilled 284 * 285 * Return: None 286 */ 287 static inline void 288 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 289 hal_ring_handle_t hal_ring_hdl, 290 uint32_t num_req, uint32_t num_refill) 291 { 292 struct dp_refill_info_record *record; 293 uint32_t idx; 294 uint32_t tp; 295 uint32_t hp; 296 297 if (qdf_unlikely(ring_num >= MAX_PDEV_CNT || 298 !soc->rx_refill_ring_history[ring_num])) 299 return; 300 301 idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index, 302 DP_RX_REFILL_HIST_MAX); 303 304 /* No NULL check needed for record since its an array */ 305 record = &soc->rx_refill_ring_history[ring_num]->entry[idx]; 306 307 hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp); 308 record->timestamp = qdf_get_log_timestamp(); 309 record->num_req = num_req; 310 record->num_refill = num_refill; 311 record->hp = hp; 312 record->tp = tp; 313 } 314 #else 315 static inline void 316 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 317 hal_ring_handle_t hal_ring_hdl, 318 uint32_t num_req, uint32_t num_refill) 319 { 320 } 321 #endif 322 323 /** 324 * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and 325 * map 326 * @dp_soc: struct dp_soc * 327 * @mac_id: Mac id 328 * @num_entries_avail: num_entries_avail 329 * @nbuf_frag_info_t: nbuf frag info 330 * @dp_pdev: struct dp_pdev * 331 * @rx_desc_pool: Rx desc pool 332 * 333 * Return: QDF_STATUS 334 */ 335 static inline QDF_STATUS 336 dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc, 337 uint32_t mac_id, 338 uint32_t num_entries_avail, 339 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 340 struct dp_pdev *dp_pdev, 341 struct rx_desc_pool *rx_desc_pool) 342 { 343 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 344 345 (nbuf_frag_info_t->virt_addr).nbuf = 346 dp_rx_buffer_pool_nbuf_alloc(dp_soc, 347 mac_id, 348 rx_desc_pool, 349 num_entries_avail); 350 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 351 dp_err("nbuf alloc failed"); 352 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 353 return QDF_STATUS_E_NOMEM; 354 } 355 356 ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool, 357 nbuf_frag_info_t); 358 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 359 dp_rx_buffer_pool_nbuf_free(dp_soc, 360 (nbuf_frag_info_t->virt_addr).nbuf, mac_id); 361 dp_err("nbuf map failed"); 362 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 363 return QDF_STATUS_E_FAULT; 364 } 365 366 
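	/* Record the DMA address of the mapped nbuf; when IPA offload is
	 * enabled, also create the matching SMMU mapping for this RX buffer.
	 */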
nbuf_frag_info_t->paddr = 367 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 368 dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)( 369 (nbuf_frag_info_t->virt_addr).nbuf), 370 rx_desc_pool->buf_size, 371 true, __func__, __LINE__); 372 373 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 374 &nbuf_frag_info_t->paddr, 375 rx_desc_pool); 376 if (ret == QDF_STATUS_E_FAILURE) { 377 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 378 return QDF_STATUS_E_ADDRNOTAVAIL; 379 } 380 381 return QDF_STATUS_SUCCESS; 382 } 383 384 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 385 QDF_STATUS 386 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id, 387 struct dp_srng *dp_rxdma_srng, 388 struct rx_desc_pool *rx_desc_pool) 389 { 390 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 391 uint32_t count; 392 void *rxdma_ring_entry; 393 union dp_rx_desc_list_elem_t *next = NULL; 394 void *rxdma_srng; 395 qdf_nbuf_t nbuf; 396 qdf_dma_addr_t paddr; 397 uint16_t num_entries_avail = 0; 398 uint16_t num_alloc_desc = 0; 399 union dp_rx_desc_list_elem_t *desc_list = NULL; 400 union dp_rx_desc_list_elem_t *tail = NULL; 401 int sync_hw_ptr = 0; 402 403 rxdma_srng = dp_rxdma_srng->hal_srng; 404 405 if (qdf_unlikely(!dp_pdev)) { 406 dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id); 407 return QDF_STATUS_E_FAILURE; 408 } 409 410 if (qdf_unlikely(!rxdma_srng)) { 411 dp_rx_debug("%pK: rxdma srng not initialized", soc); 412 return QDF_STATUS_E_FAILURE; 413 } 414 415 hal_srng_access_start(soc->hal_soc, rxdma_srng); 416 417 num_entries_avail = hal_srng_src_num_avail(soc->hal_soc, 418 rxdma_srng, 419 sync_hw_ptr); 420 421 dp_rx_debug("%pK: no of available entries in rxdma ring: %d", 422 soc, num_entries_avail); 423 424 if (qdf_unlikely(num_entries_avail < 425 ((dp_rxdma_srng->num_entries * 3) / 4))) { 426 hal_srng_access_end(soc->hal_soc, rxdma_srng); 427 return QDF_STATUS_E_FAILURE; 428 } 429 430 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 431 num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id, 432 rx_desc_pool, 433 num_entries_avail, 434 &desc_list, 435 &tail); 436 437 if (!num_alloc_desc) { 438 dp_rx_err("%pK: no free rx_descs in freelist", soc); 439 DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail, 440 num_entries_avail); 441 hal_srng_access_end(soc->hal_soc, rxdma_srng); 442 return QDF_STATUS_E_NOMEM; 443 } 444 445 for (count = 0; count < num_alloc_desc; count++) { 446 next = desc_list->next; 447 qdf_prefetch(next); 448 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 449 if (qdf_unlikely(!nbuf)) { 450 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 451 break; 452 } 453 454 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 455 rx_desc_pool->buf_size); 456 457 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, 458 rxdma_srng); 459 qdf_assert_always(rxdma_ring_entry); 460 461 desc_list->rx_desc.nbuf = nbuf; 462 dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf); 463 desc_list->rx_desc.rx_buf_start = nbuf->data; 464 desc_list->rx_desc.paddr_buf_start = paddr; 465 desc_list->rx_desc.unmapped = 0; 466 467 /* rx_desc.in_use should be zero at this time*/ 468 qdf_assert_always(desc_list->rx_desc.in_use == 0); 469 470 desc_list->rx_desc.in_use = 1; 471 desc_list->rx_desc.in_err_state = 0; 472 473 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 474 paddr, 475 desc_list->rx_desc.cookie, 476 rx_desc_pool->owner); 477 478 desc_list = next; 479 } 480 qdf_dsb(); 481 hal_srng_access_end(soc->hal_soc, rxdma_srng); 482 483 /* 
No need to count the number of bytes received during replenish. 484 * Therefore set replenish.pkts.bytes as 0. 485 */ 486 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 487 DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count)); 488 /* 489 * add any available free desc back to the free list 490 */ 491 if (desc_list) 492 dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail, 493 mac_id, rx_desc_pool); 494 495 return QDF_STATUS_SUCCESS; 496 } 497 498 QDF_STATUS 499 __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id, 500 struct dp_srng *dp_rxdma_srng, 501 struct rx_desc_pool *rx_desc_pool, 502 uint32_t num_req_buffers, 503 union dp_rx_desc_list_elem_t **desc_list, 504 union dp_rx_desc_list_elem_t **tail) 505 { 506 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 507 uint32_t count; 508 void *rxdma_ring_entry; 509 union dp_rx_desc_list_elem_t *next; 510 void *rxdma_srng; 511 qdf_nbuf_t nbuf; 512 qdf_nbuf_t nbuf_next; 513 qdf_nbuf_t nbuf_head = NULL; 514 qdf_nbuf_t nbuf_tail = NULL; 515 qdf_dma_addr_t paddr; 516 517 rxdma_srng = dp_rxdma_srng->hal_srng; 518 519 if (qdf_unlikely(!dp_pdev)) { 520 dp_rx_err("%pK: pdev is null for mac_id = %d", 521 soc, mac_id); 522 return QDF_STATUS_E_FAILURE; 523 } 524 525 if (qdf_unlikely(!rxdma_srng)) { 526 dp_rx_debug("%pK: rxdma srng not initialized", soc); 527 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 528 return QDF_STATUS_E_FAILURE; 529 } 530 531 /* Allocate required number of nbufs */ 532 for (count = 0; count < num_req_buffers; count++) { 533 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 534 if (qdf_unlikely(!nbuf)) { 535 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 536 /* Update num_req_buffers to nbufs allocated count */ 537 num_req_buffers = count; 538 break; 539 } 540 541 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 542 rx_desc_pool->buf_size); 543 544 QDF_NBUF_CB_PADDR(nbuf) = paddr; 545 DP_RX_LIST_APPEND(nbuf_head, 546 nbuf_tail, 547 nbuf); 548 } 549 qdf_dsb(); 550 551 nbuf = nbuf_head; 552 hal_srng_access_start(soc->hal_soc, rxdma_srng); 553 554 for (count = 0; count < num_req_buffers; count++) { 555 next = (*desc_list)->next; 556 nbuf_next = nbuf->next; 557 qdf_prefetch(next); 558 559 rxdma_ring_entry = (struct dp_buffer_addr_info *) 560 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 561 562 if (!rxdma_ring_entry) 563 break; 564 565 (*desc_list)->rx_desc.nbuf = nbuf; 566 dp_rx_set_reuse_nbuf(&(*desc_list)->rx_desc, nbuf); 567 (*desc_list)->rx_desc.rx_buf_start = nbuf->data; 568 (*desc_list)->rx_desc.paddr_buf_start = QDF_NBUF_CB_PADDR(nbuf); 569 (*desc_list)->rx_desc.unmapped = 0; 570 571 /* rx_desc.in_use should be zero at this time*/ 572 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 573 574 (*desc_list)->rx_desc.in_use = 1; 575 (*desc_list)->rx_desc.in_err_state = 0; 576 577 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 578 QDF_NBUF_CB_PADDR(nbuf), 579 (*desc_list)->rx_desc.cookie, 580 rx_desc_pool->owner); 581 582 *desc_list = next; 583 nbuf = nbuf_next; 584 } 585 hal_srng_access_end(soc->hal_soc, rxdma_srng); 586 587 /* No need to count the number of bytes received during replenish. 588 * Therefore set replenish.pkts.bytes as 0. 
589 */ 590 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 591 DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count)); 592 /* 593 * add any available free desc back to the free list 594 */ 595 if (*desc_list) 596 dp_rx_add_desc_list_to_free_list(soc, desc_list, tail, 597 mac_id, rx_desc_pool); 598 while (nbuf) { 599 nbuf_next = nbuf->next; 600 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf); 601 qdf_nbuf_free(nbuf); 602 nbuf = nbuf_next; 603 } 604 605 return QDF_STATUS_SUCCESS; 606 } 607 608 #ifdef WLAN_SUPPORT_PPEDS 609 QDF_STATUS 610 __dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id, 611 struct dp_srng *dp_rxdma_srng, 612 struct rx_desc_pool *rx_desc_pool, 613 uint32_t num_req_buffers, 614 union dp_rx_desc_list_elem_t **desc_list, 615 union dp_rx_desc_list_elem_t **tail) 616 { 617 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 618 uint32_t count; 619 void *rxdma_ring_entry; 620 union dp_rx_desc_list_elem_t *next; 621 union dp_rx_desc_list_elem_t *cur; 622 void *rxdma_srng; 623 qdf_nbuf_t nbuf; 624 625 rxdma_srng = dp_rxdma_srng->hal_srng; 626 627 if (qdf_unlikely(!dp_pdev)) { 628 dp_rx_err("%pK: pdev is null for mac_id = %d", 629 soc, mac_id); 630 return QDF_STATUS_E_FAILURE; 631 } 632 633 if (qdf_unlikely(!rxdma_srng)) { 634 dp_rx_debug("%pK: rxdma srng not initialized", soc); 635 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 636 return QDF_STATUS_E_FAILURE; 637 } 638 639 hal_srng_access_start(soc->hal_soc, rxdma_srng); 640 641 for (count = 0; count < num_req_buffers; count++) { 642 next = (*desc_list)->next; 643 qdf_prefetch(next); 644 645 rxdma_ring_entry = (struct dp_buffer_addr_info *) 646 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 647 648 if (!rxdma_ring_entry) 649 break; 650 651 (*desc_list)->rx_desc.in_use = 1; 652 (*desc_list)->rx_desc.in_err_state = 0; 653 (*desc_list)->rx_desc.nbuf = (*desc_list)->rx_desc.reuse_nbuf; 654 655 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 656 (*desc_list)->rx_desc.paddr_buf_start, 657 (*desc_list)->rx_desc.cookie, 658 rx_desc_pool->owner); 659 660 *desc_list = next; 661 } 662 hal_srng_access_end(soc->hal_soc, rxdma_srng); 663 664 /* No need to count the number of bytes received during replenish. 665 * Therefore set replenish.pkts.bytes as 0. 
666 */ 667 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 668 DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count)); 669 670 /* 671 * add any available free desc back to the free list 672 */ 673 cur = *desc_list; 674 for ( ; count < num_req_buffers; count++) { 675 next = cur->next; 676 qdf_prefetch(next); 677 678 nbuf = cur->rx_desc.reuse_nbuf; 679 680 cur->rx_desc.nbuf = NULL; 681 cur->rx_desc.in_use = 0; 682 cur->rx_desc.has_reuse_nbuf = false; 683 cur->rx_desc.reuse_nbuf = NULL; 684 if (!nbuf->recycled_for_ds) 685 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf); 686 687 nbuf->recycled_for_ds = 0; 688 nbuf->fast_recycled = 0; 689 qdf_nbuf_free(nbuf); 690 cur = next; 691 } 692 693 if (*desc_list) 694 dp_rx_add_desc_list_to_free_list(soc, desc_list, tail, 695 mac_id, rx_desc_pool); 696 697 return QDF_STATUS_SUCCESS; 698 } 699 #endif 700 701 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc, 702 uint32_t mac_id, 703 struct dp_srng *dp_rxdma_srng, 704 struct rx_desc_pool *rx_desc_pool, 705 uint32_t num_req_buffers) 706 { 707 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 708 uint32_t count; 709 uint32_t nr_descs = 0; 710 void *rxdma_ring_entry; 711 union dp_rx_desc_list_elem_t *next; 712 void *rxdma_srng; 713 qdf_nbuf_t nbuf; 714 qdf_dma_addr_t paddr; 715 union dp_rx_desc_list_elem_t *desc_list = NULL; 716 union dp_rx_desc_list_elem_t *tail = NULL; 717 718 rxdma_srng = dp_rxdma_srng->hal_srng; 719 720 if (qdf_unlikely(!dp_pdev)) { 721 dp_rx_err("%pK: pdev is null for mac_id = %d", 722 soc, mac_id); 723 return QDF_STATUS_E_FAILURE; 724 } 725 726 if (qdf_unlikely(!rxdma_srng)) { 727 dp_rx_debug("%pK: rxdma srng not initialized", soc); 728 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 729 return QDF_STATUS_E_FAILURE; 730 } 731 732 dp_rx_debug("%pK: requested %d buffers for replenish", 733 soc, num_req_buffers); 734 735 nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool, 736 num_req_buffers, &desc_list, &tail); 737 if (!nr_descs) { 738 dp_err("no free rx_descs in freelist"); 739 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 740 return QDF_STATUS_E_NOMEM; 741 } 742 743 dp_debug("got %u RX descs for driver attach", nr_descs); 744 745 hal_srng_access_start(soc->hal_soc, rxdma_srng); 746 747 for (count = 0; count < nr_descs; count++) { 748 next = desc_list->next; 749 qdf_prefetch(next); 750 nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool); 751 if (qdf_unlikely(!nbuf)) { 752 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 753 break; 754 } 755 756 paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, 757 rx_desc_pool->buf_size); 758 rxdma_ring_entry = (struct dp_buffer_addr_info *) 759 hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 760 if (!rxdma_ring_entry) { 761 qdf_nbuf_free(nbuf); 762 break; 763 } 764 765 desc_list->rx_desc.nbuf = nbuf; 766 dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf); 767 desc_list->rx_desc.rx_buf_start = nbuf->data; 768 desc_list->rx_desc.paddr_buf_start = paddr; 769 desc_list->rx_desc.unmapped = 0; 770 771 /* rx_desc.in_use should be zero at this time*/ 772 qdf_assert_always(desc_list->rx_desc.in_use == 0); 773 774 desc_list->rx_desc.in_use = 1; 775 desc_list->rx_desc.in_err_state = 0; 776 777 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, 778 paddr, 779 desc_list->rx_desc.cookie, 780 rx_desc_pool->owner); 781 782 desc_list = next; 783 } 784 qdf_dsb(); 785 hal_srng_access_end(soc->hal_soc, rxdma_srng); 786 787 /* No need to count the number of bytes received during replenish. 
788 * Therefore set replenish.pkts.bytes as 0. 789 */ 790 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); 791 792 return QDF_STATUS_SUCCESS; 793 } 794 #endif 795 796 #ifdef DP_UMAC_HW_RESET_SUPPORT 797 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 798 static inline 799 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, 800 uint32_t buf_size) 801 { 802 return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size); 803 } 804 #else 805 static inline 806 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, 807 uint32_t buf_size) 808 { 809 return qdf_nbuf_get_frag_paddr(nbuf, 0); 810 } 811 #endif 812 813 /** 814 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time 815 * @soc: core txrx main context 816 * @dp_rxdma_srng: rxdma ring 817 * @rx_desc_pool: rx descriptor pool 818 * @rx_desc:rx descriptor 819 * 820 * Return: void 821 */ 822 static inline 823 void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng, 824 struct rx_desc_pool *rx_desc_pool, 825 struct dp_rx_desc *rx_desc) 826 { 827 void *rxdma_srng; 828 void *rxdma_ring_entry; 829 qdf_dma_addr_t paddr; 830 831 rxdma_srng = dp_rxdma_srng->hal_srng; 832 833 /* No one else should be accessing the srng at this point */ 834 hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng); 835 836 rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng); 837 838 qdf_assert_always(rxdma_ring_entry); 839 rx_desc->in_err_state = 0; 840 841 paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf, 842 rx_desc_pool->buf_size); 843 hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr, 844 rx_desc->cookie, rx_desc_pool->owner); 845 846 hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng); 847 } 848 849 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list) 850 { 851 int mac_id, i, j; 852 union dp_rx_desc_list_elem_t *head = NULL; 853 union dp_rx_desc_list_elem_t *tail = NULL; 854 855 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 856 struct dp_srng *dp_rxdma_srng = 857 &soc->rx_refill_buf_ring[mac_id]; 858 struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id]; 859 uint32_t rx_sw_desc_num = rx_desc_pool->pool_size; 860 /* Only fill up 1/3 of the ring size */ 861 uint32_t num_req_decs; 862 863 if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng || 864 !rx_desc_pool->array) 865 continue; 866 867 num_req_decs = dp_rxdma_srng->num_entries / 3; 868 869 for (i = 0, j = 0; i < rx_sw_desc_num; i++) { 870 struct dp_rx_desc *rx_desc = 871 (struct dp_rx_desc *)&rx_desc_pool->array[i]; 872 873 if (rx_desc->in_use) { 874 if (j < (dp_rxdma_srng->num_entries - 1)) { 875 dp_rx_desc_replenish(soc, dp_rxdma_srng, 876 rx_desc_pool, 877 rx_desc); 878 } else { 879 dp_rx_nbuf_unmap(soc, rx_desc, 0); 880 rx_desc->unmapped = 0; 881 882 rx_desc->nbuf->next = *nbuf_list; 883 *nbuf_list = rx_desc->nbuf; 884 885 dp_rx_add_to_free_desc_list(&head, 886 &tail, 887 rx_desc); 888 } 889 j++; 890 } 891 } 892 893 if (head) 894 dp_rx_add_desc_list_to_free_list(soc, &head, &tail, 895 mac_id, rx_desc_pool); 896 897 /* If num of descs in use were less, then we need to replenish 898 * the ring with some buffers 899 */ 900 head = NULL; 901 tail = NULL; 902 903 if (j < (num_req_decs - 1)) 904 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 905 rx_desc_pool, 906 ((num_req_decs - 1) - j), 907 &head, &tail, true); 908 } 909 } 910 #endif 911 912 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 913 struct dp_srng *dp_rxdma_srng, 914 
struct rx_desc_pool *rx_desc_pool, 915 uint32_t num_req_buffers, 916 union dp_rx_desc_list_elem_t **desc_list, 917 union dp_rx_desc_list_elem_t **tail, 918 bool req_only, const char *func_name) 919 { 920 uint32_t num_alloc_desc; 921 uint16_t num_desc_to_free = 0; 922 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 923 uint32_t num_entries_avail; 924 uint32_t count; 925 uint32_t extra_buffers; 926 int sync_hw_ptr = 1; 927 struct dp_rx_nbuf_frag_info nbuf_frag_info = {0}; 928 void *rxdma_ring_entry; 929 union dp_rx_desc_list_elem_t *next; 930 QDF_STATUS ret; 931 void *rxdma_srng; 932 union dp_rx_desc_list_elem_t *desc_list_append = NULL; 933 union dp_rx_desc_list_elem_t *tail_append = NULL; 934 union dp_rx_desc_list_elem_t *temp_list = NULL; 935 936 rxdma_srng = dp_rxdma_srng->hal_srng; 937 938 if (qdf_unlikely(!dp_pdev)) { 939 dp_rx_err("%pK: pdev is null for mac_id = %d", 940 dp_soc, mac_id); 941 return QDF_STATUS_E_FAILURE; 942 } 943 944 if (qdf_unlikely(!rxdma_srng)) { 945 dp_rx_debug("%pK: rxdma srng not initialized", dp_soc); 946 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 947 return QDF_STATUS_E_FAILURE; 948 } 949 950 dp_verbose_debug("%pK: requested %d buffers for replenish", 951 dp_soc, num_req_buffers); 952 953 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 954 955 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 956 rxdma_srng, 957 sync_hw_ptr); 958 959 dp_verbose_debug("%pK: no of available entries in rxdma ring: %d", 960 dp_soc, num_entries_avail); 961 962 if (!req_only && !(*desc_list) && (num_entries_avail > 963 ((dp_rxdma_srng->num_entries * 3) / 4))) { 964 num_req_buffers = num_entries_avail; 965 DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1); 966 } else if (num_entries_avail < num_req_buffers) { 967 num_desc_to_free = num_req_buffers - num_entries_avail; 968 num_req_buffers = num_entries_avail; 969 } else if ((*desc_list) && 970 dp_rxdma_srng->num_entries - num_entries_avail < 971 CRITICAL_BUFFER_THRESHOLD) { 972 /* set extra buffers to CRITICAL_BUFFER_THRESHOLD only if 973 * total buff requested after adding extra buffers is less 974 * than or equal to num entries available, else set it to max 975 * possible additional buffers available at that moment 976 */ 977 extra_buffers = 978 ((num_req_buffers + CRITICAL_BUFFER_THRESHOLD) > num_entries_avail) ? 
979 (num_entries_avail - num_req_buffers) : 980 CRITICAL_BUFFER_THRESHOLD; 981 /* Append some free descriptors to tail */ 982 num_alloc_desc = 983 dp_rx_get_free_desc_list(dp_soc, mac_id, 984 rx_desc_pool, 985 extra_buffers, 986 &desc_list_append, 987 &tail_append); 988 989 if (num_alloc_desc) { 990 temp_list = *desc_list; 991 *desc_list = desc_list_append; 992 tail_append->next = temp_list; 993 num_req_buffers += num_alloc_desc; 994 995 DP_STATS_DEC(dp_pdev, 996 replenish.free_list, 997 num_alloc_desc); 998 } else 999 dp_err_rl("%pK: no free rx_descs in freelist", dp_soc); 1000 } 1001 1002 if (qdf_unlikely(!num_req_buffers)) { 1003 num_desc_to_free = num_req_buffers; 1004 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 1005 goto free_descs; 1006 } 1007 1008 /* 1009 * if desc_list is NULL, allocate the descs from freelist 1010 */ 1011 if (!(*desc_list)) { 1012 num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, 1013 rx_desc_pool, 1014 num_req_buffers, 1015 desc_list, 1016 tail); 1017 1018 if (!num_alloc_desc) { 1019 dp_rx_err("%pK: no free rx_descs in freelist", dp_soc); 1020 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, 1021 num_req_buffers); 1022 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 1023 return QDF_STATUS_E_NOMEM; 1024 } 1025 1026 dp_verbose_debug("%pK: %d rx desc allocated", dp_soc, 1027 num_alloc_desc); 1028 num_req_buffers = num_alloc_desc; 1029 } 1030 1031 1032 count = 0; 1033 1034 while (count < num_req_buffers) { 1035 /* Flag is set while pdev rx_desc_pool initialization */ 1036 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 1037 ret = dp_pdev_frag_alloc_and_map(dp_soc, 1038 &nbuf_frag_info, 1039 dp_pdev, 1040 rx_desc_pool); 1041 else 1042 ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc, 1043 mac_id, 1044 num_entries_avail, &nbuf_frag_info, 1045 dp_pdev, rx_desc_pool); 1046 1047 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 1048 if (qdf_unlikely(ret == QDF_STATUS_E_FAULT)) 1049 continue; 1050 break; 1051 } 1052 1053 count++; 1054 1055 rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, 1056 rxdma_srng); 1057 qdf_assert_always(rxdma_ring_entry); 1058 1059 next = (*desc_list)->next; 1060 1061 /* Flag is set while pdev rx_desc_pool initialization */ 1062 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 1063 dp_rx_desc_frag_prep(&((*desc_list)->rx_desc), 1064 &nbuf_frag_info); 1065 else 1066 dp_rx_desc_prep(&((*desc_list)->rx_desc), 1067 &nbuf_frag_info); 1068 1069 /* rx_desc.in_use should be zero at this time*/ 1070 qdf_assert_always((*desc_list)->rx_desc.in_use == 0); 1071 1072 (*desc_list)->rx_desc.in_use = 1; 1073 (*desc_list)->rx_desc.in_err_state = 0; 1074 dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc, 1075 func_name, RX_DESC_REPLENISHED); 1076 dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d", 1077 nbuf_frag_info.virt_addr.nbuf, 1078 (unsigned long long)(nbuf_frag_info.paddr), 1079 (*desc_list)->rx_desc.cookie); 1080 1081 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, 1082 nbuf_frag_info.paddr, 1083 (*desc_list)->rx_desc.cookie, 1084 rx_desc_pool->owner); 1085 1086 *desc_list = next; 1087 1088 } 1089 1090 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng, 1091 num_req_buffers, count); 1092 1093 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 1094 1095 dp_rx_schedule_refill_thread(dp_soc); 1096 1097 dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u", 1098 count, num_desc_to_free); 1099 1100 /* No need to count the number of bytes received during replenish. 
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(__dp_rx_buffers_replenish);

void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_txrx_peer *txrx_peer, uint8_t link_id)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
					      qdf_nbuf_len(nbuf), link_id);

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/**
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical frame
 * and marks the QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the
 * nbuf. The code for marking which frames are CRITICAL is accessed via a
 * callback. EAPOL, ARP, DHCP, DHCPv6 and ICMPv6 NS/NA are the typical
 * critical frames.
 *
 * Return: None
 */
static
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
	if (vdev->tx_classify_critical_pkt_cb)
		vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
}
#else
static inline
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
	qdf_nbuf_set_queue_mapping(nbuf, ring_id);
}
#else
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
}
#endif

bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			     struct cdp_tid_rx_stats *tid_stats,
			     uint8_t link_id)
{
	uint16_t len;
	qdf_nbuf_t nbuf_copy;

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf, link_id))
		return false;

	/* If the source peer is in the isolation list, don't forward;
	 * instead push the frame to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer))
		return false;

	nbuf_copy = qdf_nbuf_copy(nbuf);
	if (!nbuf_copy)
		return false;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);

	if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer,
						       nbuf_copy,
						       tid_stats,
						       link_id))
		return false;

	/* Don't send packets if tx is paused */
	if (!soc->is_tx_pause &&
	    !dp_tx_send((struct cdp_soc_t *)soc,
			ta_peer->vdev->vdev_id, nbuf_copy)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len, link_id);
		tid_stats->intrabss_cnt++;
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len, link_id);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		dp_rx_nbuf_free(nbuf_copy);
	}
	return false;
}

bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			      uint8_t tx_vdev_id,
			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			      struct cdp_tid_rx_stats *tid_stats,
			      uint8_t link_id)
{
	uint16_t len;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/* linearize the nbuf just before we send to
	 * dp_tx_send()
	 */
	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
			return false;

		nbuf = qdf_nbuf_unshare(nbuf);
		if (!nbuf) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
						      rx.intra_bss.fail,
						      1, len, link_id);
			/* qdf_nbuf_unshare() failed, so the pkt is not
			 * forwarded. Return false and continue with the
			 * next nbuf.
1275 */ 1276 tid_stats->fail_cnt[INTRABSS_DROP]++; 1277 return false; 1278 } 1279 } 1280 1281 qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb)); 1282 dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf); 1283 1284 /* Don't send packets if tx is paused */ 1285 if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc, 1286 tx_vdev_id, nbuf)) { 1287 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, 1288 len, link_id); 1289 } else { 1290 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, 1291 len, link_id); 1292 tid_stats->fail_cnt[INTRABSS_DROP]++; 1293 return false; 1294 } 1295 1296 return true; 1297 } 1298 1299 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1300 1301 #ifdef MESH_MODE_SUPPORT 1302 1303 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1304 uint8_t *rx_tlv_hdr, 1305 struct dp_txrx_peer *txrx_peer) 1306 { 1307 struct mesh_recv_hdr_s *rx_info = NULL; 1308 uint32_t pkt_type; 1309 uint32_t nss; 1310 uint32_t rate_mcs; 1311 uint32_t bw; 1312 uint8_t primary_chan_num; 1313 uint32_t center_chan_freq; 1314 struct dp_soc *soc = vdev->pdev->soc; 1315 struct dp_peer *peer; 1316 struct dp_peer *primary_link_peer; 1317 struct dp_soc *link_peer_soc; 1318 cdp_peer_stats_param_t buf = {0}; 1319 1320 /* fill recv mesh stats */ 1321 rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s)); 1322 1323 /* upper layers are responsible to free this memory */ 1324 1325 if (!rx_info) { 1326 dp_rx_err("%pK: Memory allocation failed for mesh rx stats", 1327 vdev->pdev->soc); 1328 DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1); 1329 return; 1330 } 1331 1332 rx_info->rs_flags = MESH_RXHDR_VER1; 1333 if (qdf_nbuf_is_rx_chfrag_start(nbuf)) 1334 rx_info->rs_flags |= MESH_RX_FIRST_MSDU; 1335 1336 if (qdf_nbuf_is_rx_chfrag_end(nbuf)) 1337 rx_info->rs_flags |= MESH_RX_LAST_MSDU; 1338 1339 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH); 1340 if (peer) { 1341 if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) { 1342 rx_info->rs_flags |= MESH_RX_DECRYPTED; 1343 rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc, 1344 rx_tlv_hdr); 1345 if (vdev->osif_get_key) 1346 vdev->osif_get_key(vdev->osif_vdev, 1347 &rx_info->rs_decryptkey[0], 1348 &peer->mac_addr.raw[0], 1349 rx_info->rs_keyix); 1350 } 1351 1352 dp_peer_unref_delete(peer, DP_MOD_ID_MESH); 1353 } 1354 1355 primary_link_peer = dp_get_primary_link_peer_by_id(soc, 1356 txrx_peer->peer_id, 1357 DP_MOD_ID_MESH); 1358 1359 if (qdf_likely(primary_link_peer)) { 1360 link_peer_soc = primary_link_peer->vdev->pdev->soc; 1361 dp_monitor_peer_get_stats_param(link_peer_soc, 1362 primary_link_peer, 1363 cdp_peer_rx_snr, &buf); 1364 rx_info->rs_snr = buf.rx_snr; 1365 dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH); 1366 } 1367 1368 rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR; 1369 1370 soc = vdev->pdev->soc; 1371 primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr); 1372 center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16; 1373 1374 if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) { 1375 rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band( 1376 soc->ctrl_psoc, 1377 vdev->pdev->pdev_id, 1378 center_chan_freq); 1379 } 1380 rx_info->rs_channel = primary_chan_num; 1381 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 1382 rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 1383 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 1384 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 1385 1386 /* 1387 * The MCS index does 
not start with 0 when NSS>1 in HT mode. 1388 * MCS params for optional 20/40MHz, NSS=1~3, EQM(NSS>1): 1389 * ------------------------------------------------------ 1390 * NSS | 1 | 2 | 3 | 4 1391 * ------------------------------------------------------ 1392 * MCS index: HT20 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31 1393 * ------------------------------------------------------ 1394 * MCS index: HT40 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31 1395 * ------------------------------------------------------ 1396 * Currently, the MAX_NSS=2. If NSS>2, MCS index = 8 * (NSS-1) 1397 */ 1398 if ((pkt_type == DOT11_N) && (nss == 2)) 1399 rate_mcs += 8; 1400 1401 rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) | 1402 (bw << 24); 1403 1404 qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO); 1405 1406 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED, 1407 FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"), 1408 rx_info->rs_flags, 1409 rx_info->rs_rssi, 1410 rx_info->rs_channel, 1411 rx_info->rs_ratephy1, 1412 rx_info->rs_keyix, 1413 rx_info->rs_snr); 1414 1415 } 1416 1417 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1418 uint8_t *rx_tlv_hdr) 1419 { 1420 union dp_align_mac_addr mac_addr; 1421 struct dp_soc *soc = vdev->pdev->soc; 1422 1423 if (qdf_unlikely(vdev->mesh_rx_filter)) { 1424 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS) 1425 if (hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1426 rx_tlv_hdr)) 1427 return QDF_STATUS_SUCCESS; 1428 1429 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS) 1430 if (hal_rx_mpdu_get_to_ds(soc->hal_soc, 1431 rx_tlv_hdr)) 1432 return QDF_STATUS_SUCCESS; 1433 1434 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS) 1435 if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc, 1436 rx_tlv_hdr) && 1437 !hal_rx_mpdu_get_to_ds(soc->hal_soc, 1438 rx_tlv_hdr)) 1439 return QDF_STATUS_SUCCESS; 1440 1441 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) { 1442 if (hal_rx_mpdu_get_addr1(soc->hal_soc, 1443 rx_tlv_hdr, 1444 &mac_addr.raw[0])) 1445 return QDF_STATUS_E_FAILURE; 1446 1447 if (!qdf_mem_cmp(&mac_addr.raw[0], 1448 &vdev->mac_addr.raw[0], 1449 QDF_MAC_ADDR_SIZE)) 1450 return QDF_STATUS_SUCCESS; 1451 } 1452 1453 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) { 1454 if (hal_rx_mpdu_get_addr2(soc->hal_soc, 1455 rx_tlv_hdr, 1456 &mac_addr.raw[0])) 1457 return QDF_STATUS_E_FAILURE; 1458 1459 if (!qdf_mem_cmp(&mac_addr.raw[0], 1460 &vdev->mac_addr.raw[0], 1461 QDF_MAC_ADDR_SIZE)) 1462 return QDF_STATUS_SUCCESS; 1463 } 1464 } 1465 1466 return QDF_STATUS_E_FAILURE; 1467 } 1468 1469 #else 1470 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1471 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer) 1472 { 1473 } 1474 1475 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1476 uint8_t *rx_tlv_hdr) 1477 { 1478 return QDF_STATUS_E_FAILURE; 1479 } 1480 1481 #endif 1482 1483 #ifdef RX_PEER_INVALID_ENH 1484 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, 1485 uint8_t mac_id) 1486 { 1487 struct dp_invalid_peer_msg msg; 1488 struct dp_vdev *vdev = NULL; 1489 struct dp_pdev *pdev = NULL; 1490 struct ieee80211_frame *wh; 1491 qdf_nbuf_t curr_nbuf, next_nbuf; 1492 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 1493 uint8_t *rx_pkt_hdr = NULL; 1494 int i = 0; 1495 uint32_t nbuf_len; 1496 1497 if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) { 1498 dp_rx_debug("%pK: Drop decapped frames", soc); 1499 goto free; 1500 } 1501 1502 /* In RAW packet, packet header will be part of 
data */
	rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
		goto free;
	}

	nbuf_len = qdf_nbuf_len(mpdu);
	if (nbuf_len < sizeof(struct ieee80211_frame)) {
		dp_rx_err("%pK: Invalid nbuf length: %u", soc, nbuf_len);
		goto free;
	}

	/* In the DMAC case the rx_desc_pools are common across PDEVs,
	 * so the PDEV cannot be derived from the pool_id.
	 *
	 * link_id needs to be derived from the TLV tag word, which is
	 * disabled by default. For now, add a WAR that finds the vdev by
	 * brute force; this needs to be fixed once word-based subscription
	 * support is added by enabling the TLV tag word.
	 */
	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		for (i = 0; i < MAX_PDEV_CNT; i++) {
			pdev = soc->pdev_list[i];

			if (!pdev || qdf_unlikely(pdev->is_pdev_down))
				continue;

			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
				if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
						QDF_MAC_ADDR_SIZE) == 0) {
					goto out;
				}
			}
		}
	} else {
		pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

		if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
			dp_rx_err("%pK: PDEV %s",
				  soc, !pdev ? "not found" : "down");
			goto free;
		}

		if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
		    QDF_STATUS_SUCCESS)
			return 0;

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		dp_rx_err("%pK: VDEV not found", soc);
		goto free;
	}
out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;

	/*
	 * NOTE: Only valid for HKv1.
	 * If smart monitor mode is enabled on the RE, we receive invalid
	 * peer frames with RA set to the STA mac of the RE and a TA that
	 * matches neither any NAC list entry nor the BSSID. Such frames
	 * need to be dropped in order to avoid false HM_WDS addition.
1576 */ 1577 if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) { 1578 if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) { 1579 dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm", 1580 soc, wh->i_addr1); 1581 goto free; 1582 } 1583 pdev->soc->cdp_soc.ol_ops->rx_invalid_peer( 1584 (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc, 1585 pdev->pdev_id, &msg); 1586 } 1587 1588 free: 1589 /* Drop and free packet */ 1590 curr_nbuf = mpdu; 1591 while (curr_nbuf) { 1592 next_nbuf = qdf_nbuf_next(curr_nbuf); 1593 dp_rx_nbuf_free(curr_nbuf); 1594 curr_nbuf = next_nbuf; 1595 } 1596 1597 return 0; 1598 } 1599 1600 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1601 qdf_nbuf_t mpdu, bool mpdu_done, 1602 uint8_t mac_id) 1603 { 1604 /* Only trigger the process when mpdu is completed */ 1605 if (mpdu_done) 1606 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1607 } 1608 #else 1609 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, 1610 uint8_t mac_id) 1611 { 1612 qdf_nbuf_t curr_nbuf, next_nbuf; 1613 struct dp_pdev *pdev; 1614 struct dp_vdev *vdev = NULL; 1615 struct ieee80211_frame *wh; 1616 struct dp_peer *peer = NULL; 1617 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 1618 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr); 1619 uint32_t nbuf_len; 1620 1621 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1622 1623 if (!DP_FRAME_IS_DATA(wh)) { 1624 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, 1625 "only for data frames"); 1626 goto free; 1627 } 1628 1629 nbuf_len = qdf_nbuf_len(mpdu); 1630 if (nbuf_len < sizeof(struct ieee80211_frame)) { 1631 dp_rx_info_rl("%pK: Invalid nbuf length: %u", soc, nbuf_len); 1632 goto free; 1633 } 1634 1635 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1636 if (!pdev) { 1637 dp_rx_info_rl("%pK: PDEV not found", soc); 1638 goto free; 1639 } 1640 1641 qdf_spin_lock_bh(&pdev->vdev_list_lock); 1642 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { 1643 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 1644 QDF_MAC_ADDR_SIZE) == 0) { 1645 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1646 goto out; 1647 } 1648 } 1649 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 1650 1651 if (!vdev) { 1652 dp_rx_info_rl("%pK: VDEV not found", soc); 1653 goto free; 1654 } 1655 1656 out: 1657 if (vdev->opmode == wlan_op_mode_ap) { 1658 peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0, 1659 vdev->vdev_id, 1660 DP_MOD_ID_RX_ERR); 1661 /* If SA is a valid peer in vdev, 1662 * don't send disconnect 1663 */ 1664 if (peer) { 1665 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1666 DP_STATS_INC(soc, rx.err.decrypt_err_drop, 1); 1667 dp_err_rl("invalid peer frame with correct SA/RA is freed"); 1668 goto free; 1669 } 1670 } 1671 1672 if (soc->cdp_soc.ol_ops->rx_invalid_peer) 1673 soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh); 1674 free: 1675 1676 /* Drop and free packet */ 1677 curr_nbuf = mpdu; 1678 while (curr_nbuf) { 1679 next_nbuf = qdf_nbuf_next(curr_nbuf); 1680 dp_rx_nbuf_free(curr_nbuf); 1681 curr_nbuf = next_nbuf; 1682 } 1683 1684 /* Reset the head and tail pointers */ 1685 pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1686 if (pdev) { 1687 pdev->invalid_peer_head_msdu = NULL; 1688 pdev->invalid_peer_tail_msdu = NULL; 1689 } 1690 1691 return 0; 1692 } 1693 1694 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1695 qdf_nbuf_t mpdu, bool mpdu_done, 1696 uint8_t mac_id) 1697 { 1698 /* Process the nbuf */ 1699 dp_rx_process_invalid_peer(soc, mpdu, mac_id); 1700 } 1701 #endif 1702 1703 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1704 1705 #ifdef RECEIVE_OFFLOAD 1706 /** 
1707 * dp_rx_print_offload_info() - Print offload info from RX TLV 1708 * @soc: dp soc handle 1709 * @msdu: MSDU for which the offload info is to be printed 1710 * @ofl_info: offload info saved in hal_offload_info structure 1711 * 1712 * Return: None 1713 */ 1714 static void dp_rx_print_offload_info(struct dp_soc *soc, 1715 qdf_nbuf_t msdu, 1716 struct hal_offload_info *ofl_info) 1717 { 1718 dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------"); 1719 dp_verbose_debug("lro_eligible 0x%x", 1720 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu)); 1721 dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu)); 1722 dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu)); 1723 dp_verbose_debug("TCP seq num 0x%x", ofl_info->tcp_seq_num); 1724 dp_verbose_debug("TCP ack num 0x%x", ofl_info->tcp_ack_num); 1725 dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu)); 1726 dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu)); 1727 dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu)); 1728 dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu)); 1729 dp_verbose_debug("---------------------------------------------------------"); 1730 } 1731 1732 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, 1733 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) 1734 { 1735 struct hal_offload_info offload_info; 1736 1737 if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) 1738 return; 1739 1740 if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info)) 1741 return; 1742 1743 *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1; 1744 1745 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible; 1746 QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack; 1747 QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) = 1748 hal_rx_tlv_get_tcp_chksum(soc->hal_soc, 1749 rx_tlv); 1750 QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win; 1751 QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto; 1752 QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto; 1753 QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset; 1754 QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id; 1755 1756 dp_rx_print_offload_info(soc, msdu, &offload_info); 1757 } 1758 #endif /* RECEIVE_OFFLOAD */ 1759 1760 /** 1761 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf. 1762 * 1763 * @soc: DP soc handle 1764 * @nbuf: pointer to msdu. 1765 * @mpdu_len: mpdu length 1766 * @l3_pad_len: L3 padding length by HW 1767 * 1768 * Return: returns true if nbuf is last msdu of mpdu else returns false. 1769 */ 1770 static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc, 1771 qdf_nbuf_t nbuf, 1772 uint16_t *mpdu_len, 1773 uint32_t l3_pad_len) 1774 { 1775 bool last_nbuf; 1776 uint32_t pkt_hdr_size; 1777 uint16_t buf_size; 1778 1779 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx); 1780 1781 pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len; 1782 1783 if ((*mpdu_len + pkt_hdr_size) > buf_size) { 1784 qdf_nbuf_set_pktlen(nbuf, buf_size); 1785 last_nbuf = false; 1786 *mpdu_len -= (buf_size - pkt_hdr_size); 1787 } else { 1788 qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size)); 1789 last_nbuf = true; 1790 *mpdu_len = 0; 1791 } 1792 1793 return last_nbuf; 1794 } 1795 1796 /** 1797 * dp_get_l3_hdr_pad_len() - get L3 header padding length. 1798 * 1799 * @soc: DP soc handle 1800 * @nbuf: pointer to msdu. 1801 * 1802 * Return: returns padding length in bytes. 
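 *
 * The padding length is read from the RX TLV metadata of the buffer whose
 * continuation bit is cleared, i.e. the last buffer of the scattered msdu.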
 */
static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
					     qdf_nbuf_t nbuf)
{
	uint32_t l3_hdr_pad = 0;
	uint8_t *rx_tlv_hdr;
	struct hal_rx_msdu_metadata msdu_metadata;

	while (nbuf) {
		if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			/* scattered msdu end with continuation is 0 */
			rx_tlv_hdr = qdf_nbuf_data(nbuf);
			hal_rx_msdu_metadata_get(soc->hal_soc,
						 rx_tlv_hdr,
						 &msdu_metadata);
			l3_hdr_pad = msdu_metadata.l3_hdr_pad;
			break;
		}
		nbuf = nbuf->next;
	}

	return l3_hdr_pad;
}

qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	qdf_nbuf_t parent, frag_list, next = NULL;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;
	uint32_t l3_hdr_pad_offset = 0;

	/*
	 * Use the msdu length from the REO entry descriptor instead, since
	 * there are cases where the RX PKT TLV is corrupted while the
	 * msdu_len from the REO descriptor is correct for non-raw RX
	 * scatter msdus.
	 */
	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/*
	 * This is the case where the complete msdu fits in one single nbuf.
	 * In this case HW sets both start and end bit and we only need to
	 * reset these bits for the RAW mode simulator to decap the pkt.
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		return nbuf;
	}

	l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);
	/*
	 * This is the case where we have multiple msdus (A-MSDU) spread
	 * across multiple nbufs. Here we create a fraglist out of these
	 * nbufs.
	 *
	 * The moment we encounter an nbuf with the continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach the last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * Set the start bit in the first nbuf we encounter with the
	 * continuation bit set. This has the proper mpdu length set as it
	 * is the first msdu of the mpdu. This becomes the parent nbuf and
	 * the subsequent nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	/*
	 * L3 header padding is only needed for the 1st buffer
	 * in a scattered msdu
	 */
	last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
					  l3_hdr_pad_offset);

	/*
	 * The MSDU continuation bit is set but the reported MPDU length can
	 * fit into a single buffer.
	 *
	 * Increment the error stats and avoid SG list creation.
	 */
	if (last_nbuf) {
		DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
		qdf_nbuf_pull_head(parent,
				   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
		return parent;
	}

	/*
	 * This is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
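	 * Each fragment is trimmed of its RX TLV header before its length
	 * is added to frag_list_len, which is then passed to
	 * qdf_nbuf_append_ext_list() below.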
1898 */ 1899 do { 1900 last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0); 1901 qdf_nbuf_pull_head(nbuf, 1902 soc->rx_pkt_tlv_size); 1903 frag_list_len += qdf_nbuf_len(nbuf); 1904 1905 if (last_nbuf) { 1906 next = nbuf->next; 1907 nbuf->next = NULL; 1908 break; 1909 } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1910 dp_err("Invalid packet length"); 1911 qdf_assert_always(0); 1912 } 1913 nbuf = nbuf->next; 1914 } while (!last_nbuf); 1915 1916 qdf_nbuf_set_rx_chfrag_start(nbuf, 0); 1917 qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len); 1918 parent->next = next; 1919 1920 qdf_nbuf_pull_head(parent, 1921 soc->rx_pkt_tlv_size + l3_hdr_pad_offset); 1922 return parent; 1923 } 1924 1925 #ifdef DP_RX_SG_FRAME_SUPPORT 1926 bool dp_rx_is_sg_supported(void) 1927 { 1928 return true; 1929 } 1930 #else 1931 bool dp_rx_is_sg_supported(void) 1932 { 1933 return false; 1934 } 1935 #endif 1936 1937 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1938 1939 #ifdef QCA_PEER_EXT_STATS 1940 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats, 1941 qdf_nbuf_t nbuf) 1942 { 1943 struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay; 1944 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); 1945 1946 dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack); 1947 } 1948 #endif /* QCA_PEER_EXT_STATS */ 1949 1950 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 1951 { 1952 uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); 1953 int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get()); 1954 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); 1955 uint8_t tid = qdf_nbuf_get_tid_val(nbuf); 1956 uint32_t interframe_delay = 1957 (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp); 1958 struct cdp_tid_rx_stats *rstats = 1959 &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1960 1961 dp_update_delay_stats(NULL, rstats, to_stack, tid, 1962 CDP_DELAY_STATS_REAP_STACK, ring_id, false); 1963 /* 1964 * Update interframe delay stats calculated at deliver_data_ol point. 1965 * Value of vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame, so 1966 * the interframe delay will not be calculated correctly for the 1st frame. 1967 * On the other hand, this helps in avoiding an extra per-packet check 1968 * of vdev->prev_rx_deliver_tstamp.
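 * For example, if the previous frame was delivered at t = 100 ms and this
 * frame at t = 108 ms, an interframe delay of 8 ms is accounted under
 * CDP_DELAY_STATS_RX_INTERFRAME.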
1969 */ 1970 dp_update_delay_stats(NULL, rstats, interframe_delay, tid, 1971 CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false); 1972 vdev->prev_rx_deliver_tstamp = current_ts; 1973 } 1974 1975 /** 1976 * dp_rx_drop_nbuf_list() - drop an nbuf list 1977 * @pdev: dp pdev reference 1978 * @buf_list: buffer list to be dropped 1979 * 1980 * Return: int (number of bufs dropped) 1981 */ 1982 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev, 1983 qdf_nbuf_t buf_list) 1984 { 1985 struct cdp_tid_rx_stats *stats = NULL; 1986 uint8_t tid = 0, ring_id = 0; 1987 int num_dropped = 0; 1988 qdf_nbuf_t buf, next_buf; 1989 1990 buf = buf_list; 1991 while (buf) { 1992 ring_id = QDF_NBUF_CB_RX_CTX_ID(buf); 1993 next_buf = qdf_nbuf_queue_next(buf); 1994 tid = qdf_nbuf_get_tid_val(buf); 1995 if (qdf_likely(pdev)) { 1996 stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1997 stats->fail_cnt[INVALID_PEER_VDEV]++; 1998 stats->delivered_to_stack--; 1999 } 2000 dp_rx_nbuf_free(buf); 2001 buf = next_buf; 2002 num_dropped++; 2003 } 2004 2005 return num_dropped; 2006 } 2007 2008 #ifdef QCA_SUPPORT_WDS_EXTENDED 2009 /** 2010 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta 2011 * @soc: core txrx main context 2012 * @vdev: vdev 2013 * @txrx_peer: txrx peer 2014 * @nbuf_head: skb list head 2015 * 2016 * Return: true if packet is delivered to netdev per STA. 2017 */ 2018 bool 2019 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 2020 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 2021 { 2022 /* 2023 * When extended WDS is disabled, frames are sent to AP netdevice. 2024 */ 2025 if (qdf_likely(!vdev->wds_ext_enabled)) 2026 return false; 2027 2028 /* 2029 * There can be 2 cases: 2030 * 1. Send frame to parent netdev if it is not for netdev per STA 2031 * 2. If frame is meant for netdev per STA: 2032 * a. Send frame to appropriate netdev using registered fp. 2033 * b. If fp is NULL, drop the frames. 2034 */ 2035 if (!txrx_peer->wds_ext.init) 2036 return false; 2037 2038 if (txrx_peer->osif_rx) 2039 txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head); 2040 else 2041 dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 2042 2043 return true; 2044 } 2045 2046 #else 2047 static inline bool 2048 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 2049 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 2050 { 2051 return false; 2052 } 2053 #endif 2054 2055 #ifdef PEER_CACHE_RX_PKTS 2056 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 2057 /** 2058 * dp_set_nbuf_band() - Set band in nbuf cb 2059 * @peer: dp_peer 2060 * @nbuf: nbuf 2061 * 2062 * Return: None 2063 */ 2064 static inline void 2065 dp_set_nbuf_band(struct dp_peer *peer, qdf_nbuf_t nbuf) 2066 { 2067 uint8_t link_id = 0; 2068 2069 link_id = dp_rx_get_stats_arr_idx_from_link_id(nbuf, peer->txrx_peer); 2070 dp_rx_set_nbuf_band(nbuf, peer->txrx_peer, link_id); 2071 } 2072 #else 2073 static inline void 2074 dp_set_nbuf_band(struct dp_peer *peer, qdf_nbuf_t nbuf) 2075 { 2076 } 2077 #endif 2078 2079 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) 2080 { 2081 struct dp_peer_cached_bufq *bufqi; 2082 struct dp_rx_cached_buf *cache_buf = NULL; 2083 ol_txrx_rx_fp data_rx = NULL; 2084 int num_buff_elem; 2085 QDF_STATUS status; 2086 2087 /* 2088 * Flush dp cached frames only for mld peers and legacy peers, as 2089 * link peers don't store cached frames 2090 */ 2091 if (IS_MLO_DP_LINK_PEER(peer)) 2092 return; 2093 2094 if (!peer->txrx_peer) { 2095 dp_err("txrx_peer NULL!!
peer mac_addr("QDF_MAC_ADDR_FMT")", 2096 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 2097 return; 2098 } 2099 2100 if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) { 2101 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 2102 return; 2103 } 2104 2105 qdf_spin_lock_bh(&peer->peer_info_lock); 2106 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) 2107 data_rx = peer->vdev->osif_rx; 2108 else 2109 drop = true; 2110 qdf_spin_unlock_bh(&peer->peer_info_lock); 2111 2112 bufqi = &peer->txrx_peer->bufq_info; 2113 2114 qdf_spin_lock_bh(&bufqi->bufq_lock); 2115 qdf_list_remove_front(&bufqi->cached_bufq, 2116 (qdf_list_node_t **)&cache_buf); 2117 while (cache_buf) { 2118 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( 2119 cache_buf->buf); 2120 bufqi->entries -= num_buff_elem; 2121 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2122 if (drop) { 2123 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, 2124 cache_buf->buf); 2125 } else { 2126 dp_set_nbuf_band(peer, cache_buf->buf); 2127 /* Flush the cached frames to OSIF DEV */ 2128 status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); 2129 if (status != QDF_STATUS_SUCCESS) 2130 bufqi->dropped = dp_rx_drop_nbuf_list( 2131 peer->vdev->pdev, 2132 cache_buf->buf); 2133 } 2134 qdf_mem_free(cache_buf); 2135 cache_buf = NULL; 2136 qdf_spin_lock_bh(&bufqi->bufq_lock); 2137 qdf_list_remove_front(&bufqi->cached_bufq, 2138 (qdf_list_node_t **)&cache_buf); 2139 } 2140 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2141 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 2142 } 2143 2144 /** 2145 * dp_rx_enqueue_rx() - cache rx frames 2146 * @peer: peer 2147 * @txrx_peer: DP txrx_peer 2148 * @rx_buf_list: cache buffer list 2149 * 2150 * Return: None 2151 */ 2152 static QDF_STATUS 2153 dp_rx_enqueue_rx(struct dp_peer *peer, 2154 struct dp_txrx_peer *txrx_peer, 2155 qdf_nbuf_t rx_buf_list) 2156 { 2157 struct dp_rx_cached_buf *cache_buf; 2158 struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info; 2159 int num_buff_elem; 2160 QDF_STATUS ret = QDF_STATUS_SUCCESS; 2161 struct dp_soc *soc = txrx_peer->vdev->pdev->soc; 2162 struct dp_peer *ta_peer = NULL; 2163 2164 /* 2165 * If peer id is invalid which likely peer map has not completed, 2166 * then need caller provide dp_peer pointer, else it's ok to use 2167 * txrx_peer->peer_id to get dp_peer. 
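 * (For example, dp_rx_deliver_to_stack_no_peer() below is one such caller:
 * for an MLO connection it looks up the MLD dp_peer via
 * dp_peer_get_tgt_peer_by_id() and passes it in explicitly, since the link
 * peer's txrx_peer does not exist yet.)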
2168 */ 2169 if (peer) { 2170 if (QDF_STATUS_SUCCESS == 2171 dp_peer_get_ref(soc, peer, DP_MOD_ID_RX)) 2172 ta_peer = peer; 2173 } else { 2174 ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2175 DP_MOD_ID_RX); 2176 } 2177 2178 if (!ta_peer) { 2179 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2180 rx_buf_list); 2181 return QDF_STATUS_E_INVAL; 2182 } 2183 2184 dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, 2185 bufqi->dropped); 2186 if (!ta_peer->valid) { 2187 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2188 rx_buf_list); 2189 ret = QDF_STATUS_E_INVAL; 2190 goto fail; 2191 } 2192 2193 qdf_spin_lock_bh(&bufqi->bufq_lock); 2194 if (bufqi->entries >= bufqi->thresh) { 2195 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2196 rx_buf_list); 2197 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2198 ret = QDF_STATUS_E_RESOURCES; 2199 goto fail; 2200 } 2201 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2202 2203 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); 2204 2205 cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); 2206 if (!cache_buf) { 2207 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2208 "Failed to allocate buf to cache rx frames"); 2209 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2210 rx_buf_list); 2211 ret = QDF_STATUS_E_NOMEM; 2212 goto fail; 2213 } 2214 2215 cache_buf->buf = rx_buf_list; 2216 2217 qdf_spin_lock_bh(&bufqi->bufq_lock); 2218 qdf_list_insert_back(&bufqi->cached_bufq, 2219 &cache_buf->node); 2220 bufqi->entries += num_buff_elem; 2221 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2222 2223 fail: 2224 dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX); 2225 return ret; 2226 } 2227 2228 static inline 2229 bool dp_rx_is_peer_cache_bufq_supported(void) 2230 { 2231 return true; 2232 } 2233 #else 2234 static inline 2235 bool dp_rx_is_peer_cache_bufq_supported(void) 2236 { 2237 return false; 2238 } 2239 2240 static inline QDF_STATUS 2241 dp_rx_enqueue_rx(struct dp_peer *peer, 2242 struct dp_txrx_peer *txrx_peer, 2243 qdf_nbuf_t rx_buf_list) 2244 { 2245 return QDF_STATUS_SUCCESS; 2246 } 2247 #endif 2248 2249 #ifndef DELIVERY_TO_STACK_STATUS_CHECK 2250 /** 2251 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2252 * using the appropriate call back functions. 2253 * @soc: soc 2254 * @vdev: vdev 2255 * @txrx_peer: peer 2256 * @nbuf_head: skb list head 2257 * 2258 * Return: None 2259 */ 2260 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2261 struct dp_vdev *vdev, 2262 struct dp_txrx_peer *txrx_peer, 2263 qdf_nbuf_t nbuf_head) 2264 { 2265 if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev, 2266 txrx_peer, nbuf_head))) 2267 return; 2268 2269 /* Function pointer initialized only when FISA is enabled */ 2270 if (vdev->osif_fisa_rx) 2271 /* on failure send it via regular path */ 2272 vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2273 else 2274 vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2275 } 2276 2277 #else 2278 /** 2279 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2280 * using the appropriate call back functions. 2281 * @soc: soc 2282 * @vdev: vdev 2283 * @txrx_peer: txrx peer 2284 * @nbuf_head: skb list head 2285 * 2286 * Check the return status of the call back function and drop 2287 * the packets if the return status indicates a failure. 
2288 * 2289 * Return: None 2290 */ 2291 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2292 struct dp_vdev *vdev, 2293 struct dp_txrx_peer *txrx_peer, 2294 qdf_nbuf_t nbuf_head) 2295 { 2296 int num_nbuf = 0; 2297 QDF_STATUS ret_val = QDF_STATUS_E_FAILURE; 2298 2299 /* Function pointer initialized only when FISA is enabled */ 2300 if (vdev->osif_fisa_rx) 2301 /* on failure send it via regular path */ 2302 ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2303 else if (vdev->osif_rx) 2304 ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2305 2306 if (!QDF_IS_STATUS_SUCCESS(ret_val)) { 2307 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 2308 DP_STATS_INC(soc, rx.err.rejected, num_nbuf); 2309 if (txrx_peer) 2310 DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num, 2311 num_nbuf); 2312 } 2313 } 2314 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ 2315 2316 /** 2317 * dp_rx_validate_rx_callbacks() - validate rx callbacks 2318 * @soc: DP soc 2319 * @vdev: DP vdev handle 2320 * @txrx_peer: pointer to the txrx peer object 2321 * @nbuf_head: skb list head 2322 * 2323 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 2324 * QDF_STATUS_E_FAILURE 2325 */ 2326 static inline QDF_STATUS 2327 dp_rx_validate_rx_callbacks(struct dp_soc *soc, 2328 struct dp_vdev *vdev, 2329 struct dp_txrx_peer *txrx_peer, 2330 qdf_nbuf_t nbuf_head) 2331 { 2332 int num_nbuf; 2333 2334 if (qdf_unlikely(!vdev || vdev->delete.pending)) { 2335 num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head); 2336 /* 2337 * This is a special case where vdev is invalid, 2338 * so we cannot know the pdev to which this packet 2339 * belonged. Hence we update the soc rx error stats. 2340 */ 2341 DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf); 2342 return QDF_STATUS_E_FAILURE; 2343 } 2344 2345 /* 2346 * highly unlikely to have a vdev without a registered rx 2347 * callback function. if so let us free the nbuf_list. 
2348 */ 2349 if (qdf_unlikely(!vdev->osif_rx)) { 2350 if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) { 2351 dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head); 2352 } else { 2353 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, 2354 nbuf_head); 2355 DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf, 2356 vdev->pdev->enhanced_stats_en); 2357 } 2358 return QDF_STATUS_E_FAILURE; 2359 } 2360 2361 return QDF_STATUS_SUCCESS; 2362 } 2363 2364 #if defined(WLAN_FEATURE_11BE_MLO) && defined(RAW_PKT_MLD_ADDR_CONVERSION) 2365 static void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc, 2366 struct dp_vdev *vdev, 2367 struct dp_txrx_peer *txrx_peer, 2368 qdf_nbuf_t nbuf_head) 2369 { 2370 qdf_nbuf_t nbuf, next; 2371 struct dp_peer *peer = NULL; 2372 struct ieee80211_frame *wh = NULL; 2373 2374 if (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi) 2375 return; 2376 2377 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2378 DP_MOD_ID_RX); 2379 2380 if (!peer) 2381 return; 2382 2383 if (!IS_MLO_DP_MLD_PEER(peer)) { 2384 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2385 return; 2386 } 2387 2388 nbuf = nbuf_head; 2389 while (nbuf) { 2390 next = nbuf->next; 2391 wh = (struct ieee80211_frame *)qdf_nbuf_data(nbuf); 2392 qdf_mem_copy(wh->i_addr1, vdev->mld_mac_addr.raw, 2393 QDF_MAC_ADDR_SIZE); 2394 qdf_mem_copy(wh->i_addr2, peer->mac_addr.raw, 2395 QDF_MAC_ADDR_SIZE); 2396 nbuf = next; 2397 } 2398 2399 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2400 } 2401 #else 2402 static inline 2403 void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc, 2404 struct dp_vdev *vdev, 2405 struct dp_txrx_peer *txrx_peer, 2406 qdf_nbuf_t nbuf_head) 2407 { } 2408 #endif 2409 2410 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 2411 struct dp_vdev *vdev, 2412 struct dp_txrx_peer *txrx_peer, 2413 qdf_nbuf_t nbuf_head, 2414 qdf_nbuf_t nbuf_tail) 2415 { 2416 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2417 QDF_STATUS_SUCCESS) 2418 return QDF_STATUS_E_FAILURE; 2419 2420 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || 2421 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { 2422 dp_rx_raw_pkt_mld_addr_conv(soc, vdev, txrx_peer, nbuf_head); 2423 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, 2424 &nbuf_tail); 2425 } 2426 2427 dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head); 2428 2429 return QDF_STATUS_SUCCESS; 2430 } 2431 2432 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 2433 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 2434 struct dp_vdev *vdev, 2435 struct dp_txrx_peer *txrx_peer, 2436 qdf_nbuf_t nbuf_head, 2437 qdf_nbuf_t nbuf_tail) 2438 { 2439 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2440 QDF_STATUS_SUCCESS) 2441 return QDF_STATUS_E_FAILURE; 2442 2443 vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head); 2444 2445 return QDF_STATUS_SUCCESS; 2446 } 2447 #endif 2448 2449 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2450 #ifdef VDEV_PEER_PROTOCOL_COUNT 2451 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \ 2452 { \ 2453 qdf_nbuf_t nbuf_local; \ 2454 struct dp_txrx_peer *txrx_peer_local; \ 2455 struct dp_vdev *vdev_local = vdev_hdl; \ 2456 do { \ 2457 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ 2458 break; \ 2459 nbuf_local = nbuf; \ 2460 txrx_peer_local = txrx_peer; \ 2461 if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ 2462 break; \ 2463 else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ 2464 break; \ 2465 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ 2466 (nbuf_local), \ 2467 
(txrx_peer_local), 0, 1); \ 2468 } while (0); \ 2469 } 2470 #else 2471 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) 2472 #endif 2473 2474 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER 2475 /** 2476 * dp_rx_rates_stats_update() - update rate stats 2477 * from rx msdu. 2478 * @soc: datapath soc handle 2479 * @nbuf: received msdu buffer 2480 * @rx_tlv_hdr: rx tlv header 2481 * @txrx_peer: datapath txrx_peer handle 2482 * @sgi: Short Guard Interval 2483 * @mcs: Modulation and Coding Set 2484 * @nss: Number of Spatial Streams 2485 * @bw: BandWidth 2486 * @pkt_type: Corresponds to preamble 2487 * @link_id: Link Id on which packet is received 2488 * 2489 * To record rates precisely, the following factors are considered: 2490 * specific frames (ARP, DHCP, SSDP, etc.) are excluded, and the 2491 * impact on rx throughput is kept as small as possible. 2492 * 2493 * Return: void 2494 */ 2495 static void 2496 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2497 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2498 uint32_t sgi, uint32_t mcs, 2499 uint32_t nss, uint32_t bw, uint32_t pkt_type, 2500 uint8_t link_id) 2501 { 2502 uint32_t rix; 2503 uint16_t ratecode; 2504 uint32_t avg_rx_rate; 2505 uint32_t ratekbps; 2506 enum cdp_punctured_modes punc_mode = NO_PUNCTURE; 2507 2508 if (soc->high_throughput || 2509 dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) { 2510 return; 2511 } 2512 2513 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs, link_id); 2514 2515 /* In 11b mode, the nss we get from the tlv is 0, which is invalid; it should be 1 */ 2516 if (qdf_unlikely(pkt_type == DOT11_B)) 2517 nss = 1; 2518 2519 /* here pkt_type corresponds to preamble */ 2520 ratekbps = dp_getrateindex(sgi, 2521 mcs, 2522 nss - 1, 2523 pkt_type, 2524 bw, 2525 punc_mode, 2526 &rix, 2527 &ratecode); 2528 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps, link_id); 2529 avg_rx_rate = 2530 dp_ath_rate_lpf( 2531 txrx_peer->stats[link_id].extd_stats.rx.avg_rx_rate, 2532 ratekbps); 2533 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate, link_id); 2534 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss, link_id); 2535 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs, link_id); 2536 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw, link_id); 2537 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi, link_id); 2538 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type, link_id); 2539 } 2540 #else 2541 static inline void 2542 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2543 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2544 uint32_t sgi, uint32_t mcs, 2545 uint32_t nss, uint32_t bw, uint32_t pkt_type, 2546 uint8_t link_id) 2547 { 2548 } 2549 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */ 2550 2551 #ifndef QCA_ENHANCED_STATS_SUPPORT 2552 /** 2553 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer 2554 * 2555 * @soc: datapath soc handle 2556 * @nbuf: received msdu buffer 2557 * @rx_tlv_hdr: rx tlv header 2558 * @txrx_peer: datapath txrx_peer handle 2559 * @link_id: link id on which the packet is received 2560 * 2561 * Return: void 2562 */ 2563 static inline 2564 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2565 uint8_t *rx_tlv_hdr, 2566 struct dp_txrx_peer *txrx_peer, 2567 uint8_t link_id) 2568 { 2569 bool is_ampdu; 2570 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; 2571 uint8_t dst_mcs_idx; 2572 2573 /* 2574 * TODO - For KIWI this field is present in ring_desc 2575 * Try to use ring desc instead of tlv.
2576 */ 2577 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr); 2578 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu, link_id); 2579 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu), 2580 link_id); 2581 2582 sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr); 2583 mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 2584 tid = qdf_nbuf_get_tid_val(nbuf); 2585 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 2586 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, 2587 rx_tlv_hdr); 2588 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 2589 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 2590 /* do HW to SW pkt type conversion */ 2591 pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX : 2592 hal_2_dp_pkt_type_map[pkt_type]); 2593 2594 /* 2595 * The MCS index does not start with 0 when NSS>1 in HT mode. 2596 * MCS params for optional 20/40MHz, NSS=1~3, EQM(NSS>1): 2597 * ------------------------------------------------------ 2598 * NSS | 1 | 2 | 3 | 4 2599 * ------------------------------------------------------ 2600 * MCS index: HT20 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31 2601 * ------------------------------------------------------ 2602 * MCS index: HT40 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31 2603 * ------------------------------------------------------ 2604 * Currently, the MAX_NSS=2. If NSS>2, MCS index = 8 * (NSS-1) 2605 */ 2606 if ((pkt_type == DOT11_N) && (nss == 2)) 2607 mcs += 8; 2608 2609 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1, 2610 ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)), 2611 link_id); 2612 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, 2613 ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)), 2614 link_id); 2615 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1, link_id); 2616 /* 2617 * only if nss > 0 and pkt_type is 11N/AC/AX, 2618 * then increase index [nss - 1] in array counter. 
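 * e.g. an HT (DOT11_N) msdu received with nss = 2 increments rx.nss[1].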
2619 */ 2620 if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type)) 2621 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1, link_id); 2622 2623 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1, link_id); 2624 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1, 2625 hal_rx_tlv_mic_err_get(soc->hal_soc, 2626 rx_tlv_hdr), link_id); 2627 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1, 2628 hal_rx_tlv_decrypt_err_get(soc->hal_soc, 2629 rx_tlv_hdr), link_id); 2630 2631 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1, 2632 link_id); 2633 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1, 2634 link_id); 2635 2636 dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs); 2637 if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx) 2638 DP_PEER_EXTD_STATS_INC(txrx_peer, 2639 rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx], 2640 1, link_id); 2641 2642 dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, 2643 sgi, mcs, nss, bw, pkt_type, link_id); 2644 } 2645 #else 2646 static inline 2647 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2648 uint8_t *rx_tlv_hdr, 2649 struct dp_txrx_peer *txrx_peer, 2650 uint8_t link_id) 2651 { 2652 } 2653 #endif 2654 2655 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO) 2656 static inline void 2657 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2658 qdf_nbuf_t nbuf, uint8_t link_id) 2659 { 2660 uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf); 2661 2662 if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) { 2663 dp_err_rl("Invalid lmac_id: %u vdev_id: %u", 2664 lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf)); 2665 2666 if (qdf_likely(txrx_peer)) 2667 dp_err_rl("peer_id: %u", txrx_peer->peer_id); 2668 2669 return; 2670 } 2671 2672 /* only count stats per lmac for MLO connection*/ 2673 DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1, 2674 QDF_NBUF_CB_RX_PKT_LEN(nbuf), 2675 txrx_peer->is_mld_peer, link_id); 2676 } 2677 #else 2678 static inline void 2679 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2680 qdf_nbuf_t nbuf, uint8_t link_id) 2681 { 2682 } 2683 #endif 2684 2685 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2686 uint8_t *rx_tlv_hdr, 2687 struct dp_txrx_peer *txrx_peer, 2688 uint8_t ring_id, 2689 struct cdp_tid_rx_stats *tid_stats, 2690 uint8_t link_id) 2691 { 2692 bool is_not_amsdu; 2693 struct dp_vdev *vdev = txrx_peer->vdev; 2694 uint8_t enh_flag; 2695 qdf_ether_header_t *eh; 2696 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2697 2698 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); 2699 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 2700 qdf_nbuf_is_rx_chfrag_end(nbuf); 2701 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, 2702 msdu_len, link_id); 2703 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, 2704 is_not_amsdu, link_id); 2705 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, 2706 !is_not_amsdu, link_id); 2707 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1, 2708 qdf_nbuf_is_rx_retry_flag(nbuf), link_id); 2709 dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf, link_id); 2710 tid_stats->msdu_cnt++; 2711 enh_flag = vdev->pdev->enhanced_stats_en; 2712 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && 2713 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 2714 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2715 DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag, link_id); 2716 tid_stats->mcast_msdu_cnt++; 2717 if 
(QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 2718 DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, 2719 enh_flag, link_id); 2720 tid_stats->bcast_msdu_cnt++; 2721 } 2722 } else { 2723 DP_PEER_UC_INCC_PKT(txrx_peer, 1, msdu_len, 2724 enh_flag, link_id); 2725 } 2726 2727 txrx_peer->stats[link_id].per_pkt_stats.rx.last_rx_ts = 2728 qdf_system_ticks(); 2729 2730 dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, 2731 txrx_peer, link_id); 2732 } 2733 2734 #ifndef WDS_VENDOR_EXTENSION 2735 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, 2736 struct dp_vdev *vdev, 2737 struct dp_txrx_peer *txrx_peer) 2738 { 2739 return 1; 2740 } 2741 #endif 2742 2743 #ifdef DP_RX_PKT_NO_PEER_DELIVER 2744 #ifdef DP_RX_UDP_OVER_PEER_ROAM 2745 /** 2746 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received 2747 * during roaming 2748 * @vdev: dp_vdev pointer 2749 * @rx_tlv_hdr: rx tlv header 2750 * @nbuf: pkt skb pointer 2751 * 2752 * This function will check if rx udp data is received from authorised 2753 * roamed peer before peer map indication is received from FW after 2754 * roaming. This is needed for VoIP scenarios in which packet loss 2755 * expected during roaming is minimal. 2756 * 2757 * Return: bool 2758 */ 2759 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2760 uint8_t *rx_tlv_hdr, 2761 qdf_nbuf_t nbuf) 2762 { 2763 char *hdr_desc; 2764 struct ieee80211_frame *wh = NULL; 2765 2766 hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc, 2767 rx_tlv_hdr); 2768 wh = (struct ieee80211_frame *)hdr_desc; 2769 2770 if (vdev->roaming_peer_status == 2771 WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED && 2772 !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2, 2773 QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) || 2774 qdf_nbuf_is_ipv6_udp_pkt(nbuf))) 2775 return true; 2776 2777 return false; 2778 } 2779 #else 2780 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2781 uint8_t *rx_tlv_hdr, 2782 qdf_nbuf_t nbuf) 2783 { 2784 return false; 2785 } 2786 #endif 2787 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2788 { 2789 uint16_t peer_id; 2790 uint8_t vdev_id; 2791 struct dp_vdev *vdev = NULL; 2792 uint32_t l2_hdr_offset = 0; 2793 uint16_t msdu_len = 0; 2794 uint32_t pkt_len = 0; 2795 uint8_t *rx_tlv_hdr; 2796 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 2797 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; 2798 bool is_special_frame = false; 2799 struct dp_peer *peer = NULL; 2800 2801 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 2802 if (peer_id > soc->max_peer_id) 2803 goto deliver_fail; 2804 2805 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); 2806 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX); 2807 if (!vdev || vdev->delete.pending) 2808 goto deliver_fail; 2809 2810 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) 2811 goto deliver_fail; 2812 2813 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2814 l2_hdr_offset = 2815 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 2816 2817 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2818 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 2819 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 2820 2821 qdf_nbuf_set_pktlen(nbuf, pkt_len); 2822 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset); 2823 2824 is_special_frame = dp_rx_is_special_frame(nbuf, frame_mask); 2825 if (qdf_likely(vdev->osif_rx)) { 2826 if (is_special_frame || 2827 dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, 2828 nbuf)) { 2829 qdf_nbuf_set_exc_frame(nbuf, 1); 2830 if (QDF_STATUS_SUCCESS != 
2831 vdev->osif_rx(vdev->osif_vdev, nbuf)) 2832 goto deliver_fail; 2833 2834 DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1); 2835 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2836 return; 2837 } 2838 } else if (is_special_frame) { 2839 /* 2840 * If MLO connection, txrx_peer for link peer does not exist, 2841 * try to store these RX packets to txrx_peer's bufq of MLD 2842 * peer until vdev->osif_rx is registered from CP and flush 2843 * them to stack. 2844 */ 2845 peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, 2846 DP_MOD_ID_RX); 2847 if (!peer) 2848 goto deliver_fail; 2849 2850 /* only check for MLO connection */ 2851 if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer && 2852 dp_rx_is_peer_cache_bufq_supported()) { 2853 qdf_nbuf_set_exc_frame(nbuf, 1); 2854 2855 if (QDF_STATUS_SUCCESS == 2856 dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) { 2857 DP_STATS_INC(soc, 2858 rx.err.pkt_delivered_no_peer, 2859 1); 2860 } else { 2861 DP_STATS_INC(soc, 2862 rx.err.rx_invalid_peer.num, 2863 1); 2864 } 2865 2866 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2867 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2868 return; 2869 } 2870 2871 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2872 } 2873 2874 deliver_fail: 2875 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2876 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2877 dp_rx_nbuf_free(nbuf); 2878 if (vdev) 2879 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2880 } 2881 #else 2882 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2883 { 2884 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2885 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2886 dp_rx_nbuf_free(nbuf); 2887 } 2888 #endif 2889 2890 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2891 2892 #ifdef WLAN_SUPPORT_RX_FISA 2893 QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id, 2894 enum cdp_fisa_config_id config_id, 2895 union cdp_fisa_config *cfg) 2896 { 2897 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 2898 struct dp_pdev *pdev; 2899 QDF_STATUS status; 2900 2901 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2902 if (!pdev) { 2903 dp_err("pdev is NULL for pdev_id %u", pdev_id); 2904 return QDF_STATUS_E_INVAL; 2905 } 2906 2907 switch (config_id) { 2908 case CDP_FISA_HTT_RX_FISA_CFG: 2909 status = dp_htt_rx_fisa_config(pdev, cfg->fisa_config); 2910 break; 2911 case CDP_FISA_HTT_RX_FSE_OP_CFG: 2912 status = dp_htt_rx_flow_fse_operation(pdev, cfg->fse_op_cmd); 2913 break; 2914 case CDP_FISA_HTT_RX_FSE_SETUP_CFG: 2915 status = dp_htt_rx_flow_fst_setup(pdev, cfg->fse_setup_info); 2916 break; 2917 default: 2918 status = QDF_STATUS_E_INVAL; 2919 } 2920 2921 return status; 2922 } 2923 2924 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2925 { 2926 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; 2927 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2928 } 2929 #else 2930 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2931 { 2932 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2933 } 2934 #endif 2935 2936 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2937 2938 #ifdef DP_RX_DROP_RAW_FRM 2939 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) 2940 { 2941 if (qdf_nbuf_is_raw_frame(nbuf)) { 2942 dp_rx_nbuf_free(nbuf); 2943 return true; 2944 } 2945 2946 return false; 2947 } 2948 #endif 2949 2950 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 2951 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) 2952 { 2953 DP_STATS_INC_PKT(soc, rx.ingress, 1, 2954 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2955 } 2956 #endif 2957 2958 
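/*
 * Illustrative usage of the two helpers above (a sketch only: the real call
 * sites live in the per-target rx fast paths, and next_rx_nbuf() is a
 * hypothetical placeholder for reaping the next buffer):
 *
 *	while ((nbuf = next_rx_nbuf()) != NULL) {
 *		dp_rx_update_stats(soc, nbuf);      // ingress pkt/byte accounting
 *		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf)))
 *			continue;                   // raw frame, nbuf already freed
 *		...
 *	}
 */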
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2 2959 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2960 uint16_t peer_id, uint32_t is_offload, 2961 qdf_nbuf_t netbuf) 2962 { 2963 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2964 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf, 2965 peer_id, is_offload, pdev->pdev_id); 2966 } 2967 2968 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2969 uint32_t is_offload) 2970 { 2971 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2972 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER, 2973 soc, nbuf, HTT_INVALID_VDEV, 2974 is_offload, 0); 2975 } 2976 #endif 2977 2978 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2979 2980 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) 2981 { 2982 QDF_STATUS ret; 2983 2984 if (vdev->osif_rx_flush) { 2985 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); 2986 if (!QDF_IS_STATUS_SUCCESS(ret)) { 2987 dp_err("Failed to flush rx pkts for vdev %d", 2988 vdev->vdev_id); 2989 return ret; 2990 } 2991 } 2992 2993 return QDF_STATUS_SUCCESS; 2994 } 2995 2996 static QDF_STATUS 2997 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, 2998 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 2999 struct dp_pdev *dp_pdev, 3000 struct rx_desc_pool *rx_desc_pool, 3001 bool dp_buf_page_frag_alloc_enable) 3002 { 3003 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 3004 3005 if (dp_buf_page_frag_alloc_enable) { 3006 (nbuf_frag_info_t->virt_addr).nbuf = 3007 qdf_nbuf_frag_alloc(dp_soc->osdev, 3008 rx_desc_pool->buf_size, 3009 RX_BUFFER_RESERVATION, 3010 rx_desc_pool->buf_alignment, FALSE); 3011 } else { 3012 (nbuf_frag_info_t->virt_addr).nbuf = 3013 qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, 3014 RX_BUFFER_RESERVATION, 3015 rx_desc_pool->buf_alignment, FALSE); 3016 } 3017 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 3018 dp_err("nbuf alloc failed"); 3019 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 3020 return ret; 3021 } 3022 3023 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, 3024 (nbuf_frag_info_t->virt_addr).nbuf, 3025 QDF_DMA_FROM_DEVICE, 3026 rx_desc_pool->buf_size); 3027 3028 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 3029 qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf); 3030 dp_err("nbuf map failed"); 3031 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 3032 return ret; 3033 } 3034 3035 nbuf_frag_info_t->paddr = 3036 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 3037 3038 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 3039 &nbuf_frag_info_t->paddr, 3040 rx_desc_pool); 3041 if (ret == QDF_STATUS_E_FAILURE) { 3042 dp_err("nbuf check x86 failed"); 3043 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 3044 return ret; 3045 } 3046 3047 return QDF_STATUS_SUCCESS; 3048 } 3049 3050 QDF_STATUS 3051 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 3052 struct dp_srng *dp_rxdma_srng, 3053 struct rx_desc_pool *rx_desc_pool, 3054 uint32_t num_req_buffers) 3055 { 3056 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 3057 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; 3058 union dp_rx_desc_list_elem_t *next; 3059 void *rxdma_ring_entry; 3060 qdf_dma_addr_t paddr; 3061 struct dp_rx_nbuf_frag_info *nf_info; 3062 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; 3063 uint32_t buffer_index, nbuf_ptrs_per_page; 3064 qdf_nbuf_t nbuf; 3065 QDF_STATUS ret; 3066 int page_idx, total_pages; 3067 union dp_rx_desc_list_elem_t *desc_list = NULL; 3068 union dp_rx_desc_list_elem_t *tail 
= NULL; 3069 int sync_hw_ptr = 1; 3070 uint32_t num_entries_avail; 3071 bool dp_buf_page_frag_alloc_enable; 3072 3073 if (qdf_unlikely(!dp_pdev)) { 3074 dp_rx_err("%pK: pdev is null for mac_id = %d", 3075 dp_soc, mac_id); 3076 return QDF_STATUS_E_FAILURE; 3077 } 3078 3079 dp_buf_page_frag_alloc_enable = 3080 wlan_cfg_is_dp_buf_page_frag_alloc_enable(dp_soc->wlan_cfg_ctx); 3081 3082 if (qdf_unlikely(!rxdma_srng)) { 3083 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3084 return QDF_STATUS_E_FAILURE; 3085 } 3086 3087 dp_debug("requested %u RX buffers for driver attach", num_req_buffers); 3088 3089 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3090 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 3091 rxdma_srng, 3092 sync_hw_ptr); 3093 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3094 3095 if (!num_entries_avail) { 3096 dp_err("Num of available entries is zero, nothing to do"); 3097 return QDF_STATUS_E_NOMEM; 3098 } 3099 3100 if (num_entries_avail < num_req_buffers) 3101 num_req_buffers = num_entries_avail; 3102 3103 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, 3104 num_req_buffers, &desc_list, &tail); 3105 if (!nr_descs) { 3106 dp_err("no free rx_descs in freelist"); 3107 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 3108 return QDF_STATUS_E_NOMEM; 3109 } 3110 3111 dp_debug("got %u RX descs for driver attach", nr_descs); 3112 3113 /* 3114 * Try to allocate pointers to the nbuf one page at a time. 3115 * Take pointers that can fit in one page of memory and 3116 * iterate through the total descriptors that need to be 3117 * allocated in order of pages. Reuse the pointers that 3118 * have been allocated to fit in one page across each 3119 * iteration to index into the nbuf. 3120 */ 3121 total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE; 3122 3123 /* 3124 * Add an extra page to store the remainder if any 3125 */ 3126 if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE) 3127 total_pages++; 3128 nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE); 3129 if (!nf_info) { 3130 dp_err("failed to allocate nbuf array"); 3131 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3132 QDF_BUG(0); 3133 return QDF_STATUS_E_NOMEM; 3134 } 3135 nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info); 3136 3137 for (page_idx = 0; page_idx < total_pages; page_idx++) { 3138 qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE); 3139 3140 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { 3141 /* 3142 * The last page of buffer pointers may not be required 3143 * completely based on the number of descriptors. Below 3144 * check will ensure we are allocating only the 3145 * required number of descriptors. 
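 * For example, if the nf_info entries needed for nr_descs do not fill the
 * last page completely, total_pages is rounded up by one and the inner loop
 * below exits early once nr_nbuf_total reaches nr_descs.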
3146 */ 3147 if (nr_nbuf_total >= nr_descs) 3148 break; 3149 /* Flag is set while pdev rx_desc_pool initialization */ 3150 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3151 ret = dp_pdev_frag_alloc_and_map(dp_soc, 3152 &nf_info[nr_nbuf], dp_pdev, 3153 rx_desc_pool); 3154 else 3155 ret = dp_pdev_nbuf_alloc_and_map(dp_soc, 3156 &nf_info[nr_nbuf], dp_pdev, 3157 rx_desc_pool, 3158 dp_buf_page_frag_alloc_enable); 3159 if (QDF_IS_STATUS_ERROR(ret)) 3160 break; 3161 3162 nr_nbuf_total++; 3163 } 3164 3165 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3166 3167 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { 3168 rxdma_ring_entry = 3169 hal_srng_src_get_next(dp_soc->hal_soc, 3170 rxdma_srng); 3171 qdf_assert_always(rxdma_ring_entry); 3172 3173 next = desc_list->next; 3174 paddr = nf_info[buffer_index].paddr; 3175 nbuf = nf_info[buffer_index].virt_addr.nbuf; 3176 3177 /* Flag is set while pdev rx_desc_pool initialization */ 3178 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3179 dp_rx_desc_frag_prep(&desc_list->rx_desc, 3180 &nf_info[buffer_index]); 3181 else 3182 dp_rx_desc_prep(&desc_list->rx_desc, 3183 &nf_info[buffer_index]); 3184 desc_list->rx_desc.in_use = 1; 3185 dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); 3186 dp_rx_desc_update_dbg_info(&desc_list->rx_desc, 3187 __func__, 3188 RX_DESC_REPLENISHED); 3189 3190 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc ,rxdma_ring_entry, paddr, 3191 desc_list->rx_desc.cookie, 3192 rx_desc_pool->owner); 3193 3194 dp_ipa_handle_rx_buf_smmu_mapping( 3195 dp_soc, nbuf, 3196 rx_desc_pool->buf_size, true, 3197 __func__, __LINE__); 3198 3199 dp_audio_smmu_map(dp_soc->osdev, 3200 qdf_mem_paddr_from_dmaaddr(dp_soc->osdev, 3201 QDF_NBUF_CB_PADDR(nbuf)), 3202 QDF_NBUF_CB_PADDR(nbuf), 3203 rx_desc_pool->buf_size); 3204 3205 desc_list = next; 3206 } 3207 3208 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, 3209 rxdma_srng, nr_nbuf, nr_nbuf); 3210 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3211 } 3212 3213 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); 3214 qdf_mem_free(nf_info); 3215 3216 if (!nr_nbuf_total) { 3217 dp_err("No nbuf's allocated"); 3218 QDF_BUG(0); 3219 return QDF_STATUS_E_RESOURCES; 3220 } 3221 3222 /* No need to count the number of bytes received during replenish. 3223 * Therefore set replenish.pkts.bytes as 0. 
3224 */ 3225 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); 3226 3227 return QDF_STATUS_SUCCESS; 3228 } 3229 3230 qdf_export_symbol(dp_pdev_rx_buffers_attach); 3231 3232 #ifdef DP_RX_MON_MEM_FRAG 3233 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3234 bool is_mon_dest_desc) 3235 { 3236 rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc; 3237 if (is_mon_dest_desc) 3238 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled"); 3239 else 3240 qdf_frag_cache_drain(&rx_desc_pool->pf_cache); 3241 } 3242 #else 3243 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3244 bool is_mon_dest_desc) 3245 { 3246 rx_desc_pool->rx_mon_dest_frag_enable = false; 3247 if (is_mon_dest_desc) 3248 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled"); 3249 } 3250 #endif 3251 3252 qdf_export_symbol(dp_rx_enable_mon_dest_frag); 3253 3254 QDF_STATUS 3255 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) 3256 { 3257 struct dp_soc *soc = pdev->soc; 3258 uint32_t rxdma_entries; 3259 uint32_t rx_sw_desc_num; 3260 struct dp_srng *dp_rxdma_srng; 3261 struct rx_desc_pool *rx_desc_pool; 3262 uint32_t status = QDF_STATUS_SUCCESS; 3263 int mac_for_pdev; 3264 3265 mac_for_pdev = pdev->lmac_id; 3266 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3267 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3268 soc, mac_for_pdev); 3269 return status; 3270 } 3271 3272 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3273 rxdma_entries = dp_rxdma_srng->num_entries; 3274 3275 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3276 rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3277 3278 rx_desc_pool->desc_type = QDF_DP_RX_DESC_BUF_TYPE; 3279 status = dp_rx_desc_pool_alloc(soc, 3280 rx_sw_desc_num, 3281 rx_desc_pool); 3282 if (status != QDF_STATUS_SUCCESS) 3283 return status; 3284 3285 return status; 3286 } 3287 3288 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) 3289 { 3290 int mac_for_pdev = pdev->lmac_id; 3291 struct dp_soc *soc = pdev->soc; 3292 struct rx_desc_pool *rx_desc_pool; 3293 3294 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3295 3296 dp_rx_desc_pool_free(soc, rx_desc_pool); 3297 } 3298 3299 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) 3300 { 3301 int mac_for_pdev = pdev->lmac_id; 3302 struct dp_soc *soc = pdev->soc; 3303 uint32_t rxdma_entries; 3304 uint32_t rx_sw_desc_num; 3305 struct dp_srng *dp_rxdma_srng; 3306 struct rx_desc_pool *rx_desc_pool; 3307 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3308 uint16_t buf_size; 3309 3310 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx); 3311 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3312 3313 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3314 /* 3315 * If NSS is enabled, rx_desc_pool is already filled. 3316 * Hence, just disable desc_pool frag flag. 
3317 */ 3318 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3319 3320 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3321 soc, mac_for_pdev); 3322 return QDF_STATUS_SUCCESS; 3323 } 3324 3325 if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM) 3326 return QDF_STATUS_E_NOMEM; 3327 3328 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3329 rxdma_entries = dp_rxdma_srng->num_entries; 3330 3331 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 3332 3333 rx_sw_desc_num = 3334 wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3335 3336 rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc); 3337 rx_desc_pool->buf_size = buf_size; 3338 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; 3339 /* Disable monitor dest processing via frag */ 3340 if (target_type == TARGET_TYPE_QCN9160) { 3341 rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE; 3342 rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT; 3343 dp_rx_enable_mon_dest_frag(rx_desc_pool, true); 3344 } else { 3345 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3346 } 3347 3348 dp_rx_desc_pool_init(soc, mac_for_pdev, 3349 rx_sw_desc_num, rx_desc_pool); 3350 return QDF_STATUS_SUCCESS; 3351 } 3352 3353 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) 3354 { 3355 int mac_for_pdev = pdev->lmac_id; 3356 struct dp_soc *soc = pdev->soc; 3357 struct rx_desc_pool *rx_desc_pool; 3358 3359 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3360 3361 dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev); 3362 } 3363 3364 QDF_STATUS 3365 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) 3366 { 3367 int mac_for_pdev = pdev->lmac_id; 3368 struct dp_soc *soc = pdev->soc; 3369 struct dp_srng *dp_rxdma_srng; 3370 struct rx_desc_pool *rx_desc_pool; 3371 uint32_t rxdma_entries; 3372 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3373 3374 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3375 rxdma_entries = dp_rxdma_srng->num_entries; 3376 3377 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3378 3379 /* Initialize RX buffer pool which will be 3380 * used during low memory conditions 3381 */ 3382 dp_rx_buffer_pool_init(soc, mac_for_pdev); 3383 3384 if (target_type == TARGET_TYPE_QCN9160) 3385 return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, 3386 dp_rxdma_srng, 3387 rx_desc_pool, 3388 rxdma_entries - 1); 3389 else 3390 return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev, 3391 dp_rxdma_srng, 3392 rx_desc_pool, 3393 rxdma_entries - 1); 3394 } 3395 3396 void 3397 dp_rx_pdev_buffers_free(struct dp_pdev *pdev) 3398 { 3399 int mac_for_pdev = pdev->lmac_id; 3400 struct dp_soc *soc = pdev->soc; 3401 struct rx_desc_pool *rx_desc_pool; 3402 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3403 3404 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3405 3406 if (target_type == TARGET_TYPE_QCN9160) 3407 dp_rx_desc_frag_free(soc, rx_desc_pool); 3408 else 3409 dp_rx_desc_nbuf_free(soc, rx_desc_pool, false); 3410 3411 dp_rx_buffer_pool_deinit(soc, mac_for_pdev); 3412 } 3413 3414 #ifdef DP_RX_SPECIAL_FRAME_NEED 3415 bool dp_rx_deliver_special_frame(struct dp_soc *soc, 3416 struct dp_txrx_peer *txrx_peer, 3417 qdf_nbuf_t nbuf, uint32_t frame_mask, 3418 uint8_t *rx_tlv_hdr) 3419 { 3420 uint32_t l2_hdr_offset = 0; 3421 uint16_t msdu_len = 0; 3422 uint32_t skip_len; 3423 3424 l2_hdr_offset = 3425 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 3426 3427 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 3428 skip_len = l2_hdr_offset; 3429 } else { 3430 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 3431 skip_len = 
l2_hdr_offset + soc->rx_pkt_tlv_size; 3432 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 3433 } 3434 3435 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 3436 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 3437 qdf_nbuf_pull_head(nbuf, skip_len); 3438 3439 if (txrx_peer->vdev) { 3440 dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf, 3441 QDF_TX_RX_STATUS_OK); 3442 } 3443 3444 if (dp_rx_is_special_frame(nbuf, frame_mask)) { 3445 dp_info("special frame, mpdu sn 0x%x", 3446 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr)); 3447 qdf_nbuf_set_exc_frame(nbuf, 1); 3448 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, 3449 nbuf, NULL); 3450 return true; 3451 } 3452 3453 return false; 3454 } 3455 #endif 3456 3457 #ifdef QCA_MULTIPASS_SUPPORT 3458 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf, 3459 uint8_t tid) 3460 { 3461 struct vlan_ethhdr *vethhdrp; 3462 3463 if (qdf_unlikely(!txrx_peer->vlan_id)) 3464 return true; 3465 3466 vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf); 3467 /* 3468 * h_vlan_proto & h_vlan_TCI should be 0x8100 & zero respectively 3469 * as it is expected to be padded by 0 3470 * return false if frame doesn't have above tag so that caller will 3471 * drop the frame. 3472 */ 3473 if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) || 3474 qdf_unlikely(vethhdrp->h_vlan_TCI != 0)) 3475 return false; 3476 3477 vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) | 3478 (txrx_peer->vlan_id & VLAN_VID_MASK)); 3479 3480 if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE)) 3481 dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf); 3482 3483 return true; 3484 } 3485 #endif /* QCA_MULTIPASS_SUPPORT */ 3486
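/*
 * Illustrative caller pattern for dp_rx_multipass_process() (a sketch only:
 * the actual call site is not in this file, and vdev->multipass_en is assumed
 * here to be the per-vdev multipass enable flag):
 *
 *	if (qdf_unlikely(vdev->multipass_en) &&
 *	    !dp_rx_multipass_process(txrx_peer, nbuf, tid)) {
 *		// frame lacks the expected padded 802.1Q tag, so drop it
 *		dp_rx_nbuf_free(nbuf);
 *	}
 */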