/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include "enet.h"

#ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */

#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
                                hal_ring_handle_t hal_ring,
                                hal_ring_desc_t ring_desc,
                                struct dp_rx_desc *rx_desc)
{
        void *hal_soc = soc->hal_soc;

        hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
        dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
                                hal_ring_handle_t hal_ring_hdl,
                                hal_ring_desc_t ring_desc,
                                struct dp_rx_desc *rx_desc)
{
        hal_soc_handle_t hal_soc = soc->hal_soc;

        dp_rx_desc_dump(rx_desc);
        hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
        hal_srng_dump_ring(hal_soc, hal_ring_hdl);
        qdf_assert_always(0);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
                             hal_ring_handle_t hal_ring_hdl,
                             hal_ring_desc_t ring_desc,
                             struct dp_rx_desc *rx_desc)
{
        uint8_t return_buffer_manager;

        if (qdf_unlikely(!rx_desc)) {
                /*
                 * This is an unlikely case where the cookie obtained
                 * from the ring_desc is invalid and hence we are not
                 * able to find the corresponding rx_desc
                 */
                goto fail;
        }

        return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
        if (qdf_unlikely(!(return_buffer_manager ==
                           HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
                           return_buffer_manager ==
                           HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
                goto fail;
        }

        return QDF_STATUS_SUCCESS;

fail:
        DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
        dp_err("Ring Desc:");
        hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
        return QDF_STATUS_E_NULL_VALUE;
}
#endif

uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
                                    hal_ring_handle_t hal_ring_hdl,
                                    uint32_t num_entries,
                                    bool *near_full)
{
        uint32_t num_pending = 0;

        num_pending = hal_srng_dst_num_valid_locked(hal_soc,
                                                    hal_ring_hdl,
                                                    true);

        if (num_entries && (num_pending >= num_entries >> 1))
                *near_full = true;
        else
                *near_full = false;

        return num_pending;
}
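
/*
 * Illustrative note (added for clarity, not in the original source):
 * for a destination ring with num_entries = 512, *near_full is
 * reported once 256 or more entries are pending reap, i.e.
 * num_pending >= (512 >> 1).
 */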

#ifdef RX_DESC_DEBUG_CHECK
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
                                        hal_ring_desc_t ring_desc,
                                        struct dp_rx_desc *rx_desc)
{
        struct hal_buf_info hbi;

        hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
        /* Sanity check for possible buffer paddr corruption */
        if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
                return QDF_STATUS_SUCCESS;

        return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_desc_nbuf_len_sanity_check() - Sanity check to catch Rx buffer
 * out-of-bounds access from HW
 *
 * @soc: DP soc
 * @pkt_len: Packet length received from HW
 *
 * Return: NONE
 */
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
                                 uint32_t pkt_len)
{
        struct rx_desc_pool *rx_desc_pool;

        rx_desc_pool = &soc->rx_desc_buf[0];
        qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
}
#else
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
#endif

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
                        hal_ring_desc_t ring_desc)
{
        struct dp_buf_info_record *record;
        struct hal_buf_info hbi;
        uint32_t idx;

        if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
                return;

        hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

        /* buffer_addr_info is the first element of ring_desc */
        hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
                                  &hbi);

        idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
                                        DP_RX_HIST_MAX);

        /* No NULL check needed for record since it's an array */
        record = &soc->rx_ring_history[ring_num]->entry[idx];

        record->timestamp = qdf_get_log_timestamp();
        record->hbi.paddr = hbi.paddr;
        record->hbi.sw_cookie = hbi.sw_cookie;
        record->hbi.rbm = hbi.rbm;
}
#endif

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
                                              uint8_t *rx_tlv,
                                              qdf_nbuf_t nbuf)
{
        struct dp_soc *soc;

        if (!pdev->is_first_wakeup_packet)
                return;

        soc = pdev->soc;
        if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) {
                qdf_nbuf_mark_wakeup_frame(nbuf);
                dp_info("First packet after WOW Wakeup rcvd");
        }
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#endif /* WLAN_SOFTUMAC_SUPPORT */

/**
 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
 *
 * @dp_soc: struct dp_soc *
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
                           struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
                           struct dp_pdev *dp_pdev,
                           struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS ret = QDF_STATUS_E_FAILURE;

        (nbuf_frag_info_t->virt_addr).vaddr =
                qdf_frag_alloc(NULL, rx_desc_pool->buf_size);

        if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
                dp_err("Frag alloc failed");
                DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
                return QDF_STATUS_E_NOMEM;
        }

        ret = qdf_mem_map_page(dp_soc->osdev,
                               (nbuf_frag_info_t->virt_addr).vaddr,
                               QDF_DMA_FROM_DEVICE,
                               rx_desc_pool->buf_size,
                               &nbuf_frag_info_t->paddr);

        if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
                dp_err("Frag map failed");
                DP_STATS_INC(dp_pdev, replenish.map_err, 1);
                return QDF_STATUS_E_FAULT;
        }

        return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
                           struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
                           struct dp_pdev *dp_pdev,
                           struct rx_desc_pool *rx_desc_pool)
{
        return QDF_STATUS_SUCCESS;
}
#endif /* DP_RX_MON_MEM_FRAG */
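
/*
 * Note (added for clarity, not in the original source): the #else stub
 * above lets callers invoke dp_pdev_frag_alloc_and_map() unconditionally;
 * frag-based buffers are used only when the descriptor pool has
 * rx_mon_dest_frag_enable set (see __dp_rx_buffers_replenish() below).
 */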

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
 * @soc: Datapath soc structure
 * @ring_num: Refill ring number
 * @hal_ring_hdl: HAL handle of the refill ring
 * @num_req: number of buffers requested for refill
 * @num_refill: number of buffers refilled
 *
 * Return: None
 */
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
                               hal_ring_handle_t hal_ring_hdl,
                               uint32_t num_req, uint32_t num_refill)
{
        struct dp_refill_info_record *record;
        uint32_t idx;
        uint32_t tp;
        uint32_t hp;

        if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
                         !soc->rx_refill_ring_history[ring_num]))
                return;

        idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
                                        DP_RX_REFILL_HIST_MAX);

        /* No NULL check needed for record since it's an array */
        record = &soc->rx_refill_ring_history[ring_num]->entry[idx];

        hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);
        record->timestamp = qdf_get_log_timestamp();
        record->num_req = num_req;
        record->num_refill = num_refill;
        record->hp = hp;
        record->tp = tp;
}
#else
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
                               hal_ring_handle_t hal_ring_hdl,
                               uint32_t num_req, uint32_t num_refill)
{
}
#endif

/**
 * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and
 * map
 * @dp_soc: struct dp_soc *
 * @mac_id: Mac id
 * @num_entries_avail: num_entries_avail
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
                                     uint32_t mac_id,
                                     uint32_t num_entries_avail,
                                     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
                                     struct dp_pdev *dp_pdev,
                                     struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS ret = QDF_STATUS_E_FAILURE;

        (nbuf_frag_info_t->virt_addr).nbuf =
                dp_rx_buffer_pool_nbuf_alloc(dp_soc,
                                             mac_id,
                                             rx_desc_pool,
                                             num_entries_avail);
        if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
                dp_err("nbuf alloc failed");
                DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
                return QDF_STATUS_E_NOMEM;
        }

        ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
                                         nbuf_frag_info_t);
        if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                dp_rx_buffer_pool_nbuf_free(dp_soc,
                                            (nbuf_frag_info_t->virt_addr).nbuf,
                                            mac_id);
                dp_err("nbuf map failed");
                DP_STATS_INC(dp_pdev, replenish.map_err, 1);
                return QDF_STATUS_E_FAULT;
        }

        nbuf_frag_info_t->paddr =
                qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
        if (qdf_atomic_read(&dp_soc->ipa_mapped))
                dp_ipa_handle_rx_buf_smmu_mapping(
                                dp_soc,
                                (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
                                rx_desc_pool->buf_size,
                                true, __func__, __LINE__);

        ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
                             &nbuf_frag_info_t->paddr,
                             rx_desc_pool);
        if (ret == QDF_STATUS_E_FAILURE) {
                DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
                return QDF_STATUS_E_ADDRNOTAVAIL;
        }

        return QDF_STATUS_SUCCESS;
}
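
/*
 * Summary of the failure contract above (derived from the code):
 * QDF_STATUS_E_NOMEM        - nbuf allocation failed
 * QDF_STATUS_E_FAULT        - DMA map failed (nbuf returned to pool)
 * QDF_STATUS_E_ADDRNOTAVAIL - paddr outside the valid range (x86 check)
 */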

#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
                                    struct dp_srng *dp_rxdma_srng,
                                    struct rx_desc_pool *rx_desc_pool)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        uint32_t count;
        void *rxdma_ring_entry;
        union dp_rx_desc_list_elem_t *next = NULL;
        void *rxdma_srng;
        qdf_nbuf_t nbuf;
        qdf_dma_addr_t paddr;
        uint16_t num_entries_avail = 0;
        uint16_t num_alloc_desc = 0;
        union dp_rx_desc_list_elem_t *desc_list = NULL;
        union dp_rx_desc_list_elem_t *tail = NULL;
        int sync_hw_ptr = 0;

        rxdma_srng = dp_rxdma_srng->hal_srng;

        if (qdf_unlikely(!dp_pdev)) {
                dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
                return QDF_STATUS_E_FAILURE;
        }

        if (qdf_unlikely(!rxdma_srng)) {
                dp_rx_debug("%pK: rxdma srng not initialized", soc);
                return QDF_STATUS_E_FAILURE;
        }

        hal_srng_access_start(soc->hal_soc, rxdma_srng);

        num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
                                                   rxdma_srng,
                                                   sync_hw_ptr);

        dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
                    soc, num_entries_avail);

        if (qdf_unlikely(num_entries_avail <
                         ((dp_rxdma_srng->num_entries * 3) / 4))) {
                hal_srng_access_end(soc->hal_soc, rxdma_srng);
                return QDF_STATUS_E_FAILURE;
        }

        DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
        num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
                                                  rx_desc_pool,
                                                  num_entries_avail,
                                                  &desc_list,
                                                  &tail);

        if (!num_alloc_desc) {
                dp_rx_err("%pK: no free rx_descs in freelist", soc);
                DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
                             num_entries_avail);
                hal_srng_access_end(soc->hal_soc, rxdma_srng);
                return QDF_STATUS_E_NOMEM;
        }

        for (count = 0; count < num_alloc_desc; count++) {
                next = desc_list->next;
                qdf_prefetch(next);
                nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
                if (qdf_unlikely(!nbuf)) {
                        DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
                        break;
                }

                paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
                                               rx_desc_pool->buf_size);

                rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
                                                         rxdma_srng);
                qdf_assert_always(rxdma_ring_entry);

                desc_list->rx_desc.nbuf = nbuf;
                dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
                desc_list->rx_desc.rx_buf_start = nbuf->data;
                desc_list->rx_desc.paddr_buf_start = paddr;
                desc_list->rx_desc.unmapped = 0;

                /* rx_desc.in_use should be zero at this time */
                qdf_assert_always(desc_list->rx_desc.in_use == 0);

                desc_list->rx_desc.in_use = 1;
                desc_list->rx_desc.in_err_state = 0;

                hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
                                             paddr,
                                             desc_list->rx_desc.cookie,
                                             rx_desc_pool->owner);

                desc_list = next;
        }
        qdf_dsb();
        hal_srng_access_end(soc->hal_soc, rxdma_srng);

        /* No need to count the number of bytes received during replenish.
         * Therefore set replenish.pkts.bytes as 0.
         */
        DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
        DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));
        /*
         * add any available free desc back to the free list
         */
        if (desc_list)
                dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
                                                 mac_id, rx_desc_pool);

        return QDF_STATUS_SUCCESS;
}
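
/*
 * Illustrative note (added for clarity, not in the original source):
 * this low-threshold variant bails out unless at least 3/4 of the ring
 * is empty. For a refill ring with num_entries = 4096, replenish
 * proceeds only when num_entries_avail >= 3072, and it then tries to
 * fill every available entry in one pass.
 */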

QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
                                 struct dp_srng *dp_rxdma_srng,
                                 struct rx_desc_pool *rx_desc_pool,
                                 uint32_t num_req_buffers,
                                 union dp_rx_desc_list_elem_t **desc_list,
                                 union dp_rx_desc_list_elem_t **tail)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        uint32_t count;
        void *rxdma_ring_entry;
        union dp_rx_desc_list_elem_t *next;
        void *rxdma_srng;
        qdf_nbuf_t nbuf;
        qdf_nbuf_t nbuf_next;
        qdf_nbuf_t nbuf_head = NULL;
        qdf_nbuf_t nbuf_tail = NULL;
        qdf_dma_addr_t paddr;

        rxdma_srng = dp_rxdma_srng->hal_srng;

        if (qdf_unlikely(!dp_pdev)) {
                dp_rx_err("%pK: pdev is null for mac_id = %d",
                          soc, mac_id);
                return QDF_STATUS_E_FAILURE;
        }

        if (qdf_unlikely(!rxdma_srng)) {
                dp_rx_debug("%pK: rxdma srng not initialized", soc);
                DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
                return QDF_STATUS_E_FAILURE;
        }

        /* Allocate required number of nbufs */
        for (count = 0; count < num_req_buffers; count++) {
                nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
                if (qdf_unlikely(!nbuf)) {
                        DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
                        /* Update num_req_buffers to nbufs allocated count */
                        num_req_buffers = count;
                        break;
                }

                paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
                                               rx_desc_pool->buf_size);

                QDF_NBUF_CB_PADDR(nbuf) = paddr;
                DP_RX_LIST_APPEND(nbuf_head,
                                  nbuf_tail,
                                  nbuf);
        }
        qdf_dsb();

        nbuf = nbuf_head;
        hal_srng_access_start(soc->hal_soc, rxdma_srng);

        for (count = 0; count < num_req_buffers; count++) {
                next = (*desc_list)->next;
                nbuf_next = nbuf->next;
                qdf_prefetch(next);

                rxdma_ring_entry = (struct dp_buffer_addr_info *)
                        hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

                if (!rxdma_ring_entry)
                        break;

                (*desc_list)->rx_desc.nbuf = nbuf;
                dp_rx_set_reuse_nbuf(&(*desc_list)->rx_desc, nbuf);
                (*desc_list)->rx_desc.rx_buf_start = nbuf->data;
                (*desc_list)->rx_desc.paddr_buf_start = QDF_NBUF_CB_PADDR(nbuf);
                (*desc_list)->rx_desc.unmapped = 0;

                /* rx_desc.in_use should be zero at this time */
                qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

                (*desc_list)->rx_desc.in_use = 1;
                (*desc_list)->rx_desc.in_err_state = 0;

                hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
                                             QDF_NBUF_CB_PADDR(nbuf),
                                             (*desc_list)->rx_desc.cookie,
                                             rx_desc_pool->owner);

                *desc_list = next;
                nbuf = nbuf_next;
        }
        hal_srng_access_end(soc->hal_soc, rxdma_srng);

        /* No need to count the number of bytes received during replenish.
         * Therefore set replenish.pkts.bytes as 0.
         */
        DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
        DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));
        /*
         * add any available free desc back to the free list
         */
        if (*desc_list)
                dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
                                                 mac_id, rx_desc_pool);
        while (nbuf) {
                nbuf_next = nbuf->next;
                dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
                qdf_nbuf_free(nbuf);
                nbuf = nbuf_next;
        }

        return QDF_STATUS_SUCCESS;
}
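
/*
 * Note (added for clarity, not in the original source): the function
 * above works in two passes. Pass 1 allocates and cache-syncs all nbufs
 * without a barrier; the single qdf_dsb() then orders those writes
 * before pass 2 publishes the DMA addresses to the ring. Any nbufs left
 * over when the ring runs out of entries are unmapped and freed at the
 * end.
 */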

#ifdef WLAN_SUPPORT_PPEDS
QDF_STATUS
__dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id,
                              struct dp_srng *dp_rxdma_srng,
                              struct rx_desc_pool *rx_desc_pool,
                              uint32_t num_req_buffers,
                              union dp_rx_desc_list_elem_t **desc_list,
                              union dp_rx_desc_list_elem_t **tail)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        uint32_t count;
        void *rxdma_ring_entry;
        union dp_rx_desc_list_elem_t *next;
        union dp_rx_desc_list_elem_t *cur;
        void *rxdma_srng;
        qdf_nbuf_t nbuf;

        rxdma_srng = dp_rxdma_srng->hal_srng;

        if (qdf_unlikely(!dp_pdev)) {
                dp_rx_err("%pK: pdev is null for mac_id = %d",
                          soc, mac_id);
                return QDF_STATUS_E_FAILURE;
        }

        if (qdf_unlikely(!rxdma_srng)) {
                dp_rx_debug("%pK: rxdma srng not initialized", soc);
                DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
                return QDF_STATUS_E_FAILURE;
        }

        hal_srng_access_start(soc->hal_soc, rxdma_srng);

        for (count = 0; count < num_req_buffers; count++) {
                next = (*desc_list)->next;
                qdf_prefetch(next);

                rxdma_ring_entry = (struct dp_buffer_addr_info *)
                        hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

                if (!rxdma_ring_entry)
                        break;

                (*desc_list)->rx_desc.in_use = 1;
                (*desc_list)->rx_desc.in_err_state = 0;
                (*desc_list)->rx_desc.nbuf = (*desc_list)->rx_desc.reuse_nbuf;

                hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
                                             (*desc_list)->rx_desc.paddr_buf_start,
                                             (*desc_list)->rx_desc.cookie,
                                             rx_desc_pool->owner);

                *desc_list = next;
        }
        hal_srng_access_end(soc->hal_soc, rxdma_srng);

        /* No need to count the number of bytes received during replenish.
         * Therefore set replenish.pkts.bytes as 0.
         */
        DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
        DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));

        /*
         * add any available free desc back to the free list
         */
        cur = *desc_list;
        for ( ; count < num_req_buffers; count++) {
                next = cur->next;
                qdf_prefetch(next);

                nbuf = cur->rx_desc.reuse_nbuf;

                cur->rx_desc.nbuf = NULL;
                cur->rx_desc.in_use = 0;
                cur->rx_desc.has_reuse_nbuf = false;
                cur->rx_desc.reuse_nbuf = NULL;
                if (!nbuf->recycled_for_ds)
                        dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);

                nbuf->recycled_for_ds = 0;
                nbuf->fast_recycled = 0;
                qdf_nbuf_free(nbuf);
                cur = next;
        }

        if (*desc_list)
                dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
                                                 mac_id, rx_desc_pool);

        return QDF_STATUS_SUCCESS;
}
#endif
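
/*
 * Note (added for clarity, not in the original source): in this PPE-DS
 * completion-to-refill path no fresh allocation or DMA sync happens;
 * each descriptor is rearmed with its saved reuse_nbuf and
 * paddr_buf_start. Descriptors that do not fit in the ring have their
 * reuse nbufs unmapped (unless recycled for DS) and freed.
 */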

QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
                                              uint32_t mac_id,
                                              struct dp_srng *dp_rxdma_srng,
                                              struct rx_desc_pool *rx_desc_pool,
                                              uint32_t num_req_buffers)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        uint32_t count;
        uint32_t nr_descs = 0;
        void *rxdma_ring_entry;
        union dp_rx_desc_list_elem_t *next;
        void *rxdma_srng;
        qdf_nbuf_t nbuf;
        qdf_dma_addr_t paddr;
        union dp_rx_desc_list_elem_t *desc_list = NULL;
        union dp_rx_desc_list_elem_t *tail = NULL;

        rxdma_srng = dp_rxdma_srng->hal_srng;

        if (qdf_unlikely(!dp_pdev)) {
                dp_rx_err("%pK: pdev is null for mac_id = %d",
                          soc, mac_id);
                return QDF_STATUS_E_FAILURE;
        }

        if (qdf_unlikely(!rxdma_srng)) {
                dp_rx_debug("%pK: rxdma srng not initialized", soc);
                DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
                return QDF_STATUS_E_FAILURE;
        }

        dp_rx_debug("%pK: requested %d buffers for replenish",
                    soc, num_req_buffers);

        nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
                                            num_req_buffers, &desc_list,
                                            &tail);
        if (!nr_descs) {
                dp_err("no free rx_descs in freelist");
                DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
                return QDF_STATUS_E_NOMEM;
        }

        dp_debug("got %u RX descs for driver attach", nr_descs);

        hal_srng_access_start(soc->hal_soc, rxdma_srng);

        for (count = 0; count < nr_descs; count++) {
                next = desc_list->next;
                qdf_prefetch(next);
                nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
                if (qdf_unlikely(!nbuf)) {
                        DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
                        break;
                }

                paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
                                               rx_desc_pool->buf_size);
                rxdma_ring_entry = (struct dp_buffer_addr_info *)
                        hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
                if (!rxdma_ring_entry)
                        break;

                desc_list->rx_desc.nbuf = nbuf;
                dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
                desc_list->rx_desc.rx_buf_start = nbuf->data;
                desc_list->rx_desc.paddr_buf_start = paddr;
                desc_list->rx_desc.unmapped = 0;

                /* rx_desc.in_use should be zero at this time */
                qdf_assert_always(desc_list->rx_desc.in_use == 0);

                desc_list->rx_desc.in_use = 1;
                desc_list->rx_desc.in_err_state = 0;

                hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
                                             paddr,
                                             desc_list->rx_desc.cookie,
                                             rx_desc_pool->owner);

                desc_list = next;
        }
        qdf_dsb();
        hal_srng_access_end(soc->hal_soc, rxdma_srng);

        /* No need to count the number of bytes received during replenish.
         * Therefore set replenish.pkts.bytes as 0.
         */
        DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

        return QDF_STATUS_SUCCESS;
}
#endif
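
/*
 * Note (added for clarity, not in the original source): unlike the
 * runtime replenish paths above, this attach-time fill fetches its own
 * descriptor list, stops quietly on the first nbuf-allocation or
 * ring-entry failure, and does not push unused descriptors back to the
 * freelist here.
 */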

#ifdef DP_UMAC_HW_RESET_SUPPORT
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
                                        uint32_t buf_size)
{
        return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size);
}
#else
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
                                        uint32_t buf_size)
{
        return qdf_nbuf_get_frag_paddr(nbuf, 0);
}
#endif

/**
 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time
 * @soc: core txrx main context
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @rx_desc: rx descriptor
 *
 * Return: void
 */
static inline
void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
                          struct rx_desc_pool *rx_desc_pool,
                          struct dp_rx_desc *rx_desc)
{
        void *rxdma_srng;
        void *rxdma_ring_entry;
        qdf_dma_addr_t paddr;

        rxdma_srng = dp_rxdma_srng->hal_srng;

        /* No one else should be accessing the srng at this point */
        hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng);

        rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

        qdf_assert_always(rxdma_ring_entry);
        rx_desc->in_err_state = 0;

        paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf,
                                         rx_desc_pool->buf_size);
        hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr,
                                     rx_desc->cookie, rx_desc_pool->owner);

        hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng);
}

void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
        int mac_id, i, j;
        union dp_rx_desc_list_elem_t *head = NULL;
        union dp_rx_desc_list_elem_t *tail = NULL;

        for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
                struct dp_srng *dp_rxdma_srng =
                                        &soc->rx_refill_buf_ring[mac_id];
                struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
                uint32_t rx_sw_desc_num = rx_desc_pool->pool_size;
                /* Only fill up 1/3 of the ring size */
                uint32_t num_req_decs;

                if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng ||
                    !rx_desc_pool->array)
                        continue;

                num_req_decs = dp_rxdma_srng->num_entries / 3;

                for (i = 0, j = 0; i < rx_sw_desc_num; i++) {
                        struct dp_rx_desc *rx_desc =
                                (struct dp_rx_desc *)&rx_desc_pool->array[i];

                        if (rx_desc->in_use) {
                                if (j < (dp_rxdma_srng->num_entries - 1)) {
                                        dp_rx_desc_replenish(soc, dp_rxdma_srng,
                                                             rx_desc_pool,
                                                             rx_desc);
                                } else {
                                        dp_rx_nbuf_unmap(soc, rx_desc, 0);
                                        rx_desc->unmapped = 0;

                                        rx_desc->nbuf->next = *nbuf_list;
                                        *nbuf_list = rx_desc->nbuf;

                                        dp_rx_add_to_free_desc_list(&head,
                                                                    &tail,
                                                                    rx_desc);
                                }
                                j++;
                        }
                }

                if (head)
                        dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
                                                         mac_id, rx_desc_pool);

                /* If the number of descs in use was lower, replenish
                 * the ring with some buffers
                 */
                head = NULL;
                tail = NULL;

                if (j < (num_req_decs - 1))
                        dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                                rx_desc_pool,
                                                ((num_req_decs - 1) - j),
                                                &head, &tail, true);
        }
}
#endif
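
/*
 * Note (added for clarity, not in the original source): during a UMAC
 * HW reset, in-use descriptors are pushed back to the refill ring with
 * their existing buffers; once the ring is full, the remaining in-use
 * nbufs are unmapped and handed back on nbuf_list while their
 * descriptors return to the freelist.
 */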
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
                                     struct dp_srng *dp_rxdma_srng,
                                     struct rx_desc_pool *rx_desc_pool,
                                     uint32_t num_req_buffers,
                                     union dp_rx_desc_list_elem_t **desc_list,
                                     union dp_rx_desc_list_elem_t **tail,
                                     bool req_only, const char *func_name)
{
        uint32_t num_alloc_desc;
        uint16_t num_desc_to_free = 0;
        struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
        uint32_t num_entries_avail;
        uint32_t count;
        uint32_t extra_buffers;
        int sync_hw_ptr = 1;
        struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
        void *rxdma_ring_entry;
        union dp_rx_desc_list_elem_t *next;
        QDF_STATUS ret;
        void *rxdma_srng;
        union dp_rx_desc_list_elem_t *desc_list_append = NULL;
        union dp_rx_desc_list_elem_t *tail_append = NULL;
        union dp_rx_desc_list_elem_t *temp_list = NULL;

        rxdma_srng = dp_rxdma_srng->hal_srng;

        if (qdf_unlikely(!dp_pdev)) {
                dp_rx_err("%pK: pdev is null for mac_id = %d",
                          dp_soc, mac_id);
                return QDF_STATUS_E_FAILURE;
        }

        if (qdf_unlikely(!rxdma_srng)) {
                dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
                DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
                return QDF_STATUS_E_FAILURE;
        }

        dp_verbose_debug("%pK: requested %d buffers for replenish",
                         dp_soc, num_req_buffers);

        hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

        num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
                                                   rxdma_srng,
                                                   sync_hw_ptr);

        dp_verbose_debug("%pK: no of available entries in rxdma ring: %d",
                         dp_soc, num_entries_avail);

        if (!req_only && !(*desc_list) &&
            (num_entries_avail > ((dp_rxdma_srng->num_entries * 3) / 4))) {
                num_req_buffers = num_entries_avail;
                DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
        } else if (num_entries_avail < num_req_buffers) {
                num_desc_to_free = num_req_buffers - num_entries_avail;
                num_req_buffers = num_entries_avail;
        } else if ((*desc_list) &&
                   dp_rxdma_srng->num_entries - num_entries_avail <
                   CRITICAL_BUFFER_THRESHOLD) {
                /* set extra buffers to CRITICAL_BUFFER_THRESHOLD only if
                 * total buff requested after adding extra buffers is less
                 * than or equal to num entries available, else set it to max
                 * possible additional buffers available at that moment
                 */
                extra_buffers =
                        ((num_req_buffers + CRITICAL_BUFFER_THRESHOLD) >
                         num_entries_avail) ?
                        (num_entries_avail - num_req_buffers) :
                        CRITICAL_BUFFER_THRESHOLD;
                /* Append some free descriptors to tail */
                num_alloc_desc =
                        dp_rx_get_free_desc_list(dp_soc, mac_id,
                                                 rx_desc_pool,
                                                 extra_buffers,
                                                 &desc_list_append,
                                                 &tail_append);

                if (num_alloc_desc) {
                        temp_list = *desc_list;
                        *desc_list = desc_list_append;
                        tail_append->next = temp_list;
                        num_req_buffers += num_alloc_desc;

                        DP_STATS_DEC(dp_pdev,
                                     replenish.free_list,
                                     num_alloc_desc);
                } else
                        dp_err_rl("%pK: no free rx_descs in freelist", dp_soc);
        }

        if (qdf_unlikely(!num_req_buffers)) {
                num_desc_to_free = num_req_buffers;
                hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
                goto free_descs;
        }

        /*
         * if desc_list is NULL, allocate the descs from freelist
         */
        if (!(*desc_list)) {
                num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
                                                          rx_desc_pool,
                                                          num_req_buffers,
                                                          desc_list,
                                                          tail);

                if (!num_alloc_desc) {
                        dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
                        DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
                                     num_req_buffers);
                        hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
                        return QDF_STATUS_E_NOMEM;
                }

                dp_verbose_debug("%pK: %d rx desc allocated", dp_soc,
                                 num_alloc_desc);
                num_req_buffers = num_alloc_desc;
        }

        count = 0;

        while (count < num_req_buffers) {
                /* Flag is set during pdev rx_desc_pool initialization */
                if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
                        ret = dp_pdev_frag_alloc_and_map(dp_soc,
                                                         &nbuf_frag_info,
                                                         dp_pdev,
                                                         rx_desc_pool);
                else
                        ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
                                                                   mac_id,
                                                                   num_entries_avail,
                                                                   &nbuf_frag_info,
                                                                   dp_pdev,
                                                                   rx_desc_pool);

                if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                        if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
                                continue;
                        break;
                }

                count++;

                rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
                                                         rxdma_srng);
                qdf_assert_always(rxdma_ring_entry);

                next = (*desc_list)->next;

                /* Flag is set during pdev rx_desc_pool initialization */
                if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
                        dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
                                             &nbuf_frag_info);
                else
                        dp_rx_desc_prep(&((*desc_list)->rx_desc),
                                        &nbuf_frag_info);

                /* rx_desc.in_use should be zero at this time */
                qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

                (*desc_list)->rx_desc.in_use = 1;
                (*desc_list)->rx_desc.in_err_state = 0;
                dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
                                           func_name, RX_DESC_REPLENISHED);
                dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
                                 nbuf_frag_info.virt_addr.nbuf,
                                 (unsigned long long)(nbuf_frag_info.paddr),
                                 (*desc_list)->rx_desc.cookie);

                hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
                                             nbuf_frag_info.paddr,
                                             (*desc_list)->rx_desc.cookie,
                                             rx_desc_pool->owner);

                *desc_list = next;
        }

        dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
                                       num_req_buffers, count);

        hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

        dp_rx_schedule_refill_thread(dp_soc);

        dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
                         count, num_desc_to_free);

        /* No need to count the number of bytes received during replenish.
         * Therefore set replenish.pkts.bytes as 0.
         */
        DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
        DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

free_descs:
        DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
        /*
         * add any available free desc back to the free list
         */
        if (*desc_list)
                dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
                                                 mac_id, rx_desc_pool);

        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(__dp_rx_buffers_replenish);

void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
                  struct dp_txrx_peer *txrx_peer, uint8_t link_id)
{
        qdf_nbuf_t deliver_list_head = NULL;
        qdf_nbuf_t deliver_list_tail = NULL;
        qdf_nbuf_t nbuf;

        nbuf = nbuf_list;
        while (nbuf) {
                qdf_nbuf_t next = qdf_nbuf_next(nbuf);

                DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

                DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
                DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
                                              qdf_nbuf_len(nbuf), link_id);

                nbuf = next;
        }

        vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
                                 &deliver_list_tail);

        vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
                    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif
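
/*
 * Note (added for clarity, not in the original source): in
 * __dp_rx_buffers_replenish() above, a non-req_only call with an empty
 * desc_list grows the request to fill the whole ring when more than 3/4
 * of it is empty, while a call arriving with fewer free ring entries
 * than requested trims the request and frees the excess descriptors via
 * the free_descs path.
 */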

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/**
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical
 * frame and marks the QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb
 * for the nbuf. The code for marking which frames are CRITICAL is reached
 * via a callback. EAPOL, ARP, DHCP, DHCPv6 and ICMPv6 NS/NA are the
 * typical critical frames.
 *
 * Return: None
 */
static
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
                               qdf_nbuf_t nbuf)
{
        if (vdev->tx_classify_critical_pkt_cb)
                vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
}
#else
static inline
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
                               qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
        qdf_nbuf_set_queue_mapping(nbuf, ring_id);
}
#else
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
}
#endif

bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                             uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
                             struct cdp_tid_rx_stats *tid_stats,
                             uint8_t link_id)
{
        uint16_t len;
        qdf_nbuf_t nbuf_copy;

        if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
                                            nbuf))
                return true;

        if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf, link_id))
                return false;

        /* If the source peer is in the isolation list,
         * don't forward; instead push to the bridge stack.
         */
        if (dp_get_peer_isolation(ta_peer))
                return false;

        nbuf_copy = qdf_nbuf_copy(nbuf);
        if (!nbuf_copy)
                return false;

        len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

        qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
        dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);

        if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer,
                                                       nbuf_copy,
                                                       tid_stats,
                                                       link_id))
                return false;

        /* Don't send packets if tx is paused */
        if (!soc->is_tx_pause &&
            !dp_tx_send((struct cdp_soc_t *)soc,
                        ta_peer->vdev->vdev_id, nbuf_copy)) {
                DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
                                              len, link_id);
                tid_stats->intrabss_cnt++;
        } else {
                DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
                                              len, link_id);
                tid_stats->fail_cnt[INTRABSS_DROP]++;
                dp_rx_nbuf_free(nbuf_copy);
        }
        return false;
}
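
/*
 * Note (added for clarity, not in the original source):
 * dp_rx_intrabss_mcbc_fwd() forwards a *copy* of the multicast/broadcast
 * frame back into the BSS, so it returns false in almost every path to
 * let the caller still deliver the original nbuf to the stack; true is
 * returned only when the EAPOL drop check consumes the frame.
 */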

bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                              uint8_t tx_vdev_id,
                              uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
                              struct cdp_tid_rx_stats *tid_stats,
                              uint8_t link_id)
{
        uint16_t len;

        len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

        /* linearize the nbuf just before we send to
         * dp_tx_send()
         */
        if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
                if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
                        return false;

                nbuf = qdf_nbuf_unshare(nbuf);
                if (!nbuf) {
                        DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
                                                      rx.intra_bss.fail,
                                                      1, len, link_id);
                        /* skb_unshare failed, so the pkt is not
                         * forwarded. Count the drop and let the
                         * caller continue with the next nbuf.
                         */
                        tid_stats->fail_cnt[INTRABSS_DROP]++;
                        return false;
                }
        }

        qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
        dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);

        /* Don't send packets if tx is paused */
        if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc,
                                             tx_vdev_id, nbuf)) {
                DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
                                              len, link_id);
        } else {
                DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
                                              len, link_id);
                tid_stats->fail_cnt[INTRABSS_DROP]++;
                return false;
        }

        return true;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef MESH_MODE_SUPPORT

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                           uint8_t *rx_tlv_hdr,
                           struct dp_txrx_peer *txrx_peer)
{
        struct mesh_recv_hdr_s *rx_info = NULL;
        uint32_t pkt_type;
        uint32_t nss;
        uint32_t rate_mcs;
        uint32_t bw;
        uint8_t primary_chan_num;
        uint32_t center_chan_freq;
        struct dp_soc *soc = vdev->pdev->soc;
        struct dp_peer *peer;
        struct dp_peer *primary_link_peer;
        struct dp_soc *link_peer_soc;
        cdp_peer_stats_param_t buf = {0};

        /* fill recv mesh stats */
        rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

        /* upper layers are responsible for freeing this memory */
        if (!rx_info) {
                dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
                          vdev->pdev->soc);
                DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
                return;
        }

        rx_info->rs_flags = MESH_RXHDR_VER1;
        if (qdf_nbuf_is_rx_chfrag_start(nbuf))
                rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

        if (qdf_nbuf_is_rx_chfrag_end(nbuf))
                rx_info->rs_flags |= MESH_RX_LAST_MSDU;

        peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
        if (peer) {
                if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
                        rx_info->rs_flags |= MESH_RX_DECRYPTED;
                        rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
                                                                  rx_tlv_hdr);
                        if (vdev->osif_get_key)
                                vdev->osif_get_key(vdev->osif_vdev,
                                                   &rx_info->rs_decryptkey[0],
                                                   &peer->mac_addr.raw[0],
                                                   rx_info->rs_keyix);
                }

                dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
        }

        primary_link_peer = dp_get_primary_link_peer_by_id(soc,
                                                           txrx_peer->peer_id,
                                                           DP_MOD_ID_MESH);

        if (qdf_likely(primary_link_peer)) {
                link_peer_soc = primary_link_peer->vdev->pdev->soc;
                dp_monitor_peer_get_stats_param(link_peer_soc,
                                                primary_link_peer,
                                                cdp_peer_rx_snr, &buf);
                rx_info->rs_snr = buf.rx_snr;
                dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
        }

        rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;

        soc = vdev->pdev->soc;
        primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
        center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;

        if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
                rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
                                                        soc->ctrl_psoc,
                                                        vdev->pdev->pdev_id,
                                                        center_chan_freq);
        }
        rx_info->rs_channel = primary_chan_num;
        pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
        rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
        bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
        nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);

        /*
         * The MCS index does not start with 0 when NSS > 1 in HT mode.
         * MCS params for optional 20/40MHz, NSS = 1~3, EQM (NSS > 1):
         * ------------------------------------------------------
         * NSS             |   1    |    2    |    3     |    4
         * ------------------------------------------------------
         * MCS index: HT20 | 0 ~ 7  | 8 ~ 15  | 16 ~ 23  | 24 ~ 31
         * ------------------------------------------------------
         * MCS index: HT40 | 0 ~ 7  | 8 ~ 15  | 16 ~ 23  | 24 ~ 31
         * ------------------------------------------------------
         * Currently, MAX_NSS = 2. If NSS > 2, MCS index = 8 * (NSS - 1)
         */
        if ((pkt_type == DOT11_N) && (nss == 2))
                rate_mcs += 8;

        rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
                               (bw << 24);

        qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
                  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
                  rx_info->rs_flags,
                  rx_info->rs_rssi,
                  rx_info->rs_channel,
                  rx_info->rs_ratephy1,
                  rx_info->rs_keyix,
                  rx_info->rs_snr);
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                     uint8_t *rx_tlv_hdr)
{
        union dp_align_mac_addr mac_addr;
        struct dp_soc *soc = vdev->pdev->soc;

        if (qdf_unlikely(vdev->mesh_rx_filter)) {
                if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
                        if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
                                                  rx_tlv_hdr))
                                return QDF_STATUS_SUCCESS;

                if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
                        if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
                                                  rx_tlv_hdr))
                                return QDF_STATUS_SUCCESS;

                if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
                        if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
                                                   rx_tlv_hdr) &&
                            !hal_rx_mpdu_get_to_ds(soc->hal_soc,
                                                   rx_tlv_hdr))
                                return QDF_STATUS_SUCCESS;

                if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
                        if (hal_rx_mpdu_get_addr1(soc->hal_soc,
                                                  rx_tlv_hdr,
                                                  &mac_addr.raw[0]))
                                return QDF_STATUS_E_FAILURE;

                        if (!qdf_mem_cmp(&mac_addr.raw[0],
                                         &vdev->mac_addr.raw[0],
                                         QDF_MAC_ADDR_SIZE))
                                return QDF_STATUS_SUCCESS;
                }

                if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
                        if (hal_rx_mpdu_get_addr2(soc->hal_soc,
                                                  rx_tlv_hdr,
                                                  &mac_addr.raw[0]))
                                return QDF_STATUS_E_FAILURE;

                        if (!qdf_mem_cmp(&mac_addr.raw[0],
                                         &vdev->mac_addr.raw[0],
                                         QDF_MAC_ADDR_SIZE))
                                return QDF_STATUS_SUCCESS;
                }
        }

        return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                           uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                     uint8_t *rx_tlv_hdr)
{
        return QDF_STATUS_E_FAILURE;
}

#endif
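
/*
 * Illustrative example (added for clarity, not in the original source):
 * in dp_rx_fill_mesh_stats() above, an HT (DOT11_N) frame with nss = 2
 * and a per-stream MCS of 7 is adjusted to rate_mcs = 15, and
 * rs_ratephy1 packs the fields as
 * rate_mcs | (nss << 8) | (pkt_type << 16) | (bw << 24).
 */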

#ifdef RX_PEER_INVALID_ENH
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
                                   uint8_t mac_id)
{
        struct dp_invalid_peer_msg msg;
        struct dp_vdev *vdev = NULL;
        struct dp_pdev *pdev = NULL;
        struct ieee80211_frame *wh;
        qdf_nbuf_t curr_nbuf, next_nbuf;
        uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
        uint8_t *rx_pkt_hdr = NULL;
        int i = 0;
        uint32_t nbuf_len;

        if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
                dp_rx_debug("%pK: Drop decapped frames", soc);
                goto free;
        }

        /* In a RAW packet, the packet header is part of the data */
        rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
        wh = (struct ieee80211_frame *)rx_pkt_hdr;

        if (!DP_FRAME_IS_DATA(wh)) {
                dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
                goto free;
        }

        nbuf_len = qdf_nbuf_len(mpdu);
        if (nbuf_len < sizeof(struct ieee80211_frame)) {
                dp_rx_err("%pK: Invalid nbuf length: %u", soc, nbuf_len);
                goto free;
        }

        /* In the DMAC case the rx_desc_pools are common across PDEVs,
         * so the PDEV cannot be derived from the pool_id.
         *
         * link_id needs to be derived from the TLV tag word, which is
         * disabled by default. For now add a WAR to find the vdev by
         * brute force; this needs to be fixed once word-based
         * subscription support is added by enabling the TLV tag word.
         */
        if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
                for (i = 0; i < MAX_PDEV_CNT; i++) {
                        pdev = soc->pdev_list[i];

                        if (!pdev || qdf_unlikely(pdev->is_pdev_down))
                                continue;

                        TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                                if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
                                                QDF_MAC_ADDR_SIZE) == 0) {
                                        goto out;
                                }
                        }
                }
        } else {
                pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

                if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
                        dp_rx_err("%pK: PDEV %s",
                                  soc, !pdev ? "not found" : "down");
                        goto free;
                }

                if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
                    QDF_STATUS_SUCCESS)
                        return 0;

                TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                        if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
                                        QDF_MAC_ADDR_SIZE) == 0) {
                                goto out;
                        }
                }
        }

        if (!vdev) {
                dp_rx_err("%pK: VDEV not found", soc);
                goto free;
        }
out:
        msg.wh = wh;
        qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
        msg.nbuf = mpdu;
        msg.vdev_id = vdev->vdev_id;

        /*
         * NOTE: Only valid for HKv1.
         * If smart monitor mode is enabled on the RE, we receive invalid
         * peer frames with the RA set to the STA mac of the RE and the TA
         * not matching any NAC list entry or the BSSID. Such frames need
         * to be dropped in order to avoid HM_WDS false addition.
         */
        if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
                if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
                        dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
                                   soc, wh->i_addr1);
                        goto free;
                }
                pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
                                (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
                                pdev->pdev_id, &msg);
        }

free:
        /* Drop and free packet */
        curr_nbuf = mpdu;
        while (curr_nbuf) {
                next_nbuf = qdf_nbuf_next(curr_nbuf);
                dp_rx_nbuf_free(curr_nbuf);
                curr_nbuf = next_nbuf;
        }

        return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
                                        qdf_nbuf_t mpdu, bool mpdu_done,
                                        uint8_t mac_id)
{
        /* Only trigger the process when the mpdu is completed */
        if (mpdu_done)
                dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
                                   uint8_t mac_id)
{
        qdf_nbuf_t curr_nbuf, next_nbuf;
        struct dp_pdev *pdev;
        struct dp_vdev *vdev = NULL;
        struct ieee80211_frame *wh;
        struct dp_peer *peer = NULL;
        uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
        uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
        uint32_t nbuf_len;

        wh = (struct ieee80211_frame *)rx_pkt_hdr;

        if (!DP_FRAME_IS_DATA(wh)) {
                QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
                                   "only for data frames");
                goto free;
        }

        nbuf_len = qdf_nbuf_len(mpdu);
        if (nbuf_len < sizeof(struct ieee80211_frame)) {
                dp_rx_info_rl("%pK: Invalid nbuf length: %u", soc, nbuf_len);
                goto free;
        }

        pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        if (!pdev) {
                dp_rx_info_rl("%pK: PDEV not found", soc);
                goto free;
        }

        qdf_spin_lock_bh(&pdev->vdev_list_lock);
        DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
                if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
                                QDF_MAC_ADDR_SIZE) == 0) {
                        qdf_spin_unlock_bh(&pdev->vdev_list_lock);
                        goto out;
                }
        }
        qdf_spin_unlock_bh(&pdev->vdev_list_lock);

        if (!vdev) {
                dp_rx_info_rl("%pK: VDEV not found", soc);
                goto free;
        }

out:
        if (vdev->opmode == wlan_op_mode_ap) {
                peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
                                              vdev->vdev_id,
                                              DP_MOD_ID_RX_ERR);
                /* If the SA is a valid peer in the vdev,
                 * don't send a disconnect
                 */
                if (peer) {
                        dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
                        DP_STATS_INC(soc, rx.err.decrypt_err_drop, 1);
                        dp_err_rl("invalid peer frame with correct SA/RA is freed");
                        goto free;
                }
        }

        if (soc->cdp_soc.ol_ops->rx_invalid_peer)
                soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
        /* Drop and free packet */
        curr_nbuf = mpdu;
        while (curr_nbuf) {
                next_nbuf = qdf_nbuf_next(curr_nbuf);
                dp_rx_nbuf_free(curr_nbuf);
                curr_nbuf = next_nbuf;
        }

        /* Reset the head and tail pointers */
        pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        if (pdev) {
                pdev->invalid_peer_head_msdu = NULL;
                pdev->invalid_peer_tail_msdu = NULL;
        }

        return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
                                        qdf_nbuf_t mpdu, bool mpdu_done,
                                        uint8_t mac_id)
{
        /* Process the nbuf */
        dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#endif
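
/*
 * Note (added for clarity, not in the original source): the two
 * dp_rx_process_invalid_peer_wrapper() variants differ in when they
 * act. With RX_PEER_INVALID_ENH the invalid-peer handling runs only
 * once the whole mpdu has been reassembled (mpdu_done); the default
 * variant processes the nbuf immediately.
 */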

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @soc: dp soc handle
 * @msdu: MSDU for which the offload info is to be printed
 * @ofl_info: offload info saved in hal_offload_info structure
 *
 * Return: None
 */
static void dp_rx_print_offload_info(struct dp_soc *soc,
                                     qdf_nbuf_t msdu,
                                     struct hal_offload_info *ofl_info)
{
        dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
        dp_verbose_debug("lro_eligible 0x%x",
                         QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
        dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
        dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
        dp_verbose_debug("TCP seq num 0x%x", ofl_info->tcp_seq_num);
        dp_verbose_debug("TCP ack num 0x%x", ofl_info->tcp_ack_num);
        dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
        dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
        dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
        dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
        dp_verbose_debug("---------------------------------------------------------");
}

void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
                         qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
        struct hal_offload_info offload_info;

        if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
                return;

        if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
                return;

        *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;

        QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
        QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
        QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
                hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
                                          rx_tlv);
        QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
        QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
        QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
        QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
        QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;

        dp_rx_print_offload_info(soc, msdu, &offload_info);
}
#endif /* RECEIVE_OFFLOAD */

/**
 * dp_rx_adjust_nbuf_len() - set the appropriate msdu length in nbuf.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 * @l3_pad_len: L3 padding length by HW
 *
 * Return: true if nbuf is the last msdu of the mpdu, else false.
 */
static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
                                         qdf_nbuf_t nbuf,
                                         uint16_t *mpdu_len,
                                         uint32_t l3_pad_len)
{
        bool last_nbuf;
        uint32_t pkt_hdr_size;
        uint16_t buf_size;

        buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

        pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len;

        if ((*mpdu_len + pkt_hdr_size) > buf_size) {
                qdf_nbuf_set_pktlen(nbuf, buf_size);
                last_nbuf = false;
                *mpdu_len -= (buf_size - pkt_hdr_size);
        } else {
                qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size));
                last_nbuf = true;
                *mpdu_len = 0;
        }

        return last_nbuf;
}
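
/*
 * Illustrative example (numbers are hypothetical, not from the original
 * source): with buf_size = 2048 and pkt_hdr_size = 130 (rx_pkt_tlv_size
 * + l3_pad_len), an mpdu_len of 3000 does not fit: the first nbuf is
 * trimmed to 2048 and mpdu_len drops to 3000 - (2048 - 130) = 1082,
 * which then fits in the following nbuf and ends the chain
 * (last_nbuf = true).
 */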

/**
 * dp_get_l3_hdr_pad_len() - get the L3 header padding length.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 *
 * Return: padding length in bytes.
 */
static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
                                             qdf_nbuf_t nbuf)
{
        uint32_t l3_hdr_pad = 0;
        uint8_t *rx_tlv_hdr;
        struct hal_rx_msdu_metadata msdu_metadata;

        while (nbuf) {
                if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
                        /* scattered msdu end with continuation is 0 */
                        rx_tlv_hdr = qdf_nbuf_data(nbuf);
                        hal_rx_msdu_metadata_get(soc->hal_soc,
                                                 rx_tlv_hdr,
                                                 &msdu_metadata);
                        l3_hdr_pad = msdu_metadata.l3_hdr_pad;
                        break;
                }
                nbuf = nbuf->next;
        }

        return l3_hdr_pad;
}

qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
        qdf_nbuf_t parent, frag_list, next = NULL;
        uint16_t frag_list_len = 0;
        uint16_t mpdu_len;
        bool last_nbuf;
        uint32_t l3_hdr_pad_offset = 0;

        /*
         * Use the msdu len from the REO entry descriptor instead, since
         * there are cases where the RX PKT TLV is corrupted while the
         * msdu_len from the REO descriptor is right for non-raw RX
         * scatter msdus.
         */
        mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

        /*
         * this is a case where the complete msdu fits in one single nbuf.
         * in this case HW sets both start and end bit and we only need to
         * reset these bits for RAW mode simulator to decap the pkt
         */
        if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
            qdf_nbuf_is_rx_chfrag_end(nbuf)) {
                qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
                qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
                return nbuf;
        }

        l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);
        /*
         * This is a case where we have multiple msdus (A-MSDU) spread across
         * multiple nbufs. here we create a fraglist out of these nbufs.
         *
         * the moment we encounter a nbuf with continuation bit set we
         * know for sure we have an MSDU which is spread across multiple
         * nbufs. We loop through and reap nbufs till we reach last nbuf.
         */
        parent = nbuf;
        frag_list = nbuf->next;
        nbuf = nbuf->next;

        /*
         * set the start bit in the first nbuf we encounter with continuation
         * bit set. This has the proper mpdu length set as it is the first
         * msdu of the mpdu. this becomes the parent nbuf and the subsequent
         * nbufs will form the frag_list of the parent nbuf.
         */
        qdf_nbuf_set_rx_chfrag_start(parent, 1);
        /*
         * L3 header padding is only needed for the 1st buffer
         * in a scattered msdu
         */
        last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
                                          l3_hdr_pad_offset);

        /*
         * The MSDU cont bit is set but the reported MPDU length can fit
         * into a single buffer.
         *
         * Increment error stats and avoid SG list creation
         */
        if (last_nbuf) {
                DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
                qdf_nbuf_pull_head(parent,
                                   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
                return parent;
        }
1897 */ 1898 do { 1899 last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0); 1900 qdf_nbuf_pull_head(nbuf, 1901 soc->rx_pkt_tlv_size); 1902 frag_list_len += qdf_nbuf_len(nbuf); 1903 1904 if (last_nbuf) { 1905 next = nbuf->next; 1906 nbuf->next = NULL; 1907 break; 1908 } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) { 1909 dp_err("Invalid packet length"); 1910 qdf_assert_always(0); 1911 } 1912 nbuf = nbuf->next; 1913 } while (!last_nbuf); 1914 1915 qdf_nbuf_set_rx_chfrag_start(nbuf, 0); 1916 qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len); 1917 parent->next = next; 1918 1919 qdf_nbuf_pull_head(parent, 1920 soc->rx_pkt_tlv_size + l3_hdr_pad_offset); 1921 return parent; 1922 } 1923 1924 #ifdef DP_RX_SG_FRAME_SUPPORT 1925 bool dp_rx_is_sg_supported(void) 1926 { 1927 return true; 1928 } 1929 #else 1930 bool dp_rx_is_sg_supported(void) 1931 { 1932 return false; 1933 } 1934 #endif 1935 1936 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1937 1938 #ifdef QCA_PEER_EXT_STATS 1939 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats, 1940 qdf_nbuf_t nbuf) 1941 { 1942 struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay; 1943 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); 1944 1945 dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack); 1946 } 1947 #endif /* QCA_PEER_EXT_STATS */ 1948 1949 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 1950 { 1951 uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); 1952 int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get()); 1953 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); 1954 uint8_t tid = qdf_nbuf_get_tid_val(nbuf); 1955 uint32_t interframe_delay = 1956 (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp); 1957 struct cdp_tid_rx_stats *rstats = 1958 &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1959 1960 dp_update_delay_stats(NULL, rstats, to_stack, tid, 1961 CDP_DELAY_STATS_REAP_STACK, ring_id, false); 1962 /* 1963 * Update interframe delay stats calculated at deliver_data_ol point. 1964 * Value of vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame, so 1965 * the interframe delay will not be calculated correctly for the 1st frame. 1966 * On the other hand, this avoids an extra per-packet check 1967 * of vdev->prev_rx_deliver_tstamp.
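 *
 * Worked example: if the previous frame was delivered at current_ts =
 * 1000 ms and this one at 1040 ms, interframe_delay is 40 ms. For the
 * very first frame prev_rx_deliver_tstamp is 0, so the computed value
 * degenerates to current_ts and that first sample is skewed.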
1968 */ 1969 dp_update_delay_stats(NULL, rstats, interframe_delay, tid, 1970 CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false); 1971 vdev->prev_rx_deliver_tstamp = current_ts; 1972 } 1973 1974 /** 1975 * dp_rx_drop_nbuf_list() - drop an nbuf list 1976 * @pdev: dp pdev reference 1977 * @buf_list: buffer list to be dropped 1978 * 1979 * Return: int (number of bufs dropped) 1980 */ 1981 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev, 1982 qdf_nbuf_t buf_list) 1983 { 1984 struct cdp_tid_rx_stats *stats = NULL; 1985 uint8_t tid = 0, ring_id = 0; 1986 int num_dropped = 0; 1987 qdf_nbuf_t buf, next_buf; 1988 1989 buf = buf_list; 1990 while (buf) { 1991 ring_id = QDF_NBUF_CB_RX_CTX_ID(buf); 1992 next_buf = qdf_nbuf_queue_next(buf); 1993 tid = qdf_nbuf_get_tid_val(buf); 1994 if (qdf_likely(pdev)) { 1995 stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; 1996 stats->fail_cnt[INVALID_PEER_VDEV]++; 1997 stats->delivered_to_stack--; 1998 } 1999 dp_rx_nbuf_free(buf); 2000 buf = next_buf; 2001 num_dropped++; 2002 } 2003 2004 return num_dropped; 2005 } 2006 2007 #ifdef QCA_SUPPORT_WDS_EXTENDED 2008 /** 2009 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta 2010 * @soc: core txrx main context 2011 * @vdev: vdev 2012 * @txrx_peer: txrx peer 2013 * @nbuf_head: skb list head 2014 * 2015 * Return: true if packet is delivered to netdev per STA. 2016 */ 2017 static inline bool 2018 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 2019 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 2020 { 2021 /* 2022 * When extended WDS is disabled, frames are sent to the AP netdevice. 2023 */ 2024 if (qdf_likely(!vdev->wds_ext_enabled)) 2025 return false; 2026 2027 /* 2028 * There can be 2 cases: 2029 * 1. Send the frame to the parent netdev if it is not meant for a netdev per STA 2030 * 2. If the frame is meant for a netdev per STA: 2031 * a. Send the frame to the appropriate netdev using the registered fp. 2032 * b. If fp is NULL, drop the frames.
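 *
 * For illustration, the logic below reduces to this sketch (not driver
 * code, just the decision tree):
 *
 *   if (!vdev->wds_ext_enabled || !txrx_peer->wds_ext.init)
 *       return false;                 (caller delivers via AP netdev)
 *   if (txrx_peer->osif_rx)
 *       hand nbuf_head to the per-STA netdev;
 *   else
 *       drop the list;
 *   return true;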
2033 */ 2034 if (!txrx_peer->wds_ext.init) 2035 return false; 2036 2037 if (txrx_peer->osif_rx) 2038 txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head); 2039 else 2040 dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 2041 2042 return true; 2043 } 2044 2045 #else 2046 static inline bool 2047 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, 2048 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) 2049 { 2050 return false; 2051 } 2052 #endif 2053 2054 #ifdef PEER_CACHE_RX_PKTS 2055 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 2056 /** 2057 * dp_set_nbuf_band() - Set band in nbuf cb 2058 * @peer: dp_peer 2059 * @nbuf: nbuf 2060 * 2061 * Return: None 2062 */ 2063 static inline void 2064 dp_set_nbuf_band(struct dp_peer *peer, qdf_nbuf_t nbuf) 2065 { 2066 uint8_t link_id = 0; 2067 2068 link_id = dp_rx_get_stats_arr_idx_from_link_id(nbuf, peer->txrx_peer); 2069 dp_rx_set_nbuf_band(nbuf, peer->txrx_peer, link_id); 2070 } 2071 #else 2072 static inline void 2073 dp_set_nbuf_band(struct dp_peer *peer, qdf_nbuf_t nbuf) 2074 { 2075 } 2076 #endif 2077 2078 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) 2079 { 2080 struct dp_peer_cached_bufq *bufqi; 2081 struct dp_rx_cached_buf *cache_buf = NULL; 2082 ol_txrx_rx_fp data_rx = NULL; 2083 int num_buff_elem; 2084 QDF_STATUS status; 2085 2086 /* 2087 * Flush dp cached frames only for mld peers and legacy peers, as 2088 * link peers don't store cached frames 2089 */ 2090 if (IS_MLO_DP_LINK_PEER(peer)) 2091 return; 2092 2093 if (!peer->txrx_peer) { 2094 dp_err("txrx_peer NULL!! peer mac_addr("QDF_MAC_ADDR_FMT")", 2095 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 2096 return; 2097 } 2098 2099 if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) { 2100 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 2101 return; 2102 } 2103 2104 qdf_spin_lock_bh(&peer->peer_info_lock); 2105 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) 2106 data_rx = peer->vdev->osif_rx; 2107 else 2108 drop = true; 2109 qdf_spin_unlock_bh(&peer->peer_info_lock); 2110 2111 bufqi = &peer->txrx_peer->bufq_info; 2112 2113 qdf_spin_lock_bh(&bufqi->bufq_lock); 2114 qdf_list_remove_front(&bufqi->cached_bufq, 2115 (qdf_list_node_t **)&cache_buf); 2116 while (cache_buf) { 2117 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( 2118 cache_buf->buf); 2119 bufqi->entries -= num_buff_elem; 2120 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2121 if (drop) { 2122 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, 2123 cache_buf->buf); 2124 } else { 2125 dp_set_nbuf_band(peer, cache_buf->buf); 2126 /* Flush the cached frames to OSIF DEV */ 2127 status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); 2128 if (status != QDF_STATUS_SUCCESS) 2129 bufqi->dropped = dp_rx_drop_nbuf_list( 2130 peer->vdev->pdev, 2131 cache_buf->buf); 2132 } 2133 qdf_mem_free(cache_buf); 2134 cache_buf = NULL; 2135 qdf_spin_lock_bh(&bufqi->bufq_lock); 2136 qdf_list_remove_front(&bufqi->cached_bufq, 2137 (qdf_list_node_t **)&cache_buf); 2138 } 2139 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2140 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 2141 } 2142 2143 /** 2144 * dp_rx_enqueue_rx() - cache rx frames 2145 * @peer: peer 2146 * @txrx_peer: DP txrx_peer 2147 * @rx_buf_list: cache buffer list 2148 * 2149 * Return: None 2150 */ 2151 static QDF_STATUS 2152 dp_rx_enqueue_rx(struct dp_peer *peer, 2153 struct dp_txrx_peer *txrx_peer, 2154 qdf_nbuf_t rx_buf_list) 2155 { 2156 struct dp_rx_cached_buf *cache_buf; 2157 struct dp_peer_cached_bufq 
*bufqi = &txrx_peer->bufq_info; 2158 int num_buff_elem; 2159 QDF_STATUS ret = QDF_STATUS_SUCCESS; 2160 struct dp_soc *soc = txrx_peer->vdev->pdev->soc; 2161 struct dp_peer *ta_peer = NULL; 2162 2163 /* 2164 * If the peer id is invalid, peer map has likely not completed yet, 2165 * so the caller must provide the dp_peer pointer; otherwise it's ok to 2166 * use txrx_peer->peer_id to get the dp_peer. 2167 */ 2168 if (peer) { 2169 if (QDF_STATUS_SUCCESS == 2170 dp_peer_get_ref(soc, peer, DP_MOD_ID_RX)) 2171 ta_peer = peer; 2172 } else { 2173 ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2174 DP_MOD_ID_RX); 2175 } 2176 2177 if (!ta_peer) { 2178 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2179 rx_buf_list); 2180 return QDF_STATUS_E_INVAL; 2181 } 2182 2183 dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, 2184 bufqi->dropped); 2185 if (!ta_peer->valid) { 2186 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2187 rx_buf_list); 2188 ret = QDF_STATUS_E_INVAL; 2189 goto fail; 2190 } 2191 2192 qdf_spin_lock_bh(&bufqi->bufq_lock); 2193 if (bufqi->entries >= bufqi->thresh) { 2194 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2195 rx_buf_list); 2196 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2197 ret = QDF_STATUS_E_RESOURCES; 2198 goto fail; 2199 } 2200 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2201 2202 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); 2203 2204 cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); 2205 if (!cache_buf) { 2206 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2207 "Failed to allocate buf to cache rx frames"); 2208 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2209 rx_buf_list); 2210 ret = QDF_STATUS_E_NOMEM; 2211 goto fail; 2212 } 2213 2214 cache_buf->buf = rx_buf_list; 2215 2216 qdf_spin_lock_bh(&bufqi->bufq_lock); 2217 qdf_list_insert_back(&bufqi->cached_bufq, 2218 &cache_buf->node); 2219 bufqi->entries += num_buff_elem; 2220 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2221 2222 fail: 2223 dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX); 2224 return ret; 2225 } 2226 2227 static inline 2228 bool dp_rx_is_peer_cache_bufq_supported(void) 2229 { 2230 return true; 2231 } 2232 #else 2233 static inline 2234 bool dp_rx_is_peer_cache_bufq_supported(void) 2235 { 2236 return false; 2237 } 2238 2239 static inline QDF_STATUS 2240 dp_rx_enqueue_rx(struct dp_peer *peer, 2241 struct dp_txrx_peer *txrx_peer, 2242 qdf_nbuf_t rx_buf_list) 2243 { 2244 return QDF_STATUS_SUCCESS; 2245 } 2246 #endif 2247 2248 #ifndef DELIVERY_TO_STACK_STATUS_CHECK 2249 /** 2250 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2251 * using the appropriate callback functions. 2252 * @soc: soc 2253 * @vdev: vdev 2254 * @txrx_peer: peer 2255 * @nbuf_head: skb list head 2256 * 2257 * Return: None 2258 */ 2259 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2260 struct dp_vdev *vdev, 2261 struct dp_txrx_peer *txrx_peer, 2262 qdf_nbuf_t nbuf_head) 2263 { 2264 if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev, 2265 txrx_peer, nbuf_head))) 2266 return; 2267 2268 /* Function pointer initialized only when FISA is enabled */ 2269 if (vdev->osif_fisa_rx) 2270 /* on failure send it via regular path */ 2271 vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2272 else 2273 vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2274 } 2275 2276 #else 2277 /** 2278 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2279 * using the appropriate callback functions.
* @soc: soc 2281 * @vdev: vdev 2282 * @txrx_peer: txrx peer 2283 * @nbuf_head: skb list head 2284 * 2285 * Check the return status of the callback function and drop 2286 * the packets if the return status indicates a failure. 2287 * 2288 * Return: None 2289 */ 2290 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2291 struct dp_vdev *vdev, 2292 struct dp_txrx_peer *txrx_peer, 2293 qdf_nbuf_t nbuf_head) 2294 { 2295 int num_nbuf = 0; 2296 QDF_STATUS ret_val = QDF_STATUS_E_FAILURE; 2297 2298 /* Function pointer initialized only when FISA is enabled */ 2299 if (vdev->osif_fisa_rx) 2300 /* on failure send it via regular path */ 2301 ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2302 else if (vdev->osif_rx) 2303 ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2304 2305 if (!QDF_IS_STATUS_SUCCESS(ret_val)) { 2306 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 2307 DP_STATS_INC(soc, rx.err.rejected, num_nbuf); 2308 if (txrx_peer) 2309 DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num, 2310 num_nbuf); 2311 } 2312 } 2313 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ 2314 2315 /** 2316 * dp_rx_validate_rx_callbacks() - validate rx callbacks 2317 * @soc: DP soc 2318 * @vdev: DP vdev handle 2319 * @txrx_peer: pointer to the txrx peer object 2320 * @nbuf_head: skb list head 2321 * 2322 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 2323 * QDF_STATUS_E_FAILURE 2324 */ 2325 static inline QDF_STATUS 2326 dp_rx_validate_rx_callbacks(struct dp_soc *soc, 2327 struct dp_vdev *vdev, 2328 struct dp_txrx_peer *txrx_peer, 2329 qdf_nbuf_t nbuf_head) 2330 { 2331 int num_nbuf; 2332 2333 if (qdf_unlikely(!vdev || vdev->delete.pending)) { 2334 num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head); 2335 /* 2336 * This is a special case where the vdev is invalid, 2337 * so we cannot know the pdev to which this packet 2338 * belonged. Hence we update the soc rx error stats. 2339 */ 2340 DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf); 2341 return QDF_STATUS_E_FAILURE; 2342 } 2343 2344 /* 2345 * It is highly unlikely to have a vdev without a registered rx 2346 * callback function; if so, let us free the nbuf_list.
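 *
 * One exception, handled below: when PEER_CACHE_RX_PKTS is compiled in
 * and a txrx_peer exists, the list is parked via dp_rx_enqueue_rx()
 * instead of being freed, and dp_rx_flush_rx_cached() delivers it once
 * the callback is registered.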
2347 */ 2348 if (qdf_unlikely(!vdev->osif_rx)) { 2349 if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) { 2350 dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head); 2351 } else { 2352 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, 2353 nbuf_head); 2354 DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf, 2355 vdev->pdev->enhanced_stats_en); 2356 } 2357 return QDF_STATUS_E_FAILURE; 2358 } 2359 2360 return QDF_STATUS_SUCCESS; 2361 } 2362 2363 #if defined(WLAN_FEATURE_11BE_MLO) && defined(RAW_PKT_MLD_ADDR_CONVERSION) 2364 static void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc, 2365 struct dp_vdev *vdev, 2366 struct dp_txrx_peer *txrx_peer, 2367 qdf_nbuf_t nbuf_head) 2368 { 2369 qdf_nbuf_t nbuf, next; 2370 struct dp_peer *peer = NULL; 2371 struct ieee80211_frame *wh = NULL; 2372 2373 if (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi) 2374 return; 2375 2376 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2377 DP_MOD_ID_RX); 2378 2379 if (!peer) 2380 return; 2381 2382 if (!IS_MLO_DP_MLD_PEER(peer)) { 2383 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2384 return; 2385 } 2386 2387 nbuf = nbuf_head; 2388 while (nbuf) { 2389 next = nbuf->next; 2390 wh = (struct ieee80211_frame *)qdf_nbuf_data(nbuf); 2391 qdf_mem_copy(wh->i_addr1, vdev->mld_mac_addr.raw, 2392 QDF_MAC_ADDR_SIZE); 2393 qdf_mem_copy(wh->i_addr2, peer->mac_addr.raw, 2394 QDF_MAC_ADDR_SIZE); 2395 nbuf = next; 2396 } 2397 2398 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2399 } 2400 #else 2401 static inline 2402 void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc, 2403 struct dp_vdev *vdev, 2404 struct dp_txrx_peer *txrx_peer, 2405 qdf_nbuf_t nbuf_head) 2406 { } 2407 #endif 2408 2409 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 2410 struct dp_vdev *vdev, 2411 struct dp_txrx_peer *txrx_peer, 2412 qdf_nbuf_t nbuf_head, 2413 qdf_nbuf_t nbuf_tail) 2414 { 2415 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2416 QDF_STATUS_SUCCESS) 2417 return QDF_STATUS_E_FAILURE; 2418 2419 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || 2420 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { 2421 dp_rx_raw_pkt_mld_addr_conv(soc, vdev, txrx_peer, nbuf_head); 2422 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, 2423 &nbuf_tail); 2424 } 2425 2426 dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head); 2427 2428 return QDF_STATUS_SUCCESS; 2429 } 2430 2431 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 2432 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 2433 struct dp_vdev *vdev, 2434 struct dp_txrx_peer *txrx_peer, 2435 qdf_nbuf_t nbuf_head, 2436 qdf_nbuf_t nbuf_tail) 2437 { 2438 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2439 QDF_STATUS_SUCCESS) 2440 return QDF_STATUS_E_FAILURE; 2441 2442 vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head); 2443 2444 return QDF_STATUS_SUCCESS; 2445 } 2446 #endif 2447 2448 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2449 #ifdef VDEV_PEER_PROTOCOL_COUNT 2450 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \ 2451 { \ 2452 qdf_nbuf_t nbuf_local; \ 2453 struct dp_txrx_peer *txrx_peer_local; \ 2454 struct dp_vdev *vdev_local = vdev_hdl; \ 2455 do { \ 2456 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ 2457 break; \ 2458 nbuf_local = nbuf; \ 2459 txrx_peer_local = txrx_peer; \ 2460 if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ 2461 break; \ 2462 else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ 2463 break; \ 2464 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ 2465 (nbuf_local), \ 2466 
(txrx_peer_local), 0, 1); \ 2467 } while (0); \ 2468 } 2469 #else 2470 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) 2471 #endif 2472 2473 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER 2474 /** 2475 * dp_rx_rates_stats_update() - update rate stats 2476 * from rx msdu. 2477 * @soc: datapath soc handle 2478 * @nbuf: received msdu buffer 2479 * @rx_tlv_hdr: rx tlv header 2480 * @txrx_peer: datapath txrx_peer handle 2481 * @sgi: Short Guard Interval 2482 * @mcs: Modulation and Coding Set 2483 * @nss: Number of Spatial Streams 2484 * @bw: BandWidth 2485 * @pkt_type: Corresponds to preamble 2486 * @link_id: Link Id on which packet is received 2487 * 2488 * To record rates precisely, the following factors are considered: 2489 * specific frames (ARP, DHCP, SSDP, etc.) are excluded, and 2490 * the impact on rx throughput is kept as small as possible. 2491 * 2492 * Return: void 2493 */ 2494 static void 2495 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2496 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2497 uint32_t sgi, uint32_t mcs, 2498 uint32_t nss, uint32_t bw, uint32_t pkt_type, 2499 uint8_t link_id) 2500 { 2501 uint32_t rix; 2502 uint16_t ratecode; 2503 uint32_t avg_rx_rate; 2504 uint32_t ratekbps; 2505 enum cdp_punctured_modes punc_mode = NO_PUNCTURE; 2506 2507 if (soc->high_throughput || 2508 dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) { 2509 return; 2510 } 2511 2512 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs, link_id); 2513 2514 /* In 11b mode, the nss we get from the tlv is 0, which is invalid; it should be 1 */ 2515 if (qdf_unlikely(pkt_type == DOT11_B)) 2516 nss = 1; 2517 2518 /* here pkt_type corresponds to preamble */ 2519 ratekbps = dp_getrateindex(sgi, 2520 mcs, 2521 nss - 1, 2522 pkt_type, 2523 bw, 2524 punc_mode, 2525 &rix, 2526 &ratecode); 2527 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps, link_id); 2528 avg_rx_rate = 2529 dp_ath_rate_lpf( 2530 txrx_peer->stats[link_id].extd_stats.rx.avg_rx_rate, 2531 ratekbps); 2532 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate, link_id); 2533 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss, link_id); 2534 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs, link_id); 2535 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw, link_id); 2536 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi, link_id); 2537 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type, link_id); 2538 } 2539 #else 2540 static inline void 2541 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2542 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 2543 uint32_t sgi, uint32_t mcs, 2544 uint32_t nss, uint32_t bw, uint32_t pkt_type, 2545 uint8_t link_id) 2546 { 2547 } 2548 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */ 2549 2550 #ifndef QCA_ENHANCED_STATS_SUPPORT 2551 /** 2552 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer 2553 * 2554 * @soc: datapath soc handle 2555 * @nbuf: received msdu buffer 2556 * @rx_tlv_hdr: rx tlv header 2557 * @txrx_peer: datapath txrx_peer handle 2558 * @link_id: link id on which the packet is received 2559 * 2560 * Return: void 2561 */ 2562 static inline 2563 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2564 uint8_t *rx_tlv_hdr, 2565 struct dp_txrx_peer *txrx_peer, 2566 uint8_t link_id) 2567 { 2568 bool is_ampdu; 2569 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; 2570 uint8_t dst_mcs_idx; 2571 2572 /* 2573 * TODO - For KIWI this field is present in ring_desc 2574 * Try to use ring desc instead of tlv.
2575 */ 2576 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr); 2577 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu, link_id); 2578 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu), 2579 link_id); 2580 2581 sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr); 2582 mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 2583 tid = qdf_nbuf_get_tid_val(nbuf); 2584 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 2585 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, 2586 rx_tlv_hdr); 2587 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 2588 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 2589 /* do HW to SW pkt type conversion */ 2590 pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX : 2591 hal_2_dp_pkt_type_map[pkt_type]); 2592 2593 /* 2594 * The MCS index does not start with 0 when NSS>1 in HT mode. 2595 * MCS params for optional 20/40MHz, NSS=1~3, EQM(NSS>1): 2596 * ------------------------------------------------------ 2597 * NSS | 1 | 2 | 3 | 4 2598 * ------------------------------------------------------ 2599 * MCS index: HT20 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31 2600 * ------------------------------------------------------ 2601 * MCS index: HT40 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31 2602 * ------------------------------------------------------ 2603 * Currently MAX_NSS = 2; for NSS > 1, the MCS index starts at 8 * (NSS-1). 2604 */ 2605 if ((pkt_type == DOT11_N) && (nss == 2)) 2606 mcs += 8; 2607 2608 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1, 2609 ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)), 2610 link_id); 2611 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, 2612 ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)), 2613 link_id); 2614 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1, link_id); 2615 /* 2616 * Only if nss > 0 and the pkt_type is 11N/AC/AX 2617 * do we increment index [nss - 1] of the array counter.
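 * For example, an HT (DOT11_N) frame received with nss == 2 increments
 * rx.nss[1] for this link.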
2618 */ 2619 if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type)) 2620 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1, link_id); 2621 2622 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1, link_id); 2623 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1, 2624 hal_rx_tlv_mic_err_get(soc->hal_soc, 2625 rx_tlv_hdr), link_id); 2626 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1, 2627 hal_rx_tlv_decrypt_err_get(soc->hal_soc, 2628 rx_tlv_hdr), link_id); 2629 2630 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1, 2631 link_id); 2632 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1, 2633 link_id); 2634 2635 dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs); 2636 if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx) 2637 DP_PEER_EXTD_STATS_INC(txrx_peer, 2638 rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx], 2639 1, link_id); 2640 2641 dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, 2642 sgi, mcs, nss, bw, pkt_type, link_id); 2643 } 2644 #else 2645 static inline 2646 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2647 uint8_t *rx_tlv_hdr, 2648 struct dp_txrx_peer *txrx_peer, 2649 uint8_t link_id) 2650 { 2651 } 2652 #endif 2653 2654 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO) 2655 static inline void 2656 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2657 qdf_nbuf_t nbuf, uint8_t link_id) 2658 { 2659 uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf); 2660 2661 if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) { 2662 dp_err_rl("Invalid lmac_id: %u vdev_id: %u", 2663 lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf)); 2664 2665 if (qdf_likely(txrx_peer)) 2666 dp_err_rl("peer_id: %u", txrx_peer->peer_id); 2667 2668 return; 2669 } 2670 2671 /* only count stats per lmac for MLO connection*/ 2672 DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1, 2673 QDF_NBUF_CB_RX_PKT_LEN(nbuf), 2674 txrx_peer->is_mld_peer, link_id); 2675 } 2676 #else 2677 static inline void 2678 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2679 qdf_nbuf_t nbuf, uint8_t link_id) 2680 { 2681 } 2682 #endif 2683 2684 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2685 uint8_t *rx_tlv_hdr, 2686 struct dp_txrx_peer *txrx_peer, 2687 uint8_t ring_id, 2688 struct cdp_tid_rx_stats *tid_stats, 2689 uint8_t link_id) 2690 { 2691 bool is_not_amsdu; 2692 struct dp_vdev *vdev = txrx_peer->vdev; 2693 uint8_t enh_flag; 2694 qdf_ether_header_t *eh; 2695 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2696 2697 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); 2698 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 2699 qdf_nbuf_is_rx_chfrag_end(nbuf); 2700 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, 2701 msdu_len, link_id); 2702 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, 2703 is_not_amsdu, link_id); 2704 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, 2705 !is_not_amsdu, link_id); 2706 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1, 2707 qdf_nbuf_is_rx_retry_flag(nbuf), link_id); 2708 dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf, link_id); 2709 tid_stats->msdu_cnt++; 2710 enh_flag = vdev->pdev->enhanced_stats_en; 2711 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && 2712 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 2713 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2714 DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag, link_id); 2715 tid_stats->mcast_msdu_cnt++; 2716 if 
(QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 2717 DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, 2718 enh_flag, link_id); 2719 tid_stats->bcast_msdu_cnt++; 2720 } 2721 } else { 2722 DP_PEER_UC_INCC_PKT(txrx_peer, 1, msdu_len, 2723 enh_flag, link_id); 2724 } 2725 2726 txrx_peer->stats[link_id].per_pkt_stats.rx.last_rx_ts = 2727 qdf_system_ticks(); 2728 2729 dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, 2730 txrx_peer, link_id); 2731 } 2732 2733 #ifndef WDS_VENDOR_EXTENSION 2734 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, 2735 struct dp_vdev *vdev, 2736 struct dp_txrx_peer *txrx_peer) 2737 { 2738 return 1; 2739 } 2740 #endif 2741 2742 #ifdef DP_RX_PKT_NO_PEER_DELIVER 2743 #ifdef DP_RX_UDP_OVER_PEER_ROAM 2744 /** 2745 * dp_rx_is_udp_allowed_over_roam_peer() - check whether udp data received 2746 * during roaming is allowed 2747 * @vdev: dp_vdev pointer 2748 * @rx_tlv_hdr: rx tlv header 2749 * @nbuf: pkt skb pointer 2750 * 2751 * This function checks whether rx udp data was received from an authorised 2752 * roamed peer before the peer map indication is received from FW after 2753 * roaming. This is needed for VoIP scenarios, where the packet loss 2754 * expected during roaming must be minimal. 2755 * 2756 * Return: bool 2757 */ 2758 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2759 uint8_t *rx_tlv_hdr, 2760 qdf_nbuf_t nbuf) 2761 { 2762 char *hdr_desc; 2763 struct ieee80211_frame *wh = NULL; 2764 2765 hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc, 2766 rx_tlv_hdr); 2767 wh = (struct ieee80211_frame *)hdr_desc; 2768 2769 if (vdev->roaming_peer_status == 2770 WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED && 2771 !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2, 2772 QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) || 2773 qdf_nbuf_is_ipv6_udp_pkt(nbuf))) 2774 return true; 2775 2776 return false; 2777 } 2778 #else 2779 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2780 uint8_t *rx_tlv_hdr, 2781 qdf_nbuf_t nbuf) 2782 { 2783 return false; 2784 } 2785 #endif 2786 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2787 { 2788 uint16_t peer_id; 2789 uint8_t vdev_id; 2790 struct dp_vdev *vdev = NULL; 2791 uint32_t l2_hdr_offset = 0; 2792 uint16_t msdu_len = 0; 2793 uint32_t pkt_len = 0; 2794 uint8_t *rx_tlv_hdr; 2795 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 2796 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; 2797 bool is_special_frame = false; 2798 struct dp_peer *peer = NULL; 2799 2800 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 2801 if (peer_id > soc->max_peer_id) 2802 goto deliver_fail; 2803 2804 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); 2805 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX); 2806 if (!vdev || vdev->delete.pending) 2807 goto deliver_fail; 2808 2809 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) 2810 goto deliver_fail; 2811 2812 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2813 l2_hdr_offset = 2814 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 2815 2816 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2817 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 2818 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 2819 2820 qdf_nbuf_set_pktlen(nbuf, pkt_len); 2821 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset); 2822 2823 is_special_frame = dp_rx_is_special_frame(nbuf, frame_mask); 2824 if (qdf_likely(vdev->osif_rx)) { 2825 if (is_special_frame || 2826 dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, 2827 nbuf)) { 2828 qdf_nbuf_set_exc_frame(nbuf, 1); 2829 if (QDF_STATUS_SUCCESS !=
2830 vdev->osif_rx(vdev->osif_vdev, nbuf)) 2831 goto deliver_fail; 2832 2833 DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1); 2834 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2835 return; 2836 } 2837 } else if (is_special_frame) { 2838 /* 2839 * For an MLO connection, the txrx_peer of the link peer does 2840 * not exist; try to store these RX packets in the MLD peer's 2841 * txrx_peer bufq until vdev->osif_rx is registered from CP, 2842 * and then flush them to the stack. 2843 */ 2844 peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, 2845 DP_MOD_ID_RX); 2846 if (!peer) 2847 goto deliver_fail; 2848 2849 /* only check for MLO connection */ 2850 if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer && 2851 dp_rx_is_peer_cache_bufq_supported()) { 2852 qdf_nbuf_set_exc_frame(nbuf, 1); 2853 2854 if (QDF_STATUS_SUCCESS == 2855 dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) { 2856 DP_STATS_INC(soc, 2857 rx.err.pkt_delivered_no_peer, 2858 1); 2859 } else { 2860 DP_STATS_INC(soc, 2861 rx.err.rx_invalid_peer.num, 2862 1); 2863 } 2864 2865 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2866 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2867 return; 2868 } 2869 2870 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2871 } 2872 2873 deliver_fail: 2874 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2875 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2876 dp_rx_nbuf_free(nbuf); 2877 if (vdev) 2878 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2879 } 2880 #else 2881 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2882 { 2883 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2884 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2885 dp_rx_nbuf_free(nbuf); 2886 } 2887 #endif 2888 2889 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2890 2891 #ifdef WLAN_SUPPORT_RX_FISA 2892 QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id, 2893 enum cdp_fisa_config_id config_id, 2894 union cdp_fisa_config *cfg) 2895 { 2896 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 2897 struct dp_pdev *pdev; 2898 QDF_STATUS status; 2899 2900 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2901 if (!pdev) { 2902 dp_err("pdev is NULL for pdev_id %u", pdev_id); 2903 return QDF_STATUS_E_INVAL; 2904 } 2905 2906 switch (config_id) { 2907 case CDP_FISA_HTT_RX_FISA_CFG: 2908 status = dp_htt_rx_fisa_config(pdev, cfg->fisa_config); 2909 break; 2910 case CDP_FISA_HTT_RX_FSE_OP_CFG: 2911 status = dp_htt_rx_flow_fse_operation(pdev, cfg->fse_op_cmd); 2912 break; 2913 case CDP_FISA_HTT_RX_FSE_SETUP_CFG: 2914 status = dp_htt_rx_flow_fst_setup(pdev, cfg->fse_setup_info); 2915 break; 2916 default: 2917 status = QDF_STATUS_E_INVAL; 2918 } 2919 2920 return status; 2921 } 2922 2923 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2924 { 2925 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; 2926 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2927 } 2928 #else 2929 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2930 { 2931 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2932 } 2933 #endif 2934 2935 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2936 2937 #ifdef DP_RX_DROP_RAW_FRM 2938 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) 2939 { 2940 if (qdf_nbuf_is_raw_frame(nbuf)) { 2941 dp_rx_nbuf_free(nbuf); 2942 return true; 2943 } 2944 2945 return false; 2946 } 2947 #endif 2948 2949 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 2950 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) 2951 { 2952 DP_STATS_INC_PKT(soc, rx.ingress, 1, 2953 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2954 } 2955 #endif 2956
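/*
 * Minimal usage sketch (illustrative, not part of the driver): the fast
 * path strips the RX TLVs plus the HW-inserted L3 padding in one step via
 * dp_rx_skip_tlvs() above. A hypothetical caller that already holds the
 * TLV virtual address could look like this:
 */
static inline void
dp_rx_skip_tlvs_example(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr)
{
	/* Pad bytes the HW inserted before the L3 header */
	uint32_t l3_pad =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	/*
	 * Advances nbuf->data past the TLVs and padding; with
	 * WLAN_SUPPORT_RX_FISA it also records the pad length in the
	 * nbuf control block.
	 */
	dp_rx_skip_tlvs(soc, nbuf, l3_pad);
}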
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2 2958 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2959 uint16_t peer_id, uint32_t is_offload, 2960 qdf_nbuf_t netbuf) 2961 { 2962 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2963 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf, 2964 peer_id, is_offload, pdev->pdev_id); 2965 } 2966 2967 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2968 uint32_t is_offload) 2969 { 2970 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2971 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER, 2972 soc, nbuf, HTT_INVALID_VDEV, 2973 is_offload, 0); 2974 } 2975 #endif 2976 2977 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2978 2979 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) 2980 { 2981 QDF_STATUS ret; 2982 2983 if (vdev->osif_rx_flush) { 2984 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); 2985 if (!QDF_IS_STATUS_SUCCESS(ret)) { 2986 dp_err("Failed to flush rx pkts for vdev %d", 2987 vdev->vdev_id); 2988 return ret; 2989 } 2990 } 2991 2992 return QDF_STATUS_SUCCESS; 2993 } 2994 2995 static QDF_STATUS 2996 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, 2997 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 2998 struct dp_pdev *dp_pdev, 2999 struct rx_desc_pool *rx_desc_pool, 3000 bool dp_buf_page_frag_alloc_enable) 3001 { 3002 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 3003 3004 if (dp_buf_page_frag_alloc_enable) { 3005 (nbuf_frag_info_t->virt_addr).nbuf = 3006 qdf_nbuf_frag_alloc(dp_soc->osdev, 3007 rx_desc_pool->buf_size, 3008 RX_BUFFER_RESERVATION, 3009 rx_desc_pool->buf_alignment, FALSE); 3010 } else { 3011 (nbuf_frag_info_t->virt_addr).nbuf = 3012 qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, 3013 RX_BUFFER_RESERVATION, 3014 rx_desc_pool->buf_alignment, FALSE); 3015 } 3016 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 3017 dp_err("nbuf alloc failed"); 3018 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 3019 return ret; 3020 } 3021 3022 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, 3023 (nbuf_frag_info_t->virt_addr).nbuf, 3024 QDF_DMA_FROM_DEVICE, 3025 rx_desc_pool->buf_size); 3026 3027 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 3028 qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf); 3029 dp_err("nbuf map failed"); 3030 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 3031 return ret; 3032 } 3033 3034 nbuf_frag_info_t->paddr = 3035 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 3036 3037 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 3038 &nbuf_frag_info_t->paddr, 3039 rx_desc_pool); 3040 if (ret == QDF_STATUS_E_FAILURE) { 3041 dp_err("nbuf check x86 failed"); 3042 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 3043 return ret; 3044 } 3045 3046 return QDF_STATUS_SUCCESS; 3047 } 3048 3049 QDF_STATUS 3050 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 3051 struct dp_srng *dp_rxdma_srng, 3052 struct rx_desc_pool *rx_desc_pool, 3053 uint32_t num_req_buffers) 3054 { 3055 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 3056 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; 3057 union dp_rx_desc_list_elem_t *next; 3058 void *rxdma_ring_entry; 3059 qdf_dma_addr_t paddr; 3060 struct dp_rx_nbuf_frag_info *nf_info; 3061 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; 3062 uint32_t buffer_index, nbuf_ptrs_per_page; 3063 qdf_nbuf_t nbuf; 3064 QDF_STATUS ret; 3065 int page_idx, total_pages; 3066 union dp_rx_desc_list_elem_t *desc_list = NULL; 3067 union dp_rx_desc_list_elem_t *tail 
= NULL; 3068 int sync_hw_ptr = 1; 3069 uint32_t num_entries_avail; 3070 bool dp_buf_page_frag_alloc_enable; 3071 3072 if (qdf_unlikely(!dp_pdev)) { 3073 dp_rx_err("%pK: pdev is null for mac_id = %d", 3074 dp_soc, mac_id); 3075 return QDF_STATUS_E_FAILURE; 3076 } 3077 3078 dp_buf_page_frag_alloc_enable = 3079 wlan_cfg_is_dp_buf_page_frag_alloc_enable(dp_soc->wlan_cfg_ctx); 3080 3081 if (qdf_unlikely(!rxdma_srng)) { 3082 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3083 return QDF_STATUS_E_FAILURE; 3084 } 3085 3086 dp_debug("requested %u RX buffers for driver attach", num_req_buffers); 3087 3088 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3089 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 3090 rxdma_srng, 3091 sync_hw_ptr); 3092 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3093 3094 if (!num_entries_avail) { 3095 dp_err("Num of available entries is zero, nothing to do"); 3096 return QDF_STATUS_E_NOMEM; 3097 } 3098 3099 if (num_entries_avail < num_req_buffers) 3100 num_req_buffers = num_entries_avail; 3101 3102 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, 3103 num_req_buffers, &desc_list, &tail); 3104 if (!nr_descs) { 3105 dp_err("no free rx_descs in freelist"); 3106 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 3107 return QDF_STATUS_E_NOMEM; 3108 } 3109 3110 dp_debug("got %u RX descs for driver attach", nr_descs); 3111 3112 /* 3113 * Allocate the nbuf pointer array one page at a time: 3114 * take as many pointers as fit in one page of memory and 3115 * iterate through the total descriptors that need to be 3116 * allocated in page-sized batches. Reuse the same 3117 * one-page array of pointers across each 3118 * iteration to index into the nbufs. 3119 */ 3120 total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE; 3121 3122 /* 3123 * Add an extra page to store the remainder if any 3124 */ 3125 if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE) 3126 total_pages++; 3127 nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE); 3128 if (!nf_info) { 3129 dp_err("failed to allocate nbuf array"); 3130 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3131 QDF_BUG(0); 3132 return QDF_STATUS_E_NOMEM; 3133 } 3134 nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info); 3135 3136 for (page_idx = 0; page_idx < total_pages; page_idx++) { 3137 qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE); 3138 3139 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { 3140 /* 3141 * The last page of buffer pointers may not be needed 3142 * in full, depending on the number of descriptors. The 3143 * check below ensures we allocate only the 3144 * required number of descriptors.
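 *
 * Worked example with hypothetical sizes: if DP_BLOCKMEM_SIZE were 4096
 * and sizeof(*nf_info) were 16, nbuf_ptrs_per_page would be 256; for
 * nr_descs = 600 that gives total_pages = 3 (two full pages plus one for
 * the remainder), and on the last page the check below stops the loop
 * after the remaining 88 allocations.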
3145 */ 3146 if (nr_nbuf_total >= nr_descs) 3147 break; 3148 /* Flag is set during pdev rx_desc_pool initialization */ 3149 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3150 ret = dp_pdev_frag_alloc_and_map(dp_soc, 3151 &nf_info[nr_nbuf], dp_pdev, 3152 rx_desc_pool); 3153 else 3154 ret = dp_pdev_nbuf_alloc_and_map(dp_soc, 3155 &nf_info[nr_nbuf], dp_pdev, 3156 rx_desc_pool, 3157 dp_buf_page_frag_alloc_enable); 3158 if (QDF_IS_STATUS_ERROR(ret)) 3159 break; 3160 3161 nr_nbuf_total++; 3162 } 3163 3164 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3165 3166 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { 3167 rxdma_ring_entry = 3168 hal_srng_src_get_next(dp_soc->hal_soc, 3169 rxdma_srng); 3170 qdf_assert_always(rxdma_ring_entry); 3171 3172 next = desc_list->next; 3173 paddr = nf_info[buffer_index].paddr; 3174 nbuf = nf_info[buffer_index].virt_addr.nbuf; 3175 3176 /* Flag is set during pdev rx_desc_pool initialization */ 3177 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3178 dp_rx_desc_frag_prep(&desc_list->rx_desc, 3179 &nf_info[buffer_index]); 3180 else 3181 dp_rx_desc_prep(&desc_list->rx_desc, 3182 &nf_info[buffer_index]); 3183 desc_list->rx_desc.in_use = 1; 3184 dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); 3185 dp_rx_desc_update_dbg_info(&desc_list->rx_desc, 3186 __func__, 3187 RX_DESC_REPLENISHED); 3188 3189 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, paddr, 3190 desc_list->rx_desc.cookie, 3191 rx_desc_pool->owner); 3192 3193 if (qdf_atomic_read(&dp_soc->ipa_mapped)) 3194 dp_ipa_handle_rx_buf_smmu_mapping( 3195 dp_soc, nbuf, 3196 rx_desc_pool->buf_size, true, 3197 __func__, __LINE__); 3198 3199 dp_audio_smmu_map(dp_soc->osdev, 3200 qdf_mem_paddr_from_dmaaddr(dp_soc->osdev, 3201 QDF_NBUF_CB_PADDR(nbuf)), 3202 QDF_NBUF_CB_PADDR(nbuf), 3203 rx_desc_pool->buf_size); 3204 3205 desc_list = next; 3206 } 3207 3208 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, 3209 rxdma_srng, nr_nbuf, nr_nbuf); 3210 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3211 } 3212 3213 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); 3214 qdf_mem_free(nf_info); 3215 3216 if (!nr_nbuf_total) { 3217 dp_err("No nbufs allocated"); 3218 QDF_BUG(0); 3219 return QDF_STATUS_E_RESOURCES; 3220 } 3221 3222 /* No need to count the number of bytes received during replenish. 3223 * Therefore set replenish.pkts.bytes as 0.
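 * (Hence the call below passes 0 bytes: DP_STATS_INC_PKT(dp_pdev,
 * replenish.pkts, nr_nbuf, 0) bumps the packet counter by nr_nbuf while
 * leaving the byte counter untouched.)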
3224 */ 3225 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); 3226 3227 return QDF_STATUS_SUCCESS; 3228 } 3229 3230 qdf_export_symbol(dp_pdev_rx_buffers_attach); 3231 3232 #ifdef DP_RX_MON_MEM_FRAG 3233 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3234 bool is_mon_dest_desc) 3235 { 3236 rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc; 3237 if (is_mon_dest_desc) 3238 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled"); 3239 } 3240 #else 3241 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3242 bool is_mon_dest_desc) 3243 { 3244 rx_desc_pool->rx_mon_dest_frag_enable = false; 3245 if (is_mon_dest_desc) 3246 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled"); 3247 } 3248 #endif 3249 3250 qdf_export_symbol(dp_rx_enable_mon_dest_frag); 3251 3252 QDF_STATUS 3253 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) 3254 { 3255 struct dp_soc *soc = pdev->soc; 3256 uint32_t rxdma_entries; 3257 uint32_t rx_sw_desc_num; 3258 struct dp_srng *dp_rxdma_srng; 3259 struct rx_desc_pool *rx_desc_pool; 3260 uint32_t status = QDF_STATUS_SUCCESS; 3261 int mac_for_pdev; 3262 3263 mac_for_pdev = pdev->lmac_id; 3264 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3265 dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d", 3266 soc, mac_for_pdev); 3267 return status; 3268 } 3269 3270 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3271 rxdma_entries = dp_rxdma_srng->num_entries; 3272 3273 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3274 rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3275 3276 rx_desc_pool->desc_type = QDF_DP_RX_DESC_BUF_TYPE; 3277 status = dp_rx_desc_pool_alloc(soc, 3278 rx_sw_desc_num, 3279 rx_desc_pool); 3280 if (status != QDF_STATUS_SUCCESS) 3281 return status; 3282 3283 return status; 3284 } 3285 3286 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) 3287 { 3288 int mac_for_pdev = pdev->lmac_id; 3289 struct dp_soc *soc = pdev->soc; 3290 struct rx_desc_pool *rx_desc_pool; 3291 3292 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3293 3294 dp_rx_desc_pool_free(soc, rx_desc_pool); 3295 } 3296 3297 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) 3298 { 3299 int mac_for_pdev = pdev->lmac_id; 3300 struct dp_soc *soc = pdev->soc; 3301 uint32_t rxdma_entries; 3302 uint32_t rx_sw_desc_num; 3303 struct dp_srng *dp_rxdma_srng; 3304 struct rx_desc_pool *rx_desc_pool; 3305 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3306 uint16_t buf_size; 3307 3308 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx); 3309 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3310 3311 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3312 /* 3313 * If NSS is enabled, rx_desc_pool is already filled. 3314 * Hence, just disable the desc_pool frag flag.
3315 */ 3316 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3317 3318 dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d", 3319 soc, mac_for_pdev); 3320 return QDF_STATUS_SUCCESS; 3321 } 3322 3323 if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM) 3324 return QDF_STATUS_E_NOMEM; 3325 3326 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3327 rxdma_entries = dp_rxdma_srng->num_entries; 3328 3329 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 3330 3331 rx_sw_desc_num = 3332 wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3333 3334 rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc); 3335 rx_desc_pool->buf_size = buf_size; 3336 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; 3337 /* Monitor dest frag processing is enabled only for QCN9160 */ 3338 if (target_type == TARGET_TYPE_QCN9160) { 3339 rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE; 3340 rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT; 3341 dp_rx_enable_mon_dest_frag(rx_desc_pool, true); 3342 } else { 3343 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3344 } 3345 3346 dp_rx_desc_pool_init(soc, mac_for_pdev, 3347 rx_sw_desc_num, rx_desc_pool); 3348 return QDF_STATUS_SUCCESS; 3349 } 3350 3351 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) 3352 { 3353 int mac_for_pdev = pdev->lmac_id; 3354 struct dp_soc *soc = pdev->soc; 3355 struct rx_desc_pool *rx_desc_pool; 3356 3357 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3358 3359 dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev); 3360 } 3361 3362 QDF_STATUS 3363 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) 3364 { 3365 int mac_for_pdev = pdev->lmac_id; 3366 struct dp_soc *soc = pdev->soc; 3367 struct dp_srng *dp_rxdma_srng; 3368 struct rx_desc_pool *rx_desc_pool; 3369 uint32_t rxdma_entries; 3370 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3371 3372 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3373 rxdma_entries = dp_rxdma_srng->num_entries; 3374 3375 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3376 3377 /* Initialize RX buffer pool which will be 3378 * used during low memory conditions 3379 */ 3380 dp_rx_buffer_pool_init(soc, mac_for_pdev); 3381 3382 if (target_type == TARGET_TYPE_QCN9160) 3383 return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, 3384 dp_rxdma_srng, 3385 rx_desc_pool, 3386 rxdma_entries - 1); 3387 else 3388 return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev, 3389 dp_rxdma_srng, 3390 rx_desc_pool, 3391 rxdma_entries - 1); 3392 } 3393 3394 void 3395 dp_rx_pdev_buffers_free(struct dp_pdev *pdev) 3396 { 3397 int mac_for_pdev = pdev->lmac_id; 3398 struct dp_soc *soc = pdev->soc; 3399 struct rx_desc_pool *rx_desc_pool; 3400 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3401 3402 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3403 3404 if (target_type == TARGET_TYPE_QCN9160) 3405 dp_rx_desc_frag_free(soc, rx_desc_pool); 3406 else 3407 dp_rx_desc_nbuf_free(soc, rx_desc_pool, false); 3408 3409 dp_rx_buffer_pool_deinit(soc, mac_for_pdev); 3410 } 3411 3412 #ifdef DP_RX_SPECIAL_FRAME_NEED 3413 bool dp_rx_deliver_special_frame(struct dp_soc *soc, 3414 struct dp_txrx_peer *txrx_peer, 3415 qdf_nbuf_t nbuf, uint32_t frame_mask, 3416 uint8_t *rx_tlv_hdr) 3417 { 3418 uint32_t l2_hdr_offset = 0; 3419 uint16_t msdu_len = 0; 3420 uint32_t skip_len; 3421 3422 l2_hdr_offset = 3423 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 3424 3425 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 3426 skip_len = l2_hdr_offset; 3427 } else { 3428 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 3429 skip_len =
l2_hdr_offset + soc->rx_pkt_tlv_size; 3430 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 3431 } 3432 3433 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 3434 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 3435 qdf_nbuf_pull_head(nbuf, skip_len); 3436 3437 if (txrx_peer->vdev) { 3438 dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf, 3439 QDF_TX_RX_STATUS_OK); 3440 } 3441 3442 if (dp_rx_is_special_frame(nbuf, frame_mask)) { 3443 dp_info("special frame, mpdu sn 0x%x", 3444 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr)); 3445 qdf_nbuf_set_exc_frame(nbuf, 1); 3446 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, 3447 nbuf, NULL); 3448 return true; 3449 } 3450 3451 return false; 3452 } 3453 #endif 3454 3455 #ifdef QCA_MULTIPASS_SUPPORT 3456 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf, 3457 uint8_t tid) 3458 { 3459 struct vlan_ethhdr *vethhdrp; 3460 3461 if (qdf_unlikely(!txrx_peer->vlan_id)) 3462 return true; 3463 3464 vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf); 3465 /* 3466 * h_vlan_proto & h_vlan_TCI should be 0x8100 & zero respectively, 3467 * as the frame is expected to be padded with 0. 3468 * Return false if the frame doesn't carry the above tag, so that 3469 * the caller will drop the frame. 3470 */ 3471 if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) || 3472 qdf_unlikely(vethhdrp->h_vlan_TCI != 0)) 3473 return false; 3474 3475 vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) | 3476 (txrx_peer->vlan_id & VLAN_VID_MASK)); 3477 3478 if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE)) 3479 dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf); 3480 3481 return true; 3482 } 3483 #endif /* QCA_MULTIPASS_SUPPORT */ 3484
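/*
 * Illustrative sketch (not part of the driver): dp_rx_multipass_process()
 * above rewrites the 802.1Q TCI from the frame's TID and the peer's
 * vlan_id. The hypothetical helper below isolates that computation and
 * returns the host-order TCI before the htons() conversion; e.g. tid 5
 * with vlan_id 100 yields (5 << VLAN_PRIO_SHIFT) | 100.
 */
static inline uint16_t
dp_rx_multipass_tci_example(uint8_t tid, uint16_t vlan_id)
{
	/* Priority bits come from the TID, the low 12 bits from the VLAN id */
	return (uint16_t)(((tid & 0x7) << VLAN_PRIO_SHIFT) |
			  (vlan_id & VLAN_VID_MASK));
}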