/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include "enet.h"

#ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */

#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
	qdf_assert_always(0);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	uint8_t return_buffer_manager;

	if (qdf_unlikely(!rx_desc)) {
		/*
		 * This is an unlikely case where the cookie obtained
		 * from the ring_desc is invalid and hence we are not
		 * able to find the corresponding rx_desc
		 */
		goto fail;
	}

	return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
	if (qdf_unlikely(!(return_buffer_manager ==
			   HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
			   return_buffer_manager ==
			   HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
		goto fail;
	}

	return QDF_STATUS_SUCCESS;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	dp_err("Ring Desc:");
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	return QDF_STATUS_E_NULL_VALUE;
}
#endif
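/**
 * dp_rx_srng_get_num_pending() - get number of pending entries in a
 *				  destination SRNG
 * @hal_soc: HAL soc handle
 * @hal_ring_hdl: destination ring handle
 * @num_entries: total number of entries in the ring
 * @near_full: set to true if at least half of the ring entries are pending
 *
 * Return: number of valid entries pending to be reaped
 */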
uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full)
{
	uint32_t num_pending = 0;

	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
						     hal_ring_hdl,
						     true);

	if (num_entries && (num_pending >= num_entries >> 1))
		*near_full = true;
	else
		*near_full = false;

	return num_pending;
}

#ifdef RX_DESC_DEBUG_CHECK
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer
 *				      out-of-bound access from HW
 *
 * @soc: DP soc
 * @pkt_len: Packet length received from HW
 *
 * Return: NONE
 */
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
				 uint32_t pkt_len)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[0];
	qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
}
#else
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
#endif

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
	struct dp_buf_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
		return;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
				  &hbi);

	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
					DP_RX_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_ring_history[ring_num]->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = hbi.paddr;
	record->hbi.sw_cookie = hbi.sw_cookie;
	record->hbi.rbm = hbi.rbm;
}
#endif

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					      uint8_t *rx_tlv,
					      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc;

	if (!pdev->is_first_wakeup_packet)
		return;

	soc = pdev->soc;
	if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) {
		qdf_nbuf_mark_wakeup_frame(nbuf);
		dp_info("First packet after WOW Wakeup rcvd");
	}
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#endif /* WLAN_SOFTUMAC_SUPPORT */

/**
 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
 *
 * @dp_soc: struct dp_soc *
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).vaddr =
		qdf_frag_alloc(NULL, rx_desc_pool->buf_size);

	if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
		dp_err("Frag alloc failed");
		DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
		return QDF_STATUS_E_NOMEM;
	}

	ret = qdf_mem_map_page(dp_soc->osdev,
			       (nbuf_frag_info_t->virt_addr).vaddr,
			       QDF_DMA_FROM_DEVICE,
			       rx_desc_pool->buf_size,
			       &nbuf_frag_info_t->paddr);

	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
		dp_err("Frag map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return QDF_STATUS_E_FAULT;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* DP_RX_MON_MEM_FRAG */

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
 * @soc: Datapath soc structure
 * @ring_num: Refill ring number
 * @hal_ring_hdl: HAL ring handle of the refill ring
 * @num_req: number of buffers requested for refill
 * @num_refill: number of buffers refilled
 *
 * Return: None
 */
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
	struct dp_refill_info_record *record;
	uint32_t idx;
	uint32_t tp;
	uint32_t hp;

	if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
			 !soc->rx_refill_ring_history[ring_num]))
		return;

	idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
					DP_RX_REFILL_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_refill_ring_history[ring_num]->entry[idx];

	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);
	record->timestamp = qdf_get_log_timestamp();
	record->num_req = num_req;
	record->num_refill = num_refill;
	record->hp = hp;
	record->tp = tp;
}
#else
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
}
#endif

/**
 * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and
 *					    map
 * @dp_soc: struct dp_soc *
 * @mac_id: Mac id
 * @num_entries_avail: num_entries_avail
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
				     uint32_t mac_id,
				     uint32_t num_entries_avail,
				     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
				     struct dp_pdev *dp_pdev,
				     struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).nbuf =
		dp_rx_buffer_pool_nbuf_alloc(dp_soc,
					     mac_id,
					     rx_desc_pool,
					     num_entries_avail);
	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
		dp_err("nbuf alloc failed");
		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
		return QDF_STATUS_E_NOMEM;
	}

	ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
					 nbuf_frag_info_t);
	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		dp_rx_buffer_pool_nbuf_free(dp_soc,
			(nbuf_frag_info_t->virt_addr).nbuf, mac_id);
		dp_err("nbuf map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return QDF_STATUS_E_FAULT;
	}

	nbuf_frag_info_t->paddr =
		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
	if (qdf_atomic_read(&dp_soc->ipa_mapped))
		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)(
					(nbuf_frag_info_t->virt_addr).nbuf),
					rx_desc_pool->buf_size,
					true, __func__, __LINE__);

	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
			     &nbuf_frag_info_t->paddr,
			     rx_desc_pool);
	if (ret == QDF_STATUS_E_FAILURE) {
		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
		return QDF_STATUS_E_ADDRNOTAVAIL;
	}

	return QDF_STATUS_SUCCESS;
}

#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
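/**
 * __dp_rx_buffers_no_map_lt_replenish() - low-threshold replenish of the
 *					   rxdma ring without nbuf map/unmap
 * @soc: core txrx main context
 * @mac_id: mac id
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 *
 * Replenishes the ring only when at least 3/4 of its entries are empty,
 * allocating nbufs and free descriptors as needed.
 *
 * Return: QDF_STATUS
 */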
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *dp_rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next = NULL;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	uint16_t num_entries_avail = 0;
	uint16_t num_alloc_desc = 0;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 0;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		return QDF_STATUS_E_FAILURE;
	}

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
		    soc, num_entries_avail);

	if (qdf_unlikely(num_entries_avail <
			 ((dp_rxdma_srng->num_entries * 3) / 4))) {
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_FAILURE;
	}

	DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
	num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
						  rx_desc_pool,
						  num_entries_avail,
						  &desc_list,
						  &tail);

	if (!num_alloc_desc) {
		dp_rx_err("%pK: no free rx_descs in freelist", soc);
		DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
			     num_entries_avail);
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_NOMEM;
	}

	for (count = 0; count < num_alloc_desc; count++) {
		next = desc_list->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		desc_list->rx_desc.nbuf = nbuf;
		dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.paddr_buf_start = paddr;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));
	/*
	 * add any available free desc back to the free list
	 */
	if (desc_list)
		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_nbuf_t nbuf_next;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	/* Allocate required number of nbufs */
	for (count = 0; count < num_req_buffers; count++) {
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			/* Update num_req_buffers to nbufs allocated count */
			num_req_buffers = count;
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		QDF_NBUF_CB_PADDR(nbuf) = paddr;
		DP_RX_LIST_APPEND(nbuf_head,
				  nbuf_tail,
				  nbuf);
	}
	qdf_dsb();

	nbuf = nbuf_head;
	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < num_req_buffers; count++) {
		next = (*desc_list)->next;
		nbuf_next = nbuf->next;
		qdf_prefetch(next);

		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

		if (!rxdma_ring_entry)
			break;

		(*desc_list)->rx_desc.nbuf = nbuf;
		dp_rx_set_reuse_nbuf(&(*desc_list)->rx_desc, nbuf);
		(*desc_list)->rx_desc.rx_buf_start = nbuf->data;
		(*desc_list)->rx_desc.paddr_buf_start = QDF_NBUF_CB_PADDR(nbuf);
		(*desc_list)->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     QDF_NBUF_CB_PADDR(nbuf),
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
		nbuf = nbuf_next;
	}
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	while (nbuf) {
		nbuf_next = nbuf->next;
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		qdf_nbuf_free(nbuf);
		nbuf = nbuf_next;
	}

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_SUPPORT_PPEDS
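/**
 * __dp_rx_comp2refill_replenish() - replenish the rxdma ring by re-posting
 *				     the nbufs already attached to the
 *				     descriptors (reuse_nbuf)
 * @soc: core txrx main context
 * @mac_id: mac id
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @num_req_buffers: number of buffers requested for replenish
 * @desc_list: list of descriptors to be re-posted
 * @tail: tail of the descriptor list
 *
 * Descriptors that could not be posted are freed along with their nbufs.
 *
 * Return: QDF_STATUS
 */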
QDF_STATUS
__dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id,
			      struct dp_srng *dp_rxdma_srng,
			      struct rx_desc_pool *rx_desc_pool,
			      uint32_t num_req_buffers,
			      union dp_rx_desc_list_elem_t **desc_list,
			      union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	union dp_rx_desc_list_elem_t *cur;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < num_req_buffers; count++) {
		next = (*desc_list)->next;
		qdf_prefetch(next);

		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

		if (!rxdma_ring_entry)
			break;

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;
		(*desc_list)->rx_desc.nbuf = (*desc_list)->rx_desc.reuse_nbuf;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     (*desc_list)->rx_desc.paddr_buf_start,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));

	/*
	 * add any available free desc back to the free list
	 */
	cur = *desc_list;
	for ( ; count < num_req_buffers; count++) {
		next = cur->next;
		qdf_prefetch(next);

		nbuf = cur->rx_desc.reuse_nbuf;

		cur->rx_desc.nbuf = NULL;
		cur->rx_desc.in_use = 0;
		cur->rx_desc.has_reuse_nbuf = false;
		cur->rx_desc.reuse_nbuf = NULL;
		if (!nbuf->recycled_for_ds)
			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);

		nbuf->recycled_for_ds = 0;
		nbuf->fast_recycled = 0;
		qdf_nbuf_free(nbuf);
		cur = next;
	}

	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
#endif

QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	uint32_t nr_descs = 0;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_debug("%pK: requested %d buffers for replenish",
		    soc, num_req_buffers);

	nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list, &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < nr_descs; count++) {
		next = desc_list->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);
		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
		if (!rxdma_ring_entry)
			break;

		qdf_assert_always(rxdma_ring_entry);

		desc_list->rx_desc.nbuf = nbuf;
		dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.paddr_buf_start = paddr;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
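/**
 * dp_rx_rep_retrieve_paddr() - get the physical address of an rx nbuf for
 *				re-posting it to the refill ring
 * @dp_soc: core txrx main context
 * @nbuf: rx buffer
 * @buf_size: rx buffer size
 *
 * Return: physical address of the buffer
 */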
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
					uint32_t buf_size)
{
	return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size);
}
#else
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
					uint32_t buf_size)
{
	return qdf_nbuf_get_frag_paddr(nbuf, 0);
}
#endif

/**
 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time
 * @soc: core txrx main context
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @rx_desc: rx descriptor
 *
 * Return: void
 */
static inline
void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  struct dp_rx_desc *rx_desc)
{
	void *rxdma_srng;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	/* No one else should be accessing the srng at this point */
	hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng);

	rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

	qdf_assert_always(rxdma_ring_entry);
	rx_desc->in_err_state = 0;

	paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf,
					 rx_desc_pool->buf_size);
	hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr,
				     rx_desc->cookie, rx_desc_pool->owner);

	hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng);
}

void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int mac_id, i, j;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct dp_srng *dp_rxdma_srng =
					&soc->rx_refill_buf_ring[mac_id];
		struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
		uint32_t rx_sw_desc_num = rx_desc_pool->pool_size;
		/* Only fill up 1/3 of the ring size */
		uint32_t num_req_decs;

		if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng ||
		    !rx_desc_pool->array)
			continue;

		num_req_decs = dp_rxdma_srng->num_entries / 3;

		for (i = 0, j = 0; i < rx_sw_desc_num; i++) {
			struct dp_rx_desc *rx_desc =
				(struct dp_rx_desc *)&rx_desc_pool->array[i];

			if (rx_desc->in_use) {
				if (j < (dp_rxdma_srng->num_entries - 1)) {
					dp_rx_desc_replenish(soc, dp_rxdma_srng,
							     rx_desc_pool,
							     rx_desc);
				} else {
					dp_rx_nbuf_unmap(soc, rx_desc, 0);
					rx_desc->unmapped = 0;

					rx_desc->nbuf->next = *nbuf_list;
					*nbuf_list = rx_desc->nbuf;

					dp_rx_add_to_free_desc_list(&head,
								    &tail,
								    rx_desc);
				}
				j++;
			}
		}

		if (head)
			dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
							 mac_id, rx_desc_pool);

		/* If num of descs in use were less, then we need to replenish
		 * the ring with some buffers
		 */
		head = NULL;
		tail = NULL;

		if (j < (num_req_decs - 1))
			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						((num_req_decs - 1) - j),
						&head, &tail, true);
	}
}
#endif

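/**
 * __dp_rx_buffers_replenish() - replenish the rxdma ring with rx nbufs
 * @dp_soc: core txrx main context
 * @mac_id: mac id
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @num_req_buffers: number of buffers requested for replenish
 * @desc_list: list of descs to be used for replenish, may be empty
 * @tail: tail of the descriptor list
 * @req_only: replenish only the requested number of buffers, do not top up
 *	      to ring capacity
 * @func_name: name of the caller, recorded in the descriptor debug info
 *
 * Return: QDF_STATUS
 */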
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				     struct dp_srng *dp_rxdma_srng,
				     struct rx_desc_pool *rx_desc_pool,
				     uint32_t num_req_buffers,
				     union dp_rx_desc_list_elem_t **desc_list,
				     union dp_rx_desc_list_elem_t **tail,
				     bool req_only, const char *func_name)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	uint32_t extra_buffers;
	int sync_hw_ptr = 1;
	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	void *rxdma_srng;
	union dp_rx_desc_list_elem_t *desc_list_append = NULL;
	union dp_rx_desc_list_elem_t *tail_append = NULL;
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_verbose_debug("%pK: requested %d buffers for replenish",
			 dp_soc, num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_verbose_debug("%pK: no of available entries in rxdma ring: %d",
			 dp_soc, num_entries_avail);

	if (!req_only && !(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
		DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	} else if ((*desc_list) &&
		   dp_rxdma_srng->num_entries - num_entries_avail <
		   CRITICAL_BUFFER_THRESHOLD) {
		/* set extra buffers to CRITICAL_BUFFER_THRESHOLD only if
		 * total buff requested after adding extra buffers is less
		 * than or equal to num entries available, else set it to max
		 * possible additional buffers available at that moment
		 */
		extra_buffers =
			((num_req_buffers + CRITICAL_BUFFER_THRESHOLD) > num_entries_avail) ?
			(num_entries_avail - num_req_buffers) :
			CRITICAL_BUFFER_THRESHOLD;
		/* Append some free descriptors to tail */
		num_alloc_desc =
			dp_rx_get_free_desc_list(dp_soc, mac_id,
						 rx_desc_pool,
						 extra_buffers,
						 &desc_list_append,
						 &tail_append);

		if (num_alloc_desc) {
			temp_list = *desc_list;
			*desc_list = desc_list_append;
			tail_append->next = temp_list;
			num_req_buffers += num_alloc_desc;

			DP_STATS_DEC(dp_pdev,
				     replenish.free_list,
				     num_alloc_desc);
		} else
			dp_err_rl("%pK: no free rx_descs in freelist", dp_soc);
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		dp_verbose_debug("%pK: %d rx desc allocated", dp_soc,
				 num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			ret = dp_pdev_frag_alloc_and_map(dp_soc,
							 &nbuf_frag_info,
							 dp_pdev,
							 rx_desc_pool);
		else
			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
								   mac_id,
								   num_entries_avail, &nbuf_frag_info,
								   dp_pdev, rx_desc_pool);

		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
				continue;
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
					     &nbuf_frag_info);
		else
			dp_rx_desc_prep(&((*desc_list)->rx_desc),
					&nbuf_frag_info);

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;
		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
					   func_name, RX_DESC_REPLENISHED);
		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
				 nbuf_frag_info.virt_addr.nbuf,
				 (unsigned long long)(nbuf_frag_info.paddr),
				 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
					     nbuf_frag_info.paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
				       num_req_buffers, count);

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_schedule_refill_thread(dp_soc);

	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
			 count, num_desc_to_free);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(__dp_rx_buffers_replenish);

void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_txrx_peer *txrx_peer, uint8_t link_id)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
					      qdf_nbuf_len(nbuf), link_id);
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/**
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical frame
 * and marks the QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the
 * nbuf. Code for marking which frames are CRITICAL is accessed via callback.
 * EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical frames.
 *
 * Return: None
 */
static
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
	if (vdev->tx_classify_critical_pkt_cb)
		vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
}
#else
static inline
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
	qdf_nbuf_set_queue_mapping(nbuf, ring_id);
}
#else
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
}
#endif

bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			     struct cdp_tid_rx_stats *tid_stats,
			     uint8_t link_id)
{
	uint16_t len;
	qdf_nbuf_t nbuf_copy;

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf, link_id))
		return false;

	/* If the source peer is in the isolation list, don't forward;
	 * instead push the frame to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer))
		return false;

	nbuf_copy = qdf_nbuf_copy(nbuf);
	if (!nbuf_copy)
		return false;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);

	if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer,
						       nbuf_copy,
						       tid_stats,
						       link_id))
		return false;

	/* Don't send packets if tx is paused */
	if (!soc->is_tx_pause &&
	    !dp_tx_send((struct cdp_soc_t *)soc,
			ta_peer->vdev->vdev_id, nbuf_copy)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len, link_id);
		tid_stats->intrabss_cnt++;
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len, link_id);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		dp_rx_nbuf_free(nbuf_copy);
	}
	return false;
}

bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			      uint8_t tx_vdev_id,
			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			      struct cdp_tid_rx_stats *tid_stats,
			      uint8_t link_id)
{
	uint16_t len;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/* linearize the nbuf just before we send to
	 * dp_tx_send()
	 */
	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
			return false;

		nbuf = qdf_nbuf_unshare(nbuf);
		if (!nbuf) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
						      rx.intra_bss.fail,
						      1, len, link_id);
			/* return true even though the pkt is
			 * not forwarded. Basically skb_unshare
			 * failed and we want to continue with
			 * next nbuf.
			 */
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			return false;
		}
	}

	qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);

	/* Don't send packets if tx is paused */
	if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc,
					     tx_vdev_id, nbuf)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len, link_id);
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len, link_id);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		return false;
	}

	return true;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef MESH_MODE_SUPPORT

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr,
			   struct dp_txrx_peer *txrx_peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;
	uint8_t primary_chan_num;
	uint32_t center_chan_freq;
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_peer *peer;
	struct dp_peer *primary_link_peer;
	struct dp_soc *link_peer_soc;
	cdp_peer_stats_param_t buf = {0};

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */

	if (!rx_info) {
		dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
			  vdev->pdev->soc);
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
	if (peer) {
		if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
			rx_info->rs_flags |= MESH_RX_DECRYPTED;
			rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
								  rx_tlv_hdr);
			if (vdev->osif_get_key)
				vdev->osif_get_key(vdev->osif_vdev,
						   &rx_info->rs_decryptkey[0],
						   &peer->mac_addr.raw[0],
						   rx_info->rs_keyix);
		}

		dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
	}

	primary_link_peer = dp_get_primary_link_peer_by_id(soc,
							   txrx_peer->peer_id,
							   DP_MOD_ID_MESH);

	if (qdf_likely(primary_link_peer)) {
		link_peer_soc = primary_link_peer->vdev->pdev->soc;
		dp_monitor_peer_get_stats_param(link_peer_soc,
						primary_link_peer,
						cdp_peer_rx_snr, &buf);
		rx_info->rs_snr = buf.rx_snr;
		dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
	}

	rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;

	soc = vdev->pdev->soc;
	primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
	center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;

	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
							soc->ctrl_psoc,
							vdev->pdev->pdev_id,
							center_chan_freq);
	}
	rx_info->rs_channel = primary_chan_num;
	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
	rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);

	/*
	 * The MCS index does not start with 0 when NSS > 1 in HT mode.
	 * MCS params for optional 20/40MHz, NSS=1~3, EQM(NSS>1):
	 * ------------------------------------------------------
	 *	NSS		|   1   |   2    |    3    |    4
	 * ------------------------------------------------------
	 *	MCS index: HT20	| 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
	 * ------------------------------------------------------
	 *	MCS index: HT40	| 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
	 * ------------------------------------------------------
	 * Currently, the MAX_NSS=2. If NSS>2, MCS index = 8 * (NSS-1)
	 */
	if ((pkt_type == DOT11_N) && (nss == 2))
		rate_mcs += 8;

	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
			       (bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix,
		  rx_info->rs_snr);
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;
	struct dp_soc *soc = vdev->pdev->soc;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						   rx_tlv_hdr) &&
			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
						   rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef RX_PEER_INVALID_ENH
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = NULL;
	int i = 0;
	uint32_t nbuf_len;

	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
		dp_rx_debug("%pK: Drop decapped frames", soc);
		goto free;
	}

	/* In RAW packet, packet header will be part of data */
	rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
		goto free;
	}

	nbuf_len = qdf_nbuf_len(mpdu);
	if (nbuf_len < sizeof(struct ieee80211_frame)) {
		dp_rx_err("%pK: Invalid nbuf length: %u", soc, nbuf_len);
		goto free;
	}

	/* In DMAC case the rx_desc_pools are common across PDEVs
	 * so PDEV cannot be derived from the pool_id.
	 *
	 * link_id needs to be derived from the TLV tag word, which is
	 * disabled by default. For now add a WAR to get the vdev by
	 * brute force; this needs to be fixed once word-based subscription
	 * support is added by enabling the TLV tag word.
	 */
	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		for (i = 0; i < MAX_PDEV_CNT; i++) {
			pdev = soc->pdev_list[i];

			if (!pdev || qdf_unlikely(pdev->is_pdev_down))
				continue;

			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
				if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
						QDF_MAC_ADDR_SIZE) == 0) {
					goto out;
				}
			}
		}
	} else {
		pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

		if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
			dp_rx_err("%pK: PDEV %s",
				  soc, !pdev ? "not found" : "down");
			goto free;
		}

		if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
		    QDF_STATUS_SUCCESS)
			return 0;

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		dp_rx_err("%pK: VDEV not found", soc);
		goto free;
	}
out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;

	/*
	 * NOTE: Only valid for HKv1.
	 * If smart monitor mode is enabled on RE, we are getting invalid
	 * peer frames with RA as STA mac of RE and the TA not matching
	 * with any NAC list or the BSSID. Such frames need to be dropped
	 * in order to avoid HM_WDS false addition.
	 */
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
		if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
			dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
				   soc, wh->i_addr1);
			goto free;
		}
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
				pdev->pdev_id, &msg);
	}

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	struct dp_peer *peer = NULL;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
	uint32_t nbuf_len;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	nbuf_len = qdf_nbuf_len(mpdu);
	if (nbuf_len < sizeof(struct ieee80211_frame)) {
		dp_rx_info_rl("%pK: Invalid nbuf length: %u", soc, nbuf_len);
		goto free;
	}

	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (!pdev) {
		dp_rx_info_rl("%pK: PDEV not found", soc);
		goto free;
	}

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
				QDF_MAC_ADDR_SIZE) == 0) {
			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
			goto out;
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	if (!vdev) {
		dp_rx_info_rl("%pK: VDEV not found", soc);
		goto free;
	}

out:
	if (vdev->opmode == wlan_op_mode_ap) {
		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
					      vdev->vdev_id,
					      DP_MOD_ID_RX_ERR);
		/* If SA is a valid peer in vdev,
		 * don't send disconnect
		 */
		if (peer) {
			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
			DP_STATS_INC(soc, rx.err.decrypt_err_drop, 1);
			dp_err_rl("invalid peer frame with correct SA/RA is freed");
			goto free;
		}
	}

	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	/* Reset the head and tail pointers */
	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (pdev) {
		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @soc: dp soc handle
 * @msdu: MSDU for which the offload info is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(struct dp_soc *soc,
				     qdf_nbuf_t msdu)
{
	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_verbose_debug("lro_eligible 0x%x",
			 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
	dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
	dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
	dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu));
	dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu));
	dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
	dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
	dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
	dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
	dp_verbose_debug("---------------------------------------------------------");
}

void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
	struct hal_offload_info offload_info;

	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
		return;

	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
					  rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num;
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num;
	QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
	QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;

	dp_rx_print_offload_info(soc, msdu);
}
#endif /* RECEIVE_OFFLOAD */

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 * @l3_pad_len: L3 padding length by HW
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
					 qdf_nbuf_t nbuf,
					 uint16_t *mpdu_len,
					 uint32_t l3_pad_len)
{
	bool last_nbuf;
	uint32_t pkt_hdr_size;

	pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len;

	if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) {
		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
		last_nbuf = false;
		*mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size);
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size));
		last_nbuf = true;
		*mpdu_len = 0;
	}

	return last_nbuf;
}

/**
 * dp_get_l3_hdr_pad_len() - get L3 header padding length.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 *
 * Return: returns padding length in bytes.
 */
static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
					     qdf_nbuf_t nbuf)
{
	uint32_t l3_hdr_pad = 0;
	uint8_t *rx_tlv_hdr;
	struct hal_rx_msdu_metadata msdu_metadata;

	while (nbuf) {
		if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			/* scattered msdu end with continuation is 0 */
			rx_tlv_hdr = qdf_nbuf_data(nbuf);
			hal_rx_msdu_metadata_get(soc->hal_soc,
						 rx_tlv_hdr,
						 &msdu_metadata);
			l3_hdr_pad = msdu_metadata.l3_hdr_pad;
			break;
		}
		nbuf = nbuf->next;
	}

	return l3_hdr_pad;
}

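/**
 * dp_rx_sg_create() - create a frag_list for an MSDU spread across
 *		       multiple nbufs
 * @soc: DP soc handle
 * @nbuf: pointer to the first msdu of the scattered MSDU
 *
 * Chains the continuation nbufs as an ext list of the parent nbuf and
 * adjusts the per-buffer lengths using the MSDU length from the REO entry.
 *
 * Return: the head (parent) nbuf carrying the complete frag_list
 */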
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	qdf_nbuf_t parent, frag_list, next = NULL;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;
	uint32_t l3_hdr_pad_offset = 0;

	/*
	 * Use the msdu len from the REO entry descriptor instead, since
	 * there are cases where the RX PKT TLV is corrupted while the
	 * msdu_len from the REO descriptor is correct for non-raw RX
	 * scatter msdu.
	 */
	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		return nbuf;
	}

	l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);
	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	/*
	 * L3 header padding is only needed for the 1st buffer
	 * in a scattered msdu
	 */
	last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
					  l3_hdr_pad_offset);

	/*
	 * MSDU cont bit is set but reported MPDU length can fit
	 * in to single buffer
	 *
	 * Increment error stats and avoid SG list creation
	 */
	if (last_nbuf) {
		DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
		qdf_nbuf_pull_head(parent,
				   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
		return parent;
	}

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
1906 */
1907 do {
1908 last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
1909 qdf_nbuf_pull_head(nbuf,
1910 soc->rx_pkt_tlv_size);
1911 frag_list_len += qdf_nbuf_len(nbuf);
1912 
1913 if (last_nbuf) {
1914 next = nbuf->next;
1915 nbuf->next = NULL;
1916 break;
1917 } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1918 dp_err("Invalid packet length");
1919 qdf_assert_always(0);
1920 }
1921 nbuf = nbuf->next;
1922 } while (!last_nbuf);
1923 
1924 qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1925 qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1926 parent->next = next;
1927 
1928 qdf_nbuf_pull_head(parent,
1929 soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
1930 return parent;
1931 }
1932 
1933 #ifdef DP_RX_SG_FRAME_SUPPORT
1934 bool dp_rx_is_sg_supported(void)
1935 {
1936 return true;
1937 }
1938 #else
1939 bool dp_rx_is_sg_supported(void)
1940 {
1941 return false;
1942 }
1943 #endif
1944 
1945 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1946 
1947 #ifdef QCA_PEER_EXT_STATS
1948 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1949 qdf_nbuf_t nbuf)
1950 {
1951 struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
1952 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1953 
1954 dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
1955 }
1956 #endif /* QCA_PEER_EXT_STATS */
1957 
1958 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1959 {
1960 uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1961 int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1962 uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1963 uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1964 uint32_t interframe_delay =
1965 (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1966 struct cdp_tid_rx_stats *rstats =
1967 &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1968 
1969 dp_update_delay_stats(NULL, rstats, to_stack, tid,
1970 CDP_DELAY_STATS_REAP_STACK, ring_id, false);
1971 /*
1972 * Update interframe delay stats calculated at deliver_data_ol point.
1973 * Value of vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame, so
1974 * the interframe delay will not be calculated correctly for the 1st frame.
1975 * On the other hand, this avoids an extra per-packet check of
1976 * vdev->prev_rx_deliver_tstamp.
1977 */
1978 dp_update_delay_stats(NULL, rstats, interframe_delay, tid,
1979 CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false);
1980 vdev->prev_rx_deliver_tstamp = current_ts;
1981 }
1982 
1983 /**
1984 * dp_rx_drop_nbuf_list() - drop an nbuf list
1985 * @pdev: dp pdev reference
1986 * @buf_list: buffer list to be dropped
1987 *
1988 * Return: int (number of bufs dropped)
1989 */
1990 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1991 qdf_nbuf_t buf_list)
1992 {
1993 struct cdp_tid_rx_stats *stats = NULL;
1994 uint8_t tid = 0, ring_id = 0;
1995 int num_dropped = 0;
1996 qdf_nbuf_t buf, next_buf;
1997 
1998 buf = buf_list;
1999 while (buf) {
2000 ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
2001 next_buf = qdf_nbuf_queue_next(buf);
2002 tid = qdf_nbuf_get_tid_val(buf);
2003 if (qdf_likely(pdev)) {
2004 stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2005 stats->fail_cnt[INVALID_PEER_VDEV]++;
2006 stats->delivered_to_stack--;
2007 }
2008 dp_rx_nbuf_free(buf);
2009 buf = next_buf;
2010 num_dropped++;
2011 }
2012 
2013 return num_dropped;
2014 }
2015 
2016 #ifdef QCA_SUPPORT_WDS_EXTENDED
2017 /**
2018 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
2019 * @soc: core txrx main context
2020 * @vdev: vdev
2021 * @txrx_peer: txrx peer
2022 * @nbuf_head: skb list head
2023 *
2024 * Return: true if packet is delivered to netdev per STA.
2025 */
2026 static inline bool
2027 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
2028 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
2029 {
2030 /*
2031 * When extended WDS is disabled, frames are sent to the AP netdevice.
2032 */
2033 if (qdf_likely(!vdev->wds_ext_enabled))
2034 return false;
2035 
2036 /*
2037 * There can be 2 cases:
2038 * 1. Send frame to parent netdev if it's not for netdev per STA
2039 * 2. If frame is meant for netdev per STA:
2040 * a. Send frame to appropriate netdev using registered fp.
2041 * b. If fp is NULL, drop the frames.
2042 */
2043 if (!txrx_peer->wds_ext.init)
2044 return false;
2045 
2046 if (txrx_peer->osif_rx)
2047 txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head);
2048 else
2049 dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
2050 
2051 return true;
2052 }
2053 
2054 #else
2055 static inline bool
2056 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
2057 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
2058 {
2059 return false;
2060 }
2061 #endif
2062 
2063 #ifdef PEER_CACHE_RX_PKTS
2064 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
2065 {
2066 struct dp_peer_cached_bufq *bufqi;
2067 struct dp_rx_cached_buf *cache_buf = NULL;
2068 ol_txrx_rx_fp data_rx = NULL;
2069 int num_buff_elem;
2070 QDF_STATUS status;
2071 
2072 /*
2073 * Flush dp cached frames only for mld peers and legacy peers, as
2074 * link peers don't store cached frames
2075 */
2076 if (IS_MLO_DP_LINK_PEER(peer))
2077 return;
2078 
2079 if (!peer->txrx_peer) {
2080 dp_err("txrx_peer NULL!!
peer mac_addr("QDF_MAC_ADDR_FMT")", 2081 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 2082 return; 2083 } 2084 2085 if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) { 2086 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 2087 return; 2088 } 2089 2090 qdf_spin_lock_bh(&peer->peer_info_lock); 2091 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) 2092 data_rx = peer->vdev->osif_rx; 2093 else 2094 drop = true; 2095 qdf_spin_unlock_bh(&peer->peer_info_lock); 2096 2097 bufqi = &peer->txrx_peer->bufq_info; 2098 2099 qdf_spin_lock_bh(&bufqi->bufq_lock); 2100 qdf_list_remove_front(&bufqi->cached_bufq, 2101 (qdf_list_node_t **)&cache_buf); 2102 while (cache_buf) { 2103 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( 2104 cache_buf->buf); 2105 bufqi->entries -= num_buff_elem; 2106 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2107 if (drop) { 2108 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, 2109 cache_buf->buf); 2110 } else { 2111 /* Flush the cached frames to OSIF DEV */ 2112 status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); 2113 if (status != QDF_STATUS_SUCCESS) 2114 bufqi->dropped = dp_rx_drop_nbuf_list( 2115 peer->vdev->pdev, 2116 cache_buf->buf); 2117 } 2118 qdf_mem_free(cache_buf); 2119 cache_buf = NULL; 2120 qdf_spin_lock_bh(&bufqi->bufq_lock); 2121 qdf_list_remove_front(&bufqi->cached_bufq, 2122 (qdf_list_node_t **)&cache_buf); 2123 } 2124 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2125 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); 2126 } 2127 2128 /** 2129 * dp_rx_enqueue_rx() - cache rx frames 2130 * @peer: peer 2131 * @txrx_peer: DP txrx_peer 2132 * @rx_buf_list: cache buffer list 2133 * 2134 * Return: None 2135 */ 2136 static QDF_STATUS 2137 dp_rx_enqueue_rx(struct dp_peer *peer, 2138 struct dp_txrx_peer *txrx_peer, 2139 qdf_nbuf_t rx_buf_list) 2140 { 2141 struct dp_rx_cached_buf *cache_buf; 2142 struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info; 2143 int num_buff_elem; 2144 QDF_STATUS ret = QDF_STATUS_SUCCESS; 2145 struct dp_soc *soc = txrx_peer->vdev->pdev->soc; 2146 struct dp_peer *ta_peer = NULL; 2147 2148 /* 2149 * If peer id is invalid which likely peer map has not completed, 2150 * then need caller provide dp_peer pointer, else it's ok to use 2151 * txrx_peer->peer_id to get dp_peer. 
2152 */ 2153 if (peer) { 2154 if (QDF_STATUS_SUCCESS == 2155 dp_peer_get_ref(soc, peer, DP_MOD_ID_RX)) 2156 ta_peer = peer; 2157 } else { 2158 ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2159 DP_MOD_ID_RX); 2160 } 2161 2162 if (!ta_peer) { 2163 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2164 rx_buf_list); 2165 return QDF_STATUS_E_INVAL; 2166 } 2167 2168 dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, 2169 bufqi->dropped); 2170 if (!ta_peer->valid) { 2171 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2172 rx_buf_list); 2173 ret = QDF_STATUS_E_INVAL; 2174 goto fail; 2175 } 2176 2177 qdf_spin_lock_bh(&bufqi->bufq_lock); 2178 if (bufqi->entries >= bufqi->thresh) { 2179 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2180 rx_buf_list); 2181 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2182 ret = QDF_STATUS_E_RESOURCES; 2183 goto fail; 2184 } 2185 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2186 2187 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); 2188 2189 cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); 2190 if (!cache_buf) { 2191 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2192 "Failed to allocate buf to cache rx frames"); 2193 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, 2194 rx_buf_list); 2195 ret = QDF_STATUS_E_NOMEM; 2196 goto fail; 2197 } 2198 2199 cache_buf->buf = rx_buf_list; 2200 2201 qdf_spin_lock_bh(&bufqi->bufq_lock); 2202 qdf_list_insert_back(&bufqi->cached_bufq, 2203 &cache_buf->node); 2204 bufqi->entries += num_buff_elem; 2205 qdf_spin_unlock_bh(&bufqi->bufq_lock); 2206 2207 fail: 2208 dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX); 2209 return ret; 2210 } 2211 2212 static inline 2213 bool dp_rx_is_peer_cache_bufq_supported(void) 2214 { 2215 return true; 2216 } 2217 #else 2218 static inline 2219 bool dp_rx_is_peer_cache_bufq_supported(void) 2220 { 2221 return false; 2222 } 2223 2224 static inline QDF_STATUS 2225 dp_rx_enqueue_rx(struct dp_peer *peer, 2226 struct dp_txrx_peer *txrx_peer, 2227 qdf_nbuf_t rx_buf_list) 2228 { 2229 return QDF_STATUS_SUCCESS; 2230 } 2231 #endif 2232 2233 #ifndef DELIVERY_TO_STACK_STATUS_CHECK 2234 /** 2235 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2236 * using the appropriate call back functions. 2237 * @soc: soc 2238 * @vdev: vdev 2239 * @txrx_peer: peer 2240 * @nbuf_head: skb list head 2241 * 2242 * Return: None 2243 */ 2244 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2245 struct dp_vdev *vdev, 2246 struct dp_txrx_peer *txrx_peer, 2247 qdf_nbuf_t nbuf_head) 2248 { 2249 if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev, 2250 txrx_peer, nbuf_head))) 2251 return; 2252 2253 /* Function pointer initialized only when FISA is enabled */ 2254 if (vdev->osif_fisa_rx) 2255 /* on failure send it via regular path */ 2256 vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2257 else 2258 vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2259 } 2260 2261 #else 2262 /** 2263 * dp_rx_check_delivery_to_stack() - Deliver pkts to network 2264 * using the appropriate call back functions. 2265 * @soc: soc 2266 * @vdev: vdev 2267 * @txrx_peer: txrx peer 2268 * @nbuf_head: skb list head 2269 * 2270 * Check the return status of the call back function and drop 2271 * the packets if the return status indicates a failure. 
2272 * 2273 * Return: None 2274 */ 2275 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, 2276 struct dp_vdev *vdev, 2277 struct dp_txrx_peer *txrx_peer, 2278 qdf_nbuf_t nbuf_head) 2279 { 2280 int num_nbuf = 0; 2281 QDF_STATUS ret_val = QDF_STATUS_E_FAILURE; 2282 2283 /* Function pointer initialized only when FISA is enabled */ 2284 if (vdev->osif_fisa_rx) 2285 /* on failure send it via regular path */ 2286 ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head); 2287 else if (vdev->osif_rx) 2288 ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head); 2289 2290 if (!QDF_IS_STATUS_SUCCESS(ret_val)) { 2291 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); 2292 DP_STATS_INC(soc, rx.err.rejected, num_nbuf); 2293 if (txrx_peer) 2294 DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num, 2295 num_nbuf); 2296 } 2297 } 2298 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ 2299 2300 /** 2301 * dp_rx_validate_rx_callbacks() - validate rx callbacks 2302 * @soc: DP soc 2303 * @vdev: DP vdev handle 2304 * @txrx_peer: pointer to the txrx peer object 2305 * @nbuf_head: skb list head 2306 * 2307 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 2308 * QDF_STATUS_E_FAILURE 2309 */ 2310 static inline QDF_STATUS 2311 dp_rx_validate_rx_callbacks(struct dp_soc *soc, 2312 struct dp_vdev *vdev, 2313 struct dp_txrx_peer *txrx_peer, 2314 qdf_nbuf_t nbuf_head) 2315 { 2316 int num_nbuf; 2317 2318 if (qdf_unlikely(!vdev || vdev->delete.pending)) { 2319 num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head); 2320 /* 2321 * This is a special case where vdev is invalid, 2322 * so we cannot know the pdev to which this packet 2323 * belonged. Hence we update the soc rx error stats. 2324 */ 2325 DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf); 2326 return QDF_STATUS_E_FAILURE; 2327 } 2328 2329 /* 2330 * highly unlikely to have a vdev without a registered rx 2331 * callback function. if so let us free the nbuf_list. 
2332 */ 2333 if (qdf_unlikely(!vdev->osif_rx)) { 2334 if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) { 2335 dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head); 2336 } else { 2337 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, 2338 nbuf_head); 2339 DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf, 2340 vdev->pdev->enhanced_stats_en); 2341 } 2342 return QDF_STATUS_E_FAILURE; 2343 } 2344 2345 return QDF_STATUS_SUCCESS; 2346 } 2347 2348 #if defined(WLAN_FEATURE_11BE_MLO) && defined(RAW_PKT_MLD_ADDR_CONVERSION) 2349 static void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc, 2350 struct dp_vdev *vdev, 2351 struct dp_txrx_peer *txrx_peer, 2352 qdf_nbuf_t nbuf_head) 2353 { 2354 qdf_nbuf_t nbuf, next; 2355 struct dp_peer *peer = NULL; 2356 struct ieee80211_frame *wh = NULL; 2357 2358 if (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi) 2359 return; 2360 2361 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, 2362 DP_MOD_ID_RX); 2363 2364 if (!peer) 2365 return; 2366 2367 if (!IS_MLO_DP_MLD_PEER(peer)) { 2368 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2369 return; 2370 } 2371 2372 nbuf = nbuf_head; 2373 while (nbuf) { 2374 next = nbuf->next; 2375 wh = (struct ieee80211_frame *)qdf_nbuf_data(nbuf); 2376 qdf_mem_copy(wh->i_addr1, vdev->mld_mac_addr.raw, 2377 QDF_MAC_ADDR_SIZE); 2378 qdf_mem_copy(wh->i_addr2, peer->mac_addr.raw, 2379 QDF_MAC_ADDR_SIZE); 2380 nbuf = next; 2381 } 2382 2383 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2384 } 2385 #else 2386 static inline 2387 void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc, 2388 struct dp_vdev *vdev, 2389 struct dp_txrx_peer *txrx_peer, 2390 qdf_nbuf_t nbuf_head) 2391 { } 2392 #endif 2393 2394 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 2395 struct dp_vdev *vdev, 2396 struct dp_txrx_peer *txrx_peer, 2397 qdf_nbuf_t nbuf_head, 2398 qdf_nbuf_t nbuf_tail) 2399 { 2400 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2401 QDF_STATUS_SUCCESS) 2402 return QDF_STATUS_E_FAILURE; 2403 2404 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || 2405 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { 2406 dp_rx_raw_pkt_mld_addr_conv(soc, vdev, txrx_peer, nbuf_head); 2407 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, 2408 &nbuf_tail); 2409 } 2410 2411 dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head); 2412 2413 return QDF_STATUS_SUCCESS; 2414 } 2415 2416 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 2417 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 2418 struct dp_vdev *vdev, 2419 struct dp_txrx_peer *txrx_peer, 2420 qdf_nbuf_t nbuf_head, 2421 qdf_nbuf_t nbuf_tail) 2422 { 2423 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != 2424 QDF_STATUS_SUCCESS) 2425 return QDF_STATUS_E_FAILURE; 2426 2427 vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head); 2428 2429 return QDF_STATUS_SUCCESS; 2430 } 2431 #endif 2432 2433 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2434 #ifdef VDEV_PEER_PROTOCOL_COUNT 2435 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \ 2436 { \ 2437 qdf_nbuf_t nbuf_local; \ 2438 struct dp_txrx_peer *txrx_peer_local; \ 2439 struct dp_vdev *vdev_local = vdev_hdl; \ 2440 do { \ 2441 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ 2442 break; \ 2443 nbuf_local = nbuf; \ 2444 txrx_peer_local = txrx_peer; \ 2445 if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ 2446 break; \ 2447 else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ 2448 break; \ 2449 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ 2450 (nbuf_local), \ 2451 
(txrx_peer_local), 0, 1); \
2452 } while (0); \
2453 }
2454 #else
2455 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer)
2456 #endif
2457 
2458 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
2459 /**
2460 * dp_rx_rates_stats_update() - update rate stats
2461 * from rx msdu.
2462 * @soc: datapath soc handle
2463 * @nbuf: received msdu buffer
2464 * @rx_tlv_hdr: rx tlv header
2465 * @txrx_peer: datapath txrx_peer handle
2466 * @sgi: Short Guard Interval
2467 * @mcs: Modulation and Coding Set
2468 * @nss: Number of Spatial Streams
2469 * @bw: BandWidth
2470 * @pkt_type: Corresponds to preamble
2471 * @link_id: Link Id on which packet is received
2472 *
2473 * To record rates precisely, the following factors are considered:
2474 * specific frames (ARP, DHCP, SSDP, etc.) are excluded, and the impact
2475 * on rx throughput is kept as small as possible.
2476 *
2477 * Return: void
2478 */
2479 static void
2480 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2481 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
2482 uint32_t sgi, uint32_t mcs,
2483 uint32_t nss, uint32_t bw, uint32_t pkt_type,
2484 uint8_t link_id)
2485 {
2486 uint32_t rix;
2487 uint16_t ratecode;
2488 uint32_t avg_rx_rate;
2489 uint32_t ratekbps;
2490 enum cdp_punctured_modes punc_mode = NO_PUNCTURE;
2491 
2492 if (soc->high_throughput ||
2493 dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) {
2494 return;
2495 }
2496 
2497 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs, link_id);
2498 
2499 /* In 11b mode, the nss reported by the TLV is 0, which is invalid; treat it as 1 */
2500 if (qdf_unlikely(pkt_type == DOT11_B))
2501 nss = 1;
2502 
2503 /* here pkt_type corresponds to preamble */
2504 ratekbps = dp_getrateindex(sgi,
2505 mcs,
2506 nss - 1,
2507 pkt_type,
2508 bw,
2509 punc_mode,
2510 &rix,
2511 &ratecode);
2512 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps, link_id);
2513 avg_rx_rate =
2514 dp_ath_rate_lpf(
2515 txrx_peer->stats[link_id].extd_stats.rx.avg_rx_rate,
2516 ratekbps);
2517 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate, link_id);
2518 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss, link_id);
2519 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs, link_id);
2520 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw, link_id);
2521 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi, link_id);
2522 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type, link_id);
2523 }
2524 #else
2525 static inline void
2526 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2527 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
2528 uint32_t sgi, uint32_t mcs,
2529 uint32_t nss, uint32_t bw, uint32_t pkt_type,
2530 uint8_t link_id)
2531 {
2532 }
2533 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
2534 
2535 #ifndef QCA_ENHANCED_STATS_SUPPORT
2536 /**
2537 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer
2538 *
2539 * @soc: datapath soc handle
2540 * @nbuf: received msdu buffer
2541 * @rx_tlv_hdr: rx tlv header
2542 * @txrx_peer: datapath txrx_peer handle
2543 * @link_id: link id on which the packet is received
2544 *
2545 * Return: void
2546 */
2547 static inline
2548 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2549 uint8_t *rx_tlv_hdr,
2550 struct dp_txrx_peer *txrx_peer,
2551 uint8_t link_id)
2552 {
2553 bool is_ampdu;
2554 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
2555 uint8_t dst_mcs_idx;
2556 
2557 /*
2558 * TODO - For KIWI this field is present in ring_desc
2559 * Try to use ring desc instead of tlv.
2560 */ 2561 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr); 2562 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu, link_id); 2563 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu), 2564 link_id); 2565 2566 sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr); 2567 mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); 2568 tid = qdf_nbuf_get_tid_val(nbuf); 2569 bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr); 2570 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, 2571 rx_tlv_hdr); 2572 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 2573 pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); 2574 /* do HW to SW pkt type conversion */ 2575 pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX : 2576 hal_2_dp_pkt_type_map[pkt_type]); 2577 2578 /* 2579 * The MCS index does not start with 0 when NSS>1 in HT mode. 2580 * MCS params for optional 20/40MHz, NSS=1~3, EQM(NSS>1): 2581 * ------------------------------------------------------ 2582 * NSS | 1 | 2 | 3 | 4 2583 * ------------------------------------------------------ 2584 * MCS index: HT20 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31 2585 * ------------------------------------------------------ 2586 * MCS index: HT40 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31 2587 * ------------------------------------------------------ 2588 * Currently, the MAX_NSS=2. If NSS>2, MCS index = 8 * (NSS-1) 2589 */ 2590 if ((pkt_type == DOT11_N) && (nss == 2)) 2591 mcs += 8; 2592 2593 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1, 2594 ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)), 2595 link_id); 2596 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, 2597 ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)), 2598 link_id); 2599 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1, link_id); 2600 /* 2601 * only if nss > 0 and pkt_type is 11N/AC/AX, 2602 * then increase index [nss - 1] in array counter. 
2603 */ 2604 if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type)) 2605 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1, link_id); 2606 2607 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1, link_id); 2608 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1, 2609 hal_rx_tlv_mic_err_get(soc->hal_soc, 2610 rx_tlv_hdr), link_id); 2611 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1, 2612 hal_rx_tlv_decrypt_err_get(soc->hal_soc, 2613 rx_tlv_hdr), link_id); 2614 2615 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1, 2616 link_id); 2617 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1, 2618 link_id); 2619 2620 dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs); 2621 if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx) 2622 DP_PEER_EXTD_STATS_INC(txrx_peer, 2623 rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx], 2624 1, link_id); 2625 2626 dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, 2627 sgi, mcs, nss, bw, pkt_type, link_id); 2628 } 2629 #else 2630 static inline 2631 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2632 uint8_t *rx_tlv_hdr, 2633 struct dp_txrx_peer *txrx_peer, 2634 uint8_t link_id) 2635 { 2636 } 2637 #endif 2638 2639 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO) 2640 static inline void 2641 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2642 qdf_nbuf_t nbuf, uint8_t link_id) 2643 { 2644 uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf); 2645 2646 if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) { 2647 dp_err_rl("Invalid lmac_id: %u vdev_id: %u", 2648 lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf)); 2649 2650 if (qdf_likely(txrx_peer)) 2651 dp_err_rl("peer_id: %u", txrx_peer->peer_id); 2652 2653 return; 2654 } 2655 2656 /* only count stats per lmac for MLO connection*/ 2657 DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1, 2658 QDF_NBUF_CB_RX_PKT_LEN(nbuf), 2659 txrx_peer->is_mld_peer, link_id); 2660 } 2661 #else 2662 static inline void 2663 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, 2664 qdf_nbuf_t nbuf, uint8_t link_id) 2665 { 2666 } 2667 #endif 2668 2669 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2670 uint8_t *rx_tlv_hdr, 2671 struct dp_txrx_peer *txrx_peer, 2672 uint8_t ring_id, 2673 struct cdp_tid_rx_stats *tid_stats, 2674 uint8_t link_id) 2675 { 2676 bool is_not_amsdu; 2677 struct dp_vdev *vdev = txrx_peer->vdev; 2678 uint8_t enh_flag; 2679 qdf_ether_header_t *eh; 2680 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2681 2682 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); 2683 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 2684 qdf_nbuf_is_rx_chfrag_end(nbuf); 2685 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, 2686 msdu_len, link_id); 2687 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, 2688 is_not_amsdu, link_id); 2689 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, 2690 !is_not_amsdu, link_id); 2691 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1, 2692 qdf_nbuf_is_rx_retry_flag(nbuf), link_id); 2693 dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf, link_id); 2694 tid_stats->msdu_cnt++; 2695 enh_flag = vdev->pdev->enhanced_stats_en; 2696 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && 2697 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 2698 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2699 DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag, link_id); 2700 tid_stats->mcast_msdu_cnt++; 2701 if 
(QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { 2702 DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, 2703 enh_flag, link_id); 2704 tid_stats->bcast_msdu_cnt++; 2705 } 2706 } else { 2707 DP_PEER_UC_INCC_PKT(txrx_peer, 1, msdu_len, 2708 enh_flag, link_id); 2709 } 2710 2711 txrx_peer->stats[link_id].per_pkt_stats.rx.last_rx_ts = 2712 qdf_system_ticks(); 2713 2714 dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, 2715 txrx_peer, link_id); 2716 } 2717 2718 #ifndef WDS_VENDOR_EXTENSION 2719 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, 2720 struct dp_vdev *vdev, 2721 struct dp_txrx_peer *txrx_peer) 2722 { 2723 return 1; 2724 } 2725 #endif 2726 2727 #ifdef DP_RX_PKT_NO_PEER_DELIVER 2728 #ifdef DP_RX_UDP_OVER_PEER_ROAM 2729 /** 2730 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received 2731 * during roaming 2732 * @vdev: dp_vdev pointer 2733 * @rx_tlv_hdr: rx tlv header 2734 * @nbuf: pkt skb pointer 2735 * 2736 * This function will check if rx udp data is received from authorised 2737 * roamed peer before peer map indication is received from FW after 2738 * roaming. This is needed for VoIP scenarios in which packet loss 2739 * expected during roaming is minimal. 2740 * 2741 * Return: bool 2742 */ 2743 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2744 uint8_t *rx_tlv_hdr, 2745 qdf_nbuf_t nbuf) 2746 { 2747 char *hdr_desc; 2748 struct ieee80211_frame *wh = NULL; 2749 2750 hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc, 2751 rx_tlv_hdr); 2752 wh = (struct ieee80211_frame *)hdr_desc; 2753 2754 if (vdev->roaming_peer_status == 2755 WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED && 2756 !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2, 2757 QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) || 2758 qdf_nbuf_is_ipv6_udp_pkt(nbuf))) 2759 return true; 2760 2761 return false; 2762 } 2763 #else 2764 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, 2765 uint8_t *rx_tlv_hdr, 2766 qdf_nbuf_t nbuf) 2767 { 2768 return false; 2769 } 2770 #endif 2771 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2772 { 2773 uint16_t peer_id; 2774 uint8_t vdev_id; 2775 struct dp_vdev *vdev = NULL; 2776 uint32_t l2_hdr_offset = 0; 2777 uint16_t msdu_len = 0; 2778 uint32_t pkt_len = 0; 2779 uint8_t *rx_tlv_hdr; 2780 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 2781 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; 2782 bool is_special_frame = false; 2783 struct dp_peer *peer = NULL; 2784 2785 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 2786 if (peer_id > soc->max_peer_id) 2787 goto deliver_fail; 2788 2789 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); 2790 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX); 2791 if (!vdev || vdev->delete.pending) 2792 goto deliver_fail; 2793 2794 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) 2795 goto deliver_fail; 2796 2797 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2798 l2_hdr_offset = 2799 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 2800 2801 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 2802 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 2803 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 2804 2805 qdf_nbuf_set_pktlen(nbuf, pkt_len); 2806 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset); 2807 2808 is_special_frame = dp_rx_is_special_frame(nbuf, frame_mask); 2809 if (qdf_likely(vdev->osif_rx)) { 2810 if (is_special_frame || 2811 dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, 2812 nbuf)) { 2813 qdf_nbuf_set_exc_frame(nbuf, 1); 2814 if (QDF_STATUS_SUCCESS != 
2815 vdev->osif_rx(vdev->osif_vdev, nbuf)) 2816 goto deliver_fail; 2817 2818 DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1); 2819 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2820 return; 2821 } 2822 } else if (is_special_frame) { 2823 /* 2824 * If MLO connection, txrx_peer for link peer does not exist, 2825 * try to store these RX packets to txrx_peer's bufq of MLD 2826 * peer until vdev->osif_rx is registered from CP and flush 2827 * them to stack. 2828 */ 2829 peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, 2830 DP_MOD_ID_RX); 2831 if (!peer) 2832 goto deliver_fail; 2833 2834 /* only check for MLO connection */ 2835 if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer && 2836 dp_rx_is_peer_cache_bufq_supported()) { 2837 qdf_nbuf_set_exc_frame(nbuf, 1); 2838 2839 if (QDF_STATUS_SUCCESS == 2840 dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) { 2841 DP_STATS_INC(soc, 2842 rx.err.pkt_delivered_no_peer, 2843 1); 2844 } else { 2845 DP_STATS_INC(soc, 2846 rx.err.rx_invalid_peer.num, 2847 1); 2848 } 2849 2850 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2851 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2852 return; 2853 } 2854 2855 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2856 } 2857 2858 deliver_fail: 2859 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2860 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2861 dp_rx_nbuf_free(nbuf); 2862 if (vdev) 2863 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); 2864 } 2865 #else 2866 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) 2867 { 2868 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 2869 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2870 dp_rx_nbuf_free(nbuf); 2871 } 2872 #endif 2873 2874 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2875 2876 #ifdef WLAN_SUPPORT_RX_FISA 2877 QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id, 2878 enum cdp_fisa_config_id config_id, 2879 union cdp_fisa_config *cfg) 2880 { 2881 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 2882 struct dp_pdev *pdev; 2883 QDF_STATUS status; 2884 2885 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2886 if (!pdev) { 2887 dp_err("pdev is NULL for pdev_id %u", pdev_id); 2888 return QDF_STATUS_E_INVAL; 2889 } 2890 2891 switch (config_id) { 2892 case CDP_FISA_HTT_RX_FISA_CFG: 2893 status = dp_htt_rx_fisa_config(pdev, cfg->fisa_config); 2894 break; 2895 case CDP_FISA_HTT_RX_FSE_OP_CFG: 2896 status = dp_htt_rx_flow_fse_operation(pdev, cfg->fse_op_cmd); 2897 break; 2898 case CDP_FISA_HTT_RX_FSE_SETUP_CFG: 2899 status = dp_htt_rx_flow_fst_setup(pdev, cfg->fse_setup_info); 2900 break; 2901 default: 2902 status = QDF_STATUS_E_INVAL; 2903 } 2904 2905 return status; 2906 } 2907 2908 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2909 { 2910 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; 2911 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2912 } 2913 #else 2914 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) 2915 { 2916 qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size); 2917 } 2918 #endif 2919 2920 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2921 2922 #ifdef DP_RX_DROP_RAW_FRM 2923 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) 2924 { 2925 if (qdf_nbuf_is_raw_frame(nbuf)) { 2926 dp_rx_nbuf_free(nbuf); 2927 return true; 2928 } 2929 2930 return false; 2931 } 2932 #endif 2933 2934 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 2935 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) 2936 { 2937 DP_STATS_INC_PKT(soc, rx.ingress, 1, 2938 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2939 } 2940 #endif 2941 2942 
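/*
 * Packet capture (V2) delivery hooks follow: when packet capture mode is
 * enabled in the SoC config, received data frames are mirrored to the WDI
 * packet capture event handlers, both for frames with a resolved peer and
 * for no-peer deliveries.
 */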
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2 2943 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2944 uint16_t peer_id, uint32_t is_offload, 2945 qdf_nbuf_t netbuf) 2946 { 2947 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2948 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf, 2949 peer_id, is_offload, pdev->pdev_id); 2950 } 2951 2952 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2953 uint32_t is_offload) 2954 { 2955 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 2956 dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER, 2957 soc, nbuf, HTT_INVALID_VDEV, 2958 is_offload, 0); 2959 } 2960 #endif 2961 2962 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2963 2964 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) 2965 { 2966 QDF_STATUS ret; 2967 2968 if (vdev->osif_rx_flush) { 2969 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); 2970 if (!QDF_IS_STATUS_SUCCESS(ret)) { 2971 dp_err("Failed to flush rx pkts for vdev %d", 2972 vdev->vdev_id); 2973 return ret; 2974 } 2975 } 2976 2977 return QDF_STATUS_SUCCESS; 2978 } 2979 2980 static QDF_STATUS 2981 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, 2982 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 2983 struct dp_pdev *dp_pdev, 2984 struct rx_desc_pool *rx_desc_pool, 2985 bool dp_buf_page_frag_alloc_enable) 2986 { 2987 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 2988 2989 if (dp_buf_page_frag_alloc_enable) { 2990 (nbuf_frag_info_t->virt_addr).nbuf = 2991 qdf_nbuf_frag_alloc(dp_soc->osdev, 2992 rx_desc_pool->buf_size, 2993 RX_BUFFER_RESERVATION, 2994 rx_desc_pool->buf_alignment, FALSE); 2995 } else { 2996 (nbuf_frag_info_t->virt_addr).nbuf = 2997 qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, 2998 RX_BUFFER_RESERVATION, 2999 rx_desc_pool->buf_alignment, FALSE); 3000 } 3001 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 3002 dp_err("nbuf alloc failed"); 3003 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 3004 return ret; 3005 } 3006 3007 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, 3008 (nbuf_frag_info_t->virt_addr).nbuf, 3009 QDF_DMA_FROM_DEVICE, 3010 rx_desc_pool->buf_size); 3011 3012 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 3013 qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf); 3014 dp_err("nbuf map failed"); 3015 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 3016 return ret; 3017 } 3018 3019 nbuf_frag_info_t->paddr = 3020 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 3021 3022 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 3023 &nbuf_frag_info_t->paddr, 3024 rx_desc_pool); 3025 if (ret == QDF_STATUS_E_FAILURE) { 3026 dp_err("nbuf check x86 failed"); 3027 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 3028 return ret; 3029 } 3030 3031 return QDF_STATUS_SUCCESS; 3032 } 3033 3034 QDF_STATUS 3035 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 3036 struct dp_srng *dp_rxdma_srng, 3037 struct rx_desc_pool *rx_desc_pool, 3038 uint32_t num_req_buffers) 3039 { 3040 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 3041 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; 3042 union dp_rx_desc_list_elem_t *next; 3043 void *rxdma_ring_entry; 3044 qdf_dma_addr_t paddr; 3045 struct dp_rx_nbuf_frag_info *nf_info; 3046 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; 3047 uint32_t buffer_index, nbuf_ptrs_per_page; 3048 qdf_nbuf_t nbuf; 3049 QDF_STATUS ret; 3050 int page_idx, total_pages; 3051 union dp_rx_desc_list_elem_t *desc_list = NULL; 3052 union dp_rx_desc_list_elem_t *tail 
= NULL; 3053 int sync_hw_ptr = 1; 3054 uint32_t num_entries_avail; 3055 bool dp_buf_page_frag_alloc_enable; 3056 3057 if (qdf_unlikely(!dp_pdev)) { 3058 dp_rx_err("%pK: pdev is null for mac_id = %d", 3059 dp_soc, mac_id); 3060 return QDF_STATUS_E_FAILURE; 3061 } 3062 3063 dp_buf_page_frag_alloc_enable = 3064 wlan_cfg_is_dp_buf_page_frag_alloc_enable(dp_soc->wlan_cfg_ctx); 3065 3066 if (qdf_unlikely(!rxdma_srng)) { 3067 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3068 return QDF_STATUS_E_FAILURE; 3069 } 3070 3071 dp_debug("requested %u RX buffers for driver attach", num_req_buffers); 3072 3073 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3074 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 3075 rxdma_srng, 3076 sync_hw_ptr); 3077 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3078 3079 if (!num_entries_avail) { 3080 dp_err("Num of available entries is zero, nothing to do"); 3081 return QDF_STATUS_E_NOMEM; 3082 } 3083 3084 if (num_entries_avail < num_req_buffers) 3085 num_req_buffers = num_entries_avail; 3086 3087 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, 3088 num_req_buffers, &desc_list, &tail); 3089 if (!nr_descs) { 3090 dp_err("no free rx_descs in freelist"); 3091 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 3092 return QDF_STATUS_E_NOMEM; 3093 } 3094 3095 dp_debug("got %u RX descs for driver attach", nr_descs); 3096 3097 /* 3098 * Try to allocate pointers to the nbuf one page at a time. 3099 * Take pointers that can fit in one page of memory and 3100 * iterate through the total descriptors that need to be 3101 * allocated in order of pages. Reuse the pointers that 3102 * have been allocated to fit in one page across each 3103 * iteration to index into the nbuf. 3104 */ 3105 total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE; 3106 3107 /* 3108 * Add an extra page to store the remainder if any 3109 */ 3110 if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE) 3111 total_pages++; 3112 nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE); 3113 if (!nf_info) { 3114 dp_err("failed to allocate nbuf array"); 3115 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3116 QDF_BUG(0); 3117 return QDF_STATUS_E_NOMEM; 3118 } 3119 nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info); 3120 3121 for (page_idx = 0; page_idx < total_pages; page_idx++) { 3122 qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE); 3123 3124 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { 3125 /* 3126 * The last page of buffer pointers may not be required 3127 * completely based on the number of descriptors. Below 3128 * check will ensure we are allocating only the 3129 * required number of descriptors. 
3130 */ 3131 if (nr_nbuf_total >= nr_descs) 3132 break; 3133 /* Flag is set while pdev rx_desc_pool initialization */ 3134 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3135 ret = dp_pdev_frag_alloc_and_map(dp_soc, 3136 &nf_info[nr_nbuf], dp_pdev, 3137 rx_desc_pool); 3138 else 3139 ret = dp_pdev_nbuf_alloc_and_map(dp_soc, 3140 &nf_info[nr_nbuf], dp_pdev, 3141 rx_desc_pool, 3142 dp_buf_page_frag_alloc_enable); 3143 if (QDF_IS_STATUS_ERROR(ret)) 3144 break; 3145 3146 nr_nbuf_total++; 3147 } 3148 3149 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3150 3151 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { 3152 rxdma_ring_entry = 3153 hal_srng_src_get_next(dp_soc->hal_soc, 3154 rxdma_srng); 3155 qdf_assert_always(rxdma_ring_entry); 3156 3157 next = desc_list->next; 3158 paddr = nf_info[buffer_index].paddr; 3159 nbuf = nf_info[buffer_index].virt_addr.nbuf; 3160 3161 /* Flag is set while pdev rx_desc_pool initialization */ 3162 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3163 dp_rx_desc_frag_prep(&desc_list->rx_desc, 3164 &nf_info[buffer_index]); 3165 else 3166 dp_rx_desc_prep(&desc_list->rx_desc, 3167 &nf_info[buffer_index]); 3168 desc_list->rx_desc.in_use = 1; 3169 dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); 3170 dp_rx_desc_update_dbg_info(&desc_list->rx_desc, 3171 __func__, 3172 RX_DESC_REPLENISHED); 3173 3174 hal_rxdma_buff_addr_info_set(dp_soc->hal_soc ,rxdma_ring_entry, paddr, 3175 desc_list->rx_desc.cookie, 3176 rx_desc_pool->owner); 3177 3178 if (qdf_atomic_read(&dp_soc->ipa_mapped)) 3179 dp_ipa_handle_rx_buf_smmu_mapping( 3180 dp_soc, nbuf, 3181 rx_desc_pool->buf_size, true, 3182 __func__, __LINE__); 3183 3184 dp_audio_smmu_map(dp_soc->osdev, 3185 qdf_mem_paddr_from_dmaaddr(dp_soc->osdev, 3186 QDF_NBUF_CB_PADDR(nbuf)), 3187 QDF_NBUF_CB_PADDR(nbuf), 3188 rx_desc_pool->buf_size); 3189 3190 desc_list = next; 3191 } 3192 3193 dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, 3194 rxdma_srng, nr_nbuf, nr_nbuf); 3195 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3196 } 3197 3198 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); 3199 qdf_mem_free(nf_info); 3200 3201 if (!nr_nbuf_total) { 3202 dp_err("No nbuf's allocated"); 3203 QDF_BUG(0); 3204 return QDF_STATUS_E_RESOURCES; 3205 } 3206 3207 /* No need to count the number of bytes received during replenish. 3208 * Therefore set replenish.pkts.bytes as 0. 
3209 */ 3210 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); 3211 3212 return QDF_STATUS_SUCCESS; 3213 } 3214 3215 qdf_export_symbol(dp_pdev_rx_buffers_attach); 3216 3217 #ifdef DP_RX_MON_MEM_FRAG 3218 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3219 bool is_mon_dest_desc) 3220 { 3221 rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc; 3222 if (is_mon_dest_desc) 3223 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled"); 3224 } 3225 #else 3226 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3227 bool is_mon_dest_desc) 3228 { 3229 rx_desc_pool->rx_mon_dest_frag_enable = false; 3230 if (is_mon_dest_desc) 3231 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled"); 3232 } 3233 #endif 3234 3235 qdf_export_symbol(dp_rx_enable_mon_dest_frag); 3236 3237 QDF_STATUS 3238 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) 3239 { 3240 struct dp_soc *soc = pdev->soc; 3241 uint32_t rxdma_entries; 3242 uint32_t rx_sw_desc_num; 3243 struct dp_srng *dp_rxdma_srng; 3244 struct rx_desc_pool *rx_desc_pool; 3245 uint32_t status = QDF_STATUS_SUCCESS; 3246 int mac_for_pdev; 3247 3248 mac_for_pdev = pdev->lmac_id; 3249 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3250 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3251 soc, mac_for_pdev); 3252 return status; 3253 } 3254 3255 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3256 rxdma_entries = dp_rxdma_srng->num_entries; 3257 3258 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3259 rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3260 3261 rx_desc_pool->desc_type = QDF_DP_RX_DESC_BUF_TYPE; 3262 status = dp_rx_desc_pool_alloc(soc, 3263 rx_sw_desc_num, 3264 rx_desc_pool); 3265 if (status != QDF_STATUS_SUCCESS) 3266 return status; 3267 3268 return status; 3269 } 3270 3271 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) 3272 { 3273 int mac_for_pdev = pdev->lmac_id; 3274 struct dp_soc *soc = pdev->soc; 3275 struct rx_desc_pool *rx_desc_pool; 3276 3277 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3278 3279 dp_rx_desc_pool_free(soc, rx_desc_pool); 3280 } 3281 3282 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) 3283 { 3284 int mac_for_pdev = pdev->lmac_id; 3285 struct dp_soc *soc = pdev->soc; 3286 uint32_t rxdma_entries; 3287 uint32_t rx_sw_desc_num; 3288 struct dp_srng *dp_rxdma_srng; 3289 struct rx_desc_pool *rx_desc_pool; 3290 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3291 3292 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3293 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3294 /* 3295 * If NSS is enabled, rx_desc_pool is already filled. 3296 * Hence, just disable desc_pool frag flag. 
3297 */ 3298 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3299 3300 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3301 soc, mac_for_pdev); 3302 return QDF_STATUS_SUCCESS; 3303 } 3304 3305 if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM) 3306 return QDF_STATUS_E_NOMEM; 3307 3308 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3309 rxdma_entries = dp_rxdma_srng->num_entries; 3310 3311 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 3312 3313 rx_sw_desc_num = 3314 wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3315 3316 rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc); 3317 rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; 3318 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; 3319 /* Disable monitor dest processing via frag */ 3320 if (target_type == TARGET_TYPE_QCN9160) { 3321 rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE; 3322 rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT; 3323 dp_rx_enable_mon_dest_frag(rx_desc_pool, true); 3324 } else { 3325 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3326 } 3327 3328 dp_rx_desc_pool_init(soc, mac_for_pdev, 3329 rx_sw_desc_num, rx_desc_pool); 3330 return QDF_STATUS_SUCCESS; 3331 } 3332 3333 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) 3334 { 3335 int mac_for_pdev = pdev->lmac_id; 3336 struct dp_soc *soc = pdev->soc; 3337 struct rx_desc_pool *rx_desc_pool; 3338 3339 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3340 3341 dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev); 3342 } 3343 3344 QDF_STATUS 3345 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) 3346 { 3347 int mac_for_pdev = pdev->lmac_id; 3348 struct dp_soc *soc = pdev->soc; 3349 struct dp_srng *dp_rxdma_srng; 3350 struct rx_desc_pool *rx_desc_pool; 3351 uint32_t rxdma_entries; 3352 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3353 3354 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3355 rxdma_entries = dp_rxdma_srng->num_entries; 3356 3357 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3358 3359 /* Initialize RX buffer pool which will be 3360 * used during low memory conditions 3361 */ 3362 dp_rx_buffer_pool_init(soc, mac_for_pdev); 3363 3364 if (target_type == TARGET_TYPE_QCN9160) 3365 return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, 3366 dp_rxdma_srng, 3367 rx_desc_pool, 3368 rxdma_entries - 1); 3369 else 3370 return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev, 3371 dp_rxdma_srng, 3372 rx_desc_pool, 3373 rxdma_entries - 1); 3374 } 3375 3376 void 3377 dp_rx_pdev_buffers_free(struct dp_pdev *pdev) 3378 { 3379 int mac_for_pdev = pdev->lmac_id; 3380 struct dp_soc *soc = pdev->soc; 3381 struct rx_desc_pool *rx_desc_pool; 3382 uint32_t target_type = hal_get_target_type(soc->hal_soc); 3383 3384 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3385 3386 if (target_type == TARGET_TYPE_QCN9160) 3387 dp_rx_desc_frag_free(soc, rx_desc_pool); 3388 else 3389 dp_rx_desc_nbuf_free(soc, rx_desc_pool, false); 3390 3391 dp_rx_buffer_pool_deinit(soc, mac_for_pdev); 3392 } 3393 3394 #ifdef DP_RX_SPECIAL_FRAME_NEED 3395 bool dp_rx_deliver_special_frame(struct dp_soc *soc, 3396 struct dp_txrx_peer *txrx_peer, 3397 qdf_nbuf_t nbuf, uint32_t frame_mask, 3398 uint8_t *rx_tlv_hdr) 3399 { 3400 uint32_t l2_hdr_offset = 0; 3401 uint16_t msdu_len = 0; 3402 uint32_t skip_len; 3403 3404 l2_hdr_offset = 3405 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 3406 3407 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 3408 skip_len = l2_hdr_offset; 3409 } else { 3410 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 3411 
skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
3412 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
3413 }
3414 
3415 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
3416 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
3417 qdf_nbuf_pull_head(nbuf, skip_len);
3418 
3419 if (txrx_peer->vdev) {
3420 dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf,
3421 QDF_TX_RX_STATUS_OK);
3422 }
3423 
3424 if (dp_rx_is_special_frame(nbuf, frame_mask)) {
3425 dp_info("special frame, mpdu sn 0x%x",
3426 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
3427 qdf_nbuf_set_exc_frame(nbuf, 1);
3428 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer,
3429 nbuf, NULL);
3430 return true;
3431 }
3432 
3433 return false;
3434 }
3435 #endif
3436 
3437 #ifdef QCA_MULTIPASS_SUPPORT
3438 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
3439 uint8_t tid)
3440 {
3441 struct vlan_ethhdr *vethhdrp;
3442 
3443 if (qdf_unlikely(!txrx_peer->vlan_id))
3444 return true;
3445 
3446 vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf);
3447 /*
3448 * h_vlan_proto should be 0x8100 and h_vlan_TCI should be zero, since
3449 * the tag is expected to be padded with 0.
3450 * Return false if the frame does not carry this tag so that the caller
3451 * drops the frame.
3452 */
3453 if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) ||
3454 qdf_unlikely(vethhdrp->h_vlan_TCI != 0))
3455 return false;
3456 
3457 vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
3458 (txrx_peer->vlan_id & VLAN_VID_MASK));
3459 
3460 if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE))
3461 dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf);
3462 
3463 return true;
3464 }
3465 #endif /* QCA_MULTIPASS_SUPPORT */
3466 
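/*
 * Illustrative caller contract for dp_rx_multipass_process() above (a
 * minimal sketch, not code taken from this file): the Rx data path is
 * expected to drop the frame when the check fails, e.g.
 *
 *	if (!dp_rx_multipass_process(txrx_peer, nbuf, tid)) {
 *		// frame lacks the expected zero-padded 802.1Q tag
 *		dp_rx_nbuf_free(nbuf);
 *		return;
 *	}
 *
 * On success the TCI is stamped with the TID-derived priority and the
 * peer's vlan_id before the frame is delivered further.
 */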