/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef ATH_RX_PRI_SAVE
#define DP_RX_TID_SAVE(_nbuf, _tid) \
        (qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif

#ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
{
        if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
            qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
                DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
                return false;
        }
        return true;
}
#else
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
{
        return true;
}
#endif

static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
        return vdev->ap_bridge_enabled;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
                                hal_ring_handle_t hal_ring,
                                hal_ring_desc_t ring_desc,
                                struct dp_rx_desc *rx_desc)
{
        void *hal_soc = soc->hal_soc;

        hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
        dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
                                hal_ring_handle_t hal_ring_hdl,
                                hal_ring_desc_t ring_desc,
                                struct dp_rx_desc *rx_desc)
{
        hal_soc_handle_t hal_soc = soc->hal_soc;

        dp_rx_desc_dump(rx_desc);
        hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
        hal_srng_dump_ring(hal_soc, hal_ring_hdl);
        qdf_assert_always(0);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
                             hal_ring_handle_t hal_ring_hdl,
                             hal_ring_desc_t ring_desc,
                             struct dp_rx_desc *rx_desc)
{
        uint8_t return_buffer_manager;

        if (qdf_unlikely(!rx_desc)) {
                /*
                 * This is an unlikely case where the cookie obtained
                 * from the ring_desc is invalid and hence we are not
                 * able to find the corresponding rx_desc
                 */
                goto fail;
        }

        return_buffer_manager = hal_rx_ret_buf_manager_get(ring_desc);
        if (qdf_unlikely(!(return_buffer_manager == HAL_RX_BUF_RBM_SW1_BM ||
                           return_buffer_manager ==
                           HAL_RX_BUF_RBM_SW3_BM))) {
                goto fail;
        }

        return QDF_STATUS_SUCCESS;

fail:
        DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
        dp_err("Ring Desc:");
        hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
        return QDF_STATUS_E_NULL_VALUE;
}
#else
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
                             hal_ring_handle_t hal_ring_hdl,
                             hal_ring_desc_t ring_desc,
                             struct dp_rx_desc *rx_desc)
{
        return QDF_STATUS_SUCCESS;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
 *
 * @dp_soc: struct dp_soc *
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
                           struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
                           struct dp_pdev *dp_pdev,
                           struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS ret = QDF_STATUS_E_FAILURE;

        (nbuf_frag_info_t->virt_addr).vaddr =
                qdf_frag_alloc(rx_desc_pool->buf_size);

        if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
                dp_err("Frag alloc failed");
                DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
                return QDF_STATUS_E_NOMEM;
        }

        ret = qdf_mem_map_page(dp_soc->osdev,
                               (nbuf_frag_info_t->virt_addr).vaddr,
                               QDF_DMA_FROM_DEVICE,
                               rx_desc_pool->buf_size,
                               &nbuf_frag_info_t->paddr);

        if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
                dp_err("Frag map failed");
                DP_STATS_INC(dp_pdev, replenish.map_err, 1);
                return QDF_STATUS_E_FAULT;
        }

        return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
                           struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
                           struct dp_pdev *dp_pdev,
                           struct rx_desc_pool *rx_desc_pool)
{
        return QDF_STATUS_SUCCESS;
}
#endif /* DP_RX_MON_MEM_FRAG */

/**
 * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer
 *                                          and map
 *
 * @dp_soc: struct dp_soc *
 * @mac_id: Mac id
 * @num_entries_avail: num_entries_avail
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
                                     uint32_t mac_id,
                                     uint32_t num_entries_avail,
                                     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
                                     struct dp_pdev *dp_pdev,
                                     struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS ret = QDF_STATUS_E_FAILURE;

        (nbuf_frag_info_t->virt_addr).nbuf =
                dp_rx_buffer_pool_nbuf_alloc(dp_soc,
                                             mac_id,
                                             rx_desc_pool,
                                             num_entries_avail);
        if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
                dp_err("nbuf alloc failed");
                DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
                return QDF_STATUS_E_NOMEM;
        }

        ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
                                         nbuf_frag_info_t);
        if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                dp_rx_buffer_pool_nbuf_free(dp_soc,
                        (nbuf_frag_info_t->virt_addr).nbuf, mac_id);
                dp_err("nbuf map failed");
                DP_STATS_INC(dp_pdev, replenish.map_err, 1);
                return QDF_STATUS_E_FAULT;
        }

        nbuf_frag_info_t->paddr =
                qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
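
        /*
         * dp_check_paddr() is expected to validate that the DMA address is
         * usable by the hardware and, judging by the replenish.x86_fail
         * counter below, may substitute a buffer from an addressable region
         * on platforms with DMA range constraints.
         */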
        ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
                             &nbuf_frag_info_t->paddr,
                             rx_desc_pool);
        if (ret == QDF_STATUS_E_FAILURE) {
                DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
                return QDF_STATUS_E_ADDRNOTAVAIL;
        }

        return QDF_STATUS_SUCCESS;
}

/*
 * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *                               called during dp rx initialization
 *                               and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *             or NULL during dp rx initialization or out of buffer
 *             interrupt.
 * @tail: tail of descs list
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
                                     struct dp_srng *dp_rxdma_srng,
                                     struct rx_desc_pool *rx_desc_pool,
                                     uint32_t num_req_buffers,
                                     union dp_rx_desc_list_elem_t **desc_list,
                                     union dp_rx_desc_list_elem_t **tail,
                                     const char *func_name)
{
        uint32_t num_alloc_desc;
        uint16_t num_desc_to_free = 0;
        struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
        uint32_t num_entries_avail;
        uint32_t count;
        int sync_hw_ptr = 1;
        struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
        void *rxdma_ring_entry;
        union dp_rx_desc_list_elem_t *next;
        QDF_STATUS ret;
        void *rxdma_srng;

        rxdma_srng = dp_rxdma_srng->hal_srng;

        if (!rxdma_srng) {
                dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
                DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
                return QDF_STATUS_E_FAILURE;
        }

        dp_rx_debug("%pK: requested %d buffers for replenish",
                    dp_soc, num_req_buffers);

        hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
        num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
                                                   rxdma_srng,
                                                   sync_hw_ptr);

        dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
                    dp_soc, num_entries_avail);

        if (!(*desc_list) && (num_entries_avail >
                ((dp_rxdma_srng->num_entries * 3) / 4))) {
                num_req_buffers = num_entries_avail;
        } else if (num_entries_avail < num_req_buffers) {
                num_desc_to_free = num_req_buffers - num_entries_avail;
                num_req_buffers = num_entries_avail;
        }

        if (qdf_unlikely(!num_req_buffers)) {
                num_desc_to_free = num_req_buffers;
                hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
                goto free_descs;
        }

        /*
         * if desc_list is NULL, allocate the descs from freelist
         */
        if (!(*desc_list)) {
                num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
                                                          rx_desc_pool,
                                                          num_req_buffers,
                                                          desc_list,
                                                          tail);

                if (!num_alloc_desc) {
                        dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
                        DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
                                     num_req_buffers);
                        hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
                        return QDF_STATUS_E_NOMEM;
                }

                dp_rx_debug("%pK: %d rx desc allocated", dp_soc,
                            num_alloc_desc);
                num_req_buffers = num_alloc_desc;
        }

        count = 0;

        while (count < num_req_buffers) {
                /* Flag is set while pdev rx_desc_pool initialization */
                if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
                        ret = dp_pdev_frag_alloc_and_map(dp_soc,
                                                         &nbuf_frag_info,
                                                         dp_pdev,
                                                         rx_desc_pool);
                else
                        ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
                                        mac_id,
                                        num_entries_avail, &nbuf_frag_info,
                                        dp_pdev, rx_desc_pool);

                if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                        if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
                                continue;
                        break;
                }

                count++;

                rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
                                                         rxdma_srng);
                qdf_assert_always(rxdma_ring_entry);

                next = (*desc_list)->next;

                /* Flag is set while pdev rx_desc_pool initialization */
                if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
                        dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
                                             &nbuf_frag_info);
                else
                        dp_rx_desc_prep(&((*desc_list)->rx_desc),
                                        &nbuf_frag_info);

                /* rx_desc.in_use should be zero at this time */
                qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

                (*desc_list)->rx_desc.in_use = 1;
                (*desc_list)->rx_desc.in_err_state = 0;
                dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
                                           func_name, RX_DESC_REPLENISHED);
                dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
                                 nbuf_frag_info.virt_addr.nbuf,
                                 (unsigned long long)(nbuf_frag_info.paddr),
                                 (*desc_list)->rx_desc.cookie);

                hal_rxdma_buff_addr_info_set(rxdma_ring_entry,
                                             nbuf_frag_info.paddr,
                                             (*desc_list)->rx_desc.cookie,
                                             rx_desc_pool->owner);

                *desc_list = next;
        }

        hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

        dp_rx_schedule_refill_thread(dp_soc);

        dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
                         count, num_desc_to_free);

        /* No need to count the number of bytes received during replenish.
         * Therefore set replenish.pkts.bytes as 0.
         */
        DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

free_descs:
        DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
        /*
         * add any available free desc back to the free list
         */
        if (*desc_list)
                dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
                                                 mac_id, rx_desc_pool);

        return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *                       pkts to RAW mode simulation to
 *                       decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
                  struct dp_peer *peer)
{
        qdf_nbuf_t deliver_list_head = NULL;
        qdf_nbuf_t deliver_list_tail = NULL;
        qdf_nbuf_t nbuf;

        nbuf = nbuf_list;
        while (nbuf) {
                qdf_nbuf_t next = qdf_nbuf_next(nbuf);

                DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

                DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
                DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
                /*
                 * reset the chfrag_start and chfrag_end bits in nbuf cb
                 * as this is a non-amsdu pkt and RAW mode simulation expects
                 * these bits to be 0 for a non-amsdu pkt.
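                 * (Presumably the RAW decap path keys off these flags to
                 * detect an A-MSDU spread over multiple buffers, much as
                 * dp_rx_sg_create() below does for the non-RAW path.)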
                 */
                if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
                    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
                        qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
                        qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
                }

                nbuf = next;
        }

        vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
                                 &deliver_list_tail, peer->mac_addr.raw);

        vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifndef FEATURE_WDS
static void
dp_rx_da_learn(struct dp_soc *soc,
               uint8_t *rx_tlv_hdr,
               struct dp_peer *ta_peer,
               qdf_nbuf_t nbuf)
{
}
#endif

/*
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
                   struct dp_peer *ta_peer,
                   uint8_t *rx_tlv_hdr,
                   qdf_nbuf_t nbuf,
                   struct hal_rx_msdu_metadata msdu_metadata)
{
        uint16_t len;
        uint8_t is_frag;
        uint16_t da_peer_id = HTT_INVALID_PEER;
        struct dp_peer *da_peer = NULL;
        bool is_da_bss_peer = false;
        struct dp_ast_entry *ast_entry;
        qdf_nbuf_t nbuf_copy;
        uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
        uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
        struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
                                        tid_stats.tid_rx_stats[ring_id][tid];

        /* check if the destination peer is available in peer table
         * and also check if the source peer and destination peer
         * belong to the same vap and destination peer is not bss peer.
         */
        if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
                ast_entry = soc->ast_table[msdu_metadata.da_idx];
                if (!ast_entry)
                        return false;

                if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
                        ast_entry->is_active = TRUE;
                        return false;
                }

                da_peer_id = ast_entry->peer_id;

                if (da_peer_id == HTT_INVALID_PEER)
                        return false;
                /* TA peer cannot be same as peer(DA) on which AST is present
                 * this indicates a change in topology and that AST entries
                 * are yet to be updated.
                 */
                if (da_peer_id == ta_peer->peer_id)
                        return false;

                if (ast_entry->vdev_id != ta_peer->vdev->vdev_id)
                        return false;

                da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
                                                DP_MOD_ID_RX);
                if (!da_peer)
                        return false;
                is_da_bss_peer = da_peer->bss_peer;
                dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);

                if (!is_da_bss_peer) {
                        len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
                        is_frag = qdf_nbuf_is_frag(nbuf);
                        memset(nbuf->cb, 0x0, sizeof(nbuf->cb));

                        /* If the source or destination peer is in the
                         * isolation list, don't forward; instead push
                         * to the bridge stack.
                         */
                        if (dp_get_peer_isolation(ta_peer) ||
                            dp_get_peer_isolation(da_peer))
                                return false;

                        /* linearize the nbuf just before we send to
                         * dp_tx_send()
                         */
                        if (qdf_unlikely(is_frag)) {
                                if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
                                        return false;

                                nbuf = qdf_nbuf_unshare(nbuf);
                                if (!nbuf) {
                                        DP_STATS_INC_PKT(ta_peer,
                                                         rx.intra_bss.fail,
                                                         1,
                                                         len);
                                        /* return true even though the pkt is
                                         * not forwarded. Basically skb_unshare
                                         * failed and we want to continue with
                                         * the next nbuf.
                                         */
                                        tid_stats->fail_cnt[INTRABSS_DROP]++;
                                        return true;
                                }
                        }

                        if (!dp_tx_send((struct cdp_soc_t *)soc,
                                        ta_peer->vdev->vdev_id, nbuf)) {
                                DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
                                                 len);
                                return true;
                        } else {
                                DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
                                                 len);
                                tid_stats->fail_cnt[INTRABSS_DROP]++;
                                return false;
                        }
                }
        }
        /* if it is a broadcast pkt (eg: ARP) and it is not its own
         * source, then clone the pkt and send the cloned pkt for
         * intra BSS forwarding and original pkt up the network stack
         * Note: how do we handle multicast pkts. do we forward
         * all multicast pkts as is or let a higher layer module
         * like igmpsnoop decide whether to forward or not with
         * Mcast enhancement.
         */
        else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
                               !ta_peer->bss_peer))) {
                if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
                        goto end;

                /* If the source peer is in the isolation list,
                 * don't forward; instead push to the bridge stack.
                 */
                if (dp_get_peer_isolation(ta_peer))
                        goto end;

                nbuf_copy = qdf_nbuf_copy(nbuf);
                if (!nbuf_copy)
                        goto end;

                len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
                memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));

                /* Set cb->ftype to intrabss FWD */
                qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
                if (dp_tx_send((struct cdp_soc_t *)soc,
                               ta_peer->vdev->vdev_id, nbuf_copy)) {
                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
                        tid_stats->fail_cnt[INTRABSS_DROP]++;
                        qdf_nbuf_free(nbuf_copy);
                } else {
                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
                        tid_stats->intrabss_cnt++;
                }
        }

end:
        /* return false as we have to still send the original pkt
         * up the stack
         */
        return false;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
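 * The allocation is handed to the upper layer via CB_FTYPE_MESH_RX_INFO,
 * and that layer is expected to free it (see the note in the body below).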
 *
 * Return: void
 */
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                           uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
        struct mesh_recv_hdr_s *rx_info = NULL;
        uint32_t pkt_type;
        uint32_t nss;
        uint32_t rate_mcs;
        uint32_t bw;
        uint8_t primary_chan_num;
        uint32_t center_chan_freq;
        struct dp_soc *soc;

        /* fill recv mesh stats */
        rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

        /* upper layers are responsible to free this memory */
        if (!rx_info) {
                dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
                          vdev->pdev->soc);
                DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
                return;
        }

        rx_info->rs_flags = MESH_RXHDR_VER1;
        if (qdf_nbuf_is_rx_chfrag_start(nbuf))
                rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

        if (qdf_nbuf_is_rx_chfrag_end(nbuf))
                rx_info->rs_flags |= MESH_RX_LAST_MSDU;

        if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
                rx_info->rs_flags |= MESH_RX_DECRYPTED;
                rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
                if (vdev->osif_get_key)
                        vdev->osif_get_key(vdev->osif_vdev,
                                           &rx_info->rs_decryptkey[0],
                                           &peer->mac_addr.raw[0],
                                           rx_info->rs_keyix);
        }

        rx_info->rs_snr = peer->stats.rx.snr;
        rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;

        soc = vdev->pdev->soc;
        primary_chan_num = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
        center_chan_freq = hal_rx_msdu_start_get_freq(rx_tlv_hdr) >> 16;

        if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
                rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
                                                soc->ctrl_psoc,
                                                vdev->pdev->pdev_id,
                                                center_chan_freq);
        }
        rx_info->rs_channel = primary_chan_num;
        pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
        rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
        bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
        nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
        rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
                               (bw << 24);

        qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info,
                                  CB_FTYPE_MESH_RX_INFO);

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
                  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
                  rx_info->rs_flags,
                  rx_info->rs_rssi,
                  rx_info->rs_channel,
                  rx_info->rs_ratephy1,
                  rx_info->rs_keyix,
                  rx_info->rs_snr);
}

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any of the configured
 * filter-out categories and drops the packet if it matches.
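 * The categories, per the MESH_FILTER_OUT_* flags handled below, are
 * FromDS frames, ToDS frames, NoDS frames, frames whose receiver
 * address (RA) matches the vdev, and frames whose transmitter address
 * (TA) matches the vdev.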
 *
 * Return: status (0 indicates drop, 1 indicates no drop)
 */
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                     uint8_t *rx_tlv_hdr)
{
        union dp_align_mac_addr mac_addr;
        struct dp_soc *soc = vdev->pdev->soc;

        if (qdf_unlikely(vdev->mesh_rx_filter)) {
                if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
                        if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
                                                  rx_tlv_hdr))
                                return QDF_STATUS_SUCCESS;

                if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
                        if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
                                                  rx_tlv_hdr))
                                return QDF_STATUS_SUCCESS;

                if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
                        if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
                                                   rx_tlv_hdr) &&
                            !hal_rx_mpdu_get_to_ds(soc->hal_soc,
                                                   rx_tlv_hdr))
                                return QDF_STATUS_SUCCESS;

                if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
                        if (hal_rx_mpdu_get_addr1(soc->hal_soc,
                                                  rx_tlv_hdr,
                                                  &mac_addr.raw[0]))
                                return QDF_STATUS_E_FAILURE;

                        if (!qdf_mem_cmp(&mac_addr.raw[0],
                                         &vdev->mac_addr.raw[0],
                                         QDF_MAC_ADDR_SIZE))
                                return QDF_STATUS_SUCCESS;
                }

                if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
                        if (hal_rx_mpdu_get_addr2(soc->hal_soc,
                                                  rx_tlv_hdr,
                                                  &mac_addr.raw[0]))
                                return QDF_STATUS_E_FAILURE;

                        if (!qdf_mem_cmp(&mac_addr.raw[0],
                                         &vdev->mac_addr.raw[0],
                                         QDF_MAC_ADDR_SIZE))
                                return QDF_STATUS_SUCCESS;
                }
        }

        return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                           uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                     uint8_t *rx_tlv_hdr)
{
        return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_nac_filter() - Function to perform filtering of non-associated
 *                      clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * Return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
                                 uint8_t *rx_pkt_hdr)
{
        struct ieee80211_frame *wh;
        struct dp_neighbour_peer *peer = NULL;

        wh = (struct ieee80211_frame *)rx_pkt_hdr;

        if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
                return NULL;

        qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
        TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
                      neighbour_peer_list_elem) {
                if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
                                wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
                        dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x",
                                    pdev->soc,
                                    peer->neighbour_peers_macaddr.raw[0],
                                    peer->neighbour_peers_macaddr.raw[1],
                                    peer->neighbour_peers_macaddr.raw[2],
                                    peer->neighbour_peers_macaddr.raw[3],
                                    peer->neighbour_peers_macaddr.raw[4],
                                    peer->neighbour_peers_macaddr.raw[5]);

                        qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

                        return pdev->monitor_vdev;
                }
        }
        qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

        return NULL;
}

/**
 * dp_rx_process_invalid_peer() - Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mac_id: mac_id which is one of 3 mac_ids (Assuming mac_id and
 *          pool_id has the same mapping)
 *
 * Return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
                                   uint8_t mac_id)
{
        struct dp_invalid_peer_msg msg;
        struct dp_vdev *vdev = NULL;
        struct dp_pdev *pdev = NULL;
        struct ieee80211_frame *wh;
        qdf_nbuf_t curr_nbuf, next_nbuf;
        uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
        uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

        if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
                dp_rx_debug("%pK: Drop decapped frames", soc);
                goto free;
        }

        wh = (struct ieee80211_frame *)rx_pkt_hdr;

        if (!DP_FRAME_IS_DATA(wh)) {
                dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
                goto free;
        }

        if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
                dp_rx_err("%pK: Invalid nbuf length", soc);
                goto free;
        }

        pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

        if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
                dp_rx_err("%pK: PDEV %s", soc, !pdev ? "not found" : "down");
                goto free;
        }

        if (pdev->filter_neighbour_peers) {
                /* Next Hop scenario not yet handled */
                vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
                if (vdev) {
                        dp_rx_mon_deliver(soc, pdev->pdev_id,
                                          pdev->invalid_peer_head_msdu,
                                          pdev->invalid_peer_tail_msdu);

                        pdev->invalid_peer_head_msdu = NULL;
                        pdev->invalid_peer_tail_msdu = NULL;

                        return 0;
                }
        }

        TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
                                QDF_MAC_ADDR_SIZE) == 0) {
                        goto out;
                }
        }

        if (!vdev) {
                dp_rx_err("%pK: VDEV not found", soc);
                goto free;
        }

out:
        msg.wh = wh;
        qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
        msg.nbuf = mpdu;
        msg.vdev_id = vdev->vdev_id;

        /*
         * NOTE: Only valid for HKv1.
         * If smart monitor mode is enabled on RE, we are getting invalid
         * peer frames with RA as STA mac of RE and the TA not matching
         * any NAC list or the BSSID. Such frames need to be dropped
         * in order to avoid HM_WDS false addition.
         */
        if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
                if (!soc->hw_nac_monitor_support &&
                    pdev->filter_neighbour_peers &&
                    vdev->opmode == wlan_op_mode_sta) {
                        dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
                                   soc, wh->i_addr1);
                        goto free;
                }
                pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
                                (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
                                pdev->pdev_id, &msg);
        }

free:
        /* Drop and free packet */
        curr_nbuf = mpdu;
        while (curr_nbuf) {
                next_nbuf = qdf_nbuf_next(curr_nbuf);
                qdf_nbuf_free(curr_nbuf);
                curr_nbuf = next_nbuf;
        }

        return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper() - Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 * @mac_id: mac_id which is one of 3 mac_ids (Assuming mac_id and
 *          pool_id has the same mapping)
 *
 * Return: void
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
                                        qdf_nbuf_t mpdu, bool mpdu_done,
                                        uint8_t mac_id)
{
        /* Only trigger the process when mpdu is completed */
        if (mpdu_done)
                dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
                                   uint8_t mac_id)
{
        qdf_nbuf_t curr_nbuf, next_nbuf;
        struct dp_pdev *pdev;
        struct dp_vdev *vdev = NULL;
        struct ieee80211_frame *wh;
        uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
        uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

        wh = (struct ieee80211_frame *)rx_pkt_hdr;

        if (!DP_FRAME_IS_DATA(wh)) {
                QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
                                   "only for data frames");
                goto free;
        }

        if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
                dp_rx_err("%pK: Invalid nbuf length", soc);
                goto free;
        }

        pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        if (!pdev) {
                dp_rx_err("%pK: PDEV not found", soc);
                goto free;
        }

        qdf_spin_lock_bh(&pdev->vdev_list_lock);
        DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
                if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
                                QDF_MAC_ADDR_SIZE) == 0) {
                        qdf_spin_unlock_bh(&pdev->vdev_list_lock);
                        goto out;
                }
        }
        qdf_spin_unlock_bh(&pdev->vdev_list_lock);

        if (!vdev) {
                dp_rx_err("%pK: VDEV not found", soc);
                goto free;
        }

out:
        if (soc->cdp_soc.ol_ops->rx_invalid_peer)
                soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
        /* Reset the head and tail pointers */
        pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        if (pdev) {
                pdev->invalid_peer_head_msdu = NULL;
                pdev->invalid_peer_tail_msdu = NULL;
        }

        /* Drop and free packet */
        curr_nbuf = mpdu;
        while (curr_nbuf) {
                next_nbuf = qdf_nbuf_next(curr_nbuf);
                qdf_nbuf_free(curr_nbuf);
                curr_nbuf = next_nbuf;
        }

        return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
                                        qdf_nbuf_t mpdu, bool mpdu_done,
                                        uint8_t mac_id)
{
        /* Process the nbuf */
        dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @soc: dp soc handle
 * @rx_tlv: RX TLV for which offload information is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
{
        dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
        dp_verbose_debug("lro_eligible 0x%x",
                         HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
        dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
        dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
                                                                  rx_tlv));
        dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
        dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
        dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
        dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
        dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
        dp_verbose_debug("toeplitz 0x%x",
                         HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
        dp_verbose_debug("---------------------------------------------------------");
}

/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
 *
 * Return: None
 */
static
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
                         qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
        if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
                return;

        /* Filling up RX offload info only for TCP packets */
        if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
                return;

        *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;

        QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
                HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
        QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
                HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
        QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
                hal_rx_tlv_get_tcp_chksum(soc->hal_soc, rx_tlv);
        QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
                HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
        QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
                HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
        QDF_NBUF_CB_RX_TCP_WIN(msdu) =
                HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
        QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
                HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
        QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
                HAL_RX_TLV_GET_IPV6(rx_tlv);
        QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
                HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
        QDF_NBUF_CB_RX_FLOW_ID(msdu) =
                HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);

        dp_rx_print_offload_info(soc, rx_tlv);
}
#else
static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
                                qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
}
#endif /* RECEIVE_OFFLOAD */

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
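 *
 * As a side effect, *mpdu_len is decremented by the per-buffer payload
 * size, so successive calls walk down the remaining mpdu length.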
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
        bool last_nbuf;

        if (*mpdu_len > (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
                qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
                last_nbuf = false;
        } else {
                qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
                last_nbuf = true;
        }

        *mpdu_len -= (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN);

        return last_nbuf;
}

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *                     multiple nbufs.
 * @soc: DP SOC handle
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
        qdf_nbuf_t parent, frag_list, next = NULL;
        uint16_t frag_list_len = 0;
        uint16_t mpdu_len;
        bool last_nbuf;

        /*
         * Use the msdu length from the REO entry descriptor, since there
         * are cases where the RX PKT TLV is corrupted while the msdu_len
         * from the REO descriptor is correct for non-raw RX scatter msdus.
         */
        mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
        /*
         * this is a case where the complete msdu fits in one single nbuf.
         * in this case HW sets both start and end bit and we only need to
         * reset these bits for RAW mode simulator to decap the pkt
         */
        if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
            qdf_nbuf_is_rx_chfrag_end(nbuf)) {
                qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
                qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
                return nbuf;
        }

        /*
         * This is a case where we have multiple msdus (A-MSDU) spread across
         * multiple nbufs. here we create a fraglist out of these nbufs.
         *
         * the moment we encounter a nbuf with continuation bit set we
         * know for sure we have an MSDU which is spread across multiple
         * nbufs. We loop through and reap nbufs till we reach last nbuf.
         */
        parent = nbuf;
        frag_list = nbuf->next;
        nbuf = nbuf->next;

        /*
         * set the start bit in the first nbuf we encounter with continuation
         * bit set. This has the proper mpdu length set as it is the first
         * msdu of the mpdu. this becomes the parent nbuf and the subsequent
         * nbufs will form the frag_list of the parent nbuf.
         */
        qdf_nbuf_set_rx_chfrag_start(parent, 1);
        last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

        /*
         * HW issue: MSDU cont bit is set but reported MPDU length can fit
         * into a single buffer
         *
         * Increment error stats and avoid SG list creation
         */
        if (last_nbuf) {
                DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
                qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
                return parent;
        }

        /*
         * this is where we set the length of the fragments which are
         * associated to the parent nbuf. We iterate through the frag_list
         * till we hit the last_nbuf of the list.
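         * For instance, assuming 2 KB RX buffers, a 5 KB MSDU arrives as
         * the parent plus two or three continuation buffers (the exact
         * count depends on RX_PKT_TLVS_LEN), each trimmed here before
         * being linked as fragments below.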
         */
        do {
                last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
                qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
                frag_list_len += qdf_nbuf_len(nbuf);

                if (last_nbuf) {
                        next = nbuf->next;
                        nbuf->next = NULL;
                        break;
                }

                nbuf = nbuf->next;
        } while (!last_nbuf);

        qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
        qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
        parent->next = next;

        qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
        return parent;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef QCA_PEER_EXT_STATS
/*
 * dp_rx_compute_tid_delay() - Compute per TID delay stats
 * @stats: per TID delay stats
 * @nbuf: NBuffer
 *
 * Return: Void
 */
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
                             qdf_nbuf_t nbuf)
{
        struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
        uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);

        dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
}
#endif /* QCA_PEER_EXT_STATS */

/**
 * dp_rx_compute_delay() - Compute and fill in all timestamps
 *                         to pass in correct fields
 * @vdev: vdev handle
 * @nbuf: network buffer
 *
 * Return: none
 */
void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
        uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
        int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
        uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
        uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
        uint32_t interframe_delay =
                (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);

        dp_update_delay_stats(vdev->pdev, to_stack, tid,
                              CDP_DELAY_STATS_REAP_STACK, ring_id);
        /*
         * Update interframe delay stats calculated at deliver_data_ol point.
         * Value of vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame,
         * so the interframe delay will not be calculated correctly for the
         * 1st frame. On the other hand, this avoids an extra per-packet
         * check of vdev->prev_rx_deliver_tstamp.
         */
        dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
                              CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
        vdev->prev_rx_deliver_tstamp = current_ts;
}

/**
 * dp_rx_drop_nbuf_list() - drop an nbuf list
 * @pdev: dp pdev reference
 * @buf_list: buffer list to be dropped
 *
 * Return: int (number of bufs dropped)
 */
static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
                                       qdf_nbuf_t buf_list)
{
        struct cdp_tid_rx_stats *stats = NULL;
        uint8_t tid = 0, ring_id = 0;
        int num_dropped = 0;
        qdf_nbuf_t buf, next_buf;

        buf = buf_list;
        while (buf) {
                ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
                next_buf = qdf_nbuf_queue_next(buf);
                tid = qdf_nbuf_get_tid_val(buf);
                if (qdf_likely(pdev)) {
                        stats = &pdev->stats.tid_stats.
                                        tid_rx_stats[ring_id][tid];
                        stats->fail_cnt[INVALID_PEER_VDEV]++;
                        stats->delivered_to_stack--;
                }
                qdf_nbuf_free(buf);
                buf = next_buf;
                num_dropped++;
        }

        return num_dropped;
}

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_rx_is_list_ready() - Make different lists for 4-address
 *                         and 3-address frames
 * @nbuf_head: skb list head
 * @vdev: vdev
 * @peer: peer
 * @peer_id: peer id of new received frame
 * @vdev_id: vdev_id of new received frame
 *
 * Return: true if peer_ids are different.
 */
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
                    struct dp_vdev *vdev,
                    struct dp_peer *peer,
                    uint16_t peer_id,
                    uint8_t vdev_id)
{
        if (nbuf_head && peer && (peer->peer_id != peer_id))
                return true;

        return false;
}

/**
 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
 * @soc: core txrx main context
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 *
 * Return: true if packet is delivered to netdev per STA.
 */
static inline bool
dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
                           struct dp_peer *peer, qdf_nbuf_t nbuf_head)
{
        /*
         * When extended WDS is disabled, frames are sent to the AP netdevice.
         */
        if (qdf_likely(!vdev->wds_ext_enabled))
                return false;

        /*
         * There can be 2 cases:
         * 1. Send frame to parent netdev if it is not for netdev per STA
         * 2. If frame is meant for netdev per STA:
         *    a. Send frame to appropriate netdev using registered fp.
         *    b. If fp is NULL, drop the frames.
         */
        if (!peer->wds_ext.init)
                return false;

        if (peer->osif_rx)
                peer->osif_rx(peer->wds_ext.osif_peer, nbuf_head);
        else
                dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);

        return true;
}
#else
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
                    struct dp_vdev *vdev,
                    struct dp_peer *peer,
                    uint16_t peer_id,
                    uint8_t vdev_id)
{
        if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
                return true;

        return false;
}

static inline bool
dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
                           struct dp_peer *peer, qdf_nbuf_t nbuf_head)
{
        return false;
}
#endif

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: flag to drop frames or forward to net stack
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
        struct dp_peer_cached_bufq *bufqi;
        struct dp_rx_cached_buf *cache_buf = NULL;
        ol_txrx_rx_fp data_rx = NULL;
        int num_buff_elem;
        QDF_STATUS status;

        if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
                qdf_atomic_dec(&peer->flush_in_progress);
                return;
        }

        qdf_spin_lock_bh(&peer->peer_info_lock);
        if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
                data_rx = peer->vdev->osif_rx;
        else
                drop = true;
        qdf_spin_unlock_bh(&peer->peer_info_lock);

        bufqi = &peer->bufq_info;

        qdf_spin_lock_bh(&bufqi->bufq_lock);
        qdf_list_remove_front(&bufqi->cached_bufq,
                              (qdf_list_node_t **)&cache_buf);
        while (cache_buf) {
                num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
                                                        cache_buf->buf);
                bufqi->entries -= num_buff_elem;
                qdf_spin_unlock_bh(&bufqi->bufq_lock);
                if (drop) {
                        bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
                                                              cache_buf->buf);
                } else {
                        /* Flush the cached frames to OSIF DEV */
                        status = data_rx(peer->vdev->osif_vdev,
                                         cache_buf->buf);
                        if (status != QDF_STATUS_SUCCESS)
                                bufqi->dropped = dp_rx_drop_nbuf_list(
                                                        peer->vdev->pdev,
                                                        cache_buf->buf);
                }
                qdf_mem_free(cache_buf);
                cache_buf = NULL;
                qdf_spin_lock_bh(&bufqi->bufq_lock);
                qdf_list_remove_front(&bufqi->cached_bufq,
                                      (qdf_list_node_t **)&cache_buf);
        }
        qdf_spin_unlock_bh(&bufqi->bufq_lock);
        qdf_atomic_dec(&peer->flush_in_progress);
}

/**
 * dp_rx_enqueue_rx() - cache rx frames
 * @peer: peer
 * @rx_buf_list: cache buffer list
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
{
        struct dp_rx_cached_buf *cache_buf;
        struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
        int num_buff_elem;

        dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
                    bufqi->dropped);
        if (!peer->valid) {
                bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
                                                      rx_buf_list);
                return QDF_STATUS_E_INVAL;
        }

        qdf_spin_lock_bh(&bufqi->bufq_lock);
        if (bufqi->entries >= bufqi->thresh) {
                bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
                                                      rx_buf_list);
                qdf_spin_unlock_bh(&bufqi->bufq_lock);
                return QDF_STATUS_E_RESOURCES;
        }
        qdf_spin_unlock_bh(&bufqi->bufq_lock);

        num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
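
        /*
         * An atomic allocation is used here, presumably because this path
         * can run in softirq context where sleeping is not allowed.
         */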
        cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
        if (!cache_buf) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "Failed to allocate buf to cache rx frames");
                bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
                                                      rx_buf_list);
                return QDF_STATUS_E_NOMEM;
        }

        cache_buf->buf = rx_buf_list;

        qdf_spin_lock_bh(&bufqi->bufq_lock);
        qdf_list_insert_back(&bufqi->cached_bufq,
                             &cache_buf->node);
        bufqi->entries += num_buff_elem;
        qdf_spin_unlock_bh(&bufqi->bufq_lock);

        return QDF_STATUS_SUCCESS;
}

static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
        return true;
}
#else
static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
        return false;
}

static inline QDF_STATUS
dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
{
        return QDF_STATUS_SUCCESS;
}
#endif

#ifndef DELIVERY_TO_STACK_STATUS_CHECK
/**
 * dp_rx_check_delivery_to_stack() - Deliver pkts to network
 *                                   stack using the appropriate
 *                                   call back functions.
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
                                          struct dp_vdev *vdev,
                                          struct dp_peer *peer,
                                          qdf_nbuf_t nbuf_head)
{
        if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
                                                    peer, nbuf_head)))
                return;

        /* Function pointer initialized only when FISA is enabled */
        if (vdev->osif_fisa_rx)
                /* on failure send it via regular path */
                vdev->osif_fisa_rx(soc, vdev, nbuf_head);
        else
                vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}
#else
/**
 * dp_rx_check_delivery_to_stack() - Deliver pkts to network
 *                                   stack using the appropriate
 *                                   call back functions.
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 *
 * Check the return status of the call back function and drop
 * the packets if the return status indicates a failure.
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
                                          struct dp_vdev *vdev,
                                          struct dp_peer *peer,
                                          qdf_nbuf_t nbuf_head)
{
        int num_nbuf = 0;
        QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;

        /* Function pointer initialized only when FISA is enabled */
        if (vdev->osif_fisa_rx)
                /* on failure send it via regular path */
                ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
        else if (vdev->osif_rx)
                ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);

        if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
                num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
                DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
                if (peer)
                        DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
        }
}
#endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */

void dp_rx_deliver_to_stack(struct dp_soc *soc,
                            struct dp_vdev *vdev,
                            struct dp_peer *peer,
                            qdf_nbuf_t nbuf_head,
                            qdf_nbuf_t nbuf_tail)
{
        int num_nbuf = 0;

        if (qdf_unlikely(!vdev || vdev->delete.pending)) {
                num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
                /*
                 * This is a special case where vdev is invalid,
                 * so we cannot know the pdev to which this packet
                 * belonged. Hence we update the soc rx error stats.
                 */
                DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
                return;
        }

        /*
         * highly unlikely to have a vdev without a registered rx
         * callback function. if so let us free the nbuf_list.
         */
        if (qdf_unlikely(!vdev->osif_rx)) {
                if (peer && dp_rx_is_peer_cache_bufq_supported()) {
                        dp_rx_enqueue_rx(peer, nbuf_head);
                } else {
                        num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
                                                        nbuf_head);
                        DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
                }
                return;
        }

        if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
            (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
                vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
                                         &nbuf_tail, peer->mac_addr.raw);
        }

        dp_rx_check_delivery_to_stack(soc, vdev, peer, nbuf_head);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: pdev handle
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ip_summed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
                                       qdf_nbuf_t nbuf,
                                       uint8_t *rx_tlv_hdr)
{
        qdf_nbuf_rx_cksum_t cksum = {0};
        bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
        bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);

        if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
                cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
                qdf_nbuf_set_rx_cksum(nbuf, &cksum);
        } else {
                DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
                DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
        }
}

#ifdef VDEV_PEER_PROTOCOL_COUNT
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \
{ \
        qdf_nbuf_t nbuf_local; \
        struct dp_peer *peer_local; \
        struct dp_vdev *vdev_local = vdev_hdl; \
        do { \
                if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
                        break; \
                nbuf_local = nbuf; \
                peer_local = peer; \
                if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
                        break; \
                else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
                        break; \
                dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
                                                       (nbuf_local), \
                                                       (peer_local), 0, 1); \
        } while (0); \
}
#else
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer)
#endif

/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @peer: pointer to the peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 * @tid_stats: per tid rx stats.
 *
 * update all the per msdu stats for that nbuf.
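 * Both the per-peer DP_STATS counters and the per-ring, per-TID
 * tid_stats are updated; the rate/NSS/bandwidth counters below are
 * only touched when soc->process_rx_status is set.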
 *
 * Return: void
 */
static void dp_rx_msdu_stats_update(struct dp_soc *soc,
                                    qdf_nbuf_t nbuf,
                                    uint8_t *rx_tlv_hdr,
                                    struct dp_peer *peer,
                                    uint8_t ring_id,
                                    struct cdp_tid_rx_stats *tid_stats)
{
        bool is_ampdu, is_not_amsdu;
        uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
        struct dp_vdev *vdev = peer->vdev;
        qdf_ether_header_t *eh;
        uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

        dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
        is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
                        qdf_nbuf_is_rx_chfrag_end(nbuf);

        DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
        DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
        DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
        DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));

        tid_stats->msdu_cnt++;
        if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
                         (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
                eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
                DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
                tid_stats->mcast_msdu_cnt++;
                if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
                        DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
                        tid_stats->bcast_msdu_cnt++;
                }
        }

        /*
         * currently we can return from here as we have similar stats
         * updated at per ppdu level instead of msdu level
         */
        if (!soc->process_rx_status)
                return;

        is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
        DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
        DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

        sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
        mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
        tid = qdf_nbuf_get_tid_val(nbuf);
        bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
        reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
                                                              rx_tlv_hdr);
        nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
        pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);

        DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
                      ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
        DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
                      ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
        DP_STATS_INC(peer, rx.bw[bw], 1);
        /*
         * only if nss > 0 and pkt_type is 11N/AC/AX,
         * then increment the [nss - 1] entry of the counter array.
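         * For example, a 2x2 HT (DOT11_N) frame increments rx.nss[1];
         * legacy 11a/11b frames never reach this counter since the
         * pkt_type check excludes them.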
         */
        if (nss > 0 && (pkt_type == DOT11_N ||
                        pkt_type == DOT11_AC ||
                        pkt_type == DOT11_AX))
                DP_STATS_INC(peer, rx.nss[nss - 1], 1);

        DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
        DP_STATS_INCC(peer, rx.err.mic_err, 1,
                      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
        DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
                      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));

        DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
        DP_STATS_INC(peer, rx.reception_type[reception_type], 1);

        DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
                      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
        DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
                      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
        DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
                      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
        DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
                      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
        DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
                      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
        DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
                      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
        DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
                      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
        DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
                      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
        DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
                      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
        DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
                      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));

        if ((soc->process_rx_status) &&
            hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
                if (!vdev->pdev)
                        return;

                dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS,
                                     vdev->pdev->soc,
                                     &peer->stats, peer->peer_id,
                                     UPDATE_PEER_STATS,
                                     vdev->pdev->pdev_id);
#endif
        }
}

static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
                                      uint8_t *rx_tlv_hdr,
                                      qdf_nbuf_t nbuf,
                                      struct hal_rx_msdu_metadata msdu_info)
{
        if ((qdf_nbuf_is_sa_valid(nbuf) &&
             (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
            (!qdf_nbuf_is_da_mcbc(nbuf) &&
             qdf_nbuf_is_da_valid(nbuf) &&
             (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
                return false;

        return true;
}

#ifndef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
                           struct dp_vdev *vdev,
                           struct dp_peer *peer)
{
        return 1;
}
#endif

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_nbuf_sanity_check() - Add sanity check to catch REO rx_desc
 *                                  paddr corruption
 * @ring_desc: REO ring descriptor
 * @rx_desc: Rx descriptor
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
                                        struct dp_rx_desc *rx_desc)
{
        struct hal_buf_info hbi;

        hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
        /* Sanity check for possible buffer paddr corruption */
        if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
                return QDF_STATUS_SUCCESS;

        return QDF_STATUS_E_FAILURE;
}
#else
static inline
QDF_STATUS
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	bool limit_hit = false;
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	limit_hit = (num_reaped >= cfg->rx_reap_loop_pkt_limit);

	if (limit_hit)
		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
}

#else
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	return false;
}

static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}

#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */

#ifdef DP_RX_PKT_NO_PEER_DELIVER
/**
 * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
 *				      no corresponding peer is found
 * @soc: core txrx main context
 * @nbuf: pkt skb pointer
 *
 * This function tries to deliver certain special RX frames to the stack
 * even when no matching peer is found. For instance, in the LFR case some
 * EAPOL data may reach the host before the peer_map is done.
 *
 * Return: None
 */
static
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_vdev *vdev = NULL;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t pkt_len = 0;
	uint8_t *rx_tlv_hdr;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
			      FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peers)
		goto deliver_fail;

	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
		goto deliver_fail;

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
		goto deliver_fail;

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;

	qdf_nbuf_set_pktlen(nbuf, pkt_len);
	qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN + l2_hdr_offset);

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		qdf_nbuf_set_exc_frame(nbuf, 1);
		if (QDF_STATUS_SUCCESS !=
		    vdev->osif_rx(vdev->osif_vdev, nbuf))
			goto deliver_fail;
		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
		return;
	}

deliver_fail:
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	qdf_nbuf_free(nbuf);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
}
#else
static inline
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	qdf_nbuf_free(nbuf);
}
#endif
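
/*
 * Reading aid (illustrative only): the frame_mask composed in
 * dp_rx_deliver_to_stack_no_peer() decides which no-peer frames survive.
 * A hypothetical caller that only wants EAPOL delivered would narrow it:
 *
 *	uint32_t frame_mask = FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_is_special_frame(nbuf, frame_mask))
 *		qdf_nbuf_set_exc_frame(nbuf, 1);
 */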
/**
 * dp_rx_srng_get_num_pending() - get number of pending entries
 * @hal_soc: hal soc opaque pointer
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
 * @num_entries: number of entries in the hal_ring.
 * @near_full: pointer to a boolean. This is set if ring is near full.
 *
 * The function returns the number of entries in a destination ring which are
 * yet to be reaped. The function also checks if the ring is near full.
 * If more than half of the ring needs to be reaped, the ring is considered
 * approaching full.
 * The function uses hal_srng_dst_num_valid_locked() to get the number of
 * valid entries. It should not be called within a SRNG lock. The HW pointer
 * value is synced into cached_hp.
 *
 * Return: Number of pending entries if any
 */
static
uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full)
{
	uint32_t num_pending = 0;

	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
						    hal_ring_hdl,
						    true);

	if (num_entries && (num_pending >= num_entries >> 1))
		*near_full = true;
	else
		*near_full = false;

	return num_pending;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef WLAN_SUPPORT_RX_FISA
void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
	qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
}

/**
 * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
 * @nbuf: pkt skb pointer
 * @l3_padding: l3 padding
 *
 * Return: None
 */
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
}
#else
void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
}

static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef DP_RX_DROP_RAW_FRM
/**
 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
 * @nbuf: pkt skb pointer
 *
 * Return: true - raw frame, dropped
 *	   false - not raw frame, do nothing
 */
static inline
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
{
	if (qdf_nbuf_is_raw_frame(nbuf)) {
		qdf_nbuf_free(nbuf);
		return true;
	}

	return false;
}
#else
static inline
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
{
	return false;
}
#endif
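
/*
 * Usage sketch (condensed and illustrative only): the end-of-loop check in
 * dp_rx_process() re-enters its reap loop based on
 * dp_rx_srng_get_num_pending(); "near full" means more than half of
 * num_entries is still unreaped:
 *
 *	num_pending = dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl,
 *						 num_entries, &near_full);
 *	if (num_pending &&
 *	    (!hif_exec_should_yield(scn, intr_id) || near_full))
 *		goto more_data;
 */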
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
 * @soc: Datapath soc structure
 * @ring_num: REO ring number
 * @ring_desc: REO ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
	struct dp_buf_info_record *record;
	uint8_t rbm;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
		return;

	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
	rbm = hal_rx_ret_buf_manager_get(ring_desc);

	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
					DP_RX_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_ring_history[ring_num]->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = hbi.paddr;
	record->hbi.sw_cookie = hbi.sw_cookie;
	record->hbi.rbm = rbm;
}
#else
static inline void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
}
#endif

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_rx_update_stats() - Update soc level rx packet count
 * @soc: DP soc handle
 * @nbuf: nbuf received
 *
 * Return: none
 */
static inline void dp_rx_update_stats(struct dp_soc *soc,
				      qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.ingress, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
}
#else
static inline void dp_rx_update_stats(struct dp_soc *soc,
				      qdf_nbuf_t nbuf)
{
}
#endif

#ifdef WLAN_FEATURE_PKT_CAPTURE_LITHIUM
/**
 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
 * @soc : dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @is_offload: rx offload flag
 * @netbuf: Buffer pointer
 *
 * This function is used to deliver rx packet to packet capture
 */
void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
				  uint16_t peer_id, uint32_t is_offload,
				  qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
			     peer_id, is_offload, pdev->pdev_id);
}

void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint32_t is_offload)
{
	uint16_t msdu_len = 0;
	uint16_t peer_id, vdev_id;
	uint32_t pkt_len = 0;
	uint8_t *rx_tlv_hdr;
	uint32_t l2_hdr_offset = 0;
	struct hal_rx_msdu_metadata msdu_metadata;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad +
		  RX_PKT_TLVS_LEN;
	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	qdf_nbuf_set_pktlen(nbuf, pkt_len);
	dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad);

	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, nbuf,
			     HTT_INVALID_VDEV, is_offload, 0);
}

#endif

/**
 * dp_rx_process() - Brain of the Rx processing functionality
 *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number
(0, 1, 2 or 3) of the reo ring. 2253 * @quota: No. of units (packets) that can be serviced in one shot. 2254 * 2255 * This function implements the core of Rx functionality. This is 2256 * expected to handle only non-error frames. 2257 * 2258 * Return: uint32_t: No. of elements processed 2259 */ 2260 uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl, 2261 uint8_t reo_ring_num, uint32_t quota) 2262 { 2263 hal_ring_desc_t ring_desc; 2264 hal_soc_handle_t hal_soc; 2265 struct dp_rx_desc *rx_desc = NULL; 2266 qdf_nbuf_t nbuf, next; 2267 bool near_full; 2268 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT]; 2269 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT]; 2270 uint32_t num_pending; 2271 uint32_t rx_bufs_used = 0, rx_buf_cookie; 2272 uint16_t msdu_len = 0; 2273 uint16_t peer_id; 2274 uint8_t vdev_id; 2275 struct dp_peer *peer; 2276 struct dp_vdev *vdev; 2277 uint32_t pkt_len = 0; 2278 struct hal_rx_mpdu_desc_info mpdu_desc_info; 2279 struct hal_rx_msdu_desc_info msdu_desc_info; 2280 enum hal_reo_error_status error; 2281 uint32_t peer_mdata; 2282 uint8_t *rx_tlv_hdr; 2283 uint32_t rx_bufs_reaped[MAX_PDEV_CNT]; 2284 uint8_t mac_id = 0; 2285 struct dp_pdev *rx_pdev; 2286 struct dp_srng *dp_rxdma_srng; 2287 struct rx_desc_pool *rx_desc_pool; 2288 struct dp_soc *soc = int_ctx->soc; 2289 uint8_t ring_id = 0; 2290 uint8_t core_id = 0; 2291 struct cdp_tid_rx_stats *tid_stats; 2292 qdf_nbuf_t nbuf_head; 2293 qdf_nbuf_t nbuf_tail; 2294 qdf_nbuf_t deliver_list_head; 2295 qdf_nbuf_t deliver_list_tail; 2296 uint32_t num_rx_bufs_reaped = 0; 2297 uint32_t intr_id; 2298 struct hif_opaque_softc *scn; 2299 int32_t tid = 0; 2300 bool is_prev_msdu_last = true; 2301 uint32_t num_entries_avail = 0; 2302 uint32_t rx_ol_pkt_cnt = 0; 2303 uint32_t num_entries = 0; 2304 struct hal_rx_msdu_metadata msdu_metadata; 2305 QDF_STATUS status; 2306 qdf_nbuf_t ebuf_head; 2307 qdf_nbuf_t ebuf_tail; 2308 uint8_t pkt_capture_offload = 0; 2309 2310 DP_HIST_INIT(); 2311 2312 qdf_assert_always(soc && hal_ring_hdl); 2313 hal_soc = soc->hal_soc; 2314 qdf_assert_always(hal_soc); 2315 2316 scn = soc->hif_handle; 2317 hif_pm_runtime_mark_dp_rx_busy(scn); 2318 intr_id = int_ctx->dp_intr_id; 2319 num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl); 2320 2321 more_data: 2322 /* reset local variables here to be re-used in the function */ 2323 nbuf_head = NULL; 2324 nbuf_tail = NULL; 2325 deliver_list_head = NULL; 2326 deliver_list_tail = NULL; 2327 peer = NULL; 2328 vdev = NULL; 2329 num_rx_bufs_reaped = 0; 2330 ebuf_head = NULL; 2331 ebuf_tail = NULL; 2332 2333 qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped)); 2334 qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info)); 2335 qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info)); 2336 qdf_mem_zero(head, sizeof(head)); 2337 qdf_mem_zero(tail, sizeof(tail)); 2338 2339 if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 2340 2341 /* 2342 * Need API to convert from hal_ring pointer to 2343 * Ring Type / Ring Id combo 2344 */ 2345 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 2346 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2347 FL("HAL RING Access Failed -- %pK"), hal_ring_hdl); 2348 goto done; 2349 } 2350 2351 /* 2352 * start reaping the buffers from reo ring and queue 2353 * them in per vdev queue. 2354 * Process the received pkts in a different per vdev loop. 
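	 * (Two-phase split: the reap loop below only pops ring descriptors
	 * and links their nbufs into a global list so that SRNG access stays
	 * short; the per-vdev loop that follows the replenish step does the
	 * TLV parsing, peer lookup and batched delivery.)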
	 */
	while (qdf_likely(quota &&
			  (ring_desc = hal_srng_dst_peek(hal_soc,
							 hal_ring_hdl)))) {

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		ring_id = hal_srng_ring_id_get(hal_ring_hdl);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(rx_desc->unmapped);
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc,
							rx_desc->nbuf,
							RX_DATA_BUFFER_SIZE,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev,
							rx_desc->nbuf,
							QDF_DMA_FROM_DEVICE,
							RX_DATA_BUFFER_SIZE);
				rx_desc->unmapped = 1;
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
							&head[rx_desc->pool_id],
							&tail[rx_desc->pool_id],
							rx_desc);
			}
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case the host will dump the last 128 descriptors,
		 * including the software descriptor rx_desc, and assert.
2413 */ 2414 2415 if (qdf_unlikely(!rx_desc->in_use)) { 2416 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); 2417 dp_info_rl("Reaping rx_desc not in use!"); 2418 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 2419 ring_desc, rx_desc); 2420 /* ignore duplicate RX desc and continue to process */ 2421 /* Pop out the descriptor */ 2422 hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2423 continue; 2424 } 2425 2426 status = dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc); 2427 if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { 2428 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 2429 dp_info_rl("Nbuf sanity check failure!"); 2430 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 2431 ring_desc, rx_desc); 2432 rx_desc->in_err_state = 1; 2433 hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2434 continue; 2435 } 2436 2437 if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) { 2438 dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie); 2439 DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1); 2440 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 2441 ring_desc, rx_desc); 2442 } 2443 2444 /* Get MPDU DESC info */ 2445 hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); 2446 2447 /* Get MSDU DESC info */ 2448 hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info); 2449 2450 if (qdf_unlikely(msdu_desc_info.msdu_flags & 2451 HAL_MSDU_F_MSDU_CONTINUATION)) { 2452 /* previous msdu has end bit set, so current one is 2453 * the new MPDU 2454 */ 2455 if (is_prev_msdu_last) { 2456 /* Get number of entries available in HW ring */ 2457 num_entries_avail = 2458 hal_srng_dst_num_valid(hal_soc, 2459 hal_ring_hdl, 1); 2460 2461 /* For new MPDU check if we can read complete 2462 * MPDU by comparing the number of buffers 2463 * available and number of buffers needed to 2464 * reap this MPDU 2465 */ 2466 if (((msdu_desc_info.msdu_len / 2467 (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) + 2468 1)) > num_entries_avail) { 2469 DP_STATS_INC( 2470 soc, 2471 rx.msdu_scatter_wait_break, 2472 1); 2473 break; 2474 } 2475 is_prev_msdu_last = false; 2476 } 2477 2478 } 2479 2480 core_id = smp_processor_id(); 2481 DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1); 2482 2483 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT) 2484 qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1); 2485 2486 if (qdf_unlikely(mpdu_desc_info.mpdu_flags & 2487 HAL_MPDU_F_RAW_AMPDU)) 2488 qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1); 2489 2490 if (!is_prev_msdu_last && 2491 msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) 2492 is_prev_msdu_last = true; 2493 2494 /* Pop out the descriptor*/ 2495 hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2496 2497 rx_bufs_reaped[rx_desc->pool_id]++; 2498 peer_mdata = mpdu_desc_info.peer_meta_data; 2499 QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) = 2500 DP_PEER_METADATA_PEER_ID_GET(peer_mdata); 2501 QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) = 2502 DP_PEER_METADATA_VDEV_ID_GET(peer_mdata); 2503 2504 /* to indicate whether this msdu is rx offload */ 2505 pkt_capture_offload = 2506 DP_PEER_METADATA_OFFLOAD_GET(peer_mdata); 2507 2508 /* 2509 * save msdu flags first, last and continuation msdu in 2510 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and 2511 * length to nbuf->cb. This ensures the info required for 2512 * per pkt processing is always in the same cache line. 2513 * This helps in improving throughput for smaller pkt 2514 * sizes. 
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
		qdf_nbuf_set_rx_reo_dest_ind(
				rx_desc->nbuf,
				HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * Move the unmap after the scattered-msdu wait-break logic,
		 * in case a double skb unmap happens.
		 */
		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
						  rx_desc_pool->buf_size,
						  false);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		rx_desc->unmapped = 1;
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);
		/*
		 * If the continuation bit is set then we have an MSDU spread
		 * across multiple buffers; do not decrement quota till we
		 * reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);

		num_rx_bufs_reaped++;
		/*
		 * Only allow the break in the scatter case once the
		 * complete MSDU has been received.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * Continue with the next mac_id if no pkts were reaped
		 * from that pool.
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from the global queue,
	 * processed and queued back on a per-vdev basis. These nbufs
	 * are sent to the stack as and when we run out of nbufs
	 * or a new nbuf dequeued from the global queue has a different
	 * vdev when compared to the previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
			tid = qdf_nbuf_get_tid_val(nbuf);

		if (qdf_unlikely(!peer)) {
			peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_RX);
		} else if (peer && peer->peer_id != peer_id) {
			dp_peer_unref_delete(peer, DP_MOD_ID_RX);
			peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_RX);
		}

		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		if (qdf_likely(peer)) {
			vdev = peer->vdev;

			/*
			 * In encryption mode, all data packets except
			 * EAPOL frames should be dropped when the peer is not
			 * authenticated. This feature is enabled for all
			 * peers under this vdev when the peer_authorize flag
			 * is set.
			 */
			if (qdf_unlikely(vdev->peer_authorize)) {
				if (qdf_unlikely(vdev->sec_type != cdp_sec_type_none)) {
					/* Allow only EAPOL frames */
					if (qdf_unlikely(!peer->authorize &&
							 !qdf_nbuf_is_ipv4_eapol_pkt(nbuf))) {
						qdf_nbuf_free(nbuf);
						nbuf = next;
						DP_STATS_INC(soc, rx.err.peer_unauth_rx_pkt_drop, 1);
						continue;
					}
				}
			}
		} else {
			nbuf->next = NULL;
			dp_rx_deliver_to_pkt_capture_no_peer(
					soc, nbuf, pkt_capture_offload);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!vdev)) {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			continue;
		}

		/*
		 * When hlos tid override is enabled, save the tid in
		 * skb->priority.
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		rx_pdev = vdev->pdev;
		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(rx_pdev->delay_stats_flag) ||
		    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
				 soc->wlan_cfg_ctx)))
			qdf_nbuf_set_timestamp(nbuf);

		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
		tid_stats =
			&rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written.
		 */
		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
				 !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
			dp_err("MSDU DONE failure");
			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
			qdf_nbuf_free(nbuf);
			qdf_assert(0);
			nbuf = next;
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 fragmented pkts are reinjected to the REO
		 * HW block as SG pkts, and for these pkts we only
		 * need to pull the RX TLVs header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases:
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *	  nbuf_alloc, but we received an msdu which is
		 *	  2304 bytes in size; then this msdu is spread
		 *	  across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *	  across 1st nbuf and 2nd nbuf and last MSDU is
		 *	  spread across 2nd nbuf and 3rd nbuf.
		 *
		 * For these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU.
		 * Third condition:
		 * This is the most likely case; we receive 802.3 pkts
		 * decapsulated by HW, and here we need to set the pkt length.
		 */
		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
			} else {
				qdf_nbuf_free(nbuf);
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
				dp_info_rl("scatter msdu len %d, dropped",
					   msdu_len);
				nbuf = next;
				continue;
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  RX_PKT_TLVS_LEN;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad);
		}

		/*
		 * Process the frame for multipass processing.
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
								rx_tlv_hdr) ==
				  false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);
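		/*
		 * Both tags are stamped on the nbuf at this point, before
		 * the frame is appended to the deliver list below: the
		 * protocol tag comes from CCE metadata, the flow tag from
		 * FSE metadata.
		 */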
2844 /* Update the flow tag in SKB based on FSE metadata */ 2845 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); 2846 2847 dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, 2848 ring_id, tid_stats); 2849 2850 if (qdf_unlikely(vdev->mesh_vdev)) { 2851 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 2852 == QDF_STATUS_SUCCESS) { 2853 dp_rx_info("%pK: mesh pkt filtered", soc); 2854 tid_stats->fail_cnt[MESH_FILTER_DROP]++; 2855 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 2856 1); 2857 2858 qdf_nbuf_free(nbuf); 2859 nbuf = next; 2860 continue; 2861 } 2862 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); 2863 } 2864 2865 if (qdf_likely(vdev->rx_decap_type == 2866 htt_cmn_pkt_type_ethernet) && 2867 qdf_likely(!vdev->mesh_vdev)) { 2868 /* WDS Destination Address Learning */ 2869 dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf); 2870 2871 /* Due to HW issue, sometimes we see that the sa_idx 2872 * and da_idx are invalid with sa_valid and da_valid 2873 * bits set 2874 * 2875 * in this case we also see that value of 2876 * sa_sw_peer_id is set as 0 2877 * 2878 * Drop the packet if sa_idx and da_idx OOB or 2879 * sa_sw_peerid is 0 2880 */ 2881 if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf, 2882 msdu_metadata)) { 2883 qdf_nbuf_free(nbuf); 2884 nbuf = next; 2885 DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1); 2886 continue; 2887 } 2888 /* WDS Source Port Learning */ 2889 if (qdf_likely(vdev->wds_enabled)) 2890 dp_rx_wds_srcport_learn(soc, 2891 rx_tlv_hdr, 2892 peer, 2893 nbuf, 2894 msdu_metadata); 2895 2896 /* Intrabss-fwd */ 2897 if (dp_rx_check_ap_bridge(vdev)) 2898 if (dp_rx_intrabss_fwd(soc, 2899 peer, 2900 rx_tlv_hdr, 2901 nbuf, 2902 msdu_metadata)) { 2903 nbuf = next; 2904 tid_stats->intrabss_cnt++; 2905 continue; /* Get next desc */ 2906 } 2907 } 2908 2909 dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt); 2910 2911 dp_rx_update_stats(soc, nbuf); 2912 DP_RX_LIST_APPEND(deliver_list_head, 2913 deliver_list_tail, 2914 nbuf); 2915 DP_STATS_INC_PKT(peer, rx.to_stack, 1, 2916 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2917 if (qdf_unlikely(peer->in_twt)) 2918 DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1, 2919 QDF_NBUF_CB_RX_PKT_LEN(nbuf)); 2920 2921 tid_stats->delivered_to_stack++; 2922 nbuf = next; 2923 } 2924 2925 if (qdf_likely(deliver_list_head)) { 2926 if (qdf_likely(peer)) { 2927 dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id, 2928 pkt_capture_offload, 2929 deliver_list_head); 2930 if (!pkt_capture_offload) 2931 dp_rx_deliver_to_stack(soc, vdev, peer, 2932 deliver_list_head, 2933 deliver_list_tail); 2934 } 2935 else { 2936 nbuf = deliver_list_head; 2937 while (nbuf) { 2938 next = nbuf->next; 2939 nbuf->next = NULL; 2940 dp_rx_deliver_to_stack_no_peer(soc, nbuf); 2941 nbuf = next; 2942 } 2943 } 2944 } 2945 2946 if (qdf_likely(peer)) 2947 dp_peer_unref_delete(peer, DP_MOD_ID_RX); 2948 2949 if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) { 2950 if (quota) { 2951 num_pending = 2952 dp_rx_srng_get_num_pending(hal_soc, 2953 hal_ring_hdl, 2954 num_entries, 2955 &near_full); 2956 if (num_pending) { 2957 DP_STATS_INC(soc, rx.hp_oos2, 1); 2958 2959 if (!hif_exec_should_yield(scn, intr_id)) 2960 goto more_data; 2961 2962 if (qdf_unlikely(near_full)) { 2963 DP_STATS_INC(soc, rx.near_full, 1); 2964 goto more_data; 2965 } 2966 } 2967 } 2968 2969 if (vdev && vdev->osif_fisa_flush) 2970 vdev->osif_fisa_flush(soc, reo_ring_num); 2971 2972 if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) { 2973 vdev->osif_gro_flush(vdev->osif_vdev, 2974 reo_ring_num); 2975 } 2976 } 2977 2978 /* Update 
histogram statistics by looping through pdev's */ 2979 DP_RX_HIST_STATS_PER_PDEV(); 2980 2981 return rx_bufs_used; /* Assume no scale factor for now */ 2982 } 2983 2984 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2985 2986 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) 2987 { 2988 QDF_STATUS ret; 2989 2990 if (vdev->osif_rx_flush) { 2991 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); 2992 if (!QDF_IS_STATUS_SUCCESS(ret)) { 2993 dp_err("Failed to flush rx pkts for vdev %d\n", 2994 vdev->vdev_id); 2995 return ret; 2996 } 2997 } 2998 2999 return QDF_STATUS_SUCCESS; 3000 } 3001 3002 static QDF_STATUS 3003 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, 3004 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t, 3005 struct dp_pdev *dp_pdev, 3006 struct rx_desc_pool *rx_desc_pool) 3007 { 3008 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 3009 3010 (nbuf_frag_info_t->virt_addr).nbuf = 3011 qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, 3012 RX_BUFFER_RESERVATION, 3013 rx_desc_pool->buf_alignment, FALSE); 3014 if (!((nbuf_frag_info_t->virt_addr).nbuf)) { 3015 dp_err("nbuf alloc failed"); 3016 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 3017 return ret; 3018 } 3019 3020 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, 3021 (nbuf_frag_info_t->virt_addr).nbuf, 3022 QDF_DMA_FROM_DEVICE, 3023 rx_desc_pool->buf_size); 3024 3025 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 3026 qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf); 3027 dp_err("nbuf map failed"); 3028 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 3029 return ret; 3030 } 3031 3032 nbuf_frag_info_t->paddr = 3033 qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0); 3034 3035 ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf), 3036 &nbuf_frag_info_t->paddr, 3037 rx_desc_pool); 3038 if (ret == QDF_STATUS_E_FAILURE) { 3039 dp_err("nbuf check x86 failed"); 3040 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 3041 return ret; 3042 } 3043 3044 return QDF_STATUS_SUCCESS; 3045 } 3046 3047 QDF_STATUS 3048 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 3049 struct dp_srng *dp_rxdma_srng, 3050 struct rx_desc_pool *rx_desc_pool, 3051 uint32_t num_req_buffers) 3052 { 3053 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); 3054 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; 3055 union dp_rx_desc_list_elem_t *next; 3056 void *rxdma_ring_entry; 3057 qdf_dma_addr_t paddr; 3058 struct dp_rx_nbuf_frag_info *nf_info; 3059 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; 3060 uint32_t buffer_index, nbuf_ptrs_per_page; 3061 qdf_nbuf_t nbuf; 3062 QDF_STATUS ret; 3063 int page_idx, total_pages; 3064 union dp_rx_desc_list_elem_t *desc_list = NULL; 3065 union dp_rx_desc_list_elem_t *tail = NULL; 3066 int sync_hw_ptr = 1; 3067 uint32_t num_entries_avail; 3068 3069 if (qdf_unlikely(!rxdma_srng)) { 3070 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3071 return QDF_STATUS_E_FAILURE; 3072 } 3073 3074 dp_debug("requested %u RX buffers for driver attach", num_req_buffers); 3075 3076 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3077 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 3078 rxdma_srng, 3079 sync_hw_ptr); 3080 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3081 3082 if (!num_entries_avail) { 3083 dp_err("Num of available entries is zero, nothing to do"); 3084 return QDF_STATUS_E_NOMEM; 3085 } 3086 3087 if (num_entries_avail < num_req_buffers) 3088 num_req_buffers = num_entries_avail; 3089 3090 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, 
rx_desc_pool, 3091 num_req_buffers, &desc_list, &tail); 3092 if (!nr_descs) { 3093 dp_err("no free rx_descs in freelist"); 3094 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); 3095 return QDF_STATUS_E_NOMEM; 3096 } 3097 3098 dp_debug("got %u RX descs for driver attach", nr_descs); 3099 3100 /* 3101 * Try to allocate pointers to the nbuf one page at a time. 3102 * Take pointers that can fit in one page of memory and 3103 * iterate through the total descriptors that need to be 3104 * allocated in order of pages. Reuse the pointers that 3105 * have been allocated to fit in one page across each 3106 * iteration to index into the nbuf. 3107 */ 3108 total_pages = (nr_descs * sizeof(*nf_info)) / PAGE_SIZE; 3109 3110 /* 3111 * Add an extra page to store the remainder if any 3112 */ 3113 if ((nr_descs * sizeof(*nf_info)) % PAGE_SIZE) 3114 total_pages++; 3115 nf_info = qdf_mem_malloc(PAGE_SIZE); 3116 if (!nf_info) { 3117 dp_err("failed to allocate nbuf array"); 3118 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 3119 QDF_BUG(0); 3120 return QDF_STATUS_E_NOMEM; 3121 } 3122 nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*nf_info); 3123 3124 for (page_idx = 0; page_idx < total_pages; page_idx++) { 3125 qdf_mem_zero(nf_info, PAGE_SIZE); 3126 3127 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { 3128 /* 3129 * The last page of buffer pointers may not be required 3130 * completely based on the number of descriptors. Below 3131 * check will ensure we are allocating only the 3132 * required number of descriptors. 3133 */ 3134 if (nr_nbuf_total >= nr_descs) 3135 break; 3136 /* Flag is set while pdev rx_desc_pool initialization */ 3137 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3138 ret = dp_pdev_frag_alloc_and_map(dp_soc, 3139 &nf_info[nr_nbuf], dp_pdev, 3140 rx_desc_pool); 3141 else 3142 ret = dp_pdev_nbuf_alloc_and_map(dp_soc, 3143 &nf_info[nr_nbuf], dp_pdev, 3144 rx_desc_pool); 3145 if (QDF_IS_STATUS_ERROR(ret)) 3146 break; 3147 3148 nr_nbuf_total++; 3149 } 3150 3151 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 3152 3153 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { 3154 rxdma_ring_entry = 3155 hal_srng_src_get_next(dp_soc->hal_soc, 3156 rxdma_srng); 3157 qdf_assert_always(rxdma_ring_entry); 3158 3159 next = desc_list->next; 3160 paddr = nf_info[buffer_index].paddr; 3161 nbuf = nf_info[buffer_index].virt_addr.nbuf; 3162 3163 /* Flag is set while pdev rx_desc_pool initialization */ 3164 if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable)) 3165 dp_rx_desc_frag_prep(&desc_list->rx_desc, 3166 &nf_info[buffer_index]); 3167 else 3168 dp_rx_desc_prep(&desc_list->rx_desc, 3169 &nf_info[buffer_index]); 3170 desc_list->rx_desc.in_use = 1; 3171 dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); 3172 dp_rx_desc_update_dbg_info(&desc_list->rx_desc, 3173 __func__, 3174 RX_DESC_REPLENISHED); 3175 3176 hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr, 3177 desc_list->rx_desc.cookie, 3178 rx_desc_pool->owner); 3179 dp_ipa_handle_rx_buf_smmu_mapping( 3180 dp_soc, nbuf, 3181 rx_desc_pool->buf_size, 3182 true); 3183 3184 desc_list = next; 3185 } 3186 3187 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 3188 } 3189 3190 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); 3191 qdf_mem_free(nf_info); 3192 3193 if (!nr_nbuf_total) { 3194 dp_err("No nbuf's allocated"); 3195 QDF_BUG(0); 3196 return QDF_STATUS_E_RESOURCES; 3197 } 3198 3199 /* No need to count the number of bytes received during replenish. 
3200 * Therefore set replenish.pkts.bytes as 0. 3201 */ 3202 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); 3203 3204 return QDF_STATUS_SUCCESS; 3205 } 3206 3207 /** 3208 * dp_rx_enable_mon_dest_frag() - Enable frag processing for 3209 * monitor destination ring via frag. 3210 * 3211 * Enable this flag only for monitor destination buffer processing 3212 * if DP_RX_MON_MEM_FRAG feature is enabled. 3213 * If flag is set then frag based function will be called for alloc, 3214 * map, prep desc and free ops for desc buffer else normal nbuf based 3215 * function will be called. 3216 * 3217 * @rx_desc_pool: Rx desc pool 3218 * @is_mon_dest_desc: Is it for monitor dest buffer 3219 * 3220 * Return: None 3221 */ 3222 #ifdef DP_RX_MON_MEM_FRAG 3223 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3224 bool is_mon_dest_desc) 3225 { 3226 rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc; 3227 if (is_mon_dest_desc) 3228 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled"); 3229 } 3230 #else 3231 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 3232 bool is_mon_dest_desc) 3233 { 3234 rx_desc_pool->rx_mon_dest_frag_enable = false; 3235 if (is_mon_dest_desc) 3236 dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled"); 3237 } 3238 #endif 3239 3240 /* 3241 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor 3242 * pool 3243 * 3244 * @pdev: core txrx pdev context 3245 * 3246 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3247 * QDF_STATUS_E_NOMEM 3248 */ 3249 QDF_STATUS 3250 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) 3251 { 3252 struct dp_soc *soc = pdev->soc; 3253 uint32_t rxdma_entries; 3254 uint32_t rx_sw_desc_num; 3255 struct dp_srng *dp_rxdma_srng; 3256 struct rx_desc_pool *rx_desc_pool; 3257 uint32_t status = QDF_STATUS_SUCCESS; 3258 int mac_for_pdev; 3259 3260 mac_for_pdev = pdev->lmac_id; 3261 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 3262 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3263 soc, mac_for_pdev); 3264 return status; 3265 } 3266 3267 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3268 rxdma_entries = dp_rxdma_srng->num_entries; 3269 3270 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3271 rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3272 3273 rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE; 3274 status = dp_rx_desc_pool_alloc(soc, 3275 rx_sw_desc_num, 3276 rx_desc_pool); 3277 if (status != QDF_STATUS_SUCCESS) 3278 return status; 3279 3280 return status; 3281 } 3282 3283 /* 3284 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool 3285 * 3286 * @pdev: core txrx pdev context 3287 */ 3288 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) 3289 { 3290 int mac_for_pdev = pdev->lmac_id; 3291 struct dp_soc *soc = pdev->soc; 3292 struct rx_desc_pool *rx_desc_pool; 3293 3294 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3295 3296 dp_rx_desc_pool_free(soc, rx_desc_pool); 3297 } 3298 3299 /* 3300 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors 3301 * 3302 * @pdev: core txrx pdev context 3303 * 3304 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3305 * QDF_STATUS_E_NOMEM 3306 */ 3307 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) 3308 { 3309 int mac_for_pdev = pdev->lmac_id; 3310 struct dp_soc *soc = pdev->soc; 3311 uint32_t rxdma_entries; 3312 uint32_t rx_sw_desc_num; 3313 struct dp_srng *dp_rxdma_srng; 3314 struct rx_desc_pool *rx_desc_pool; 3315 3316 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 
3317 /** 3318 * If NSS is enabled, rx_desc_pool is already filled. 3319 * Hence, just disable desc_pool frag flag. 3320 */ 3321 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3322 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3323 3324 dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d", 3325 soc, mac_for_pdev); 3326 return QDF_STATUS_SUCCESS; 3327 } 3328 3329 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3330 if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM) 3331 return QDF_STATUS_E_NOMEM; 3332 3333 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3334 rxdma_entries = dp_rxdma_srng->num_entries; 3335 3336 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 3337 3338 rx_sw_desc_num = 3339 wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); 3340 3341 rx_desc_pool->owner = DP_WBM2SW_RBM; 3342 rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; 3343 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; 3344 /* Disable monitor dest processing via frag */ 3345 dp_rx_enable_mon_dest_frag(rx_desc_pool, false); 3346 3347 dp_rx_desc_pool_init(soc, mac_for_pdev, 3348 rx_sw_desc_num, rx_desc_pool); 3349 return QDF_STATUS_SUCCESS; 3350 } 3351 3352 /* 3353 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools 3354 * @pdev: core txrx pdev context 3355 * 3356 * This function resets the freelist of rx descriptors and destroys locks 3357 * associated with this list of descriptors. 3358 */ 3359 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) 3360 { 3361 int mac_for_pdev = pdev->lmac_id; 3362 struct dp_soc *soc = pdev->soc; 3363 struct rx_desc_pool *rx_desc_pool; 3364 3365 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3366 3367 dp_rx_desc_pool_deinit(soc, rx_desc_pool); 3368 } 3369 3370 /* 3371 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring 3372 * 3373 * @pdev: core txrx pdev context 3374 * 3375 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 3376 * QDF_STATUS_E_NOMEM 3377 */ 3378 QDF_STATUS 3379 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) 3380 { 3381 int mac_for_pdev = pdev->lmac_id; 3382 struct dp_soc *soc = pdev->soc; 3383 struct dp_srng *dp_rxdma_srng; 3384 struct rx_desc_pool *rx_desc_pool; 3385 uint32_t rxdma_entries; 3386 3387 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; 3388 rxdma_entries = dp_rxdma_srng->num_entries; 3389 3390 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3391 3392 /* Initialize RX buffer pool which will be 3393 * used during low memory conditions 3394 */ 3395 dp_rx_buffer_pool_init(soc, mac_for_pdev); 3396 3397 return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng, 3398 rx_desc_pool, rxdma_entries - 1); 3399 } 3400 3401 /* 3402 * dp_rx_pdev_buffers_free - Free nbufs (skbs) 3403 * 3404 * @pdev: core txrx pdev context 3405 */ 3406 void 3407 dp_rx_pdev_buffers_free(struct dp_pdev *pdev) 3408 { 3409 int mac_for_pdev = pdev->lmac_id; 3410 struct dp_soc *soc = pdev->soc; 3411 struct rx_desc_pool *rx_desc_pool; 3412 3413 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; 3414 3415 dp_rx_desc_nbuf_free(soc, rx_desc_pool); 3416 dp_rx_buffer_pool_deinit(soc, mac_for_pdev); 3417 } 3418 3419 #ifdef DP_RX_SPECIAL_FRAME_NEED 3420 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer, 3421 qdf_nbuf_t nbuf, uint32_t frame_mask, 3422 uint8_t *rx_tlv_hdr) 3423 { 3424 uint32_t l2_hdr_offset = 0; 3425 uint16_t msdu_len = 0; 3426 uint32_t skip_len; 3427 3428 l2_hdr_offset = 3429 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 3430 3431 if 
(qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 3432 skip_len = l2_hdr_offset; 3433 } else { 3434 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 3435 skip_len = l2_hdr_offset + RX_PKT_TLVS_LEN; 3436 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 3437 } 3438 3439 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 3440 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 3441 qdf_nbuf_pull_head(nbuf, skip_len); 3442 3443 if (dp_rx_is_special_frame(nbuf, frame_mask)) { 3444 qdf_nbuf_set_exc_frame(nbuf, 1); 3445 dp_rx_deliver_to_stack(soc, peer->vdev, peer, 3446 nbuf, NULL); 3447 return true; 3448 } 3449 3450 return false; 3451 } 3452 #endif 3453
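
/*
 * Bring-up order sketch (illustrative only, not part of the build): the
 * pdev-level Rx attach implemented above is expected to run as pool alloc,
 * then pool init, then buffer allocation/replenish; teardown mirrors it.
 * Error handling beyond the first failure is elided and the wrapper name
 * below is hypothetical.
 */
#if 0
static QDF_STATUS dp_rx_pdev_example_attach(struct dp_pdev *pdev)
{
	QDF_STATUS status;

	/* Allocate the SW rx descriptor pool for this pdev */
	status = dp_rx_pdev_desc_pool_alloc(pdev);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* Initialize the pool (owner, buf size/alignment, freelist) */
	status = dp_rx_pdev_desc_pool_init(pdev);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_rx_pdev_desc_pool_free(pdev);
		return status;
	}

	/* Allocate nbufs and replenish the RxDMA refill ring */
	return dp_rx_pdev_buffers_alloc(pdev);
}
#endif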