1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "hal_hw_headers.h" 21 #include "dp_types.h" 22 #include "dp_rx.h" 23 #include "dp_tx.h" 24 #include "dp_peer.h" 25 #include "dp_internal.h" 26 #include "hal_api.h" 27 #include "qdf_trace.h" 28 #include "qdf_nbuf.h" 29 #include "dp_rx_defrag.h" 30 #include "dp_ipa.h" 31 #include "dp_internal.h" 32 #ifdef WIFI_MONITOR_SUPPORT 33 #include "dp_htt.h" 34 #include <dp_mon.h> 35 #endif 36 #ifdef FEATURE_WDS 37 #include "dp_txrx_wds.h" 38 #endif 39 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 40 #include "qdf_net_types.h" 41 #include "dp_rx_buffer_pool.h" 42 43 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params) 44 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params) 45 #define dp_rx_err_info(params...) \ 46 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params) 47 #define dp_rx_err_info_rl(params...) \ 48 __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params) 49 #define dp_rx_err_debug(params...) 
QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params) 50 51 #ifndef QCA_HOST_MODE_WIFI_DISABLED 52 53 54 /* Max regular Rx packet routing error */ 55 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20 56 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10 57 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */ 58 59 #ifdef FEATURE_MEC 60 bool dp_rx_mcast_echo_check(struct dp_soc *soc, 61 struct dp_txrx_peer *txrx_peer, 62 uint8_t *rx_tlv_hdr, 63 qdf_nbuf_t nbuf) 64 { 65 struct dp_vdev *vdev = txrx_peer->vdev; 66 struct dp_pdev *pdev = vdev->pdev; 67 struct dp_mec_entry *mecentry = NULL; 68 struct dp_ast_entry *ase = NULL; 69 uint16_t sa_idx = 0; 70 uint8_t *data; 71 /* 72 * Multicast Echo Check is required only if vdev is STA and 73 * received pkt is a multicast/broadcast pkt. otherwise 74 * skip the MEC check. 75 */ 76 if (vdev->opmode != wlan_op_mode_sta) 77 return false; 78 if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr)) 79 return false; 80 81 data = qdf_nbuf_data(nbuf); 82 83 /* 84 * if the received pkts src mac addr matches with vdev 85 * mac address then drop the pkt as it is looped back 86 */ 87 if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE], 88 vdev->mac_addr.raw, 89 QDF_MAC_ADDR_SIZE))) 90 return true; 91 92 /* 93 * In case of qwrap isolation mode, donot drop loopback packets. 94 * In isolation mode, all packets from the wired stations need to go 95 * to rootap and loop back to reach the wireless stations and 96 * vice-versa. 
97 */ 98 if (qdf_unlikely(vdev->isolation_vdev)) 99 return false; 100 101 /* 102 * if the received pkts src mac addr matches with the 103 * wired PCs MAC addr which is behind the STA or with 104 * wireless STAs MAC addr which are behind the Repeater, 105 * then drop the pkt as it is looped back 106 */ 107 if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) { 108 sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr); 109 110 if ((sa_idx < 0) || 111 (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { 112 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 113 "invalid sa_idx: %d", sa_idx); 114 qdf_assert_always(0); 115 } 116 117 qdf_spin_lock_bh(&soc->ast_lock); 118 ase = soc->ast_table[sa_idx]; 119 120 /* 121 * this check was not needed since MEC is not dependent on AST, 122 * but if we dont have this check SON has some issues in 123 * dual backhaul scenario. in APS SON mode, client connected 124 * to RE 2G and sends multicast packets. the RE sends it to CAP 125 * over 5G backhaul. the CAP loopback it on 2G to RE. 126 * On receiving in 2G STA vap, we assume that client has roamed 127 * and kickout the client. 
128 */ 129 if (ase && (ase->peer_id != txrx_peer->peer_id)) { 130 qdf_spin_unlock_bh(&soc->ast_lock); 131 goto drop; 132 } 133 134 qdf_spin_unlock_bh(&soc->ast_lock); 135 } 136 137 qdf_spin_lock_bh(&soc->mec_lock); 138 139 mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id, 140 &data[QDF_MAC_ADDR_SIZE]); 141 if (!mecentry) { 142 qdf_spin_unlock_bh(&soc->mec_lock); 143 return false; 144 } 145 146 qdf_spin_unlock_bh(&soc->mec_lock); 147 148 drop: 149 dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT, 150 soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE])); 151 152 return true; 153 } 154 #endif 155 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 156 157 void dp_rx_link_desc_refill_duplicate_check( 158 struct dp_soc *soc, 159 struct hal_buf_info *buf_info, 160 hal_buff_addrinfo_t ring_buf_info) 161 { 162 struct hal_buf_info current_link_desc_buf_info = { 0 }; 163 164 /* do duplicate link desc address check */ 165 hal_rx_buffer_addr_info_get_paddr(ring_buf_info, 166 ¤t_link_desc_buf_info); 167 168 /* 169 * TODO - Check if the hal soc api call can be removed 170 * since the cookie is just used for print. 
171 * buffer_addr_info is the first element of ring_desc 172 */ 173 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 174 (uint32_t *)ring_buf_info, 175 ¤t_link_desc_buf_info); 176 177 if (qdf_unlikely(current_link_desc_buf_info.paddr == 178 buf_info->paddr)) { 179 dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x", 180 current_link_desc_buf_info.paddr, 181 current_link_desc_buf_info.sw_cookie); 182 DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1); 183 } 184 *buf_info = current_link_desc_buf_info; 185 } 186 187 QDF_STATUS 188 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, 189 hal_buff_addrinfo_t link_desc_addr, 190 uint8_t bm_action) 191 { 192 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; 193 hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng; 194 hal_soc_handle_t hal_soc = soc->hal_soc; 195 QDF_STATUS status = QDF_STATUS_E_FAILURE; 196 void *src_srng_desc; 197 198 if (!wbm_rel_srng) { 199 dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc); 200 return status; 201 } 202 203 /* do duplicate link desc address check */ 204 dp_rx_link_desc_refill_duplicate_check( 205 soc, 206 &soc->last_op_info.wbm_rel_link_desc, 207 link_desc_addr); 208 209 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { 210 211 /* TODO */ 212 /* 213 * Need API to convert from hal_ring pointer to 214 * Ring Type / Ring Id combo 215 */ 216 dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK", 217 soc, wbm_rel_srng); 218 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 219 goto done; 220 } 221 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); 222 if (qdf_likely(src_srng_desc)) { 223 /* Return link descriptor through WBM ring (SW2WBM)*/ 224 hal_rx_msdu_link_desc_set(hal_soc, 225 src_srng_desc, link_desc_addr, bm_action); 226 status = QDF_STATUS_SUCCESS; 227 } else { 228 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng; 229 230 DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1); 231 232 dp_info_rl("WBM Release 
Ring (Id %d) Full(Fail CNT %u)", 233 srng->ring_id, 234 soc->stats.rx.err.hal_ring_access_full_fail); 235 dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x", 236 *srng->u.src_ring.hp_addr, 237 srng->u.src_ring.reap_hp, 238 *srng->u.src_ring.tp_addr, 239 srng->u.src_ring.cached_tp); 240 QDF_BUG(0); 241 } 242 done: 243 hal_srng_access_end(hal_soc, wbm_rel_srng); 244 return status; 245 246 } 247 248 qdf_export_symbol(dp_rx_link_desc_return_by_addr); 249 250 QDF_STATUS 251 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc, 252 uint8_t bm_action) 253 { 254 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc); 255 256 return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action); 257 } 258 259 #ifndef QCA_HOST_MODE_WIFI_DISABLED 260 261 /** 262 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU 263 * 264 * @soc: core txrx main context 265 * @ring_desc: opaque pointer to the REO error ring descriptor 266 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 267 * @mac_id: mac ID 268 * @quota: No. of units (packets) that can be serviced in one shot. 269 * 270 * This function is used to drop all MSDU in an MPDU 271 * 272 * Return: uint32_t: No. 
of elements processed 273 */ 274 static uint32_t 275 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc, 276 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 277 uint8_t *mac_id, 278 uint32_t quota) 279 { 280 uint32_t rx_bufs_used = 0; 281 void *link_desc_va; 282 struct hal_buf_info buf_info; 283 struct dp_pdev *pdev; 284 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 285 int i; 286 uint8_t *rx_tlv_hdr; 287 uint32_t tid; 288 struct rx_desc_pool *rx_desc_pool; 289 struct dp_rx_desc *rx_desc; 290 /* First field in REO Dst ring Desc is buffer_addr_info */ 291 void *buf_addr_info = ring_desc; 292 struct buffer_addr_info cur_link_desc_addr_info = { 0 }; 293 struct buffer_addr_info next_link_desc_addr_info = { 0 }; 294 295 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info); 296 297 /* buffer_addr_info is the first element of ring_desc */ 298 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 299 (uint32_t *)ring_desc, 300 &buf_info); 301 302 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 303 if (!link_desc_va) { 304 dp_rx_err_debug("link desc va is null, soc %pk", soc); 305 return rx_bufs_used; 306 } 307 308 more_msdu_link_desc: 309 /* No UNMAP required -- this is "malloc_consistent" memory */ 310 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 311 &mpdu_desc_info->msdu_count); 312 313 for (i = 0; (i < mpdu_desc_info->msdu_count); i++) { 314 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 315 soc, msdu_list.sw_cookie[i]); 316 317 qdf_assert_always(rx_desc); 318 319 /* all buffers from a MSDU link link belong to same pdev */ 320 *mac_id = rx_desc->pool_id; 321 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 322 if (!pdev) { 323 dp_rx_err_debug("%pK: pdev is null for pool_id = %d", 324 soc, rx_desc->pool_id); 325 return rx_bufs_used; 326 } 327 328 if (!dp_rx_desc_check_magic(rx_desc)) { 329 dp_rx_err_err("%pK: Invalid rx_desc cookie=%d", 330 soc, msdu_list.sw_cookie[i]); 331 return rx_bufs_used; 332 } 333 334 rx_desc_pool 
= &soc->rx_desc_buf[rx_desc->pool_id]; 335 dp_ipa_rx_buf_smmu_mapping_lock(soc); 336 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf); 337 rx_desc->unmapped = 1; 338 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 339 340 rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf); 341 342 rx_bufs_used++; 343 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 344 rx_desc->rx_buf_start); 345 dp_rx_err_err("%pK: Packet received with PN error for tid :%d", 346 soc, tid); 347 348 rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf); 349 if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr)) 350 hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr); 351 352 dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, 353 rx_desc->nbuf, 354 QDF_TX_RX_STATUS_DROP, true); 355 /* Just free the buffers */ 356 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id); 357 358 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 359 &pdev->free_list_tail, rx_desc); 360 } 361 362 /* 363 * If the msdu's are spread across multiple link-descriptors, 364 * we cannot depend solely on the msdu_count(e.g., if msdu is 365 * spread across multiple buffers).Hence, it is 366 * necessary to check the next link_descriptor and release 367 * all the msdu's that are part of it. 
368 */ 369 hal_rx_get_next_msdu_link_desc_buf_addr_info( 370 link_desc_va, 371 &next_link_desc_addr_info); 372 373 if (hal_rx_is_buf_addr_info_valid( 374 &next_link_desc_addr_info)) { 375 /* Clear the next link desc info for the current link_desc */ 376 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va); 377 378 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 379 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 380 hal_rx_buffer_addr_info_get_paddr( 381 &next_link_desc_addr_info, 382 &buf_info); 383 /* buffer_addr_info is the first element of ring_desc */ 384 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 385 (uint32_t *)&next_link_desc_addr_info, 386 &buf_info); 387 cur_link_desc_addr_info = next_link_desc_addr_info; 388 buf_addr_info = &cur_link_desc_addr_info; 389 390 link_desc_va = 391 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 392 393 goto more_msdu_link_desc; 394 } 395 quota--; 396 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 397 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 398 return rx_bufs_used; 399 } 400 401 /** 402 * dp_rx_pn_error_handle() - Handles PN check errors 403 * 404 * @soc: core txrx main context 405 * @ring_desc: opaque pointer to the REO error ring descriptor 406 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 407 * @mac_id: mac ID 408 * @quota: No. of units (packets) that can be serviced in one shot. 409 * 410 * This function implements PN error handling 411 * If the peer is configured to ignore the PN check errors 412 * or if DP feels, that this frame is still OK, the frame can be 413 * re-injected back to REO to use some of the other features 414 * of REO e.g. duplicate detection/routing to other cores 415 * 416 * Return: uint32_t: No. 
of elements processed 417 */ 418 static uint32_t 419 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc, 420 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 421 uint8_t *mac_id, 422 uint32_t quota) 423 { 424 uint16_t peer_id; 425 uint32_t rx_bufs_used = 0; 426 struct dp_txrx_peer *txrx_peer; 427 bool peer_pn_policy = false; 428 dp_txrx_ref_handle txrx_ref_handle = NULL; 429 430 peer_id = dp_rx_peer_metadata_peer_id_get(soc, 431 mpdu_desc_info->peer_meta_data); 432 433 434 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, 435 &txrx_ref_handle, 436 DP_MOD_ID_RX_ERR); 437 438 if (qdf_likely(txrx_peer)) { 439 /* 440 * TODO: Check for peer specific policies & set peer_pn_policy 441 */ 442 dp_err_rl("discard rx due to PN error for peer %pK", 443 txrx_peer); 444 445 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR); 446 } 447 dp_rx_err_err("%pK: Packet received with PN error", soc); 448 449 /* No peer PN policy -- definitely drop */ 450 if (!peer_pn_policy) 451 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, 452 mpdu_desc_info, 453 mac_id, quota); 454 455 return rx_bufs_used; 456 } 457 458 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES 459 /** 460 * dp_rx_deliver_oor_frame() - deliver OOR frames to stack 461 * @soc: Datapath soc handler 462 * @txrx_peer: pointer to DP peer 463 * @nbuf: pointer to the skb of RX frame 464 * @frame_mask: the mask for special frame needed 465 * @rx_tlv_hdr: start of rx tlv header 466 * 467 * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and 468 * single nbuf is expected. 469 * 470 * return: true - nbuf has been delivered to stack, false - not. 
471 */ 472 static bool 473 dp_rx_deliver_oor_frame(struct dp_soc *soc, 474 struct dp_txrx_peer *txrx_peer, 475 qdf_nbuf_t nbuf, uint32_t frame_mask, 476 uint8_t *rx_tlv_hdr) 477 { 478 uint32_t l2_hdr_offset = 0; 479 uint16_t msdu_len = 0; 480 uint32_t skip_len; 481 482 l2_hdr_offset = 483 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 484 485 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 486 skip_len = l2_hdr_offset; 487 } else { 488 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 489 skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size; 490 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 491 } 492 493 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 494 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 495 qdf_nbuf_pull_head(nbuf, skip_len); 496 qdf_nbuf_set_exc_frame(nbuf, 1); 497 498 dp_info_rl("OOR frame, mpdu sn 0x%x", 499 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr)); 500 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL); 501 return true; 502 } 503 504 #else 505 static bool 506 dp_rx_deliver_oor_frame(struct dp_soc *soc, 507 struct dp_txrx_peer *txrx_peer, 508 qdf_nbuf_t nbuf, uint32_t frame_mask, 509 uint8_t *rx_tlv_hdr) 510 { 511 return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask, 512 rx_tlv_hdr); 513 } 514 #endif 515 516 /** 517 * dp_rx_oor_handle() - Handles the msdu which is OOR error 518 * 519 * @soc: core txrx main context 520 * @nbuf: pointer to msdu skb 521 * @peer_id: dp peer ID 522 * @rx_tlv_hdr: start of rx tlv header 523 * 524 * This function process the msdu delivered from REO2TCL 525 * ring with error type OOR 526 * 527 * Return: None 528 */ 529 static void 530 dp_rx_oor_handle(struct dp_soc *soc, 531 qdf_nbuf_t nbuf, 532 uint16_t peer_id, 533 uint8_t *rx_tlv_hdr) 534 { 535 uint32_t frame_mask = wlan_cfg_get_special_frame_cfg(soc->wlan_cfg_ctx); 536 537 struct dp_txrx_peer *txrx_peer = NULL; 538 dp_txrx_ref_handle txrx_ref_handle = NULL; 539 540 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, 541 
						   &txrx_ref_handle,
						   DP_MOD_ID_RX_ERR);
	if (!txrx_peer) {
		dp_info_rl("peer not found");
		goto free_nbuf;
	}

	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
				    rx_tlv_hdr)) {
		/* delivered to stack: release the peer ref and keep nbuf */
		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);

	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
	dp_rx_nbuf_free(nbuf);
}

/**
 * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
 *				is a monotonous increment of packet number
 *				from the previous successfully re-ordered
 *				frame.
 * @soc: Datapath SOC handle
 * @ring_desc: REO ring descriptor
 * @nbuf: Current packet
 *
 * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
 */
static inline QDF_STATUS
dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
			qdf_nbuf_t nbuf)
{
	uint64_t prev_pn, curr_pn[2];

	/* unencrypted frames carry no PN - nothing to validate */
	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
		return QDF_STATUS_SUCCESS;

	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);

	/* only the low 64 bits of the PN are compared here */
	if (curr_pn[0] > prev_pn)
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}

#ifdef WLAN_SKIP_BAR_UPDATE
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	/* build-time opt-out: count the skipped update for visibility */
	dp_info_rl("BAR update to H.W is skipped");
	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
}
#else
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	QDF_STATUS status;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame. If not Discard it.
	 * 2. If it is, get the peer id, tid, ssn
	 * 2a Do a tid update
	 */

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	/* 802.11 header starts right after the RX TLVs */
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	status = dp_rx_tid_update_wifi3(peer, tid,
					peer->rx_tid[tid].ba_win_size,
					start_seq_num,
					true);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err_rl("failed to handle bar frame update rx tid");
		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
	} else {
		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
	}
}
#endif

/**
 * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
 * @soc: Datapath SoC handle
 * @nbuf: packet being processed
 * @mpdu_desc_info: mpdu desc info for the current packet
 * @tid: tid on which the packet arrived
 * @err_status: Flag to indicate if REO encountered an error while routing this
 *		frame
 * @error_code: REO error code
 *
 * Return: None
 */
static void
_dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			uint32_t tid, uint8_t err_status, uint32_t error_code)
{
	uint16_t peer_id;
	struct dp_peer *peer;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info->peer_meta_data);
	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer)
		return;

	dp_info_rl("BAR frame: "
		" peer_id = %d"
		" tid = %u"
		" SSN = %d"
		" error status = %d",
		peer->peer_id,
		tid,
		mpdu_desc_info->mpdu_seq,
		err_status);

	if (err_status == HAL_REO_ERROR_DETECTED) {
		switch (error_code) {
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_BAR_FRAME_OOR:
			/* only 2K-jump/OOR BARs trigger an rx tid update */
			dp_rx_err_handle_bar(soc, peer, nbuf);
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			break;
		default:
			DP_STATS_INC(soc, rx.bar_frame, 1);
		}
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
}

/**
 * dp_rx_bar_frame_handle() - Function to handle err BAR frames
 * @soc: core DP main context
 * @ring_desc: Hal ring desc
 * @rx_desc: dp rx desc
 * @mpdu_desc_info: mpdu desc info
 * @err_status: error status
 * @err_code: error code
 *
 * Handle the error BAR frames received. Ensure the SOC level
 * stats are updated based on the REO error code. The BAR frames
 * are further processed by updating the Rx tids with the start
 * sequence number (SSN) and BA window size.
 * Desc is returned to the free desc list
 *
 * Return: none
 */
static void
dp_rx_bar_frame_handle(struct dp_soc *soc,
		       hal_ring_desc_t ring_desc,
		       struct dp_rx_desc *rx_desc,
		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		       uint8_t err_status,
		       uint32_t err_code)
{
	qdf_nbuf_t nbuf;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	nbuf = rx_desc->nbuf;
	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	/* unmap the buffer before reading its TLVs */
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
					rx_tlv_hdr);
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

	if (!pdev) {
		/* NOTE(review): early return here leaks neither ring desc
		 * nor nbuf handling visible at this layer, but the link
		 * desc is not returned on this path - confirm upstream.
		 */
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, rx_desc->pool_id);
		return;
	}

	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
				err_code);
	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
			      QDF_TX_RX_STATUS_DROP, true);
	dp_rx_link_desc_return(soc, ring_desc,
			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/*
 * dp_2k_jump_handle() - handle a REO 2K-jump error frame: optionally send
 * a DELBA (once per tid while a BA session is active) and then try to
 * deliver the frame to the stack if it matches the special frame mask
 * (ARP by default); otherwise drop it.
 */
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	struct dp_txrx_peer *txrx_peer;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer) {
		dp_rx_err_info_rl("%pK: peer not found", soc);
		goto free_nbuf;
	}

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer) {
		dp_rx_err_info_rl("%pK: txrx_peer not found", soc);
		goto free_nbuf;
	}

	if (tid >= DP_MAX_TIDS) {
		dp_info_rl("invalid tid");
		goto nbuf_deliver;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);

	/* only if BA session is active, allow send Delba */
	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto nbuf_deliver;
	}

	/* delba_tx_status gates re-sending while one is outstanding */
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba) {
			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
				     1);
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode,
					CDP_DELBA_2K_JUMP);
		}
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

nbuf_deliver:
	if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
	dp_rx_nbuf_free(nbuf);
}

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, pool_id);
		return false;
	}
	/*
	 * WAR- In certain types of packets if peer_id is not correct then
	 * driver may not be able find. Try finding peer by addr_2 of
	 * received MPDU
	 */
	if (wh)
		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
	if (peer) {
		/* a peer exists for addr2: the ring metadata was bogus */
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		dp_rx_nbuf_free(nbuf);

		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return true;
	}
	return false;
}
#else
/* Targets without this WAR: never consume the nbuf here */
bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}
#endif

/*
 * dp_rx_check_pkt_len() - return true (and bump the invalid-pkt-len
 * stat) when pkt_len exceeds the configured RX buffer size.
 */
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	uint16_t buf_size;

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	if (qdf_unlikely(pkt_len > buf_size)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/*
 * dp_rx_deliver_to_osif_stack() - route EAPOL frames through the control
 * port when enabled, everything else through the regular stack path.
 */
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	if (is_eapol && soc->eapol_over_control_port)
		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
	else
		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#else
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
/*
 * dp_rx_err_match_dhost() - MLO build: the destination matches if it is
 * either the vdev link MAC or the vdev MLD MAC.
 */
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE) == 0) ||
		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE) == 0));
}

#else
/* non-MLO: only the vdev link MAC can match */
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
			    QDF_MAC_ADDR_SIZE) == 0);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/*
 * dp_rx_err_drop_3addr_mcast() - drop multicast/broadcast frames received
 * in 3-address form on an STA vdev when the vdev opted in via
 * drop_3addr_mcast.
 */
bool
dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
{
	struct dp_soc *soc = vdev->pdev->soc;

	if (!vdev->drop_3addr_mcast)
		return false;

	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return true;

	return false;
}

/**
 * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
 *				    for this frame received in REO error ring.
 * @soc: Datapath SOC handle
 * @error: REO error detected or not
 * @error_code: Error code in case of REO error
 *
 * Return: true if pn check if needed in software,
 *	   false, if pn check if not needed.
 */
static inline bool
dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
			     uint32_t error_code)
{
	/* SW PN check applies only when HW delivers PN in the REO dest
	 * ring and the error is a 2K-jump / OOR (or BAR variant thereof).
	 */
	return (soc->features.pn_in_reo_dest &&
		(error == HAL_REO_ERROR_DETECTED &&
		 (hal_rx_reo_is_2k_jump(error_code) ||
		  hal_rx_reo_is_oor_error(error_code) ||
		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
}

#ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			bool first_msdu_in_mpdu_processed)
{
	if (first_msdu_in_mpdu_processed) {
		/*
		 * This is the 2nd indication of first_msdu in the same mpdu.
		 * Skip re-parsing the mdpu_desc_info and use the cached one,
		 * since this msdu is most probably from the current mpdu
		 * which is being processed
		 */
	} else {
		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
						   qdf_nbuf_data(nbuf),
						   mpdu_desc_info);
	}
}
#else
static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			bool first_msdu_in_mpdu_processed)
{
	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
					   mpdu_desc_info);
}
#endif

/**
 * dp_rx_reo_err_entry_process() - Handles for REO error entry processing
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: pointer to mpdu level description info
 * @link_desc_va: pointer to msdu_link_desc virtual address
 * @err_code: reo error code fetched from ring entry
 *
 * Function to handle msdus fetched from msdu link desc, currently
 * support REO error NULL queue, 2K jump, OOR.
1027 * 1028 * Return: msdu count processed 1029 */ 1030 static uint32_t 1031 dp_rx_reo_err_entry_process(struct dp_soc *soc, 1032 void *ring_desc, 1033 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 1034 void *link_desc_va, 1035 enum hal_reo_error_code err_code) 1036 { 1037 uint32_t rx_bufs_used = 0; 1038 struct dp_pdev *pdev; 1039 int i; 1040 uint8_t *rx_tlv_hdr_first; 1041 uint8_t *rx_tlv_hdr_last; 1042 uint32_t tid = DP_MAX_TIDS; 1043 uint16_t peer_id; 1044 struct dp_rx_desc *rx_desc; 1045 struct rx_desc_pool *rx_desc_pool; 1046 qdf_nbuf_t nbuf; 1047 qdf_nbuf_t next_nbuf; 1048 struct hal_buf_info buf_info; 1049 struct hal_rx_msdu_list msdu_list; 1050 uint16_t num_msdus; 1051 struct buffer_addr_info cur_link_desc_addr_info = { 0 }; 1052 struct buffer_addr_info next_link_desc_addr_info = { 0 }; 1053 /* First field in REO Dst ring Desc is buffer_addr_info */ 1054 void *buf_addr_info = ring_desc; 1055 qdf_nbuf_t head_nbuf = NULL; 1056 qdf_nbuf_t tail_nbuf = NULL; 1057 uint16_t msdu_processed = 0; 1058 QDF_STATUS status; 1059 bool ret, is_pn_check_needed; 1060 uint8_t rx_desc_pool_id; 1061 struct dp_txrx_peer *txrx_peer = NULL; 1062 dp_txrx_ref_handle txrx_ref_handle = NULL; 1063 hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng; 1064 bool first_msdu_in_mpdu_processed = false; 1065 bool msdu_dropped = false; 1066 uint8_t link_id = 0; 1067 1068 peer_id = dp_rx_peer_metadata_peer_id_get(soc, 1069 mpdu_desc_info->peer_meta_data); 1070 is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc, 1071 HAL_REO_ERROR_DETECTED, 1072 err_code); 1073 more_msdu_link_desc: 1074 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 1075 &num_msdus); 1076 for (i = 0; i < num_msdus; i++) { 1077 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 1078 soc, 1079 msdu_list.sw_cookie[i]); 1080 1081 if (dp_assert_always_internal_stat(rx_desc, soc, 1082 rx.err.reo_err_rx_desc_null)) 1083 continue; 1084 1085 nbuf = rx_desc->nbuf; 1086 1087 /* 1088 * this is a unlikely scenario 
where the host is reaping 1089 * a descriptor which it already reaped just a while ago 1090 * but is yet to replenish it back to HW. 1091 * In this case host will dump the last 128 descriptors 1092 * including the software descriptor rx_desc and assert. 1093 */ 1094 if (qdf_unlikely(!rx_desc->in_use) || 1095 qdf_unlikely(!nbuf)) { 1096 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); 1097 dp_info_rl("Reaping rx_desc not in use!"); 1098 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 1099 ring_desc, rx_desc); 1100 /* ignore duplicate RX desc and continue to process */ 1101 /* Pop out the descriptor */ 1102 msdu_dropped = true; 1103 continue; 1104 } 1105 1106 ret = dp_rx_desc_paddr_sanity_check(rx_desc, 1107 msdu_list.paddr[i]); 1108 if (!ret) { 1109 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 1110 rx_desc->in_err_state = 1; 1111 msdu_dropped = true; 1112 continue; 1113 } 1114 1115 rx_desc_pool_id = rx_desc->pool_id; 1116 /* all buffers from a MSDU link belong to same pdev */ 1117 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id); 1118 1119 rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id]; 1120 dp_ipa_rx_buf_smmu_mapping_lock(soc); 1121 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf); 1122 rx_desc->unmapped = 1; 1123 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 1124 1125 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len; 1126 rx_bufs_used++; 1127 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 1128 &pdev->free_list_tail, rx_desc); 1129 1130 DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf); 1131 1132 if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags & 1133 HAL_MSDU_F_MSDU_CONTINUATION)) { 1134 qdf_nbuf_set_rx_chfrag_cont(nbuf, 1); 1135 continue; 1136 } 1137 1138 if (dp_rx_buffer_pool_refill(soc, head_nbuf, 1139 rx_desc_pool_id)) { 1140 /* MSDU queued back to the pool */ 1141 msdu_dropped = true; 1142 head_nbuf = NULL; 1143 goto process_next_msdu; 1144 } 1145 1146 if (is_pn_check_needed) { 1147 if (msdu_list.msdu_info[i].msdu_flags & 1148 
HAL_MSDU_F_FIRST_MSDU_IN_MPDU) { 1149 dp_rx_err_populate_mpdu_desc_info(soc, nbuf, 1150 mpdu_desc_info, 1151 first_msdu_in_mpdu_processed); 1152 first_msdu_in_mpdu_processed = true; 1153 } else { 1154 if (!first_msdu_in_mpdu_processed) { 1155 /* 1156 * If no msdu in this mpdu was dropped 1157 * due to failed sanity checks, then 1158 * its not expected to hit this 1159 * condition. Hence we assert here. 1160 */ 1161 if (!msdu_dropped) 1162 qdf_assert_always(0); 1163 1164 /* 1165 * We do not have valid mpdu_desc_info 1166 * to process this nbuf, hence drop it. 1167 * TODO - Increment stats 1168 */ 1169 goto process_next_msdu; 1170 } 1171 /* 1172 * DO NOTHING - 1173 * Continue using the same mpdu_desc_info 1174 * details populated from the first msdu in 1175 * the mpdu. 1176 */ 1177 } 1178 1179 status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf); 1180 if (QDF_IS_STATUS_ERROR(status)) { 1181 DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail, 1182 1); 1183 goto process_next_msdu; 1184 } 1185 1186 peer_id = dp_rx_peer_metadata_peer_id_get(soc, 1187 mpdu_desc_info->peer_meta_data); 1188 1189 if (mpdu_desc_info->bar_frame) 1190 _dp_rx_bar_frame_handle(soc, nbuf, 1191 mpdu_desc_info, tid, 1192 HAL_REO_ERROR_DETECTED, 1193 err_code); 1194 } 1195 1196 rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf); 1197 rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf); 1198 1199 if (qdf_unlikely(head_nbuf != tail_nbuf)) { 1200 /* 1201 * For SG case, only the length of last skb is valid 1202 * as HW only populate the msdu_len for last msdu 1203 * in rx link descriptor, use the length from 1204 * last skb to overwrite the head skb for further 1205 * SG processing. 
1206 */ 1207 QDF_NBUF_CB_RX_PKT_LEN(head_nbuf) = 1208 QDF_NBUF_CB_RX_PKT_LEN(tail_nbuf); 1209 nbuf = dp_rx_sg_create(soc, head_nbuf); 1210 qdf_nbuf_set_is_frag(nbuf, 1); 1211 DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1); 1212 } 1213 head_nbuf = NULL; 1214 1215 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id( 1216 soc, peer_id, 1217 &txrx_ref_handle, 1218 DP_MOD_ID_RX_ERR); 1219 if (!txrx_peer) 1220 dp_info_rl("txrx_peer is null peer_id %u", 1221 peer_id); 1222 1223 dp_rx_nbuf_set_link_id_from_tlv(soc, qdf_nbuf_data(nbuf), nbuf); 1224 1225 if (pdev && pdev->link_peer_stats && 1226 txrx_peer && txrx_peer->is_mld_peer) { 1227 link_id = dp_rx_get_stats_arr_idx_from_link_id( 1228 nbuf, 1229 txrx_peer); 1230 } 1231 1232 if (txrx_peer) 1233 dp_rx_set_nbuf_band(nbuf, txrx_peer, link_id); 1234 1235 switch (err_code) { 1236 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 1237 case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET: 1238 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 1239 /* 1240 * only first msdu, mpdu start description tlv valid? 1241 * and use it for following msdu. 
1242 */ 1243 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1244 rx_tlv_hdr_last)) 1245 tid = hal_rx_mpdu_start_tid_get( 1246 soc->hal_soc, 1247 rx_tlv_hdr_first); 1248 1249 dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last, 1250 peer_id, tid); 1251 break; 1252 case HAL_REO_ERR_REGULAR_FRAME_OOR: 1253 case HAL_REO_ERR_BAR_FRAME_OOR: 1254 dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last); 1255 break; 1256 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 1257 soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf, 1258 rx_tlv_hdr_last, 1259 rx_desc_pool_id, 1260 txrx_peer, 1261 TRUE, 1262 link_id); 1263 break; 1264 default: 1265 dp_err_rl("Non-support error code %d", err_code); 1266 dp_rx_nbuf_free(nbuf); 1267 } 1268 1269 if (txrx_peer) 1270 dp_txrx_peer_unref_delete(txrx_ref_handle, 1271 DP_MOD_ID_RX_ERR); 1272 process_next_msdu: 1273 nbuf = head_nbuf; 1274 while (nbuf) { 1275 next_nbuf = qdf_nbuf_next(nbuf); 1276 dp_rx_nbuf_free(nbuf); 1277 nbuf = next_nbuf; 1278 } 1279 msdu_processed++; 1280 head_nbuf = NULL; 1281 tail_nbuf = NULL; 1282 } 1283 1284 /* 1285 * If the msdu's are spread across multiple link-descriptors, 1286 * we cannot depend solely on the msdu_count(e.g., if msdu is 1287 * spread across multiple buffers).Hence, it is 1288 * necessary to check the next link_descriptor and release 1289 * all the msdu's that are part of it. 
1290 */ 1291 hal_rx_get_next_msdu_link_desc_buf_addr_info( 1292 link_desc_va, 1293 &next_link_desc_addr_info); 1294 1295 if (hal_rx_is_buf_addr_info_valid( 1296 &next_link_desc_addr_info)) { 1297 /* Clear the next link desc info for the current link_desc */ 1298 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va); 1299 dp_rx_link_desc_return_by_addr( 1300 soc, 1301 buf_addr_info, 1302 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 1303 1304 hal_rx_buffer_addr_info_get_paddr( 1305 &next_link_desc_addr_info, 1306 &buf_info); 1307 /* buffer_addr_info is the first element of ring_desc */ 1308 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 1309 (uint32_t *)&next_link_desc_addr_info, 1310 &buf_info); 1311 link_desc_va = 1312 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 1313 cur_link_desc_addr_info = next_link_desc_addr_info; 1314 buf_addr_info = &cur_link_desc_addr_info; 1315 1316 goto more_msdu_link_desc; 1317 } 1318 1319 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 1320 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 1321 if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count)) 1322 DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1); 1323 1324 return rx_bufs_used; 1325 } 1326 1327 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1328 1329 void 1330 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, 1331 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 1332 uint8_t err_code, uint8_t mac_id, uint8_t link_id) 1333 { 1334 uint32_t pkt_len, l2_hdr_offset; 1335 uint16_t msdu_len; 1336 struct dp_vdev *vdev; 1337 qdf_ether_header_t *eh; 1338 bool is_broadcast; 1339 1340 /* 1341 * Check if DMA completed -- msdu_done is the last bit 1342 * to be written 1343 */ 1344 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1345 1346 dp_err_rl("MSDU DONE failure"); 1347 1348 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1349 QDF_TRACE_LEVEL_INFO); 1350 qdf_assert(0); 1351 } 1352 1353 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, 1354 rx_tlv_hdr); 1355 msdu_len = 
hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1356 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 1357 1358 if (dp_rx_check_pkt_len(soc, pkt_len)) { 1359 /* Drop & free packet */ 1360 dp_rx_nbuf_free(nbuf); 1361 return; 1362 } 1363 /* Set length in nbuf */ 1364 qdf_nbuf_set_pktlen(nbuf, pkt_len); 1365 1366 qdf_nbuf_set_next(nbuf, NULL); 1367 1368 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 1369 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 1370 1371 if (!txrx_peer) { 1372 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL"); 1373 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 1374 qdf_nbuf_len(nbuf)); 1375 /* Trigger invalid peer handler wrapper */ 1376 dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id); 1377 return; 1378 } 1379 1380 vdev = txrx_peer->vdev; 1381 if (!vdev) { 1382 dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc, 1383 vdev); 1384 /* Drop & free packet */ 1385 dp_rx_nbuf_free(nbuf); 1386 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1387 return; 1388 } 1389 1390 /* 1391 * Advance the packet start pointer by total size of 1392 * pre-header TLV's 1393 */ 1394 dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset); 1395 1396 if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) { 1397 uint8_t *pkt_type; 1398 1399 pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE); 1400 if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) { 1401 if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) == 1402 htons(QDF_LLC_STP)) { 1403 DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1); 1404 goto process_mesh; 1405 } else { 1406 goto process_rx; 1407 } 1408 } 1409 } 1410 if (vdev->rx_decap_type == htt_cmn_pkt_type_raw) 1411 goto process_mesh; 1412 1413 /* 1414 * WAPI cert AP sends rekey frames as unencrypted. 1415 * Thus RXDMA will report unencrypted frame error. 1416 * To pass WAPI cert case, SW needs to pass unencrypted 1417 * rekey frame to stack. 
1418 */ 1419 if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { 1420 goto process_rx; 1421 } 1422 /* 1423 * In dynamic WEP case rekey frames are not encrypted 1424 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and 1425 * key install is already done 1426 */ 1427 if ((vdev->sec_type == cdp_sec_type_wep104) && 1428 (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))) 1429 goto process_rx; 1430 1431 process_mesh: 1432 1433 if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) { 1434 dp_rx_nbuf_free(nbuf); 1435 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1436 return; 1437 } 1438 1439 if (vdev->mesh_vdev) { 1440 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 1441 == QDF_STATUS_SUCCESS) { 1442 dp_rx_err_info("%pK: mesh pkt filtered", soc); 1443 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); 1444 1445 dp_rx_nbuf_free(nbuf); 1446 return; 1447 } 1448 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer); 1449 } 1450 process_rx: 1451 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1452 rx_tlv_hdr) && 1453 (vdev->rx_decap_type == 1454 htt_cmn_pkt_type_ethernet))) { 1455 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1456 is_broadcast = (QDF_IS_ADDR_BROADCAST 1457 (eh->ether_dhost)) ? 
1 : 0 ; 1458 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1, 1459 qdf_nbuf_len(nbuf), link_id); 1460 if (is_broadcast) { 1461 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1, 1462 qdf_nbuf_len(nbuf), 1463 link_id); 1464 } 1465 } else { 1466 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1, 1467 qdf_nbuf_len(nbuf), 1468 link_id); 1469 } 1470 1471 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 1472 dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id); 1473 } else { 1474 /* Update the protocol tag in SKB based on CCE metadata */ 1475 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1476 EXCEPTION_DEST_RING_ID, true, true); 1477 /* Update the flow tag in SKB based on FSE metadata */ 1478 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); 1479 DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1); 1480 qdf_nbuf_set_exc_frame(nbuf, 1); 1481 dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL, 1482 qdf_nbuf_is_ipv4_eapol_pkt(nbuf)); 1483 } 1484 1485 return; 1486 } 1487 1488 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, 1489 uint8_t *rx_tlv_hdr, 1490 struct dp_txrx_peer *txrx_peer) 1491 { 1492 struct dp_vdev *vdev = NULL; 1493 struct dp_pdev *pdev = NULL; 1494 struct ol_if_ops *tops = NULL; 1495 uint16_t rx_seq, fragno; 1496 uint8_t is_raw; 1497 unsigned int tid; 1498 QDF_STATUS status; 1499 struct cdp_rx_mic_err_info mic_failure_info; 1500 1501 if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1502 rx_tlv_hdr)) 1503 return; 1504 1505 if (!txrx_peer) { 1506 dp_info_rl("txrx_peer not found"); 1507 goto fail; 1508 } 1509 1510 vdev = txrx_peer->vdev; 1511 if (!vdev) { 1512 dp_info_rl("VDEV not found"); 1513 goto fail; 1514 } 1515 1516 pdev = vdev->pdev; 1517 if (!pdev) { 1518 dp_info_rl("PDEV not found"); 1519 goto fail; 1520 } 1521 1522 is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf)); 1523 if (is_raw) { 1524 fragno = dp_rx_frag_get_mpdu_frag_number(soc, 1525 qdf_nbuf_data(nbuf)); 
1526 /* Can get only last fragment */ 1527 if (fragno) { 1528 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 1529 qdf_nbuf_data(nbuf)); 1530 rx_seq = hal_rx_get_rx_sequence(soc->hal_soc, 1531 qdf_nbuf_data(nbuf)); 1532 1533 status = dp_rx_defrag_add_last_frag(soc, txrx_peer, 1534 tid, rx_seq, nbuf); 1535 dp_info_rl("Frag pkt seq# %d frag# %d consumed " 1536 "status %d !", rx_seq, fragno, status); 1537 return; 1538 } 1539 } 1540 1541 if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf), 1542 &mic_failure_info.da_mac_addr.bytes[0])) { 1543 dp_err_rl("Failed to get da_mac_addr"); 1544 goto fail; 1545 } 1546 1547 if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf), 1548 &mic_failure_info.ta_mac_addr.bytes[0])) { 1549 dp_err_rl("Failed to get ta_mac_addr"); 1550 goto fail; 1551 } 1552 1553 mic_failure_info.key_id = 0; 1554 mic_failure_info.multicast = 1555 IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes); 1556 qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE); 1557 mic_failure_info.frame_type = cdp_rx_frame_type_802_11; 1558 mic_failure_info.data = NULL; 1559 mic_failure_info.vdev_id = vdev->vdev_id; 1560 1561 tops = pdev->soc->cdp_soc.ol_ops; 1562 if (tops->rx_mic_error) 1563 tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id, 1564 &mic_failure_info); 1565 1566 fail: 1567 dp_rx_nbuf_free(nbuf); 1568 return; 1569 } 1570 1571 #ifdef WLAN_SUPPORT_RX_FLOW_TAG 1572 static void dp_rx_peek_trapped_packet(struct dp_soc *soc, 1573 struct dp_vdev *vdev) 1574 { 1575 if (soc->cdp_soc.ol_ops->send_wakeup_trigger) 1576 soc->cdp_soc.ol_ops->send_wakeup_trigger(soc->ctrl_psoc, 1577 vdev->vdev_id); 1578 } 1579 #else 1580 static void dp_rx_peek_trapped_packet(struct dp_soc *soc, 1581 struct dp_vdev *vdev) 1582 { 1583 return; 1584 } 1585 #endif 1586 1587 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \ 1588 defined(WLAN_MCAST_MLO) 1589 static bool dp_rx_igmp_handler(struct dp_soc *soc, 1590 struct dp_vdev *vdev, 1591 struct dp_txrx_peer 
*peer, 1592 qdf_nbuf_t nbuf, 1593 uint8_t link_id) 1594 { 1595 if (soc->arch_ops.dp_rx_mcast_handler) { 1596 if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer, 1597 nbuf, link_id)) 1598 return true; 1599 } 1600 return false; 1601 } 1602 #else 1603 static bool dp_rx_igmp_handler(struct dp_soc *soc, 1604 struct dp_vdev *vdev, 1605 struct dp_txrx_peer *peer, 1606 qdf_nbuf_t nbuf, 1607 uint8_t link_id) 1608 { 1609 return false; 1610 } 1611 #endif 1612 1613 /** 1614 * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack 1615 * Free any other packet which comes in 1616 * this path. 1617 * 1618 * @soc: core DP main context 1619 * @nbuf: buffer pointer 1620 * @txrx_peer: txrx peer handle 1621 * @rx_tlv_hdr: start of rx tlv header 1622 * @err_src: rxdma/reo 1623 * @link_id: link id on which the packet is received 1624 * 1625 * This function indicates EAPOL frame received in wbm error ring to stack. 1626 * Any other frame should be dropped. 1627 * 1628 * Return: SUCCESS if delivered to stack 1629 */ 1630 static void 1631 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf, 1632 struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr, 1633 enum hal_rx_wbm_error_source err_src, 1634 uint8_t link_id) 1635 { 1636 uint32_t pkt_len; 1637 uint16_t msdu_len; 1638 struct dp_vdev *vdev; 1639 struct hal_rx_msdu_metadata msdu_metadata; 1640 bool is_eapol; 1641 uint16_t buf_size; 1642 1643 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx); 1644 1645 qdf_nbuf_set_rx_chfrag_start( 1646 nbuf, 1647 hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1648 rx_tlv_hdr)); 1649 qdf_nbuf_set_rx_chfrag_end(nbuf, 1650 hal_rx_msdu_end_last_msdu_get(soc->hal_soc, 1651 rx_tlv_hdr)); 1652 qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1653 rx_tlv_hdr)); 1654 qdf_nbuf_set_da_valid(nbuf, 1655 hal_rx_msdu_end_da_is_valid_get(soc->hal_soc, 1656 rx_tlv_hdr)); 1657 qdf_nbuf_set_sa_valid(nbuf, 1658 hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, 1659 rx_tlv_hdr)); 1660 1661 
hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata); 1662 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1663 pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size; 1664 1665 if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) { 1666 if (dp_rx_check_pkt_len(soc, pkt_len)) 1667 goto drop_nbuf; 1668 1669 /* Set length in nbuf */ 1670 qdf_nbuf_set_pktlen(nbuf, qdf_min(pkt_len, (uint32_t)buf_size)); 1671 } 1672 1673 /* 1674 * Check if DMA completed -- msdu_done is the last bit 1675 * to be written 1676 */ 1677 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1678 dp_err_rl("MSDU DONE failure"); 1679 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1680 QDF_TRACE_LEVEL_INFO); 1681 qdf_assert(0); 1682 } 1683 1684 if (!txrx_peer) 1685 goto drop_nbuf; 1686 1687 vdev = txrx_peer->vdev; 1688 if (!vdev) { 1689 dp_err_rl("Null vdev!"); 1690 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1691 goto drop_nbuf; 1692 } 1693 1694 /* 1695 * Advance the packet start pointer by total size of 1696 * pre-header TLV's 1697 */ 1698 if (qdf_nbuf_is_frag(nbuf)) 1699 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1700 else 1701 qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad + 1702 soc->rx_pkt_tlv_size)); 1703 1704 if (hal_rx_msdu_cce_metadata_get(soc->hal_soc, rx_tlv_hdr) == 1705 CDP_STANDBY_METADATA) 1706 dp_rx_peek_trapped_packet(soc, vdev); 1707 1708 QDF_NBUF_CB_RX_PEER_ID(nbuf) = txrx_peer->peer_id; 1709 if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf, link_id)) 1710 return; 1711 1712 dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1); 1713 1714 /* 1715 * Indicate EAPOL frame to stack only when vap mac address 1716 * matches the destination address. 
1717 */ 1718 is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf); 1719 if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { 1720 qdf_ether_header_t *eh = 1721 (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1722 if (dp_rx_err_match_dhost(eh, vdev)) { 1723 DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1, 1724 qdf_nbuf_len(nbuf)); 1725 1726 /* 1727 * Update the protocol tag in SKB based on 1728 * CCE metadata. 1729 */ 1730 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1731 EXCEPTION_DEST_RING_ID, 1732 true, true); 1733 /* Update the flow tag in SKB based on FSE metadata */ 1734 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, 1735 true); 1736 DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, 1737 qdf_nbuf_len(nbuf), 1738 vdev->pdev->enhanced_stats_en); 1739 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, 1740 rx.rx_success, 1, 1741 qdf_nbuf_len(nbuf), 1742 link_id); 1743 qdf_nbuf_set_exc_frame(nbuf, 1); 1744 qdf_nbuf_set_next(nbuf, NULL); 1745 1746 dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, 1747 NULL, is_eapol); 1748 1749 return; 1750 } 1751 } 1752 1753 drop_nbuf: 1754 1755 DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1, 1756 err_src == HAL_RX_WBM_ERR_SRC_REO); 1757 DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1, 1758 err_src == HAL_RX_WBM_ERR_SRC_RXDMA); 1759 1760 dp_rx_nbuf_free(nbuf); 1761 } 1762 1763 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1764 1765 #ifdef DP_RX_DESC_COOKIE_INVALIDATE 1766 /** 1767 * dp_rx_link_cookie_check() - Validate link desc cookie 1768 * @ring_desc: ring descriptor 1769 * 1770 * Return: qdf status 1771 */ 1772 static inline QDF_STATUS 1773 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) 1774 { 1775 if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc))) 1776 return QDF_STATUS_E_FAILURE; 1777 1778 return QDF_STATUS_SUCCESS; 1779 } 1780 1781 /** 1782 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie 1783 * @ring_desc: ring descriptor 1784 * 1785 * Return: None 1786 */ 1787 static inline void 1788 
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) 1789 { 1790 HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc); 1791 } 1792 #else 1793 static inline QDF_STATUS 1794 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) 1795 { 1796 return QDF_STATUS_SUCCESS; 1797 } 1798 1799 static inline void 1800 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) 1801 { 1802 } 1803 #endif 1804 1805 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 1806 /** 1807 * dp_rx_err_ring_record_entry() - Record rx err ring history 1808 * @soc: Datapath soc structure 1809 * @paddr: paddr of the buffer in RX err ring 1810 * @sw_cookie: SW cookie of the buffer in RX err ring 1811 * @rbm: Return buffer manager of the buffer in RX err ring 1812 * 1813 * Return: None 1814 */ 1815 static inline void 1816 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, 1817 uint32_t sw_cookie, uint8_t rbm) 1818 { 1819 struct dp_buf_info_record *record; 1820 uint32_t idx; 1821 1822 if (qdf_unlikely(!soc->rx_err_ring_history)) 1823 return; 1824 1825 idx = dp_history_get_next_index(&soc->rx_err_ring_history->index, 1826 DP_RX_ERR_HIST_MAX); 1827 1828 /* No NULL check needed for record since its an array */ 1829 record = &soc->rx_err_ring_history->entry[idx]; 1830 1831 record->timestamp = qdf_get_log_timestamp(); 1832 record->hbi.paddr = paddr; 1833 record->hbi.sw_cookie = sw_cookie; 1834 record->hbi.rbm = rbm; 1835 } 1836 #else 1837 static inline void 1838 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, 1839 uint32_t sw_cookie, uint8_t rbm) 1840 { 1841 } 1842 #endif 1843 1844 #if defined(HANDLE_RX_REROUTE_ERR) || defined(REO_EXCEPTION_MSDU_WAR) 1845 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc, 1846 hal_ring_desc_t ring_desc) 1847 { 1848 int lmac_id = DP_INVALID_LMAC_ID; 1849 struct dp_rx_desc *rx_desc; 1850 struct hal_buf_info hbi; 1851 struct dp_pdev *pdev; 1852 struct rx_desc_pool *rx_desc_pool; 1853 1854 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 1855 1856 
rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie); 1857 1858 /* sanity */ 1859 if (!rx_desc) { 1860 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1); 1861 goto assert_return; 1862 } 1863 1864 if (!rx_desc->nbuf) 1865 goto assert_return; 1866 1867 dp_rx_err_ring_record_entry(soc, hbi.paddr, 1868 hbi.sw_cookie, 1869 hal_rx_ret_buf_manager_get(soc->hal_soc, 1870 ring_desc)); 1871 if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) { 1872 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 1873 rx_desc->in_err_state = 1; 1874 goto assert_return; 1875 } 1876 1877 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 1878 /* After this point the rx_desc and nbuf are valid */ 1879 dp_ipa_rx_buf_smmu_mapping_lock(soc); 1880 qdf_assert_always(!rx_desc->unmapped); 1881 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf); 1882 rx_desc->unmapped = 1; 1883 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 1884 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 1885 rx_desc->pool_id); 1886 1887 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 1888 lmac_id = rx_desc->pool_id; 1889 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 1890 &pdev->free_list_tail, 1891 rx_desc); 1892 return lmac_id; 1893 1894 assert_return: 1895 qdf_assert(0); 1896 return lmac_id; 1897 } 1898 #endif 1899 1900 #ifdef HANDLE_RX_REROUTE_ERR 1901 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 1902 { 1903 int ret; 1904 uint64_t cur_time_stamp; 1905 1906 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1); 1907 1908 /* Recover if overall error count exceeds threshold */ 1909 if (soc->stats.rx.err.reo_err_msdu_buf_rcved > 1910 DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) { 1911 dp_err("pkt threshold breached! 
reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu", 1912 soc->stats.rx.err.reo_err_msdu_buf_rcved, 1913 soc->rx_route_err_start_pkt_ts); 1914 qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR); 1915 } 1916 1917 cur_time_stamp = qdf_get_log_timestamp_usecs(); 1918 if (!soc->rx_route_err_start_pkt_ts) 1919 soc->rx_route_err_start_pkt_ts = cur_time_stamp; 1920 1921 /* Recover if threshold number of packets received in threshold time */ 1922 if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) > 1923 DP_RX_ERR_ROUTE_TIMEOUT_US) { 1924 soc->rx_route_err_start_pkt_ts = cur_time_stamp; 1925 1926 if (soc->rx_route_err_in_window > 1927 DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) { 1928 qdf_trigger_self_recovery(NULL, 1929 QDF_RX_REG_PKT_ROUTE_ERR); 1930 dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu", 1931 soc->stats.rx.err.reo_err_msdu_buf_rcved, 1932 soc->rx_route_err_start_pkt_ts); 1933 } else { 1934 soc->rx_route_err_in_window = 1; 1935 } 1936 } else { 1937 soc->rx_route_err_in_window++; 1938 } 1939 1940 ret = dp_rx_err_handle_msdu_buf(soc, ring_desc); 1941 1942 return ret; 1943 } 1944 #else /* HANDLE_RX_REROUTE_ERR */ 1945 #ifdef REO_EXCEPTION_MSDU_WAR 1946 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 1947 { 1948 return dp_rx_err_handle_msdu_buf(soc, ring_desc); 1949 } 1950 #else /* REO_EXCEPTION_MSDU_WAR */ 1951 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 1952 { 1953 qdf_assert_always(0); 1954 1955 return DP_INVALID_LMAC_ID; 1956 } 1957 #endif /* REO_EXCEPTION_MSDU_WAR */ 1958 #endif /* HANDLE_RX_REROUTE_ERR */ 1959 1960 #ifdef WLAN_MLO_MULTI_CHIP 1961 /** 1962 * dp_idle_link_bm_id_check() - war for HW issue 1963 * 1964 * @soc: DP SOC handle 1965 * @rbm: idle link RBM value 1966 * @ring_desc: reo error link descriptor 1967 * 1968 * This is a war for HW issue where link descriptor 1969 * of partner soc received due to packets wrongly 1970 * interpreted 
as fragments 1971 * 1972 * Return: true in case link desc is consumed 1973 * false in other cases 1974 */ 1975 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm, 1976 void *ring_desc) 1977 { 1978 struct dp_soc *replenish_soc = NULL; 1979 1980 /* return ok incase of link desc of same soc */ 1981 if (rbm == soc->idle_link_bm_id) 1982 return false; 1983 1984 if (soc->arch_ops.dp_soc_get_by_idle_bm_id) 1985 replenish_soc = 1986 soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm); 1987 1988 qdf_assert_always(replenish_soc); 1989 1990 /* 1991 * For WIN usecase we should only get fragment packets in 1992 * this ring as for MLO case fragmentation is not supported 1993 * we should not see links from other soc. 1994 * 1995 * Drop all packets from partner soc and replenish the descriptors 1996 */ 1997 dp_handle_wbm_internal_error(replenish_soc, ring_desc, 1998 HAL_WBM_RELEASE_RING_2_DESC_TYPE); 1999 2000 return true; 2001 } 2002 #else 2003 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm, 2004 void *ring_desc) 2005 { 2006 return false; 2007 } 2008 #endif 2009 2010 static inline void 2011 dp_rx_err_dup_frame(struct dp_soc *soc, 2012 struct hal_rx_mpdu_desc_info *mpdu_desc_info) 2013 { 2014 struct dp_txrx_peer *txrx_peer = NULL; 2015 dp_txrx_ref_handle txrx_ref_handle = NULL; 2016 uint16_t peer_id; 2017 2018 peer_id = 2019 dp_rx_peer_metadata_peer_id_get(soc, 2020 mpdu_desc_info->peer_meta_data); 2021 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, 2022 &txrx_ref_handle, 2023 DP_MOD_ID_RX_ERR); 2024 if (txrx_peer) { 2025 DP_STATS_INC(txrx_peer->vdev, rx.duplicate_count, 1); 2026 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR); 2027 } 2028 } 2029 2030 uint32_t 2031 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 2032 hal_ring_handle_t hal_ring_hdl, uint32_t quota) 2033 { 2034 hal_ring_desc_t ring_desc; 2035 hal_soc_handle_t hal_soc; 2036 uint32_t count = 0; 2037 uint32_t rx_bufs_used = 0; 2038 uint32_t 
rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 2039 uint8_t mac_id = 0; 2040 uint8_t buf_type; 2041 uint8_t err_status; 2042 struct hal_rx_mpdu_desc_info mpdu_desc_info; 2043 struct hal_buf_info hbi; 2044 struct dp_pdev *dp_pdev; 2045 struct dp_srng *dp_rxdma_srng; 2046 struct rx_desc_pool *rx_desc_pool; 2047 void *link_desc_va; 2048 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 2049 uint16_t num_msdus; 2050 struct dp_rx_desc *rx_desc = NULL; 2051 QDF_STATUS status; 2052 bool ret; 2053 uint32_t error_code = 0; 2054 bool sw_pn_check_needed; 2055 int max_reap_limit = dp_rx_get_loop_pkt_limit(soc); 2056 int i, rx_bufs_reaped_total; 2057 uint16_t peer_id; 2058 struct dp_txrx_peer *txrx_peer = NULL; 2059 dp_txrx_ref_handle txrx_ref_handle = NULL; 2060 2061 /* Debug -- Remove later */ 2062 qdf_assert(soc && hal_ring_hdl); 2063 2064 hal_soc = soc->hal_soc; 2065 2066 /* Debug -- Remove later */ 2067 qdf_assert(hal_soc); 2068 2069 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 2070 2071 /* TODO */ 2072 /* 2073 * Need API to convert from hal_ring pointer to 2074 * Ring Type / Ring Id combo 2075 */ 2076 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 2077 dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc, 2078 hal_ring_hdl); 2079 goto done; 2080 } 2081 2082 while (qdf_likely(quota-- && (ring_desc = 2083 hal_srng_dst_peek(hal_soc, 2084 hal_ring_hdl)))) { 2085 2086 DP_STATS_INC(soc, rx.err_ring_pkts, 1); 2087 err_status = hal_rx_err_status_get(hal_soc, ring_desc); 2088 buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc); 2089 2090 if (err_status == HAL_REO_ERROR_DETECTED) 2091 error_code = hal_rx_get_reo_error_code(hal_soc, 2092 ring_desc); 2093 2094 qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0); 2095 sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc, 2096 err_status, 2097 error_code); 2098 if (!sw_pn_check_needed) { 2099 /* 2100 * MPDU desc info will be present in the REO desc 2101 * only in the below scenarios 2102 * 1) 
pn_in_dest_disabled: always
		 * 2) pn_in_dest enabled: All cases except 2k-jump
		 *    and OOR errors
		 */
			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
						  &mpdu_desc_info);
		}

		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
			goto next_entry;

		/*
		 * For REO error ring, only MSDU LINK DESC is expected.
		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
		 */
		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
			int lmac_id;

			lmac_id = dp_rx_err_exception(soc, ring_desc);
			if (lmac_id >= 0)
				rx_bufs_reaped[lmac_id] += 1;
			goto next_entry;
		}

		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
					  &hbi);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
				  soc->link_desc_id_start);

		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			goto next_entry;
		}

		status = dp_rx_link_cookie_check(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			break;
		}

		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);
		if (!num_msdus ||
		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
					  num_msdus, msdu_list.sw_cookie[0]);
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
			goto next_entry;
		}

		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
					    msdu_list.sw_cookie[0],
					    msdu_list.rbm[0]);
		// TODO - BE- Check if the RBM is to be checked for all chips
		if (qdf_unlikely((msdu_list.rbm[0] !=
				  dp_rx_get_rx_bm_id(soc)) &&
				 (msdu_list.rbm[0] !=
				  soc->idle_link_bm_id) &&
				 (msdu_list.rbm[0] !=
				  dp_rx_get_defrag_bm_id(soc)))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				dp_rx_err_err("%pK: Invalid RBM %d",
					      soc, msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			goto next_entry;
		}

		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* SW PN check path handles all REO errors via the error-code
		 * switch below; skip BAR/fragment special-casing.
		 */
		if (sw_pn_check_needed) {
			goto process_reo_error_code;
		}

		if (mpdu_desc_info.bar_frame) {
			qdf_assert_always(mpdu_desc_info.msdu_count == 1);

			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
					       &mpdu_desc_info, err_status,
					       error_code);

			rx_bufs_reaped[mac_id] += 1;
			goto next_entry;
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for fragmented
			 * case. We drop the msdus and release the link desc
			 * back if there are more than one msdu in link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				goto next_entry;
			}

			/*
			 * this is a unlikely scenario where the host is reaping
			 * a descriptor which it already reaped just a while ago
			 * but is yet to replenish it back to HW.
			 * In this case host will dump the last 128 descriptors
			 * including the software descriptor rx_desc and assert.
			 */

			if (qdf_unlikely(!rx_desc->in_use)) {
				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
				dp_info_rl("Reaping rx_desc not in use!");
				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
							   ring_desc, rx_desc);
				/* ignore duplicate RX desc and continue */
				/* Pop out the descriptor */
				goto next_entry;
			}

			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
							    msdu_list.paddr[0]);
			if (!ret) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				rx_desc->in_err_state = 1;
				goto next_entry;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);

			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info.peer_meta_data);
			txrx_peer =
				dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
							       &txrx_ref_handle,
							       DP_MOD_ID_RX_ERR);
			if (txrx_peer) {
				DP_STATS_INC(txrx_peer->vdev,
					     rx.fragment_count, 1);
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			}
			goto next_entry;
		}

process_reo_error_code:
		/*
		 * Expect REO errors to be handled after this point
		 */
		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);

		dp_info_rl("Got pkt with REO ERROR: %d", error_code);

		switch (error_code) {
		case HAL_REO_ERR_PN_CHECK_FAILED:
		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					error_code);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_NON_BA_DUPLICATE:
			dp_rx_err_dup_frame(soc, &mpdu_desc_info);
			fallthrough;
		case HAL_REO_ERR_QUEUE_DESC_INVALID:
		case HAL_REO_ERR_AMPDU_IN_NON_BA:
		case HAL_REO_ERR_BA_DUPLICATE:
		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			count = dp_rx_msdus_drop(soc, ring_desc,
						 &mpdu_desc_info,
						 &mac_id, quota);
			rx_bufs_reaped[mac_id] += count;
			break;
		default:
			/* Assert if unexpected error type */
			qdf_assert_always(0);
		}
next_entry:
		dp_rx_link_cookie_invalidate(ring_desc);
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		rx_bufs_reaped_total = 0;
		for (i = 0; i < MAX_PDEV_CNT; i++)
			rx_bufs_reaped_total += rx_bufs_reaped[i];

		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
						  max_reap_limit))
			break;
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	/* Replenish all buffers reaped above back to their refill rings */
	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail,
						false);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef DROP_RXDMA_DECRYPT_ERR
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return false;
}
#else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return true;
}
#endif

/*
 * dp_rx_wbm_sg_list_last_msdu_war() - WAR to stamp the MSDU length parsed
 * from the last (tail) nbuf's TLVs into the nbuf CB pkt-len of every nbuf
 * in the currently accumulated WBM SG list. No-op unless the
 * wbm_sg_last_msdu_war flag is set on the soc.
 *
 * @soc: core DP main context
 */
void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
{
	if (soc->wbm_sg_last_msdu_war) {
		uint32_t len;
		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;

		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
						     qdf_nbuf_data(temp));
		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
		while (temp) {
			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
			temp = temp->next;
		}
	}
}

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_wbm_desc_nbuf_sanity_check() - Check for buffer paddr corruption
 * between the WBM release ring descriptor and the SW rx descriptor; dumps
 * the ring descriptor on mismatch.
 *
 * @soc: core DP main context
 * @hal_ring_hdl: WBM release ring handle (used only for the failure dump)
 * @ring_desc: WBM release ring descriptor
 * @rx_desc: SW rx descriptor to validate against
 *
 * Return: QDF_STATUS_SUCCESS if paddr matches, QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);

	return QDF_STATUS_E_FAILURE;
}

#else
/* Debug check compiled out: always report success */
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

bool
dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
{
	/*
	 * Currently Null Queue and Unencrypted error handlers has support for
	 * SG. Other error handler do not deal with SG buffer.
	 */
	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
		return true;

	return false;
}

#ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
	/*
	 * In case of fast recycle TX driver can avoid invalidate
	 * of buffer in case of SFE forward. We need to invalidate
	 * the TLV headers after writing to this location
	 */
	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data +
					       soc->rx_pkt_tlv_size +
					       L3_HEADER_PAD));
}
#else
void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
}
#endif

#ifndef CONFIG_NBUF_AP_PLATFORM
/* Extract the peer id from the peer metadata in the rx TLV header */
static inline uint16_t
dp_rx_get_peer_id(struct dp_soc *soc,
		  uint8_t *rx_tlv_hdr,
		  qdf_nbuf_t nbuf)
{
	uint32_t peer_mdata = 0;

	peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
						   rx_tlv_hdr);
	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
}

/* Read the WBM error info stashed in the TLV private area of the nbuf */
static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
				 qdf_nbuf_t nbuf,
				 uint8_t *rx_tlv_hdr,
				 union hal_wbm_err_info_u *wbm_err)
{
	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
				      (uint8_t *)&wbm_err->info,
				      sizeof(union hal_wbm_err_info_u));
}

/* Stash the WBM error info into the TLV private area of the nbuf */
void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
			       qdf_nbuf_t nbuf,
			       union hal_wbm_err_info_u wbm_err)
{
	hal_rx_priv_info_set_in_tlv(soc->hal_soc,
				    qdf_nbuf_data(nbuf),
				    (uint8_t *)&wbm_err.info,
				    sizeof(union hal_wbm_err_info_u));
}
#else
/* AP platform: peer metadata is carried in the nbuf CB, not the TLVs */
static inline uint16_t
dp_rx_get_peer_id(struct dp_soc *soc,
		  uint8_t *rx_tlv_hdr,
		  qdf_nbuf_t nbuf)
{
	uint32_t peer_mdata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);

	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
}

/* AP platform: WBM error info is carried in the nbuf CB, not the TLVs */
static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
				 qdf_nbuf_t nbuf,
				 uint8_t *rx_tlv_hdr,
				 union hal_wbm_err_info_u *wbm_err)
{
	wbm_err->info = QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf);
}

void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
			       qdf_nbuf_t nbuf,
			       union hal_wbm_err_info_u wbm_err)
{
	QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf) = wbm_err.info;
}
#endif /* CONFIG_NBUF_AP_PLATFORM */

/*
 * dp_rx_wbm_err_process() - Process frames released on the WBM error ring.
 * Reaps error nbufs via the arch-specific reap handler, then dispatches each
 * nbuf on its WBM error source (REO vs RXDMA), push reason and error code.
 * Every path either hands the nbuf to a specific error handler or frees it.
 *
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: WBM error release ring handle
 * @quota: upper limit of descriptors that can be reaped
 *
 * Return: number of rx buffers reaped (no scale factor applied)
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_soc_handle_t hal_soc;
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *dp_pdev;
	uint8_t *rx_tlv_hdr;
	bool is_tkip_mic_err;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf, next;
	union hal_wbm_err_info_u wbm_err = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
	uint8_t link_id = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
							  hal_ring_hdl,
							  quota,
							  &rx_bufs_used);
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_txrx_peer *txrx_peer;
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;
		dp_txrx_ref_handle txrx_ref_handle = NULL;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf CB/TLV, so we can
		 * handle error cases appropriately
		 */
		dp_rx_get_wbm_err_info_from_nbuf(soc, nbuf,
						 rx_tlv_hdr,
						 &wbm_err);

		peer_id = dp_rx_get_peer_id(soc,
					    rx_tlv_hdr,
					    nbuf);
		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
							   &txrx_ref_handle,
							   DP_MOD_ID_RX_ERR);

		if (!txrx_peer)
			dp_info_rl("peer is null peer_id %u err_src %u, "
				   "REO: push_rsn %u err_code %u, "
				   "RXDMA: push_rsn %u err_code %u",
				   peer_id, wbm_err.info_bit.wbm_err_src,
				   wbm_err.info_bit.reo_psh_rsn,
				   wbm_err.info_bit.reo_err_code,
				   wbm_err.info_bit.rxdma_psh_rsn,
				   wbm_err.info_bit.rxdma_err_code);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		/*
		 * Form the SG for msdu continued buffers
		 * QCN9000 has this support
		 */
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;
			/*
			 * SG error handling is not done correctly,
			 * drop SG frames for now.
			 */
			dp_rx_nbuf_free(nbuf);
			dp_info_rl("scattered msdu dropped");
			nbuf = next;
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			continue;
		}

		dp_rx_nbuf_set_link_id_from_tlv(soc, rx_tlv_hdr, nbuf);

		pool_id = wbm_err.info_bit.pool_id;
		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		/* Per-link stats index is only meaningful for MLD peers */
		if (dp_pdev && dp_pdev->link_peer_stats &&
		    txrx_peer && txrx_peer->is_mld_peer) {
			link_id = dp_rx_get_stats_arr_idx_from_link_id(
								nbuf,
								txrx_peer);
		} else {
			link_id = 0;
		}

		if (wbm_err.info_bit.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err.info_bit.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err.info_bit.reo_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err.info_bit.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err.info_bit.pool_id;
					soc->arch_ops.dp_rx_null_q_desc_handle(
								soc, nbuf,
								rx_tlv_hdr,
								pool_id,
								txrx_peer,
								FALSE,
								link_id);
					break;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.jump_2k_err,
									  1,
									  link_id);

					pool_id = wbm_err.info_bit.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					hal_rx_msdu_start_msdu_len_get(
								soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					break;
				case HAL_REO_ERR_REGULAR_FRAME_OOR:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.oor_err,
									  1,
									  link_id);
					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
						hal_rx_msdu_start_msdu_len_get(
							soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_rx_oor_handle(soc, nbuf,
							 peer_id,
							 rx_tlv_hdr);
					break;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
					if (peer) {
						dp_rx_err_handle_bar(soc, peer,
								     nbuf);
						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
					}
					dp_rx_nbuf_free(nbuf);
					break;

				case HAL_REO_ERR_PN_CHECK_FAILED:
				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.pn_err,
									  1,
									  link_id);
					dp_rx_nbuf_free(nbuf);
					break;

				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err.info_bit.reo_err_code);
					dp_rx_nbuf_free(nbuf);
				}
			} else if (wbm_err.info_bit.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_REO,
						    link_id);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid reo push reason %u",
						wbm_err.info_bit.reo_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				dp_assert_always_internal(0);
			}
		} else if (wbm_err.info_bit.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err.info_bit.rxdma_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err.info_bit.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
				/* fallthrough: handled same as WIFI_PARSE */
				case HAL_RXDMA_ERR_WIFI_PARSE:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.rxdma_wifi_parse_err,
									  1,
									  link_id);

					pool_id = wbm_err.info_bit.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer,
								wbm_err.
								info_bit.
								rxdma_err_code,
								pool_id,
								link_id);
					break;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer);
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
					break;

				case HAL_RXDMA_ERR_DECRYPT:
					/* All the TKIP-MIC failures are treated as Decrypt Errors
					 * for QCN9224 Targets
					 */
					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);

					if (is_tkip_mic_err && txrx_peer) {
						dp_rx_process_mic_error(soc, nbuf,
									rx_tlv_hdr,
									txrx_peer);
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
						break;
					}

					if (txrx_peer) {
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.decrypt_err,
									  1,
									  link_id);
						dp_rx_nbuf_free(nbuf);
						break;
					}

					if (!dp_handle_rxdma_decrypt_err()) {
						dp_rx_nbuf_free(nbuf);
						break;
					}

					pool_id = wbm_err.info_bit.pool_id;
					err_code = wbm_err.info_bit.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					/* peer-less decrypt error: process without peer */
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, NULL,
								err_code,
								pool_id,
								link_id);
					break;
				case HAL_RXDMA_MULTICAST_ECHO:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
									      rx.mec_drop, 1,
									      qdf_nbuf_len(nbuf),
									      link_id);
					dp_rx_nbuf_free(nbuf);
					break;
				case HAL_RXDMA_UNAUTHORIZED_WDS:
					pool_id = wbm_err.info_bit.pool_id;
					err_code = wbm_err.info_bit.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr,
								txrx_peer,
								err_code,
								pool_id,
								link_id);
					break;
				default:
					dp_rx_nbuf_free(nbuf);
					dp_err_rl("RXDMA error %d",
						  wbm_err.info_bit.rxdma_err_code);
				}
			} else if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_RXDMA,
						    link_id);
			} else if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
				dp_rx_err_err("rxdma push reason %u",
					      wbm_err.info_bit.rxdma_psh_rsn);
				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
				dp_rx_nbuf_free(nbuf);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid rxdma push reason %u",
						wbm_err.info_bit.rxdma_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				dp_assert_always_internal(0);
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (txrx_peer)
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX_ERR);

		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of decs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;
	struct rx_desc_pool *rx_desc_pool;

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
				soc, mac_id);
		return rx_bufs_used;
	}

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	/* Walk the chained MSDU link descriptors until the next-link paddr
	 * is 0, freeing (or handing off) each MSDU buffer and returning
	 * every link descriptor to the buffer manager.
	 */
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert_always(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belongs to NSS offloaded radio &&
			 * the rbm is not SW1_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] !=
				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						soc->arch_ops.
						dp_rx_desc_cookie_2_va(
							soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * this is a unlikely scenario
					 * where the host is reaping
					 * a descriptor which
					 * it already reaped just a while ago
					 * but is yet to replenish
					 * it back to HW.
					 * In this case host will dump
					 * the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					if (rx_desc->unmapped == 0) {
						rx_desc_pool =
							&soc->rx_desc_buf[rx_desc->pool_id];
						dp_ipa_rx_buf_smmu_mapping_lock(soc);
						dp_rx_nbuf_unmap_pool(soc,
								      rx_desc_pool,
								      msdu);
						rx_desc->unmapped = 1;
						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
					}

					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
							soc, msdu);

					dp_rx_buffer_pool_nbuf_free(soc, msdu,
								    rx_desc->pool_id);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail, rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer into to the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr, buf_info.sw_cookie,
					     buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
						rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
	}

	return rx_bufs_used;
}

/*
 * dp_rxdma_err_process() - Reap and process entries on the per-mac RXDMA
 * error destination ring, then replenish the freed buffers back to the
 * matching refill ring.
 *
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @mac_id: lmac id whose rxdma_err_dst_ring is serviced
 * @quota: upper limit of descriptors that can be reaped
 *
 * Return: number of rx buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;

	if (!err_dst_srng) {
		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
			      soc, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
			      soc, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		/* pick the refill ring/pool per lmac-ring config */
		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];
		} else {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
		}

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used, &head, &tail, false);

		work_done += rx_bufs_used;
	}

	return work_done;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/*
 * dp_wbm_int_err_mpdu_pop() - Walk the MSDU link descriptors referenced by a
 * WBM internal-error ring entry, free every valid MSDU buffer to the buffer
 * pool and collect the SW rx descriptors on @head/@tail. Per-pool reap
 * counts are accumulated into @rx_bufs_used (indexed by pool id).
 */
static inline void
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail,
			uint32_t *rx_bufs_used)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct rx_desc_pool *rx_desc_pool;
	struct dp_rx_desc *rx_desc;

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
							  msdu_list.sw_cookie[i]);
					continue;
				}

				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				rx_desc_pool =
					&soc->rx_desc_buf[rx_desc->pool_id];
				msdu = rx_desc->nbuf;

				/*
				 * this is a unlikely scenario where the host is reaping
				 * a descriptor which it already reaped just a while ago
				 * but is yet to replenish it back to HW.
				 */
				if (qdf_unlikely(!rx_desc->in_use) ||
				    qdf_unlikely(!msdu)) {
					dp_rx_err_info_rl("Reaping rx_desc not in use!");
					continue;
				}

				dp_ipa_rx_buf_smmu_mapping_lock(soc);
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
				rx_desc->unmapped = 1;
				dp_ipa_rx_buf_smmu_mapping_unlock(soc);

				dp_rx_buffer_pool_nbuf_free(soc, msdu,
							    rx_desc->pool_id);
				rx_bufs_used[rx_desc->pool_id]++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into to the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr, buf_info.sw_cookie,
					     buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);
}

/*
 * dp_handle_wbm_internal_error() - Handle a WBM internal-error ring entry.
 * Frees the referenced rx buffer(s) — a single buffer for BUFFER_TYPE
 * entries, or a full MSDU link-descriptor chain for DESC_TYPE entries —
 * records wbm_internal_error stats, and replenishes every pool that had
 * buffers reaped.
 *
 * @soc: core DP main context
 * @hal_desc: WBM internal error ring descriptor
 * @buf_type: HAL_WBM_RELEASE_RING_2_BUFFER_TYPE or
 *            HAL_WBM_RELEASE_RING_2_DESC_TYPE
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_rx_desc *rx_desc = NULL;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;
	uint8_t mac_id;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
		return;
	}

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
				  &buf_info);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						buf_info.sw_cookie);

		if (rx_desc && rx_desc->nbuf) {
			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
			dp_ipa_rx_buf_smmu_mapping_lock(soc);
			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
					      rx_desc->nbuf);
			rx_desc->unmapped = 1;
			dp_ipa_rx_buf_smmu_mapping_unlock(soc);

			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
						    rx_desc->pool_id);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);

			rx_bufs_reaped[rx_desc->pool_id]++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);

		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
					&head, &tail, rx_bufs_reaped);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		if (!rx_bufs_reaped[mac_id])
			continue;

		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped[mac_id],
					&head, &tail, false);
	}
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */