/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"
#include "dp_rx_buffer_pool.h"

#define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
#define dp_rx_err_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
#define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/* Max number of buffers in the invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

/* Max regular Rx packet routing error */
#define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
#define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
#define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* microseconds */

#ifdef FEATURE_MEC
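/**
 * dp_rx_mcast_echo_check() - check if a multicast frame is an echo of
 *			      our own transmission (MEC)
 * @soc: core DP main context
 * @txrx_peer: txrx peer handle
 * @rx_tlv_hdr: start of rx tlv header
 * @nbuf: pkt buffer
 *
 * Applies only to STA vdevs receiving a multicast/broadcast frame: the
 * frame is treated as looped back and dropped if its source MAC matches
 * the vdev MAC address, an AST entry owned by another peer, or a MEC
 * entry. qwrap isolation vdevs are exempt, since loopback is expected
 * there.
 *
 * Return: true if the frame should be dropped as an echo, false otherwise
 */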
bool dp_rx_mcast_echo_check(struct dp_soc *soc,
			    struct dp_txrx_peer *txrx_peer,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = txrx_peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_mec_entry *mecentry = NULL;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;
	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;
	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);

	/*
	 * If the received pkt's source MAC address matches the vdev
	 * MAC address, drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's source MAC address matches the MAC addr
	 * of a wired PC behind this STA, or of a wireless STA behind the
	 * repeater, drop the pkt as it is looped back.
	 */
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		ase = soc->ast_table[sa_idx];

		/*
		 * This check is not strictly needed since MEC does not
		 * depend on AST, but without it SON has issues in a dual
		 * backhaul scenario: in APS SON mode, a client connected
		 * to the RE on 2G sends multicast packets; the RE sends
		 * them to the CAP over the 5G backhaul, and the CAP loops
		 * them back on 2G to the RE. On receiving them on the 2G
		 * STA vap, we would assume the client has roamed and kick
		 * it out.
		 */
		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			goto drop;
		}

		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	qdf_spin_lock_bh(&soc->mec_lock);

	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
						   &data[QDF_MAC_ADDR_SIZE]);
	if (!mecentry) {
		qdf_spin_unlock_bh(&soc->mec_lock);
		return false;
	}

	qdf_spin_unlock_bh(&soc->mec_lock);

drop:
	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));

	return true;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

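/**
 * dp_rx_link_desc_refill_duplicate_check() - check for a duplicate link
 *					      descriptor refill
 * @soc: core DP main context
 * @buf_info: buffer info of the last released link descriptor, updated
 *	      in place for the next call
 * @ring_buf_info: buffer addr info of the current ring descriptor
 *
 * Compares the physical address of the link descriptor being released
 * with the one released on the previous call and counts a match in the
 * dup_refill_link_desc stat.
 */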
void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info)
{
	struct hal_buf_info current_link_desc_buf_info = { 0 };

	/* do duplicate link desc address check */
	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
					  &current_link_desc_buf_info);

	/*
	 * TODO - Check if the hal soc api call can be removed
	 * since the cookie is just used for print.
	 * buffer_addr_info is the first element of ring_desc
	 */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
				  (uint32_t *)ring_buf_info,
				  &current_link_desc_buf_info);

	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
			 buf_info->paddr)) {
		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
			   current_link_desc_buf_info.paddr,
			   current_link_desc_buf_info.sw_cookie);
		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
	}
	*buf_info = current_link_desc_buf_info;
}

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to apply on release
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
		return status;
	}

	/* do duplicate link desc address check */
	dp_rx_link_desc_refill_duplicate_check(
				soc,
				&soc->last_op_info.wbm_rel_link_desc,
				link_desc_addr);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
			      soc, wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);

		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
			   srng->ring_id,
			   soc->stats.rx.err.hal_ring_access_full_fail);
		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			   *srng->u.src_ring.hp_addr,
			   srng->u.src_ring.reap_hp,
			   *srng->u.src_ring.tp_addr,
			   srng->u.src_ring.cached_tp);
		QDF_BUG(0);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

qdf_export_symbol(dp_rx_link_desc_return_by_addr);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to apply on release
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
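/*
 * Usage sketch (illustrative): the error paths in this file hand link
 * descriptors back to the WBM idle list, e.g.
 *
 *	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
 *				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 *
 * Other bm_action values, where the HAL supports them, select a
 * different WBM release action for the descriptor.
 */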
#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id, filled with the pool id of the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_rx_desc *rx_desc;
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
				  (uint32_t *)ring_desc,
				  &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
	if (!link_desc_va) {
		dp_rx_err_debug("link desc va is null, soc %pK", soc);
		return rx_bufs_used;
	}

more_msdu_link_desc:
	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc, msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from a MSDU link belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
		if (!pdev) {
			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
					soc, rx_desc->pool_id);
			return rx_bufs_used;
		}

		if (!dp_rx_desc_check_magic(rx_desc)) {
			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
				      soc, msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
			      soc, tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
				      rx_desc->nbuf,
				      QDF_TX_RX_STATUS_DROP, true);
		/* Just free the buffers */
		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/*
	 * If the MSDUs are spread across multiple link descriptors, we
	 * cannot depend solely on the msdu_count (e.g., if an msdu is
	 * spread across multiple buffers). Hence, it is necessary to
	 * check the next link descriptor and release all the MSDUs that
	 * are part of it.
	 */
	hal_rx_get_next_msdu_link_desc_buf_addr_info(
			link_desc_va,
			&next_link_desc_addr_info);

	if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
		/* Clear the next link desc info for the current link_desc */
		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);

		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
		hal_rx_buffer_addr_info_get_paddr(
				&next_link_desc_addr_info,
				&buf_info);
		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					  (uint32_t *)&next_link_desc_addr_info,
					  &buf_info);
		cur_link_desc_addr_info = next_link_desc_addr_info;
		buf_addr_info = &cur_link_desc_addr_info;

		link_desc_va =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		goto more_msdu_link_desc;
	}
	quota--;
	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	return rx_bufs_used;
}
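/*
 * Note: quota is decremented only once per MPDU above; the MSDU walk
 * itself is bounded by the link-descriptor chain rather than by quota,
 * so a single call may consume more Rx buffers than the remaining
 * quota suggests.
 */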
389 */ 390 hal_rx_get_next_msdu_link_desc_buf_addr_info( 391 link_desc_va, 392 &next_link_desc_addr_info); 393 394 if (hal_rx_is_buf_addr_info_valid( 395 &next_link_desc_addr_info)) { 396 /* Clear the next link desc info for the current link_desc */ 397 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va); 398 399 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 400 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 401 hal_rx_buffer_addr_info_get_paddr( 402 &next_link_desc_addr_info, 403 &buf_info); 404 /* buffer_addr_info is the first element of ring_desc */ 405 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 406 (uint32_t *)&next_link_desc_addr_info, 407 &buf_info); 408 cur_link_desc_addr_info = next_link_desc_addr_info; 409 buf_addr_info = &cur_link_desc_addr_info; 410 411 link_desc_va = 412 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 413 414 goto more_msdu_link_desc; 415 } 416 quota--; 417 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 418 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 419 return rx_bufs_used; 420 } 421 422 /** 423 * dp_rx_pn_error_handle() - Handles PN check errors 424 * 425 * @soc: core txrx main context 426 * @ring_desc: opaque pointer to the REO error ring descriptor 427 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 428 * @head: head of the local descriptor free-list 429 * @tail: tail of the local descriptor free-list 430 * @quota: No. of units (packets) that can be serviced in one shot. 431 * 432 * This function implements PN error handling 433 * If the peer is configured to ignore the PN check errors 434 * or if DP feels, that this frame is still OK, the frame can be 435 * re-injected back to REO to use some of the other features 436 * of REO e.g. duplicate detection/routing to other cores 437 * 438 * Return: uint32_t: No. of elements processed 439 */ 440 static uint32_t 441 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc, 442 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 443 uint8_t *mac_id, 444 uint32_t quota) 445 { 446 uint16_t peer_id; 447 uint32_t rx_bufs_used = 0; 448 struct dp_txrx_peer *txrx_peer; 449 bool peer_pn_policy = false; 450 dp_txrx_ref_handle txrx_ref_handle = NULL; 451 452 peer_id = dp_rx_peer_metadata_peer_id_get(soc, 453 mpdu_desc_info->peer_meta_data); 454 455 456 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, 457 &txrx_ref_handle, 458 DP_MOD_ID_RX_ERR); 459 460 if (qdf_likely(txrx_peer)) { 461 /* 462 * TODO: Check for peer specific policies & set peer_pn_policy 463 */ 464 dp_err_rl("discard rx due to PN error for peer %pK", 465 txrx_peer); 466 467 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR); 468 } 469 dp_rx_err_err("%pK: Packet received with PN error", soc); 470 471 /* No peer PN policy -- definitely drop */ 472 if (!peer_pn_policy) 473 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, 474 mpdu_desc_info, 475 mac_id, quota); 476 477 return rx_bufs_used; 478 } 479 480 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES 481 /** 482 * dp_rx_deliver_oor_frame() - deliver OOR frames to stack 483 * @soc: Datapath soc handler 484 * @peer: pointer to DP peer 485 * @nbuf: pointer to the skb of RX frame 486 * @frame_mask: the mask for special frame needed 487 * @rx_tlv_hdr: start of rx tlv header 488 * 489 * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and 490 * single nbuf is expected. 491 * 492 * return: true - nbuf has been delivered to stack, false - not. 
493 */ 494 static bool 495 dp_rx_deliver_oor_frame(struct dp_soc *soc, 496 struct dp_txrx_peer *txrx_peer, 497 qdf_nbuf_t nbuf, uint32_t frame_mask, 498 uint8_t *rx_tlv_hdr) 499 { 500 uint32_t l2_hdr_offset = 0; 501 uint16_t msdu_len = 0; 502 uint32_t skip_len; 503 504 l2_hdr_offset = 505 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); 506 507 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { 508 skip_len = l2_hdr_offset; 509 } else { 510 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); 511 skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size; 512 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); 513 } 514 515 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; 516 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); 517 qdf_nbuf_pull_head(nbuf, skip_len); 518 qdf_nbuf_set_exc_frame(nbuf, 1); 519 520 dp_info_rl("OOR frame, mpdu sn 0x%x", 521 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr)); 522 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL); 523 return true; 524 } 525 526 #else 527 static bool 528 dp_rx_deliver_oor_frame(struct dp_soc *soc, 529 struct dp_txrx_peer *txrx_peer, 530 qdf_nbuf_t nbuf, uint32_t frame_mask, 531 uint8_t *rx_tlv_hdr) 532 { 533 return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask, 534 rx_tlv_hdr); 535 } 536 #endif 537 538 /** 539 * dp_rx_oor_handle() - Handles the msdu which is OOR error 540 * 541 * @soc: core txrx main context 542 * @nbuf: pointer to msdu skb 543 * @peer_id: dp peer ID 544 * @rx_tlv_hdr: start of rx tlv header 545 * 546 * This function process the msdu delivered from REO2TCL 547 * ring with error type OOR 548 * 549 * Return: None 550 */ 551 static void 552 dp_rx_oor_handle(struct dp_soc *soc, 553 qdf_nbuf_t nbuf, 554 uint16_t peer_id, 555 uint8_t *rx_tlv_hdr) 556 { 557 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 558 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; 559 struct dp_txrx_peer *txrx_peer = NULL; 560 dp_txrx_ref_handle txrx_ref_handle = NULL; 561 562 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, 563 &txrx_ref_handle, 564 DP_MOD_ID_RX_ERR); 565 if (!txrx_peer) { 566 dp_info_rl("peer not found"); 567 goto free_nbuf; 568 } 569 570 if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask, 571 rx_tlv_hdr)) { 572 DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1); 573 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR); 574 return; 575 } 576 577 free_nbuf: 578 if (txrx_peer) 579 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR); 580 581 DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1); 582 dp_rx_nbuf_free(nbuf); 583 } 584 585 /** 586 * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet 587 * is a monotonous increment of packet number 588 * from the previous successfully re-ordered 589 * frame. 
#ifdef WLAN_SKIP_BAR_UPDATE
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	dp_info_rl("BAR update to H.W is skipped");
	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
}
#else
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	QDF_STATUS status;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid, ssn.
	 * 2a. Do a tid update.
	 */

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr +
					     soc->rx_pkt_tlv_size);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	status = dp_rx_tid_update_wifi3(peer, tid,
					peer->rx_tid[tid].ba_win_size,
					start_seq_num,
					true);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err_rl("failed to handle bar frame update rx tid");
		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
	} else {
		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
	}
}
#endif

/**
 * _dp_rx_bar_frame_handle() - Core of the BAR frame handling
 * @soc: Datapath SoC handle
 * @nbuf: packet being processed
 * @mpdu_desc_info: mpdu desc info for the current packet
 * @tid: tid on which the packet arrived
 * @err_status: Flag to indicate if REO encountered an error while routing
 *		this frame
 * @error_code: REO error code
 *
 * Return: None
 */
static void
_dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			uint32_t tid, uint8_t err_status, uint32_t error_code)
{
	uint16_t peer_id;
	struct dp_peer *peer;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					       mpdu_desc_info->peer_meta_data);
	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer)
		return;

	dp_info("BAR frame: "
		" peer_id = %d"
		" tid = %u"
		" SSN = %d"
		" error status = %d",
		peer->peer_id,
		tid,
		mpdu_desc_info->mpdu_seq,
		err_status);

	if (err_status == HAL_REO_ERROR_DETECTED) {
		switch (error_code) {
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_BAR_FRAME_OOR:
			dp_rx_err_handle_bar(soc, peer, nbuf);
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			break;
		default:
			DP_STATS_INC(soc, rx.bar_frame, 1);
		}
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
}

#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!dp_pdev) {
		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
				soc, mac_id);
		return mpdu_done;
	}
	/* If the invalid peer SG list has reached its max size, free the
	 * buffers in the list and treat the current buffer as the start
	 * of the list.
	 *
	 * The current logic to detect the last buffer from attn_tlv is
	 * not reliable in the OFDMA UL scenario, hence add a max buffers
	 * check to avoid list pile-up.
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
	     (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
						      rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			dp_rx_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(soc->hal_soc,
							    rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; the check is added for debugging purposes,
	 * to catch corner cases.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
814 */ 815 DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu, 816 dp_pdev->invalid_peer_tail_msdu); 817 DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu, 818 dp_pdev->invalid_peer_tail_msdu, 819 nbuf); 820 821 return mpdu_done; 822 } 823 824 /** 825 * dp_rx_bar_frame_handle() - Function to handle err BAR frames 826 * @soc: core DP main context 827 * @ring_desc: Hal ring desc 828 * @rx_desc: dp rx desc 829 * @mpdu_desc_info: mpdu desc info 830 * 831 * Handle the error BAR frames received. Ensure the SOC level 832 * stats are updated based on the REO error code. The BAR frames 833 * are further processed by updating the Rx tids with the start 834 * sequence number (SSN) and BA window size. Desc is returned 835 * to the free desc list 836 * 837 * Return: none 838 */ 839 static void 840 dp_rx_bar_frame_handle(struct dp_soc *soc, 841 hal_ring_desc_t ring_desc, 842 struct dp_rx_desc *rx_desc, 843 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 844 uint8_t err_status, 845 uint32_t err_code) 846 { 847 qdf_nbuf_t nbuf; 848 struct dp_pdev *pdev; 849 struct rx_desc_pool *rx_desc_pool; 850 uint8_t *rx_tlv_hdr; 851 uint32_t tid; 852 853 nbuf = rx_desc->nbuf; 854 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 855 dp_ipa_rx_buf_smmu_mapping_lock(soc); 856 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf); 857 rx_desc->unmapped = 1; 858 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 859 rx_tlv_hdr = qdf_nbuf_data(nbuf); 860 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 861 rx_tlv_hdr); 862 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 863 864 _dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status, 865 err_code); 866 dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf, 867 QDF_TX_RX_STATUS_DROP, true); 868 dp_rx_link_desc_return(soc, ring_desc, 869 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 870 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 871 rx_desc->pool_id); 872 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 873 &pdev->free_list_tail, 874 rx_desc); 875 } 876 877 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 878 879 /** 880 * dp_2k_jump_handle() - Function to handle 2k jump exception 881 * on WBM ring 882 * 883 * @soc: core DP main context 884 * @nbuf: buffer pointer 885 * @rx_tlv_hdr: start of rx tlv header 886 * @peer_id: peer id of first msdu 887 * @tid: Tid for which exception occurred 888 * 889 * This function handles 2k jump violations arising out 890 * of receiving aggregates in non BA case. This typically 891 * may happen if aggregates are received on a QOS enabled TID 892 * while Rx window size is still initialized to value of 2. Or 893 * it may also happen if negotiated window size is 1 but peer 894 * sends aggregates. 
895 * 896 */ 897 898 void 899 dp_2k_jump_handle(struct dp_soc *soc, 900 qdf_nbuf_t nbuf, 901 uint8_t *rx_tlv_hdr, 902 uint16_t peer_id, 903 uint8_t tid) 904 { 905 struct dp_peer *peer = NULL; 906 struct dp_rx_tid *rx_tid = NULL; 907 uint32_t frame_mask = FRAME_MASK_IPV4_ARP; 908 909 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 910 if (!peer) { 911 dp_rx_err_info_rl("%pK: peer not found", soc); 912 goto free_nbuf; 913 } 914 915 if (tid >= DP_MAX_TIDS) { 916 dp_info_rl("invalid tid"); 917 goto nbuf_deliver; 918 } 919 920 rx_tid = &peer->rx_tid[tid]; 921 qdf_spin_lock_bh(&rx_tid->tid_lock); 922 923 /* only if BA session is active, allow send Delba */ 924 if (rx_tid->ba_status != DP_RX_BA_ACTIVE) { 925 qdf_spin_unlock_bh(&rx_tid->tid_lock); 926 goto nbuf_deliver; 927 } 928 929 if (!rx_tid->delba_tx_status) { 930 rx_tid->delba_tx_retry++; 931 rx_tid->delba_tx_status = 1; 932 rx_tid->delba_rcode = 933 IEEE80211_REASON_QOS_SETUP_REQUIRED; 934 qdf_spin_unlock_bh(&rx_tid->tid_lock); 935 if (soc->cdp_soc.ol_ops->send_delba) { 936 DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 937 1); 938 soc->cdp_soc.ol_ops->send_delba( 939 peer->vdev->pdev->soc->ctrl_psoc, 940 peer->vdev->vdev_id, 941 peer->mac_addr.raw, 942 tid, 943 rx_tid->delba_rcode, 944 CDP_DELBA_2K_JUMP); 945 } 946 } else { 947 qdf_spin_unlock_bh(&rx_tid->tid_lock); 948 } 949 950 nbuf_deliver: 951 if (dp_rx_deliver_special_frame(soc, peer->txrx_peer, nbuf, frame_mask, 952 rx_tlv_hdr)) { 953 DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1); 954 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 955 return; 956 } 957 958 free_nbuf: 959 if (peer) 960 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 961 DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1); 962 dp_rx_nbuf_free(nbuf); 963 } 964 965 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ 966 defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI) 967 /** 968 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception 969 * @soc: pointer to dp_soc struct 970 * @pool_id: Pool id to find dp_pdev 971 * @rx_tlv_hdr: TLV header of received packet 972 * @nbuf: SKB 973 * 974 * In certain types of packets if peer_id is not correct then 975 * driver may not be able find. Try finding peer by addr_2 of 976 * received MPDU. If you find the peer then most likely sw_peer_id & 977 * ast_idx is corrupted. 978 * 979 * Return: True if you find the peer by addr_2 of received MPDU else false 980 */ 981 static bool 982 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, 983 uint8_t pool_id, 984 uint8_t *rx_tlv_hdr, 985 qdf_nbuf_t nbuf) 986 { 987 struct dp_peer *peer = NULL; 988 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr); 989 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 990 struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr; 991 992 if (!pdev) { 993 dp_rx_err_debug("%pK: pdev is null for pool_id = %d", 994 soc, pool_id); 995 return false; 996 } 997 /* 998 * WAR- In certain types of packets if peer_id is not correct then 999 * driver may not be able find. 
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - find the peer when
 *						     the rx peer_id is invalid
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets, if the peer_id is not correct, the
 * driver may not be able to find the peer. Try finding the peer by
 * addr_2 of the received MPDU. If the peer is found that way, the
 * sw_peer_id and ast_idx in the descriptor are most likely corrupted.
 *
 * Return: true if the peer was found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, pool_id);
		return false;
	}
	/*
	 * WAR- In certain types of packets if peer_id is not correct then
	 * driver may not be able to find the peer. Try finding the peer
	 * by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		dp_rx_nbuf_free(nbuf);

		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return true;
	}
	return false;
}
#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}
#endif

/**
 * dp_rx_check_pkt_len() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_DATA_BUFFER_SIZE, else return false
 */
static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

/*
 * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
 * @soc: DP soc
 * @vdev: DP vdev handle
 * @txrx_peer: pointer to the txrx_peer object
 * @nbuf: skb list head
 * @tail: skb list tail
 * @is_eapol: eapol pkt check
 *
 * Return: None
 */
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
static inline void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	if (is_eapol && soc->eapol_over_control_port)
		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
	else
		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#else
static inline void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
/*
 * dp_rx_err_match_dhost() - function to check whether dest-mac is correct
 * @eh: Ethernet header of incoming packet
 * @vdev: dp_vdev object of the VAP on which this data packet is received
 *
 * Return: 1 if the destination mac is correct,
 *	   0 if this frame is not correctly destined to this VAP/MLD
 */
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE) == 0) ||
		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE) == 0));
}

#else
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
			    QDF_MAC_ADDR_SIZE) == 0);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_err_drop_3addr_mcast() - Check if the drop_3addr_mcast feature
 *				  is enabled; if so, drop the multicast frame.
 * @vdev: datapath vdev
 * @rx_tlv_hdr: TLV header
 *
 * Return: true if the packet is to be dropped,
 *	   false if the packet is not dropped.
 */
static bool
dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
{
	struct dp_soc *soc = vdev->pdev->soc;

	if (!vdev->drop_3addr_mcast)
		return false;

	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return true;

	return false;
}

/**
 * dp_rx_err_is_pn_check_needed() - Check if the packet number check is
 *				    needed for this frame received in the
 *				    REO error ring.
 * @soc: Datapath SOC handle
 * @error: REO error detected or not
 * @error_code: Error code in case of REO error
 *
 * Return: true if the pn check is needed in software,
 *	   false if the pn check is not needed.
 */
static inline bool
dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
			     uint32_t error_code)
{
	return (soc->features.pn_in_reo_dest &&
		(error == HAL_REO_ERROR_DETECTED &&
		 (hal_rx_reo_is_2k_jump(error_code) ||
		  hal_rx_reo_is_oor_error(error_code) ||
		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
}
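/*
 * Note: pn_in_reo_dest indicates that the target supplies the previous
 * PN in the REO destination ring descriptor (see
 * hal_rx_reo_prev_pn_get()); when set, the 2K-jump/OOR error paths
 * re-validate the PN in software via dp_rx_err_nbuf_pn_check() before
 * considering the frame for delivery.
 */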
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @txrx_peer: txrx peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID has set up the TID queue. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status
 *	   code if nbuf could not be handled or was dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_txrx_peer *txrx_peer)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;
	struct hal_rx_msdu_metadata msdu_metadata;
	uint16_t sa_idx = 0;
	bool is_eapol = 0;
	bool enh_flag;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!txrx_peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!txrx_peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (!pdev) {
			dp_err_rl("pdev is null for pool_id = %d", pool_id);
			return QDF_STATUS_E_FAILURE;
		}

		dp_err_rl("txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		/* QCN9000 has the support enabled */
		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
			mpdu_done = true;
			nbuf->next = NULL;
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
							   nbuf, mpdu_done,
							   pool_id);
		} else {
			mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr,
						      pool_id);
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
					pdev->invalid_peer_head_msdu,
					mpdu_done, pool_id);
		}

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}

		return QDF_STATUS_E_FAILURE;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  soc->rx_pkt_tlv_size));

	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1);
		goto drop_nbuf;
	}

	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
			goto drop_nbuf;
		}
	}

	if ((!soc->mec_fw_offload) &&
	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches with any of the
	 * vdev mac addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely(txrx_peer->nawds_enabled &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
		goto drop_nbuf;
	}
	/* WDS Source Port Learning */
	if (!soc->ast_offload_support &&
	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
					msdu_metadata);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		struct dp_peer *peer;
		struct dp_rx_tid *rx_tid;

		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
					     DP_MOD_ID_RX_ERR);
		if (peer) {
			rx_tid = &peer->rx_tid[tid];
			qdf_spin_lock_bh(&rx_tid->tid_lock);
			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
				dp_rx_tid_setup_wifi3(peer, tid, 1,
						      IEEE80211_SEQ_MAX);
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		}
	}

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	if (!txrx_peer->authorize) {
		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
			   qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

		if (is_eapol) {
			if (!dp_rx_err_match_dhost(eh, vdev))
				goto drop_nbuf;
		} else {
			goto drop_nbuf;
		}
	}

	/*
	 * Drop packets in this path if cce_match is found. Packets will
	 * come in the following path depending on whether tidQ is setup.
	 * 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
	 *    cce_match = 1
	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
	 *    dropped.
	 * 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
	 *    cce_match = 1
	 *    These packets need to be dropped and should not get delivered
	 *    to stack.
	 */
	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr))) {
		goto drop_nbuf;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
	} else {
		enh_flag = vdev->pdev->enhanced_stats_en;
		qdf_nbuf_set_next(nbuf, NULL);
		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					  enh_flag);
		/*
		 * Update the protocol tag in SKB based on
		 * CCE metadata
		 */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID,
					  true, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf,
				      rx_tlv_hdr, true);

		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					 soc->hal_soc, rx_tlv_hdr) &&
				 (vdev->rx_decap_type ==
				  htt_cmn_pkt_type_ethernet))) {
			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					    enh_flag);

			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
						    qdf_nbuf_len(nbuf),
						    enh_flag);
		}

		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    is_eapol);
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	dp_rx_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}
1517 */ 1518 if (qdf_unlikely(!rx_desc->in_use) || 1519 qdf_unlikely(!nbuf)) { 1520 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); 1521 dp_info_rl("Reaping rx_desc not in use!"); 1522 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 1523 ring_desc, rx_desc); 1524 /* ignore duplicate RX desc and continue to process */ 1525 /* Pop out the descriptor */ 1526 continue; 1527 } 1528 1529 ret = dp_rx_desc_paddr_sanity_check(rx_desc, 1530 msdu_list.paddr[i]); 1531 if (!ret) { 1532 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 1533 rx_desc->in_err_state = 1; 1534 continue; 1535 } 1536 1537 rx_desc_pool_id = rx_desc->pool_id; 1538 /* all buffers from a MSDU link belong to same pdev */ 1539 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id); 1540 1541 rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id]; 1542 dp_ipa_rx_buf_smmu_mapping_lock(soc); 1543 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf); 1544 rx_desc->unmapped = 1; 1545 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 1546 1547 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len; 1548 rx_bufs_used++; 1549 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 1550 &pdev->free_list_tail, rx_desc); 1551 1552 DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf); 1553 1554 if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags & 1555 HAL_MSDU_F_MSDU_CONTINUATION)) 1556 continue; 1557 1558 if (dp_rx_buffer_pool_refill(soc, head_nbuf, 1559 rx_desc_pool_id)) { 1560 /* MSDU queued back to the pool */ 1561 goto process_next_msdu; 1562 } 1563 1564 hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, 1565 qdf_nbuf_data(head_nbuf), 1566 mpdu_desc_info); 1567 if (qdf_unlikely(mpdu_desc_info->mpdu_flags & 1568 HAL_MPDU_F_RAW_AMPDU)) { 1569 dp_err_rl("RAW ampdu in REO error not expected"); 1570 DP_STATS_INC(soc, rx.err.reo_err_raw_mpdu_drop, 1); 1571 qdf_nbuf_list_free(head_nbuf); 1572 goto process_next_msdu; 1573 } 1574 1575 rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf); 1576 rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf); 1577 1578 if (qdf_unlikely(head_nbuf != tail_nbuf)) { 1579 nbuf = dp_rx_sg_create(soc, head_nbuf); 1580 qdf_nbuf_set_is_frag(nbuf, 1); 1581 DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1); 1582 } 1583 1584 if (is_pn_check_needed) { 1585 status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf); 1586 if (QDF_IS_STATUS_ERROR(status)) { 1587 DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail, 1588 1); 1589 dp_rx_nbuf_free(nbuf); 1590 goto process_next_msdu; 1591 } 1592 1593 hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, 1594 qdf_nbuf_data(nbuf), 1595 mpdu_desc_info); 1596 peer_id = dp_rx_peer_metadata_peer_id_get(soc, 1597 mpdu_desc_info->peer_meta_data); 1598 1599 if (mpdu_desc_info->bar_frame) 1600 _dp_rx_bar_frame_handle(soc, nbuf, 1601 mpdu_desc_info, tid, 1602 HAL_REO_ERROR_DETECTED, 1603 err_code); 1604 } 1605 1606 switch (err_code) { 1607 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 1608 case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET: 1609 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 1610 /* 1611 * only first msdu, mpdu start description tlv valid? 1612 * and use it for following msdu. 
1613 */ 1614 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1615 rx_tlv_hdr_last)) 1616 tid = hal_rx_mpdu_start_tid_get( 1617 soc->hal_soc, 1618 rx_tlv_hdr_first); 1619 1620 dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last, 1621 peer_id, tid); 1622 break; 1623 case HAL_REO_ERR_REGULAR_FRAME_OOR: 1624 case HAL_REO_ERR_BAR_FRAME_OOR: 1625 dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last); 1626 break; 1627 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 1628 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id( 1629 soc, peer_id, 1630 &txrx_ref_handle, 1631 DP_MOD_ID_RX_ERR); 1632 if (!txrx_peer) 1633 dp_info_rl("txrx_peer is null peer_id %u", 1634 peer_id); 1635 dp_rx_null_q_desc_handle(soc, nbuf, rx_tlv_hdr_last, 1636 rx_desc_pool_id, txrx_peer); 1637 if (txrx_peer) 1638 dp_txrx_peer_unref_delete(txrx_ref_handle, 1639 DP_MOD_ID_RX_ERR); 1640 break; 1641 default: 1642 dp_err_rl("Non-support error code %d", err_code); 1643 dp_rx_nbuf_free(nbuf); 1644 } 1645 1646 process_next_msdu: 1647 msdu_processed++; 1648 head_nbuf = NULL; 1649 tail_nbuf = NULL; 1650 } 1651 1652 /* 1653 * If the msdu's are spread across multiple link-descriptors, 1654 * we cannot depend solely on the msdu_count(e.g., if msdu is 1655 * spread across multiple buffers).Hence, it is 1656 * necessary to check the next link_descriptor and release 1657 * all the msdu's that are part of it. 1658 */ 1659 hal_rx_get_next_msdu_link_desc_buf_addr_info( 1660 link_desc_va, 1661 &next_link_desc_addr_info); 1662 1663 if (hal_rx_is_buf_addr_info_valid( 1664 &next_link_desc_addr_info)) { 1665 /* Clear the next link desc info for the current link_desc */ 1666 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va); 1667 dp_rx_link_desc_return_by_addr( 1668 soc, 1669 buf_addr_info, 1670 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 1671 1672 hal_rx_buffer_addr_info_get_paddr( 1673 &next_link_desc_addr_info, 1674 &buf_info); 1675 /* buffer_addr_info is the first element of ring_desc */ 1676 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 1677 (uint32_t *)&next_link_desc_addr_info, 1678 &buf_info); 1679 link_desc_va = 1680 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 1681 cur_link_desc_addr_info = next_link_desc_addr_info; 1682 buf_addr_info = &cur_link_desc_addr_info; 1683 1684 goto more_msdu_link_desc; 1685 } 1686 1687 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 1688 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 1689 if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count)) 1690 DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1); 1691 1692 return rx_bufs_used; 1693 } 1694 1695 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1696 1697 /** 1698 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err 1699 * frames to OS or wifi parse errors. 
1700 * @soc: core DP main context 1701 * @nbuf: buffer pointer 1702 * @rx_tlv_hdr: start of rx tlv header 1703 * @txrx_peer: peer reference 1704 * @err_code: rxdma err code 1705 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1706 * pool_id has same mapping) 1707 * 1708 * Return: None 1709 */ 1710 void 1711 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, 1712 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 1713 uint8_t err_code, uint8_t mac_id) 1714 { 1715 uint32_t pkt_len, l2_hdr_offset; 1716 uint16_t msdu_len; 1717 struct dp_vdev *vdev; 1718 qdf_ether_header_t *eh; 1719 bool is_broadcast; 1720 1721 /* 1722 * Check if DMA completed -- msdu_done is the last bit 1723 * to be written 1724 */ 1725 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1726 1727 dp_err_rl("MSDU DONE failure"); 1728 1729 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1730 QDF_TRACE_LEVEL_INFO); 1731 qdf_assert(0); 1732 } 1733 1734 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, 1735 rx_tlv_hdr); 1736 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1737 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 1738 1739 if (dp_rx_check_pkt_len(soc, pkt_len)) { 1740 /* Drop & free packet */ 1741 dp_rx_nbuf_free(nbuf); 1742 return; 1743 } 1744 /* Set length in nbuf */ 1745 qdf_nbuf_set_pktlen(nbuf, pkt_len); 1746 1747 qdf_nbuf_set_next(nbuf, NULL); 1748 1749 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 1750 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 1751 1752 if (!txrx_peer) { 1753 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL"); 1754 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 1755 qdf_nbuf_len(nbuf)); 1756 /* Trigger invalid peer handler wrapper */ 1757 dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id); 1758 return; 1759 } 1760 1761 vdev = txrx_peer->vdev; 1762 if (!vdev) { 1763 dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc, 1764 vdev); 1765 /* Drop & free packet */ 1766 dp_rx_nbuf_free(nbuf); 1767 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1768 return; 1769 } 1770 1771 /* 1772 * Advance the packet start pointer by total size of 1773 * pre-header TLV's 1774 */ 1775 dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset); 1776 1777 if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) { 1778 uint8_t *pkt_type; 1779 1780 pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE); 1781 if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) { 1782 if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) == 1783 htons(QDF_LLC_STP)) { 1784 DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1); 1785 goto process_mesh; 1786 } else { 1787 goto process_rx; 1788 } 1789 } 1790 } 1791 if (vdev->rx_decap_type == htt_cmn_pkt_type_raw) 1792 goto process_mesh; 1793 1794 /* 1795 * WAPI cert AP sends rekey frames as unencrypted. 1796 * Thus RXDMA will report unencrypted frame error. 1797 * To pass WAPI cert case, SW needs to pass unencrypted 1798 * rekey frame to stack. 1799 */ 1800 if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { 1801 goto process_rx; 1802 } 1803 /* 1804 * In dynamic WEP case rekey frames are not encrypted 1805 * similar to WAPI. 
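/*
 * Summary of the ladder above: STP-over-VLAN and raw-decap frames take
 * the mesh path, unencrypted WAPI rekey and dynamic-WEP EAPOL frames
 * are passed up despite the unencrypted-frame error, and everything
 * else is either delivered as an exception frame or freed.
 */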
Allow EAPOL when 802.1x + WEP is enabled and 1806 * key install is already done 1807 */ 1808 if ((vdev->sec_type == cdp_sec_type_wep104) && 1809 (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))) 1810 goto process_rx; 1811 1812 process_mesh: 1813 1814 if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) { 1815 dp_rx_nbuf_free(nbuf); 1816 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1817 return; 1818 } 1819 1820 if (vdev->mesh_vdev) { 1821 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 1822 == QDF_STATUS_SUCCESS) { 1823 dp_rx_err_info("%pK: mesh pkt filtered", soc); 1824 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); 1825 1826 dp_rx_nbuf_free(nbuf); 1827 return; 1828 } 1829 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer); 1830 } 1831 process_rx: 1832 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1833 rx_tlv_hdr) && 1834 (vdev->rx_decap_type == 1835 htt_cmn_pkt_type_ethernet))) { 1836 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1837 is_broadcast = (QDF_IS_ADDR_BROADCAST 1838 (eh->ether_dhost)) ? 1 : 0; 1839 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1, 1840 qdf_nbuf_len(nbuf)); 1841 if (is_broadcast) { 1842 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1, 1843 qdf_nbuf_len(nbuf)); 1844 } 1845 } 1846 1847 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 1848 dp_rx_deliver_raw(vdev, nbuf, txrx_peer); 1849 } else { 1850 /* Update the protocol tag in SKB based on CCE metadata */ 1851 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1852 EXCEPTION_DEST_RING_ID, true, true); 1853 /* Update the flow tag in SKB based on FSE metadata */ 1854 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); 1855 DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1); 1856 qdf_nbuf_set_exc_frame(nbuf, 1); 1857 dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL); 1858 } 1859 1860 return; 1861 } 1862 1863 /** 1864 * dp_rx_process_mic_error(): Function to pass mic error indication to umac 1865 * @soc: core DP main context 1866 * @nbuf: buffer pointer 1867 * @rx_tlv_hdr: start of rx tlv header 1868 * @txrx_peer: txrx peer handle 1869 * 1870 * Return: void 1871 */ 1872 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, 1873 uint8_t *rx_tlv_hdr, 1874 struct dp_txrx_peer *txrx_peer) 1875 { 1876 struct dp_vdev *vdev = NULL; 1877 struct dp_pdev *pdev = NULL; 1878 struct ol_if_ops *tops = NULL; 1879 uint16_t rx_seq, fragno; 1880 uint8_t is_raw; 1881 unsigned int tid; 1882 QDF_STATUS status; 1883 struct cdp_rx_mic_err_info mic_failure_info; 1884 1885 if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1886 rx_tlv_hdr)) 1887 return; 1888 1889 if (!txrx_peer) { 1890 dp_info_rl("txrx_peer not found"); 1891 goto fail; 1892 } 1893 1894 vdev = txrx_peer->vdev; 1895 if (!vdev) { 1896 dp_info_rl("VDEV not found"); 1897 goto fail; 1898 } 1899 1900 pdev = vdev->pdev; 1901 if (!pdev) { 1902 dp_info_rl("PDEV not found"); 1903 goto fail; 1904 } 1905 1906 is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf)); 1907 if (is_raw) { 1908 fragno = dp_rx_frag_get_mpdu_frag_number(soc, 1909 qdf_nbuf_data(nbuf)); 1910 /* Can get only last fragment */ 1911 if (fragno) { 1912 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 1913 qdf_nbuf_data(nbuf)); 1914 rx_seq = hal_rx_get_rx_sequence(soc->hal_soc, 1915 qdf_nbuf_data(nbuf)); 1916 1917 status = dp_rx_defrag_add_last_frag(soc, txrx_peer, 1918 tid, rx_seq, nbuf); 1919 dp_info_rl("Frag pkt seq# %d frag# %d consumed " 1920 "status %d !", rx_seq, fragno, status); 1921 return; 1922 } 1923 } 1924 1925 if
(hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf), 1926 &mic_failure_info.da_mac_addr.bytes[0])) { 1927 dp_err_rl("Failed to get da_mac_addr"); 1928 goto fail; 1929 } 1930 1931 if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf), 1932 &mic_failure_info.ta_mac_addr.bytes[0])) { 1933 dp_err_rl("Failed to get ta_mac_addr"); 1934 goto fail; 1935 } 1936 1937 mic_failure_info.key_id = 0; 1938 mic_failure_info.multicast = 1939 IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes); 1940 qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE); 1941 mic_failure_info.frame_type = cdp_rx_frame_type_802_11; 1942 mic_failure_info.data = NULL; 1943 mic_failure_info.vdev_id = vdev->vdev_id; 1944 1945 tops = pdev->soc->cdp_soc.ol_ops; 1946 if (tops->rx_mic_error) 1947 tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id, 1948 &mic_failure_info); 1949 1950 fail: 1951 dp_rx_nbuf_free(nbuf); 1952 return; 1953 } 1954 1955 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \ 1956 defined(WLAN_MCAST_MLO) 1957 static bool dp_rx_igmp_handler(struct dp_soc *soc, 1958 struct dp_vdev *vdev, 1959 struct dp_txrx_peer *peer, 1960 qdf_nbuf_t nbuf) 1961 { 1962 if (soc->arch_ops.dp_rx_mcast_handler) { 1963 if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer, nbuf)) 1964 return true; 1965 } 1966 return false; 1967 } 1968 #else 1969 static bool dp_rx_igmp_handler(struct dp_soc *soc, 1970 struct dp_vdev *vdev, 1971 struct dp_txrx_peer *peer, 1972 qdf_nbuf_t nbuf) 1973 { 1974 return false; 1975 } 1976 #endif 1977 1978 /** 1979 * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack. 1980 * Free any other packet which comes in 1981 * this path. 1982 * 1983 * @soc: core DP main context 1984 * @nbuf: buffer pointer 1985 * @txrx_peer: txrx peer handle 1986 * @rx_tlv_hdr: start of rx tlv header 1987 * @err_src: rxdma/reo 1988 * 1989 * This function indicates an EAPOL frame received in the WBM error ring 1990 * to the stack. Any other frame should be dropped.
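 * On a destination-address match, the frame is flagged as an exception
 * frame and delivered via dp_rx_deliver_to_osif_stack(); any other frame
 * that reaches this path is freed and accounted under the per-source
 * reo2rel_route_drop / rxdma2rel_route_drop counters.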
1991 * 1992 * Return: None 1993 */ 1994 static void 1995 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf, 1996 struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr, 1997 enum hal_rx_wbm_error_source err_src) 1998 { 1999 uint32_t pkt_len; 2000 uint16_t msdu_len; 2001 struct dp_vdev *vdev; 2002 struct hal_rx_msdu_metadata msdu_metadata; 2003 bool is_eapol; 2004 2005 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata); 2006 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 2007 pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size; 2008 2009 if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) { 2010 if (dp_rx_check_pkt_len(soc, pkt_len)) 2011 goto drop_nbuf; 2012 2013 /* Set length in nbuf */ 2014 qdf_nbuf_set_pktlen( 2015 nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE)); 2016 qdf_assert_always(nbuf->data == rx_tlv_hdr); 2017 } 2018 2019 /* 2020 * Check if DMA completed -- msdu_done is the last bit 2021 * to be written 2022 */ 2023 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 2024 dp_err_rl("MSDU DONE failure"); 2025 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 2026 QDF_TRACE_LEVEL_INFO); 2027 qdf_assert(0); 2028 } 2029 2030 if (!txrx_peer) 2031 goto drop_nbuf; 2032 2033 vdev = txrx_peer->vdev; 2034 if (!vdev) { 2035 dp_err_rl("Null vdev!"); 2036 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 2037 goto drop_nbuf; 2038 } 2039 2040 /* 2041 * Advance the packet start pointer by total size of 2042 * pre-header TLVs 2043 */ 2044 if (qdf_nbuf_is_frag(nbuf)) 2045 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 2046 else 2047 qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad + 2048 soc->rx_pkt_tlv_size)); 2049 2050 if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf)) 2051 return; 2052 2053 dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1); 2054 2055 /* 2056 * Indicate EAPOL frame to stack only when the VAP MAC address 2057 * matches the destination address. 2058 */ 2059 is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf); 2060 if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { 2061 qdf_ether_header_t *eh = 2062 (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 2063 if (dp_rx_err_match_dhost(eh, vdev)) { 2064 DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1, 2065 qdf_nbuf_len(nbuf)); 2066 2067 /* 2068 * Update the protocol tag in SKB based on 2069 * CCE metadata.
2070 */ 2071 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 2072 EXCEPTION_DEST_RING_ID, 2073 true, true); 2074 /* Update the flow tag in SKB based on FSE metadata */ 2075 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, 2076 true); 2077 DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, 2078 qdf_nbuf_len(nbuf), 2079 vdev->pdev->enhanced_stats_en); 2080 qdf_nbuf_set_exc_frame(nbuf, 1); 2081 qdf_nbuf_set_next(nbuf, NULL); 2082 2083 dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, 2084 NULL, is_eapol); 2085 2086 return; 2087 } 2088 } 2089 2090 drop_nbuf: 2091 2092 DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1, 2093 err_src == HAL_RX_WBM_ERR_SRC_REO); 2094 DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1, 2095 err_src == HAL_RX_WBM_ERR_SRC_RXDMA); 2096 2097 dp_rx_nbuf_free(nbuf); 2098 } 2099 2100 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2101 2102 #ifdef DP_RX_DESC_COOKIE_INVALIDATE 2103 /** 2104 * dp_rx_link_cookie_check() - Validate link desc cookie 2105 * @ring_desc: ring descriptor 2106 * 2107 * Return: qdf status 2108 */ 2109 static inline QDF_STATUS 2110 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) 2111 { 2112 if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc))) 2113 return QDF_STATUS_E_FAILURE; 2114 2115 return QDF_STATUS_SUCCESS; 2116 } 2117 2118 /** 2119 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie 2120 * @ring_desc: ring descriptor 2121 * 2122 * Return: None 2123 */ 2124 static inline void 2125 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) 2126 { 2127 HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc); 2128 } 2129 #else 2130 static inline QDF_STATUS 2131 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) 2132 { 2133 return QDF_STATUS_SUCCESS; 2134 } 2135 2136 static inline void 2137 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) 2138 { 2139 } 2140 #endif 2141 2142 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 2143 /** 2144 * dp_rx_err_ring_record_entry() - Record rx err ring history 2145 * @soc: Datapath soc structure 2146 * @paddr: paddr of the buffer in RX err ring 2147 * @sw_cookie: SW cookie of the buffer in RX err ring 2148 * @rbm: Return buffer manager of the buffer in RX err ring 2149 * 2150 * Return: None 2151 */ 2152 static inline void 2153 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, 2154 uint32_t sw_cookie, uint8_t rbm) 2155 { 2156 struct dp_buf_info_record *record; 2157 uint32_t idx; 2158 2159 if (qdf_unlikely(!soc->rx_err_ring_history)) 2160 return; 2161 2162 idx = dp_history_get_next_index(&soc->rx_err_ring_history->index, 2163 DP_RX_ERR_HIST_MAX); 2164 2165 /* No NULL check needed for record since it's an array */ 2166 record = &soc->rx_err_ring_history->entry[idx]; 2167 2168 record->timestamp = qdf_get_log_timestamp(); 2169 record->hbi.paddr = paddr; 2170 record->hbi.sw_cookie = sw_cookie; 2171 record->hbi.rbm = rbm; 2172 } 2173 #else 2174 static inline void 2175 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, 2176 uint32_t sw_cookie, uint8_t rbm) 2177 { 2178 } 2179 #endif 2180 2181 #ifdef HANDLE_RX_REROUTE_ERR 2182 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc, 2183 hal_ring_desc_t ring_desc) 2184 { 2185 int lmac_id = DP_INVALID_LMAC_ID; 2186 struct dp_rx_desc *rx_desc; 2187 struct hal_buf_info hbi; 2188 struct dp_pdev *pdev; 2189 struct rx_desc_pool *rx_desc_pool; 2190 2191 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2192 2193 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie); 2194 2195 /* sanity */ 2196 if (!rx_desc) { 2197 DP_STATS_INC(soc,
rx.err.reo_err_msdu_buf_invalid_cookie, 1); 2198 goto assert_return; 2199 } 2200 2201 if (!rx_desc->nbuf) 2202 goto assert_return; 2203 2204 dp_rx_err_ring_record_entry(soc, hbi.paddr, 2205 hbi.sw_cookie, 2206 hal_rx_ret_buf_manager_get(soc->hal_soc, 2207 ring_desc)); 2208 if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) { 2209 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 2210 rx_desc->in_err_state = 1; 2211 goto assert_return; 2212 } 2213 2214 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 2215 /* After this point the rx_desc and nbuf are valid */ 2216 dp_ipa_rx_buf_smmu_mapping_lock(soc); 2217 qdf_assert_always(!rx_desc->unmapped); 2218 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf); 2219 rx_desc->unmapped = 1; 2220 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 2221 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 2222 rx_desc->pool_id); 2223 2224 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 2225 lmac_id = rx_desc->pool_id; 2226 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 2227 &pdev->free_list_tail, 2228 rx_desc); 2229 return lmac_id; 2230 2231 assert_return: 2232 qdf_assert(0); 2233 return lmac_id; 2234 } 2235 2236 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 2237 { 2238 int ret; 2239 uint64_t cur_time_stamp; 2240 2241 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1); 2242 2243 /* Recover if overall error count exceeds threshold */ 2244 if (soc->stats.rx.err.reo_err_msdu_buf_rcved > 2245 DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) { 2246 dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu", 2247 soc->stats.rx.err.reo_err_msdu_buf_rcved, 2248 soc->rx_route_err_start_pkt_ts); 2249 qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR); 2250 } 2251 2252 cur_time_stamp = qdf_get_log_timestamp_usecs(); 2253 if (!soc->rx_route_err_start_pkt_ts) 2254 soc->rx_route_err_start_pkt_ts = cur_time_stamp; 2255 2256 /* Recover if the error count accumulated within the timeout window exceeds the threshold */ 2257 if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) > 2258 DP_RX_ERR_ROUTE_TIMEOUT_US) { 2259 soc->rx_route_err_start_pkt_ts = cur_time_stamp; 2260 2261 if (soc->rx_route_err_in_window > 2262 DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) { 2263 qdf_trigger_self_recovery(NULL, 2264 QDF_RX_REG_PKT_ROUTE_ERR); 2265 dp_err("rate threshold breached!
reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu", 2266 soc->stats.rx.err.reo_err_msdu_buf_rcved, 2267 soc->rx_route_err_start_pkt_ts); 2268 } else { 2269 soc->rx_route_err_in_window = 1; 2270 } 2271 } else { 2272 soc->rx_route_err_in_window++; 2273 } 2274 2275 ret = dp_rx_err_handle_msdu_buf(soc, ring_desc); 2276 2277 return ret; 2278 } 2279 #else /* HANDLE_RX_REROUTE_ERR */ 2280 2281 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 2282 { 2283 qdf_assert_always(0); 2284 2285 return DP_INVALID_LMAC_ID; 2286 } 2287 #endif /* HANDLE_RX_REROUTE_ERR */ 2288 2289 uint32_t 2290 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 2291 hal_ring_handle_t hal_ring_hdl, uint32_t quota) 2292 { 2293 hal_ring_desc_t ring_desc; 2294 hal_soc_handle_t hal_soc; 2295 uint32_t count = 0; 2296 uint32_t rx_bufs_used = 0; 2297 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 2298 uint8_t mac_id = 0; 2299 uint8_t buf_type; 2300 uint8_t err_status; 2301 struct hal_rx_mpdu_desc_info mpdu_desc_info; 2302 struct hal_buf_info hbi; 2303 struct dp_pdev *dp_pdev; 2304 struct dp_srng *dp_rxdma_srng; 2305 struct rx_desc_pool *rx_desc_pool; 2306 void *link_desc_va; 2307 struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */ 2308 uint16_t num_msdus; 2309 struct dp_rx_desc *rx_desc = NULL; 2310 QDF_STATUS status; 2311 bool ret; 2312 uint32_t error_code = 0; 2313 bool sw_pn_check_needed; 2314 int max_reap_limit = dp_rx_get_loop_pkt_limit(soc); 2315 int i, rx_bufs_reaped_total; 2316 2317 /* Debug -- Remove later */ 2318 qdf_assert(soc && hal_ring_hdl); 2319 2320 hal_soc = soc->hal_soc; 2321 2322 /* Debug -- Remove later */ 2323 qdf_assert(hal_soc); 2324 2325 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 2326 2327 /* TODO */ 2328 /* 2329 * Need API to convert from hal_ring pointer to 2330 * Ring Type / Ring Id combo 2331 */ 2332 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 2333 dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc, 2334 hal_ring_hdl); 2335 goto done; 2336 } 2337 2338 while (qdf_likely(quota-- && (ring_desc = 2339 hal_srng_dst_peek(hal_soc, 2340 hal_ring_hdl)))) { 2341 2342 DP_STATS_INC(soc, rx.err_ring_pkts, 1); 2343 err_status = hal_rx_err_status_get(hal_soc, ring_desc); 2344 buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc); 2345 2346 if (err_status == HAL_REO_ERROR_DETECTED) 2347 error_code = hal_rx_get_reo_error_code(hal_soc, 2348 ring_desc); 2349 2350 qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0); 2351 sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc, 2352 err_status, 2353 error_code); 2354 if (!sw_pn_check_needed) { 2355 /* 2356 * MPDU desc info will be present in the REO desc 2357 * only in the below scenarios 2358 * 1) pn_in_dest_disabled: always 2359 * 2) pn_in_dest enabled: All cases except 2k-jump 2360 * and OOR errors 2361 */ 2362 hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, 2363 &mpdu_desc_info); 2364 } 2365 2366 if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0) 2367 goto next_entry; 2368 2369 /* 2370 * For REO error ring, only MSDU LINK DESC is expected. 2371 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
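 * (dp_rx_err_exception() above handles that case: it frees the rerouted
 * MSDU buffer and, under HANDLE_RX_REROUTE_ERR, rate-limits such events
 * before triggering self-recovery; a plain-C sketch of the windowed
 * rate limit is included at the end of this file.)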
2372 */ 2373 if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) { 2374 int lmac_id; 2375 2376 lmac_id = dp_rx_err_exception(soc, ring_desc); 2377 if (lmac_id >= 0) 2378 rx_bufs_reaped[lmac_id] += 1; 2379 goto next_entry; 2380 } 2381 2382 hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc, 2383 &hbi); 2384 /* 2385 * check for the magic number in the sw cookie 2386 */ 2387 qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) & 2388 soc->link_desc_id_start); 2389 2390 status = dp_rx_link_cookie_check(ring_desc); 2391 if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { 2392 DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1); 2393 break; 2394 } 2395 2396 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2397 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); 2398 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 2399 &num_msdus); 2400 if (!num_msdus || 2401 !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) { 2402 dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x", 2403 num_msdus, msdu_list.sw_cookie[0]); 2404 dp_rx_link_desc_return(soc, ring_desc, 2405 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 2406 goto next_entry; 2407 } 2408 2409 dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0], 2410 msdu_list.sw_cookie[0], 2411 msdu_list.rbm[0]); 2412 // TODO - BE- Check if the RBM is to be checked for all chips 2413 if (qdf_unlikely((msdu_list.rbm[0] != 2414 dp_rx_get_rx_bm_id(soc)) && 2415 (msdu_list.rbm[0] != 2416 soc->idle_link_bm_id) && 2417 (msdu_list.rbm[0] != 2418 dp_rx_get_defrag_bm_id(soc)))) { 2419 /* TODO */ 2420 /* Call appropriate handler */ 2421 if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) { 2422 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 2423 dp_rx_err_err("%pK: Invalid RBM %d", 2424 soc, msdu_list.rbm[0]); 2425 } 2426 2427 /* Return link descriptor through WBM ring (SW2WBM) */ 2428 dp_rx_link_desc_return(soc, ring_desc, 2429 HAL_BM_ACTION_RELEASE_MSDU_LIST); 2430 goto next_entry; 2431 } 2432 2433 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 2434 soc, 2435 msdu_list.sw_cookie[0]); 2436 qdf_assert_always(rx_desc); 2437 2438 mac_id = rx_desc->pool_id; 2439 2440 if (sw_pn_check_needed) { 2441 goto process_reo_error_code; 2442 } 2443 2444 if (mpdu_desc_info.bar_frame) { 2445 qdf_assert_always(mpdu_desc_info.msdu_count == 1); 2446 2447 dp_rx_bar_frame_handle(soc, ring_desc, rx_desc, 2448 &mpdu_desc_info, err_status, 2449 error_code); 2450 2451 rx_bufs_reaped[mac_id] += 1; 2452 goto next_entry; 2453 } 2454 2455 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { 2456 /* 2457 * We only handle one msdu per link desc for the fragmented 2458 * case. We drop the msdus and release the link desc 2459 * back if there is more than one msdu in the link desc. 2460 */ 2461 if (qdf_unlikely(num_msdus > 1)) { 2462 count = dp_rx_msdus_drop(soc, ring_desc, 2463 &mpdu_desc_info, 2464 &mac_id, quota); 2465 rx_bufs_reaped[mac_id] += count; 2466 goto next_entry; 2467 } 2468 2469 /* 2470 * this is an unlikely scenario where the host is reaping 2471 * a descriptor which it already reaped just a while ago 2472 * but is yet to replenish it back to HW. 2473 * In this case host will dump the last 128 descriptors 2474 * including the software descriptor rx_desc and assert.
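 * The duplicate entry is accounted under rx.err.hal_reo_dest_dup and
 * then skipped rather than being processed a second time.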
2475 */ 2476 2477 if (qdf_unlikely(!rx_desc->in_use)) { 2478 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); 2479 dp_info_rl("Reaping rx_desc not in use!"); 2480 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 2481 ring_desc, rx_desc); 2482 /* ignore duplicate RX desc and continue */ 2483 /* Pop out the descriptor */ 2484 goto next_entry; 2485 } 2486 2487 ret = dp_rx_desc_paddr_sanity_check(rx_desc, 2488 msdu_list.paddr[0]); 2489 if (!ret) { 2490 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 2491 rx_desc->in_err_state = 1; 2492 goto next_entry; 2493 } 2494 2495 count = dp_rx_frag_handle(soc, 2496 ring_desc, &mpdu_desc_info, 2497 rx_desc, &mac_id, quota); 2498 2499 rx_bufs_reaped[mac_id] += count; 2500 DP_STATS_INC(soc, rx.rx_frags, 1); 2501 goto next_entry; 2502 } 2503 2504 process_reo_error_code: 2505 /* 2506 * Expect REO errors to be handled after this point 2507 */ 2508 qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED); 2509 2510 dp_info_rl("Got pkt with REO ERROR: %d", error_code); 2511 2512 switch (error_code) { 2513 case HAL_REO_ERR_PN_CHECK_FAILED: 2514 case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET: 2515 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2516 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2517 if (dp_pdev) 2518 DP_STATS_INC(dp_pdev, err.reo_error, 1); 2519 count = dp_rx_pn_error_handle(soc, 2520 ring_desc, 2521 &mpdu_desc_info, &mac_id, 2522 quota); 2523 2524 rx_bufs_reaped[mac_id] += count; 2525 break; 2526 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 2527 case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET: 2528 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 2529 case HAL_REO_ERR_REGULAR_FRAME_OOR: 2530 case HAL_REO_ERR_BAR_FRAME_OOR: 2531 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 2532 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2533 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2534 if (dp_pdev) 2535 DP_STATS_INC(dp_pdev, err.reo_error, 1); 2536 count = dp_rx_reo_err_entry_process( 2537 soc, 2538 ring_desc, 2539 &mpdu_desc_info, 2540 link_desc_va, 2541 error_code); 2542 2543 rx_bufs_reaped[mac_id] += count; 2544 break; 2545 case HAL_REO_ERR_QUEUE_DESC_INVALID: 2546 case HAL_REO_ERR_AMPDU_IN_NON_BA: 2547 case HAL_REO_ERR_NON_BA_DUPLICATE: 2548 case HAL_REO_ERR_BA_DUPLICATE: 2549 case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION: 2550 case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN: 2551 case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET: 2552 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2553 count = dp_rx_msdus_drop(soc, ring_desc, 2554 &mpdu_desc_info, 2555 &mac_id, quota); 2556 rx_bufs_reaped[mac_id] += count; 2557 break; 2558 default: 2559 /* Assert if unexpected error type */ 2560 qdf_assert_always(0); 2561 } 2562 next_entry: 2563 dp_rx_link_cookie_invalidate(ring_desc); 2564 hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2565 2566 rx_bufs_reaped_total = 0; 2567 for (i = 0; i < MAX_PDEV_CNT; i++) 2568 rx_bufs_reaped_total += rx_bufs_reaped[i]; 2569 2570 if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total, 2571 max_reap_limit)) 2572 break; 2573 } 2574 2575 done: 2576 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 2577 2578 if (soc->rx.flags.defrag_timeout_check) { 2579 uint32_t now_ms = 2580 qdf_system_ticks_to_msecs(qdf_system_ticks()); 2581 2582 if (now_ms >= soc->rx.defrag.next_flush_ms) 2583 dp_rx_defrag_waitlist_flush(soc); 2584 } 2585 2586 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 2587 if (rx_bufs_reaped[mac_id]) { 2588 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2589 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 2590 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 2591 
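/* Replenish the reaped buffers back to the RXDMA refill ring; the corresponding rx descriptors were queued on the pdev free list by the error handlers above and are reused for the fresh buffers. */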
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 2593 rx_desc_pool, 2594 rx_bufs_reaped[mac_id], 2595 &dp_pdev->free_list_head, 2596 &dp_pdev->free_list_tail, 2597 false); 2598 rx_bufs_used += rx_bufs_reaped[mac_id]; 2599 } 2600 } 2601 2602 return rx_bufs_used; /* Assume no scale factor for now */ 2603 } 2604 2605 #ifdef DROP_RXDMA_DECRYPT_ERR 2606 /** 2607 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled 2608 * 2609 * Return: true if rxdma decrypt err frames are handled and false otherwise 2610 */ 2611 static inline bool dp_handle_rxdma_decrypt_err(void) 2612 { 2613 return false; 2614 } 2615 #else 2616 static inline bool dp_handle_rxdma_decrypt_err(void) 2617 { 2618 return true; 2619 } 2620 #endif 2621 2622 /* 2623 * dp_rx_wbm_sg_list_last_msdu_war() - WAR for a HW issue 2624 * 2625 * This is a WAR for a HW issue where the length is only valid in the last msdu 2626 * @soc: DP SOC handle 2627 */ 2628 static inline void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc) 2629 { 2630 if (soc->wbm_sg_last_msdu_war) { 2631 uint32_t len; 2632 qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail; 2633 2634 len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, 2635 qdf_nbuf_data(temp)); 2636 temp = soc->wbm_sg_param.wbm_sg_nbuf_head; 2637 while (temp) { 2638 QDF_NBUF_CB_RX_PKT_LEN(temp) = len; 2639 temp = temp->next; 2640 } 2641 } 2642 } 2643 2644 #ifdef RX_DESC_DEBUG_CHECK 2645 /** 2646 * dp_rx_wbm_desc_nbuf_sanity_check - Add sanity check for WBM rx_desc paddr 2647 * corruption 2648 * @soc: core txrx main context 2649 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring 2650 * @ring_desc: REO ring descriptor 2651 * @rx_desc: Rx descriptor 2652 * 2653 * Return: QDF_STATUS 2654 */ 2655 static 2656 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc, 2657 hal_ring_handle_t hal_ring_hdl, 2658 hal_ring_desc_t ring_desc, 2659 struct dp_rx_desc *rx_desc) 2660 { 2661 struct hal_buf_info hbi; 2662 2663 hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2664 /* Sanity check for possible buffer paddr corruption */ 2665 if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr)) 2666 return QDF_STATUS_SUCCESS; 2667 2668 hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc); 2669 2670 return QDF_STATUS_E_FAILURE; 2671 } 2672 2673 #else 2674 static 2675 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc, 2676 hal_ring_handle_t hal_ring_hdl, 2677 hal_ring_desc_t ring_desc, 2678 struct dp_rx_desc *rx_desc) 2679 { 2680 return QDF_STATUS_SUCCESS; 2681 } 2682 #endif 2683 2684 static inline bool 2685 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info) 2686 { 2687 /* 2688 * Currently the Null Queue and Unencrypted error handlers have support 2689 * for SG. Other error handlers do not deal with SG buffers.
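 * For example, a REO-sourced entry with reo_err_code
 * HAL_REO_ERR_QUEUE_DESC_ADDR_0, or an RXDMA-sourced entry with
 * rxdma_err_code HAL_RXDMA_ERR_UNENCRYPTED, returns true; every other
 * (source, error code) combination returns false.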
2690 */ 2691 if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) && 2692 (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) || 2693 ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) && 2694 (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED))) 2695 return true; 2696 2697 return false; 2698 } 2699 2700 uint32_t 2701 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 2702 hal_ring_handle_t hal_ring_hdl, uint32_t quota) 2703 { 2704 hal_ring_desc_t ring_desc; 2705 hal_soc_handle_t hal_soc; 2706 struct dp_rx_desc *rx_desc; 2707 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; 2708 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; 2709 uint32_t rx_bufs_used = 0; 2710 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 2711 uint8_t buf_type; 2712 uint8_t mac_id; 2713 struct dp_pdev *dp_pdev; 2714 struct dp_srng *dp_rxdma_srng; 2715 struct rx_desc_pool *rx_desc_pool; 2716 uint8_t *rx_tlv_hdr; 2717 bool is_tkip_mic_err; 2718 qdf_nbuf_t nbuf_head = NULL; 2719 qdf_nbuf_t nbuf_tail = NULL; 2720 qdf_nbuf_t nbuf, next; 2721 struct hal_wbm_err_desc_info wbm_err_info = { 0 }; 2722 uint8_t pool_id; 2723 uint8_t tid = 0; 2724 uint8_t msdu_continuation = 0; 2725 bool process_sg_buf = false; 2726 uint32_t wbm_err_src; 2727 QDF_STATUS status; 2728 struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 }; 2729 2730 /* Debug -- Remove later */ 2731 qdf_assert(soc && hal_ring_hdl); 2732 2733 hal_soc = soc->hal_soc; 2734 2735 /* Debug -- Remove later */ 2736 qdf_assert(hal_soc); 2737 2738 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 2739 2740 /* TODO */ 2741 /* 2742 * Need API to convert from hal_ring pointer to 2743 * Ring Type / Ring Id combo 2744 */ 2745 dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", 2746 soc, hal_ring_hdl); 2747 goto done; 2748 } 2749 2750 while (qdf_likely(quota)) { 2751 ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2752 if (qdf_unlikely(!ring_desc)) 2753 break; 2754 2755 /* XXX */ 2756 buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc); 2757 2758 /* 2759 * For WBM ring, expect only MSDU buffers 2760 */ 2761 qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF); 2762 2763 wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc); 2764 qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) || 2765 (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO)); 2766 2767 if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc, 2768 ring_desc, 2769 &rx_desc)) { 2770 dp_rx_err_err("get rx desc from hal_desc failed"); 2771 continue; 2772 } 2773 2774 qdf_assert_always(rx_desc); 2775 2776 if (!dp_rx_desc_check_magic(rx_desc)) { 2777 dp_rx_err_err("%pK: Invalid rx_desc %pK", 2778 soc, rx_desc); 2779 continue; 2780 } 2781 2782 /* 2783 * this is an unlikely scenario where the host is reaping 2784 * a descriptor which it already reaped just a while ago 2785 * but is yet to replenish it back to HW. 2786 * In this case host will dump the last 128 descriptors 2787 * including the software descriptor rx_desc and assert.
2788 */ 2789 if (qdf_unlikely(!rx_desc->in_use)) { 2790 DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1); 2791 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 2792 ring_desc, rx_desc); 2793 continue; 2794 } 2795 2796 hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc); 2797 nbuf = rx_desc->nbuf; 2798 2799 status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl, 2800 ring_desc, rx_desc); 2801 if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { 2802 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 2803 dp_info_rl("Rx error Nbuf %pK sanity check failure!", 2804 nbuf); 2805 rx_desc->in_err_state = 1; 2806 rx_desc->unmapped = 1; 2807 rx_bufs_reaped[rx_desc->pool_id]++; 2808 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 2809 &tail[rx_desc->pool_id], 2810 rx_desc); 2811 2812 continue; 2813 } 2814 2815 /* Get MPDU DESC info */ 2816 hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info); 2817 2818 if (qdf_likely(mpdu_desc_info.mpdu_flags & 2819 HAL_MPDU_F_QOS_CONTROL_VALID)) 2820 qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid); 2821 2822 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 2823 dp_ipa_rx_buf_smmu_mapping_lock(soc); 2824 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf); 2825 rx_desc->unmapped = 1; 2826 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 2827 2828 if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support && 2829 dp_rx_is_sg_formation_required(&wbm_err_info))) { 2830 /* SG is detected from continuation bit */ 2831 msdu_continuation = 2832 hal_rx_wbm_err_msdu_continuation_get(hal_soc, 2833 ring_desc); 2834 if (msdu_continuation && 2835 !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) { 2836 /* Update length from first buffer in SG */ 2837 soc->wbm_sg_param.wbm_sg_desc_msdu_len = 2838 hal_rx_msdu_start_msdu_len_get( 2839 soc->hal_soc, 2840 qdf_nbuf_data(nbuf)); 2841 soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true; 2842 } 2843 2844 if (msdu_continuation) { 2845 /* MSDU continued packets */ 2846 qdf_nbuf_set_rx_chfrag_cont(nbuf, 1); 2847 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2848 soc->wbm_sg_param.wbm_sg_desc_msdu_len; 2849 } else { 2850 /* This is the terminal packet in SG */ 2851 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 2852 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 2853 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2854 soc->wbm_sg_param.wbm_sg_desc_msdu_len; 2855 process_sg_buf = true; 2856 } 2857 } 2858 2859 /* 2860 * save the wbm desc info in nbuf TLV.
We will need this 2861 * info when we do the actual nbuf processing 2862 */ 2863 wbm_err_info.pool_id = rx_desc->pool_id; 2864 hal_rx_priv_info_set_in_tlv(soc->hal_soc, 2865 qdf_nbuf_data(nbuf), 2866 (uint8_t *)&wbm_err_info, 2867 sizeof(wbm_err_info)); 2868 2869 rx_bufs_reaped[rx_desc->pool_id]++; 2870 2871 if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) { 2872 DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head, 2873 soc->wbm_sg_param.wbm_sg_nbuf_tail, 2874 nbuf); 2875 if (process_sg_buf) { 2876 if (!dp_rx_buffer_pool_refill( 2877 soc, 2878 soc->wbm_sg_param.wbm_sg_nbuf_head, 2879 rx_desc->pool_id)) 2880 DP_RX_MERGE_TWO_LIST( 2881 nbuf_head, nbuf_tail, 2882 soc->wbm_sg_param.wbm_sg_nbuf_head, 2883 soc->wbm_sg_param.wbm_sg_nbuf_tail); 2884 dp_rx_wbm_sg_list_last_msdu_war(soc); 2885 dp_rx_wbm_sg_list_reset(soc); 2886 process_sg_buf = false; 2887 } 2888 } else if (!dp_rx_buffer_pool_refill(soc, nbuf, 2889 rx_desc->pool_id)) { 2890 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf); 2891 } 2892 2893 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 2894 &tail[rx_desc->pool_id], 2895 rx_desc); 2896 2897 /* 2898 * if continuation bit is set then we have MSDU spread 2899 * across multiple buffers, let us not decrement quota 2900 * till we reap all buffers of that MSDU. 2901 */ 2902 if (qdf_likely(!msdu_continuation)) 2903 quota -= 1; 2904 } 2905 done: 2906 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 2907 2908 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 2909 if (rx_bufs_reaped[mac_id]) { 2910 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 2911 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 2912 2913 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 2914 rx_desc_pool, rx_bufs_reaped[mac_id], 2915 &head[mac_id], &tail[mac_id], false); 2916 rx_bufs_used += rx_bufs_reaped[mac_id]; 2917 } 2918 } 2919 2920 nbuf = nbuf_head; 2921 while (nbuf) { 2922 struct dp_txrx_peer *txrx_peer; 2923 struct dp_peer *peer; 2924 uint16_t peer_id; 2925 uint8_t err_code; 2926 uint8_t *tlv_hdr; 2927 uint32_t peer_meta_data; 2928 dp_txrx_ref_handle txrx_ref_handle = NULL; 2929 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2930 2931 /* 2932 * retrieve the wbm desc info from nbuf TLV, so we can 2933 * handle error cases appropriately 2934 */ 2935 hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr, 2936 (uint8_t *)&wbm_err_info, 2937 sizeof(wbm_err_info)); 2938 2939 peer_meta_data = hal_rx_tlv_peer_meta_data_get(soc->hal_soc, 2940 rx_tlv_hdr); 2941 peer_id = dp_rx_peer_metadata_peer_id_get(soc, peer_meta_data); 2942 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, 2943 &txrx_ref_handle, 2944 DP_MOD_ID_RX_ERR); 2945 2946 if (!txrx_peer) 2947 dp_info_rl("peer is null peer_id %u err_src %u err_rsn %u", 2948 peer_id, wbm_err_info.wbm_err_src, 2949 wbm_err_info.reo_psh_rsn); 2950 2951 /* Set queue_mapping in nbuf to 0 */ 2952 dp_set_rx_queue(nbuf, 0); 2953 2954 next = nbuf->next; 2955 2956 /* 2957 * Form the SG for msdu continued buffers 2958 * QCN9000 has this support 2959 */ 2960 if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) { 2961 nbuf = dp_rx_sg_create(soc, nbuf); 2962 next = nbuf->next; 2963 /* 2964 * SG error handling is not done correctly, 2965 * drop SG frames for now.
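 * (dp_rx_sg_create() has already chained the continuation buffers into
 * a single nbuf here; the chained frame is freed below and logged as a
 * scattered msdu drop.)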
2966 */ 2967 dp_rx_nbuf_free(nbuf); 2968 dp_info_rl("scattered msdu dropped"); 2969 nbuf = next; 2970 if (txrx_peer) 2971 dp_txrx_peer_unref_delete(txrx_ref_handle, 2972 DP_MOD_ID_RX_ERR); 2973 continue; 2974 } 2975 2976 if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { 2977 if (wbm_err_info.reo_psh_rsn 2978 == HAL_RX_WBM_REO_PSH_RSN_ERROR) { 2979 2980 DP_STATS_INC(soc, 2981 rx.err.reo_error 2982 [wbm_err_info.reo_err_code], 1); 2983 /* increment @pdev level */ 2984 pool_id = wbm_err_info.pool_id; 2985 dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 2986 if (dp_pdev) 2987 DP_STATS_INC(dp_pdev, err.reo_error, 2988 1); 2989 2990 switch (wbm_err_info.reo_err_code) { 2991 /* 2992 * Handling for packets which have NULL REO 2993 * queue descriptor 2994 */ 2995 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 2996 pool_id = wbm_err_info.pool_id; 2997 dp_rx_null_q_desc_handle(soc, nbuf, 2998 rx_tlv_hdr, 2999 pool_id, 3000 txrx_peer); 3001 break; 3002 /* TODO */ 3003 /* Add per error code accounting */ 3004 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 3005 if (txrx_peer) 3006 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 3007 rx.err.jump_2k_err, 3008 1); 3009 3010 pool_id = wbm_err_info.pool_id; 3011 3012 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 3013 rx_tlv_hdr)) { 3014 tid = 3015 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); 3016 } 3017 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 3018 hal_rx_msdu_start_msdu_len_get( 3019 soc->hal_soc, rx_tlv_hdr); 3020 nbuf->next = NULL; 3021 dp_2k_jump_handle(soc, nbuf, 3022 rx_tlv_hdr, 3023 peer_id, tid); 3024 break; 3025 case HAL_REO_ERR_REGULAR_FRAME_OOR: 3026 if (txrx_peer) 3027 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 3028 rx.err.oor_err, 3029 1); 3030 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 3031 rx_tlv_hdr)) { 3032 tid = 3033 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); 3034 } 3035 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 3036 hal_rx_msdu_start_msdu_len_get( 3037 soc->hal_soc, rx_tlv_hdr); 3038 nbuf->next = NULL; 3039 dp_rx_oor_handle(soc, nbuf, 3040 peer_id, 3041 rx_tlv_hdr); 3042 break; 3043 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 3044 case HAL_REO_ERR_BAR_FRAME_OOR: 3045 peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 3046 if (peer) { 3047 dp_rx_err_handle_bar(soc, peer, 3048 nbuf); 3049 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 3050 } 3051 dp_rx_nbuf_free(nbuf); 3052 break; 3053 3054 case HAL_REO_ERR_PN_CHECK_FAILED: 3055 case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET: 3056 if (txrx_peer) 3057 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 3058 rx.err.pn_err, 3059 1); 3060 dp_rx_nbuf_free(nbuf); 3061 break; 3062 3063 default: 3064 dp_info_rl("Got pkt with REO ERROR: %d", 3065 wbm_err_info.reo_err_code); 3066 dp_rx_nbuf_free(nbuf); 3067 } 3068 } else if (wbm_err_info.reo_psh_rsn 3069 == HAL_RX_WBM_REO_PSH_RSN_ROUTE) { 3070 dp_rx_err_route_hdl(soc, nbuf, txrx_peer, 3071 rx_tlv_hdr, 3072 HAL_RX_WBM_ERR_SRC_REO); 3073 } else { 3074 /* should not enter here */ 3075 dp_rx_err_alert("invalid reo push reason %u", 3076 wbm_err_info.reo_psh_rsn); 3077 dp_rx_nbuf_free(nbuf); 3078 qdf_assert_always(0); 3079 } 3080 } else if (wbm_err_info.wbm_err_src == 3081 HAL_RX_WBM_ERR_SRC_RXDMA) { 3082 if (wbm_err_info.rxdma_psh_rsn 3083 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 3084 DP_STATS_INC(soc, 3085 rx.err.rxdma_error 3086 [wbm_err_info.rxdma_err_code], 1); 3087 /* increment @pdev level */ 3088 pool_id = wbm_err_info.pool_id; 3089 dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 3090 if (dp_pdev) 3091 DP_STATS_INC(dp_pdev, 3092 err.rxdma_error, 1); 3093 3094 switch 
(wbm_err_info.rxdma_err_code) { 3095 case HAL_RXDMA_ERR_UNENCRYPTED: 3096 3097 case HAL_RXDMA_ERR_WIFI_PARSE: 3098 if (txrx_peer) 3099 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 3100 rx.err.rxdma_wifi_parse_err, 3101 1); 3102 3103 pool_id = wbm_err_info.pool_id; 3104 dp_rx_process_rxdma_err(soc, nbuf, 3105 rx_tlv_hdr, 3106 txrx_peer, 3107 wbm_err_info. 3108 rxdma_err_code, 3109 pool_id); 3110 break; 3111 3112 case HAL_RXDMA_ERR_TKIP_MIC: 3113 dp_rx_process_mic_error(soc, nbuf, 3114 rx_tlv_hdr, 3115 txrx_peer); 3116 if (txrx_peer) 3117 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 3118 rx.err.mic_err, 3119 1); 3120 break; 3121 3122 case HAL_RXDMA_ERR_DECRYPT: 3123 /* All the TKIP-MIC failures are treated as Decrypt Errors 3124 * for QCN9224 Targets 3125 */ 3126 is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr); 3127 3128 if (is_tkip_mic_err && txrx_peer) { 3129 dp_rx_process_mic_error(soc, nbuf, 3130 rx_tlv_hdr, 3131 txrx_peer); 3132 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 3133 rx.err.mic_err, 3134 1); 3135 break; 3136 } 3137 3138 if (txrx_peer) { 3139 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 3140 rx.err.decrypt_err, 3141 1); 3142 dp_rx_nbuf_free(nbuf); 3143 break; 3144 } 3145 3146 if (!dp_handle_rxdma_decrypt_err()) { 3147 dp_rx_nbuf_free(nbuf); 3148 break; 3149 } 3150 3151 pool_id = wbm_err_info.pool_id; 3152 err_code = wbm_err_info.rxdma_err_code; 3153 tlv_hdr = rx_tlv_hdr; 3154 dp_rx_process_rxdma_err(soc, nbuf, 3155 tlv_hdr, NULL, 3156 err_code, 3157 pool_id); 3158 break; 3159 case HAL_RXDMA_MULTICAST_ECHO: 3160 if (txrx_peer) 3161 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, 3162 rx.mec_drop, 1, 3163 qdf_nbuf_len(nbuf)); 3164 dp_rx_nbuf_free(nbuf); 3165 break; 3166 case HAL_RXDMA_UNAUTHORIZED_WDS: 3167 pool_id = wbm_err_info.pool_id; 3168 err_code = wbm_err_info.rxdma_err_code; 3169 tlv_hdr = rx_tlv_hdr; 3170 dp_rx_process_rxdma_err(soc, nbuf, 3171 tlv_hdr, 3172 txrx_peer, 3173 err_code, 3174 pool_id); 3175 break; 3176 default: 3177 dp_rx_nbuf_free(nbuf); 3178 dp_err_rl("RXDMA error %d", 3179 wbm_err_info.rxdma_err_code); 3180 } 3181 } else if (wbm_err_info.rxdma_psh_rsn 3182 == HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) { 3183 dp_rx_err_route_hdl(soc, nbuf, txrx_peer, 3184 rx_tlv_hdr, 3185 HAL_RX_WBM_ERR_SRC_RXDMA); 3186 } else if (wbm_err_info.rxdma_psh_rsn 3187 == HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) { 3188 dp_rx_err_err("rxdma push reason %u", 3189 wbm_err_info.rxdma_psh_rsn); 3190 DP_STATS_INC(soc, rx.err.rx_flush_count, 1); 3191 dp_rx_nbuf_free(nbuf); 3192 } else { 3193 /* should not enter here */ 3194 dp_rx_err_alert("invalid rxdma push reason %u", 3195 wbm_err_info.rxdma_psh_rsn); 3196 dp_rx_nbuf_free(nbuf); 3197 qdf_assert_always(0); 3198 } 3199 } else { 3200 /* Should not come here */ 3201 qdf_assert(0); 3202 } 3203 3204 if (txrx_peer) 3205 dp_txrx_peer_unref_delete(txrx_ref_handle, 3206 DP_MOD_ID_RX_ERR); 3207 3208 nbuf = next; 3209 } 3210 return rx_bufs_used; /* Assume no scale factor for now */ 3211 } 3212 3213 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 3214 3215 /** 3216 * dup_desc_dbg() - dump and assert if duplicate rx desc found 3217 * 3218 * @soc: core DP main context 3219 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 3220 * @rx_desc: void pointer to rx descriptor 3221 * 3222 * Return: void 3223 */ 3224 static void dup_desc_dbg(struct dp_soc *soc, 3225 hal_rxdma_desc_t rxdma_dst_ring_desc, 3226 void *rx_desc) 3227 { 3228 DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1); 3229 dp_rx_dump_info_and_assert( 3230 soc, 3231 soc->rx_rel_ring.hal_srng, 3232 
hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc), 3233 rx_desc); 3234 } 3235 3236 /** 3237 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs 3238 * 3239 * @soc: core DP main context 3240 * @mac_id: mac id which is one of 3 mac_ids 3241 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 3242 * @head: head of descs list to be freed 3243 * @tail: tail of descs list to be freed 3244 * 3245 * Return: number of msdu in MPDU to be popped 3246 */ 3247 static inline uint32_t 3248 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 3249 hal_rxdma_desc_t rxdma_dst_ring_desc, 3250 union dp_rx_desc_list_elem_t **head, 3251 union dp_rx_desc_list_elem_t **tail) 3252 { 3253 void *rx_msdu_link_desc; 3254 qdf_nbuf_t msdu; 3255 qdf_nbuf_t last; 3256 struct hal_rx_msdu_list msdu_list; 3257 uint16_t num_msdus; 3258 struct hal_buf_info buf_info; 3259 uint32_t rx_bufs_used = 0; 3260 uint32_t msdu_cnt; 3261 uint32_t i; 3262 uint8_t push_reason; 3263 uint8_t rxdma_error_code = 0; 3264 uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST; 3265 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 3266 uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; 3267 hal_rxdma_desc_t ring_desc; 3268 struct rx_desc_pool *rx_desc_pool; 3269 3270 if (!pdev) { 3271 dp_rx_err_debug("%pK: pdev is null for mac_id = %d", 3272 soc, mac_id); 3273 return rx_bufs_used; 3274 } 3275 3276 msdu = 0; 3277 3278 last = NULL; 3279 3280 hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc, 3281 &buf_info, &msdu_cnt); 3282 3283 push_reason = 3284 hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc); 3285 if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 3286 rxdma_error_code = 3287 hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc); 3288 } 3289 3290 do { 3291 rx_msdu_link_desc = 3292 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 3293 3294 qdf_assert_always(rx_msdu_link_desc); 3295 3296 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, 3297 &msdu_list, &num_msdus); 3298 3299 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 3300 /* if the msdus belong to an NSS offloaded radio && 3301 * the rbm is not SW1_BM then return the msdu_link 3302 * descriptor without freeing the msdus (nbufs). let 3303 * these buffers be given to NSS completion ring for 3304 * NSS to free them. 3305 * else iterate through the msdu link desc list and 3306 * free each msdu in the list. 3307 */ 3308 if (msdu_list.rbm[0] != 3309 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) && 3310 wlan_cfg_get_dp_pdev_nss_enabled( 3311 pdev->wlan_cfg_ctx)) 3312 bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST; 3313 else { 3314 for (i = 0; i < num_msdus; i++) { 3315 struct dp_rx_desc *rx_desc = 3316 soc->arch_ops. 3317 dp_rx_desc_cookie_2_va( 3318 soc, 3319 msdu_list.sw_cookie[i]); 3320 qdf_assert_always(rx_desc); 3321 msdu = rx_desc->nbuf; 3322 /* 3323 * this is an unlikely scenario 3324 * where the host is reaping 3325 * a descriptor which 3326 * it already reaped just a while ago 3327 * but is yet to replenish 3328 * it back to HW. 3329 * In this case host will dump 3330 * the last 128 descriptors 3331 * including the software descriptor 3332 * rx_desc and assert.
3333 */ 3334 ring_desc = rxdma_dst_ring_desc; 3335 if (qdf_unlikely(!rx_desc->in_use)) { 3336 dup_desc_dbg(soc, 3337 ring_desc, 3338 rx_desc); 3339 continue; 3340 } 3341 3342 if (rx_desc->unmapped == 0) { 3343 rx_desc_pool = 3344 &soc->rx_desc_buf[rx_desc->pool_id]; 3345 dp_ipa_rx_buf_smmu_mapping_lock(soc); 3346 dp_rx_nbuf_unmap_pool(soc, 3347 rx_desc_pool, 3348 msdu); 3349 rx_desc->unmapped = 1; 3350 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 3351 } 3352 3353 dp_rx_err_debug("%pK: msdu_nbuf=%pK ", 3354 soc, msdu); 3355 3356 dp_rx_buffer_pool_nbuf_free(soc, msdu, 3357 rx_desc->pool_id); 3358 rx_bufs_used++; 3359 dp_rx_add_to_free_desc_list(head, 3360 tail, rx_desc); 3361 } 3362 } 3363 } else { 3364 rxdma_error_code = HAL_RXDMA_ERR_WAR; 3365 } 3366 3367 /* 3368 * Store the current link buffer into the local structure 3369 * to be used for release purpose. 3370 */ 3371 hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info, 3372 buf_info.paddr, buf_info.sw_cookie, 3373 buf_info.rbm); 3374 3375 hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc, 3376 &buf_info); 3377 dp_rx_link_desc_return_by_addr(soc, 3378 (hal_buff_addrinfo_t) 3379 rx_link_buf_info, 3380 bm_action); 3381 } while (buf_info.paddr); 3382 3383 DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1); 3384 if (pdev) 3385 DP_STATS_INC(pdev, err.rxdma_error, 1); 3386 3387 if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) { 3388 dp_rx_err_err("%pK: Packet received with Decrypt error", soc); 3389 } 3390 3391 return rx_bufs_used; 3392 } 3393 3394 uint32_t 3395 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 3396 uint32_t mac_id, uint32_t quota) 3397 { 3398 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 3399 hal_rxdma_desc_t rxdma_dst_ring_desc; 3400 hal_soc_handle_t hal_soc; 3401 void *err_dst_srng; 3402 union dp_rx_desc_list_elem_t *head = NULL; 3403 union dp_rx_desc_list_elem_t *tail = NULL; 3404 struct dp_srng *dp_rxdma_srng; 3405 struct rx_desc_pool *rx_desc_pool; 3406 uint32_t work_done = 0; 3407 uint32_t rx_bufs_used = 0; 3408 3409 if (!pdev) 3410 return 0; 3411 3412 err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng; 3413 3414 if (!err_dst_srng) { 3415 dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK", 3416 soc, err_dst_srng); 3417 return 0; 3418 } 3419 3420 hal_soc = soc->hal_soc; 3421 3422 qdf_assert(hal_soc); 3423 3424 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) { 3425 dp_rx_err_err("%pK: HAL Monitor Destination Ring access failed -- %pK", 3426 soc, err_dst_srng); 3427 return 0; 3428 } 3429 3430 while (qdf_likely(quota-- && (rxdma_dst_ring_desc = 3431 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { 3432 3433 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, 3434 rxdma_dst_ring_desc, 3435 &head, &tail); 3436 } 3437 3438 dp_srng_access_end(int_ctx, soc, err_dst_srng); 3439 3440 if (rx_bufs_used) { 3441 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3442 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 3443 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 3444 } else { 3445 dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id]; 3446 rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id]; 3447 } 3448 3449 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 3450 rx_desc_pool, rx_bufs_used, &head, &tail, false); 3451 3452 work_done += rx_bufs_used; 3453 } 3454 3455 return work_done; 3456 } 3457 3458 #ifndef QCA_HOST_MODE_WIFI_DISABLED 3459 3460 static inline void 3461 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
hal_rxdma_desc_t rxdma_dst_ring_desc, 3463 union dp_rx_desc_list_elem_t **head, 3464 union dp_rx_desc_list_elem_t **tail, 3465 uint32_t *rx_bufs_used) 3466 { 3467 void *rx_msdu_link_desc; 3468 qdf_nbuf_t msdu; 3469 qdf_nbuf_t last; 3470 struct hal_rx_msdu_list msdu_list; 3471 uint16_t num_msdus; 3472 struct hal_buf_info buf_info; 3473 uint32_t msdu_cnt, i; 3474 uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; 3475 struct rx_desc_pool *rx_desc_pool; 3476 struct dp_rx_desc *rx_desc; 3477 3478 msdu = 0; 3479 3480 last = NULL; 3481 3482 hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc, 3483 &buf_info, &msdu_cnt); 3484 3485 do { 3486 rx_msdu_link_desc = 3487 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 3488 3489 if (!rx_msdu_link_desc) { 3490 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1); 3491 break; 3492 } 3493 3494 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, 3495 &msdu_list, &num_msdus); 3496 3497 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 3498 for (i = 0; i < num_msdus; i++) { 3499 if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) { 3500 dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x", 3501 msdu_list.sw_cookie[i]); 3502 continue; 3503 } 3504 3505 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 3506 soc, 3507 msdu_list.sw_cookie[i]); 3508 qdf_assert_always(rx_desc); 3509 rx_desc_pool = 3510 &soc->rx_desc_buf[rx_desc->pool_id]; 3511 msdu = rx_desc->nbuf; 3512 3513 /* 3514 * this is an unlikely scenario where the host is reaping 3515 * a descriptor which it already reaped just a while ago 3516 * but is yet to replenish it back to HW. 3517 */ 3518 if (qdf_unlikely(!rx_desc->in_use) || 3519 qdf_unlikely(!msdu)) { 3520 dp_rx_err_info_rl("Reaping rx_desc not in use!"); 3521 continue; 3522 } 3523 3524 dp_ipa_rx_buf_smmu_mapping_lock(soc); 3525 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu); 3526 rx_desc->unmapped = 1; 3527 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 3528 3529 dp_rx_buffer_pool_nbuf_free(soc, msdu, 3530 rx_desc->pool_id); 3531 rx_bufs_used[rx_desc->pool_id]++; 3532 dp_rx_add_to_free_desc_list(head, 3533 tail, rx_desc); 3534 } 3535 } 3536 3537 /* 3538 * Store the current link buffer into the local structure 3539 * to be used for release purpose. 3540 */ 3541 hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info, 3542 buf_info.paddr, buf_info.sw_cookie, 3543 buf_info.rbm); 3544 3545 hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc, 3546 &buf_info); 3547 dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t) 3548 rx_link_buf_info, 3549 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 3550 } while (buf_info.paddr); 3551 } 3552 3553 /* 3554 * 3555 * dp_handle_wbm_internal_error() - handles wbm_internal_error case 3556 * 3557 * @soc: core DP main context 3558 * @hal_desc: hal descriptor 3559 * @buf_type: indicates if the buffer is of type link desc or msdu 3560 * Return: None 3561 * 3562 * wbm_internal_error is seen in the following scenarios: 3563 * 3564 * 1. Null pointers detected in WBM_RELEASE_RING descriptors 3565 * 2. Null pointers detected during delinking process 3566 * 3567 * Some null pointer cases: 3568 * 3569 * a. MSDU buffer pointer is NULL 3570 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag 3571 * c.
MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL 3572 */ 3573 void 3574 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc, 3575 uint32_t buf_type) 3576 { 3577 struct hal_buf_info buf_info = {0}; 3578 struct dp_rx_desc *rx_desc = NULL; 3579 struct rx_desc_pool *rx_desc_pool; 3580 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0}; 3581 union dp_rx_desc_list_elem_t *head = NULL; 3582 union dp_rx_desc_list_elem_t *tail = NULL; 3583 uint8_t pool_id; 3584 uint8_t mac_id; 3585 3586 hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info); 3587 3588 if (!buf_info.paddr) { 3589 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1); 3590 return; 3591 } 3592 3593 /* buffer_addr_info is the first element of ring_desc */ 3594 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc, 3595 &buf_info); 3596 pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie); 3597 3598 if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) { 3599 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1); 3600 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 3601 soc, 3602 buf_info.sw_cookie); 3603 3604 if (rx_desc && rx_desc->nbuf) { 3605 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 3606 dp_ipa_rx_buf_smmu_mapping_lock(soc); 3607 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, 3608 rx_desc->nbuf); 3609 rx_desc->unmapped = 1; 3610 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 3611 3612 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 3613 rx_desc->pool_id); 3614 dp_rx_add_to_free_desc_list(&head, 3615 &tail, 3616 rx_desc); 3617 3618 rx_bufs_reaped[rx_desc->pool_id]++; 3619 } 3620 } else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) { 3621 dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc, 3622 &head, &tail, rx_bufs_reaped); 3623 } 3624 3625 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 3626 struct rx_desc_pool *rx_desc_pool; 3627 struct dp_srng *dp_rxdma_srng; 3628 3629 if (!rx_bufs_reaped[mac_id]) 3630 continue; 3631 3632 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1); 3633 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 3634 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 3635 3636 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 3637 rx_desc_pool, 3638 rx_bufs_reaped[mac_id], 3639 &head, &tail, false); 3640 } 3641 } 3642 3643 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 3644
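/*
 * Illustrative sketch (kept out of the build with #if 0): the windowed
 * rate-limit check used by dp_rx_err_exception() earlier in this file,
 * reduced to self-contained plain C. The struct, function and field
 * names here are made up for this example; only the shape of the
 * algorithm -- accumulate errors, and once a
 * DP_RX_ERR_ROUTE_TIMEOUT_US-style window elapses, compare the count
 * against a DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT-style threshold --
 * mirrors the driver code, with the window-reset detail simplified.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

struct err_rate_window {
	uint64_t start_us;	/* timestamp of the first error in the window */
	uint32_t errs;		/* errors accumulated in the window */
};

/* Return true when the caller should trigger self-recovery. */
static bool err_rate_exceeded(struct err_rate_window *w, uint64_t now_us,
			      uint32_t max_errs, uint64_t window_us)
{
	if (!w->start_us)
		w->start_us = now_us;

	if ((now_us - w->start_us) <= window_us) {
		/* Still inside the window: just accumulate. */
		w->errs++;
		return false;
	}

	/* Window elapsed: decide on the accumulated count, then restart. */
	{
		bool exceeded = w->errs > max_errs;

		w->start_us = now_us;
		w->errs = 1;
		return exceeded;
	}
}
#endif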