1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "dp_types.h" 21 #include "dp_rx.h" 22 #include "dp_tx.h" 23 #include "dp_peer.h" 24 #include "dp_internal.h" 25 #include "hal_api.h" 26 #include "qdf_trace.h" 27 #include "qdf_nbuf.h" 28 #include "dp_rx_defrag.h" 29 #include "dp_ipa.h" 30 #ifdef FEATURE_WDS 31 #include "dp_txrx_wds.h" 32 #endif 33 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 34 #include "qdf_net_types.h" 35 #include "dp_rx_buffer_pool.h" 36 37 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params) 38 #define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params) 39 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params) 40 #define dp_rx_err_info(params...) \ 41 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params) 42 #define dp_rx_err_info_rl(params...) \ 43 __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params) 44 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params) 45 46 #ifndef QCA_HOST_MODE_WIFI_DISABLED 47 48 /* Max buffer in invalid peer SG list*/ 49 #define DP_MAX_INVALID_BUFFERS 10 50 51 /* Max regular Rx packet routing error */ 52 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20 53 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10 54 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */ 55 56 #ifdef FEATURE_MEC 57 bool dp_rx_mcast_echo_check(struct dp_soc *soc, 58 struct dp_peer *peer, 59 uint8_t *rx_tlv_hdr, 60 qdf_nbuf_t nbuf) 61 { 62 struct dp_vdev *vdev = peer->vdev; 63 struct dp_pdev *pdev = vdev->pdev; 64 struct dp_mec_entry *mecentry = NULL; 65 struct dp_ast_entry *ase = NULL; 66 uint16_t sa_idx = 0; 67 uint8_t *data; 68 /* 69 * Multicast Echo Check is required only if vdev is STA and 70 * received pkt is a multicast/broadcast pkt. otherwise 71 * skip the MEC check. 72 */ 73 if (vdev->opmode != wlan_op_mode_sta) 74 return false; 75 if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr)) 76 return false; 77 78 data = qdf_nbuf_data(nbuf); 79 80 /* 81 * if the received pkts src mac addr matches with vdev 82 * mac address then drop the pkt as it is looped back 83 */ 84 if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE], 85 vdev->mac_addr.raw, 86 QDF_MAC_ADDR_SIZE))) 87 return true; 88 89 /* 90 * In case of qwrap isolation mode, donot drop loopback packets. 91 * In isolation mode, all packets from the wired stations need to go 92 * to rootap and loop back to reach the wireless stations and 93 * vice-versa. 94 */ 95 if (qdf_unlikely(vdev->isolation_vdev)) 96 return false; 97 98 /* 99 * if the received pkts src mac addr matches with the 100 * wired PCs MAC addr which is behind the STA or with 101 * wireless STAs MAC addr which are behind the Repeater, 102 * then drop the pkt as it is looped back 103 */ 104 if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) { 105 sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr); 106 107 if ((sa_idx < 0) || 108 (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { 109 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 110 "invalid sa_idx: %d", sa_idx); 111 qdf_assert_always(0); 112 } 113 114 qdf_spin_lock_bh(&soc->ast_lock); 115 ase = soc->ast_table[sa_idx]; 116 117 /* 118 * this check was not needed since MEC is not dependent on AST, 119 * but if we dont have this check SON has some issues in 120 * dual backhaul scenario. in APS SON mode, client connected 121 * to RE 2G and sends multicast packets. the RE sends it to CAP 122 * over 5G backhaul. the CAP loopback it on 2G to RE. 123 * On receiving in 2G STA vap, we assume that client has roamed 124 * and kickout the client. 125 */ 126 if (ase && (ase->peer_id != peer->peer_id)) { 127 qdf_spin_unlock_bh(&soc->ast_lock); 128 goto drop; 129 } 130 131 qdf_spin_unlock_bh(&soc->ast_lock); 132 } 133 134 qdf_spin_lock_bh(&soc->mec_lock); 135 136 mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id, 137 &data[QDF_MAC_ADDR_SIZE]); 138 if (!mecentry) { 139 qdf_spin_unlock_bh(&soc->mec_lock); 140 return false; 141 } 142 143 qdf_spin_unlock_bh(&soc->mec_lock); 144 145 drop: 146 dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT, 147 soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE])); 148 149 return true; 150 } 151 #endif 152 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 153 154 void dp_rx_link_desc_refill_duplicate_check( 155 struct dp_soc *soc, 156 struct hal_buf_info *buf_info, 157 hal_buff_addrinfo_t ring_buf_info) 158 { 159 struct hal_buf_info current_link_desc_buf_info = { 0 }; 160 161 /* do duplicate link desc address check */ 162 hal_rx_buffer_addr_info_get_paddr(ring_buf_info, 163 ¤t_link_desc_buf_info); 164 165 /* 166 * TODO - Check if the hal soc api call can be removed 167 * since the cookie is just used for print. 168 * buffer_addr_info is the first element of ring_desc 169 */ 170 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 171 (uint32_t *)ring_buf_info, 172 ¤t_link_desc_buf_info); 173 174 if (qdf_unlikely(current_link_desc_buf_info.paddr == 175 buf_info->paddr)) { 176 dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x", 177 current_link_desc_buf_info.paddr, 178 current_link_desc_buf_info.sw_cookie); 179 DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1); 180 } 181 *buf_info = current_link_desc_buf_info; 182 } 183 184 /** 185 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to 186 * (WBM) by address 187 * 188 * @soc: core DP main context 189 * @link_desc_addr: link descriptor addr 190 * 191 * Return: QDF_STATUS 192 */ 193 QDF_STATUS 194 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, 195 hal_buff_addrinfo_t link_desc_addr, 196 uint8_t bm_action) 197 { 198 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; 199 hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng; 200 hal_soc_handle_t hal_soc = soc->hal_soc; 201 QDF_STATUS status = QDF_STATUS_E_FAILURE; 202 void *src_srng_desc; 203 204 if (!wbm_rel_srng) { 205 dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc); 206 return status; 207 } 208 209 /* do duplicate link desc address check */ 210 dp_rx_link_desc_refill_duplicate_check( 211 soc, 212 &soc->last_op_info.wbm_rel_link_desc, 213 link_desc_addr); 214 215 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { 216 217 /* TODO */ 218 /* 219 * Need API to convert from hal_ring pointer to 220 * Ring Type / Ring Id combo 221 */ 222 dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK", 223 soc, wbm_rel_srng); 224 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 225 goto done; 226 } 227 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); 228 if (qdf_likely(src_srng_desc)) { 229 /* Return link descriptor through WBM ring (SW2WBM)*/ 230 hal_rx_msdu_link_desc_set(hal_soc, 231 src_srng_desc, link_desc_addr, bm_action); 232 status = QDF_STATUS_SUCCESS; 233 } else { 234 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng; 235 236 DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1); 237 238 dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)", 239 srng->ring_id, 240 soc->stats.rx.err.hal_ring_access_full_fail); 241 dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x", 242 *srng->u.src_ring.hp_addr, 243 srng->u.src_ring.reap_hp, 244 *srng->u.src_ring.tp_addr, 245 srng->u.src_ring.cached_tp); 246 QDF_BUG(0); 247 } 248 done: 249 hal_srng_access_end(hal_soc, wbm_rel_srng); 250 return status; 251 252 } 253 254 /** 255 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 256 * (WBM), following error handling 257 * 258 * @soc: core DP main context 259 * @ring_desc: opaque pointer to the REO error ring descriptor 260 * 261 * Return: QDF_STATUS 262 */ 263 QDF_STATUS 264 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc, 265 uint8_t bm_action) 266 { 267 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc); 268 269 return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action); 270 } 271 272 #ifndef QCA_HOST_MODE_WIFI_DISABLED 273 274 /** 275 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU 276 * 277 * @soc: core txrx main context 278 * @ring_desc: opaque pointer to the REO error ring descriptor 279 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 280 * @head: head of the local descriptor free-list 281 * @tail: tail of the local descriptor free-list 282 * @quota: No. of units (packets) that can be serviced in one shot. 283 * 284 * This function is used to drop all MSDU in an MPDU 285 * 286 * Return: uint32_t: No. of elements processed 287 */ 288 static uint32_t 289 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc, 290 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 291 uint8_t *mac_id, 292 uint32_t quota) 293 { 294 uint32_t rx_bufs_used = 0; 295 void *link_desc_va; 296 struct hal_buf_info buf_info; 297 struct dp_pdev *pdev; 298 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 299 int i; 300 uint8_t *rx_tlv_hdr; 301 uint32_t tid; 302 struct rx_desc_pool *rx_desc_pool; 303 struct dp_rx_desc *rx_desc; 304 /* First field in REO Dst ring Desc is buffer_addr_info */ 305 void *buf_addr_info = ring_desc; 306 struct buffer_addr_info cur_link_desc_addr_info = { 0 }; 307 struct buffer_addr_info next_link_desc_addr_info = { 0 }; 308 309 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info); 310 311 /* buffer_addr_info is the first element of ring_desc */ 312 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 313 (uint32_t *)ring_desc, 314 &buf_info); 315 316 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 317 318 more_msdu_link_desc: 319 /* No UNMAP required -- this is "malloc_consistent" memory */ 320 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 321 &mpdu_desc_info->msdu_count); 322 323 for (i = 0; (i < mpdu_desc_info->msdu_count); i++) { 324 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, 325 msdu_list.sw_cookie[i]); 326 327 qdf_assert_always(rx_desc); 328 329 /* all buffers from a MSDU link link belong to same pdev */ 330 *mac_id = rx_desc->pool_id; 331 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 332 if (!pdev) { 333 dp_rx_err_debug("%pK: pdev is null for pool_id = %d", 334 soc, rx_desc->pool_id); 335 return rx_bufs_used; 336 } 337 338 if (!dp_rx_desc_check_magic(rx_desc)) { 339 dp_rx_err_err("%pK: Invalid rx_desc cookie=%d", 340 soc, msdu_list.sw_cookie[i]); 341 return rx_bufs_used; 342 } 343 344 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 345 dp_ipa_rx_buf_smmu_mapping_lock(soc); 346 dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, 347 rx_desc_pool->buf_size, 348 false); 349 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, 350 QDF_DMA_FROM_DEVICE, 351 rx_desc_pool->buf_size); 352 rx_desc->unmapped = 1; 353 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 354 355 rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf); 356 357 rx_bufs_used++; 358 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 359 rx_desc->rx_buf_start); 360 dp_rx_err_err("%pK: Packet received with PN error for tid :%d", 361 soc, tid); 362 363 rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf); 364 if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr)) 365 hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr); 366 367 /* Just free the buffers */ 368 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id); 369 370 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 371 &pdev->free_list_tail, rx_desc); 372 } 373 374 /* 375 * If the msdu's are spread across multiple link-descriptors, 376 * we cannot depend solely on the msdu_count(e.g., if msdu is 377 * spread across multiple buffers).Hence, it is 378 * necessary to check the next link_descriptor and release 379 * all the msdu's that are part of it. 380 */ 381 hal_rx_get_next_msdu_link_desc_buf_addr_info( 382 link_desc_va, 383 &next_link_desc_addr_info); 384 385 if (hal_rx_is_buf_addr_info_valid( 386 &next_link_desc_addr_info)) { 387 /* Clear the next link desc info for the current link_desc */ 388 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va); 389 390 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 391 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 392 hal_rx_buffer_addr_info_get_paddr( 393 &next_link_desc_addr_info, 394 &buf_info); 395 /* buffer_addr_info is the first element of ring_desc */ 396 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 397 (uint32_t *)&next_link_desc_addr_info, 398 &buf_info); 399 cur_link_desc_addr_info = next_link_desc_addr_info; 400 buf_addr_info = &cur_link_desc_addr_info; 401 402 link_desc_va = 403 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 404 405 goto more_msdu_link_desc; 406 } 407 quota--; 408 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 409 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 410 return rx_bufs_used; 411 } 412 413 /** 414 * dp_rx_pn_error_handle() - Handles PN check errors 415 * 416 * @soc: core txrx main context 417 * @ring_desc: opaque pointer to the REO error ring descriptor 418 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 419 * @head: head of the local descriptor free-list 420 * @tail: tail of the local descriptor free-list 421 * @quota: No. of units (packets) that can be serviced in one shot. 422 * 423 * This function implements PN error handling 424 * If the peer is configured to ignore the PN check errors 425 * or if DP feels, that this frame is still OK, the frame can be 426 * re-injected back to REO to use some of the other features 427 * of REO e.g. duplicate detection/routing to other cores 428 * 429 * Return: uint32_t: No. of elements processed 430 */ 431 static uint32_t 432 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc, 433 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 434 uint8_t *mac_id, 435 uint32_t quota) 436 { 437 uint16_t peer_id; 438 uint32_t rx_bufs_used = 0; 439 struct dp_peer *peer; 440 bool peer_pn_policy = false; 441 442 peer_id = DP_PEER_METADATA_PEER_ID_GET( 443 mpdu_desc_info->peer_meta_data); 444 445 446 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 447 448 if (qdf_likely(peer)) { 449 /* 450 * TODO: Check for peer specific policies & set peer_pn_policy 451 */ 452 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 453 "discard rx due to PN error for peer %pK "QDF_MAC_ADDR_FMT, 454 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 455 456 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 457 } 458 dp_rx_err_err("%pK: Packet received with PN error", soc); 459 460 /* No peer PN policy -- definitely drop */ 461 if (!peer_pn_policy) 462 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, 463 mpdu_desc_info, 464 mac_id, quota); 465 466 return rx_bufs_used; 467 } 468 469 /** 470 * dp_rx_oor_handle() - Handles the msdu which is OOR error 471 * 472 * @soc: core txrx main context 473 * @nbuf: pointer to msdu skb 474 * @peer_id: dp peer ID 475 * @rx_tlv_hdr: start of rx tlv header 476 * 477 * This function process the msdu delivered from REO2TCL 478 * ring with error type OOR 479 * 480 * Return: None 481 */ 482 static void 483 dp_rx_oor_handle(struct dp_soc *soc, 484 qdf_nbuf_t nbuf, 485 uint16_t peer_id, 486 uint8_t *rx_tlv_hdr) 487 { 488 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 489 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; 490 struct dp_peer *peer = NULL; 491 492 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 493 if (!peer) { 494 dp_info_rl("peer not found"); 495 goto free_nbuf; 496 } 497 498 if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask, 499 rx_tlv_hdr)) { 500 DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1); 501 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 502 return; 503 } 504 505 free_nbuf: 506 if (peer) 507 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 508 509 DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1); 510 qdf_nbuf_free(nbuf); 511 } 512 513 /** 514 * dp_rx_reo_err_entry_process() - Handles for REO error entry processing 515 * 516 * @soc: core txrx main context 517 * @ring_desc: opaque pointer to the REO error ring descriptor 518 * @mpdu_desc_info: pointer to mpdu level description info 519 * @link_desc_va: pointer to msdu_link_desc virtual address 520 * @err_code: reo erro code fetched from ring entry 521 * 522 * Function to handle msdus fetched from msdu link desc, currently 523 * only support 2K jump, OOR error. 524 * 525 * Return: msdu count processed. 526 */ 527 static uint32_t 528 dp_rx_reo_err_entry_process(struct dp_soc *soc, 529 void *ring_desc, 530 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 531 void *link_desc_va, 532 enum hal_reo_error_code err_code) 533 { 534 uint32_t rx_bufs_used = 0; 535 struct dp_pdev *pdev; 536 int i; 537 uint8_t *rx_tlv_hdr_first; 538 uint8_t *rx_tlv_hdr_last; 539 uint32_t tid = DP_MAX_TIDS; 540 uint16_t peer_id; 541 struct dp_rx_desc *rx_desc; 542 struct rx_desc_pool *rx_desc_pool; 543 qdf_nbuf_t nbuf; 544 struct hal_buf_info buf_info; 545 struct hal_rx_msdu_list msdu_list; 546 uint16_t num_msdus; 547 struct buffer_addr_info cur_link_desc_addr_info = { 0 }; 548 struct buffer_addr_info next_link_desc_addr_info = { 0 }; 549 /* First field in REO Dst ring Desc is buffer_addr_info */ 550 void *buf_addr_info = ring_desc; 551 qdf_nbuf_t head_nbuf = NULL; 552 qdf_nbuf_t tail_nbuf = NULL; 553 uint16_t msdu_processed = 0; 554 bool ret; 555 556 peer_id = DP_PEER_METADATA_PEER_ID_GET( 557 mpdu_desc_info->peer_meta_data); 558 559 more_msdu_link_desc: 560 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 561 &num_msdus); 562 for (i = 0; i < num_msdus; i++) { 563 rx_desc = dp_rx_cookie_2_va_rxdma_buf( 564 soc, 565 msdu_list.sw_cookie[i]); 566 567 qdf_assert_always(rx_desc); 568 569 /* all buffers from a MSDU link belong to same pdev */ 570 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 571 572 nbuf = rx_desc->nbuf; 573 ret = dp_rx_desc_paddr_sanity_check(rx_desc, 574 msdu_list.paddr[i]); 575 if (!ret) { 576 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 577 rx_desc->in_err_state = 1; 578 continue; 579 } 580 581 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 582 dp_ipa_rx_buf_smmu_mapping_lock(soc); 583 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, 584 rx_desc_pool->buf_size, 585 false); 586 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, 587 QDF_DMA_FROM_DEVICE, 588 rx_desc_pool->buf_size); 589 rx_desc->unmapped = 1; 590 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 591 592 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len; 593 rx_bufs_used++; 594 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 595 &pdev->free_list_tail, rx_desc); 596 597 DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf); 598 599 if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags & 600 HAL_MSDU_F_MSDU_CONTINUATION)) 601 continue; 602 603 if (dp_rx_buffer_pool_refill(soc, head_nbuf, 604 rx_desc->pool_id)) { 605 /* MSDU queued back to the pool */ 606 goto process_next_msdu; 607 } 608 609 rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf); 610 rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf); 611 612 if (qdf_unlikely(head_nbuf != tail_nbuf)) { 613 nbuf = dp_rx_sg_create(soc, head_nbuf); 614 qdf_nbuf_set_is_frag(nbuf, 1); 615 DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1); 616 } 617 618 switch (err_code) { 619 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 620 /* 621 * only first msdu, mpdu start description tlv valid? 622 * and use it for following msdu. 623 */ 624 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 625 rx_tlv_hdr_last)) 626 tid = hal_rx_mpdu_start_tid_get( 627 soc->hal_soc, 628 rx_tlv_hdr_first); 629 630 dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last, 631 peer_id, tid); 632 break; 633 634 case HAL_REO_ERR_REGULAR_FRAME_OOR: 635 dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last); 636 break; 637 default: 638 dp_err_rl("Non-support error code %d", err_code); 639 qdf_nbuf_free(nbuf); 640 } 641 642 process_next_msdu: 643 msdu_processed++; 644 head_nbuf = NULL; 645 tail_nbuf = NULL; 646 } 647 648 /* 649 * If the msdu's are spread across multiple link-descriptors, 650 * we cannot depend solely on the msdu_count(e.g., if msdu is 651 * spread across multiple buffers).Hence, it is 652 * necessary to check the next link_descriptor and release 653 * all the msdu's that are part of it. 654 */ 655 hal_rx_get_next_msdu_link_desc_buf_addr_info( 656 link_desc_va, 657 &next_link_desc_addr_info); 658 659 if (hal_rx_is_buf_addr_info_valid( 660 &next_link_desc_addr_info)) { 661 /* Clear the next link desc info for the current link_desc */ 662 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va); 663 dp_rx_link_desc_return_by_addr( 664 soc, 665 buf_addr_info, 666 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 667 668 hal_rx_buffer_addr_info_get_paddr( 669 &next_link_desc_addr_info, 670 &buf_info); 671 /* buffer_addr_info is the first element of ring_desc */ 672 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 673 (uint32_t *)&next_link_desc_addr_info, 674 &buf_info); 675 link_desc_va = 676 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 677 cur_link_desc_addr_info = next_link_desc_addr_info; 678 buf_addr_info = &cur_link_desc_addr_info; 679 680 goto more_msdu_link_desc; 681 } 682 683 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 684 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 685 if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count)) 686 DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1); 687 688 return rx_bufs_used; 689 } 690 691 #ifdef DP_INVALID_PEER_ASSERT 692 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \ 693 do { \ 694 qdf_assert_always(!(head)); \ 695 qdf_assert_always(!(tail)); \ 696 } while (0) 697 #else 698 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */ 699 #endif 700 701 /** 702 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu 703 * to pdev invalid peer list 704 * 705 * @soc: core DP main context 706 * @nbuf: Buffer pointer 707 * @rx_tlv_hdr: start of rx tlv header 708 * @mac_id: mac id 709 * 710 * Return: bool: true for last msdu of mpdu 711 */ 712 static bool 713 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, 714 uint8_t *rx_tlv_hdr, uint8_t mac_id) 715 { 716 bool mpdu_done = false; 717 qdf_nbuf_t curr_nbuf = NULL; 718 qdf_nbuf_t tmp_nbuf = NULL; 719 720 /* TODO: Currently only single radio is supported, hence 721 * pdev hard coded to '0' index 722 */ 723 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 724 725 if (!dp_pdev) { 726 dp_rx_err_debug("%pK: pdev is null for mac_id = %d", soc, mac_id); 727 return mpdu_done; 728 } 729 /* if invalid peer SG list has max values free the buffers in list 730 * and treat current buffer as start of list 731 * 732 * current logic to detect the last buffer from attn_tlv is not reliable 733 * in OFDMA UL scenario hence add max buffers check to avoid list pile 734 * up 735 */ 736 if (!dp_pdev->first_nbuf || 737 (dp_pdev->invalid_peer_head_msdu && 738 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST 739 (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) { 740 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 741 dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc, 742 rx_tlv_hdr); 743 dp_pdev->first_nbuf = true; 744 745 /* If the new nbuf received is the first msdu of the 746 * amsdu and there are msdus in the invalid peer msdu 747 * list, then let us free all the msdus of the invalid 748 * peer msdu list. 749 * This scenario can happen when we start receiving 750 * new a-msdu even before the previous a-msdu is completely 751 * received. 752 */ 753 curr_nbuf = dp_pdev->invalid_peer_head_msdu; 754 while (curr_nbuf) { 755 tmp_nbuf = curr_nbuf->next; 756 qdf_nbuf_free(curr_nbuf); 757 curr_nbuf = tmp_nbuf; 758 } 759 760 dp_pdev->invalid_peer_head_msdu = NULL; 761 dp_pdev->invalid_peer_tail_msdu = NULL; 762 hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr, 763 &(dp_pdev->ppdu_info.rx_status)); 764 765 } 766 767 if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(soc->hal_soc, 768 rx_tlv_hdr) && 769 hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 770 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 771 qdf_assert_always(dp_pdev->first_nbuf == true); 772 dp_pdev->first_nbuf = false; 773 mpdu_done = true; 774 } 775 776 /* 777 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu 778 * should be NULL here, add the checking for debugging purpose 779 * in case some corner case. 780 */ 781 DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu, 782 dp_pdev->invalid_peer_tail_msdu); 783 DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu, 784 dp_pdev->invalid_peer_tail_msdu, 785 nbuf); 786 787 return mpdu_done; 788 } 789 790 static 791 void dp_rx_err_handle_bar(struct dp_soc *soc, 792 struct dp_peer *peer, 793 qdf_nbuf_t nbuf) 794 { 795 uint8_t *rx_tlv_hdr; 796 unsigned char type, subtype; 797 uint16_t start_seq_num; 798 uint32_t tid; 799 QDF_STATUS status; 800 struct ieee80211_frame_bar *bar; 801 802 /* 803 * 1. Is this a BAR frame. If not Discard it. 804 * 2. If it is, get the peer id, tid, ssn 805 * 2a Do a tid update 806 */ 807 808 rx_tlv_hdr = qdf_nbuf_data(nbuf); 809 bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size); 810 811 type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 812 subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 813 814 if (!(type == IEEE80211_FC0_TYPE_CTL && 815 subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) { 816 dp_err_rl("Not a BAR frame!"); 817 return; 818 } 819 820 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr); 821 qdf_assert_always(tid < DP_MAX_TIDS); 822 823 start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT; 824 825 dp_info_rl("tid %u window_size %u start_seq_num %u", 826 tid, peer->rx_tid[tid].ba_win_size, start_seq_num); 827 828 status = dp_rx_tid_update_wifi3(peer, tid, 829 peer->rx_tid[tid].ba_win_size, 830 start_seq_num); 831 if (status != QDF_STATUS_SUCCESS) { 832 dp_err_rl("failed to handle bar frame update rx tid"); 833 DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1); 834 } else { 835 DP_STATS_INC(soc, rx.err.ssn_update_count, 1); 836 } 837 } 838 839 /** 840 * dp_rx_bar_frame_handle() - Function to handle err BAR frames 841 * @soc: core DP main context 842 * @ring_desc: Hal ring desc 843 * @rx_desc: dp rx desc 844 * @mpdu_desc_info: mpdu desc info 845 * 846 * Handle the error BAR frames received. Ensure the SOC level 847 * stats are updated based on the REO error code. The BAR frames 848 * are further processed by updating the Rx tids with the start 849 * sequence number (SSN) and BA window size. Desc is returned 850 * to the free desc list 851 * 852 * Return: none 853 */ 854 static void 855 dp_rx_bar_frame_handle(struct dp_soc *soc, 856 hal_ring_desc_t ring_desc, 857 struct dp_rx_desc *rx_desc, 858 struct hal_rx_mpdu_desc_info *mpdu_desc_info) 859 { 860 qdf_nbuf_t nbuf; 861 struct dp_pdev *pdev; 862 struct dp_peer *peer; 863 struct rx_desc_pool *rx_desc_pool; 864 uint16_t peer_id; 865 uint8_t *rx_tlv_hdr; 866 uint32_t tid; 867 uint8_t reo_err_code; 868 869 nbuf = rx_desc->nbuf; 870 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 871 dp_ipa_rx_buf_smmu_mapping_lock(soc); 872 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, 873 rx_desc_pool->buf_size, 874 false); 875 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, 876 QDF_DMA_FROM_DEVICE, 877 rx_desc_pool->buf_size); 878 rx_desc->unmapped = 1; 879 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 880 rx_tlv_hdr = qdf_nbuf_data(nbuf); 881 peer_id = 882 hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 883 rx_tlv_hdr); 884 peer = dp_peer_get_ref_by_id(soc, peer_id, 885 DP_MOD_ID_RX_ERR); 886 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 887 rx_tlv_hdr); 888 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 889 890 if (!peer) 891 goto next; 892 893 reo_err_code = hal_rx_get_reo_error_code(soc->hal_soc, ring_desc); 894 dp_info("BAR frame: peer = "QDF_MAC_ADDR_FMT 895 " peer_id = %d" 896 " tid = %u" 897 " SSN = %d" 898 " error code = %d", 899 QDF_MAC_ADDR_REF(peer->mac_addr.raw), 900 peer->peer_id, 901 tid, 902 mpdu_desc_info->mpdu_seq, 903 reo_err_code); 904 905 switch (reo_err_code) { 906 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 907 /* fallthrough */ 908 case HAL_REO_ERR_BAR_FRAME_OOR: 909 dp_rx_err_handle_bar(soc, peer, nbuf); 910 DP_STATS_INC(soc, 911 rx.err.reo_error[reo_err_code], 1); 912 break; 913 default: 914 DP_STATS_INC(soc, rx.bar_frame, 1); 915 } 916 917 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 918 next: 919 dp_rx_link_desc_return(soc, ring_desc, 920 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 921 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 922 rx_desc->pool_id); 923 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 924 &pdev->free_list_tail, 925 rx_desc); 926 } 927 928 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 929 930 /** 931 * dp_2k_jump_handle() - Function to handle 2k jump exception 932 * on WBM ring 933 * 934 * @soc: core DP main context 935 * @nbuf: buffer pointer 936 * @rx_tlv_hdr: start of rx tlv header 937 * @peer_id: peer id of first msdu 938 * @tid: Tid for which exception occurred 939 * 940 * This function handles 2k jump violations arising out 941 * of receiving aggregates in non BA case. This typically 942 * may happen if aggregates are received on a QOS enabled TID 943 * while Rx window size is still initialized to value of 2. Or 944 * it may also happen if negotiated window size is 1 but peer 945 * sends aggregates. 946 * 947 */ 948 949 void 950 dp_2k_jump_handle(struct dp_soc *soc, 951 qdf_nbuf_t nbuf, 952 uint8_t *rx_tlv_hdr, 953 uint16_t peer_id, 954 uint8_t tid) 955 { 956 struct dp_peer *peer = NULL; 957 struct dp_rx_tid *rx_tid = NULL; 958 uint32_t frame_mask = FRAME_MASK_IPV4_ARP; 959 960 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 961 if (!peer) { 962 dp_rx_err_err("%pK: peer not found", soc); 963 goto free_nbuf; 964 } 965 966 if (tid >= DP_MAX_TIDS) { 967 dp_info_rl("invalid tid"); 968 goto nbuf_deliver; 969 } 970 971 rx_tid = &peer->rx_tid[tid]; 972 qdf_spin_lock_bh(&rx_tid->tid_lock); 973 974 /* only if BA session is active, allow send Delba */ 975 if (rx_tid->ba_status != DP_RX_BA_ACTIVE) { 976 qdf_spin_unlock_bh(&rx_tid->tid_lock); 977 goto nbuf_deliver; 978 } 979 980 if (!rx_tid->delba_tx_status) { 981 rx_tid->delba_tx_retry++; 982 rx_tid->delba_tx_status = 1; 983 rx_tid->delba_rcode = 984 IEEE80211_REASON_QOS_SETUP_REQUIRED; 985 qdf_spin_unlock_bh(&rx_tid->tid_lock); 986 if (soc->cdp_soc.ol_ops->send_delba) { 987 DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1); 988 soc->cdp_soc.ol_ops->send_delba( 989 peer->vdev->pdev->soc->ctrl_psoc, 990 peer->vdev->vdev_id, 991 peer->mac_addr.raw, 992 tid, 993 rx_tid->delba_rcode); 994 } 995 } else { 996 qdf_spin_unlock_bh(&rx_tid->tid_lock); 997 } 998 999 nbuf_deliver: 1000 if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask, 1001 rx_tlv_hdr)) { 1002 DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1); 1003 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1004 return; 1005 } 1006 1007 free_nbuf: 1008 if (peer) 1009 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1010 DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1); 1011 qdf_nbuf_free(nbuf); 1012 } 1013 1014 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ 1015 defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_WCN7850) 1016 /** 1017 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception 1018 * @soc: pointer to dp_soc struct 1019 * @pool_id: Pool id to find dp_pdev 1020 * @rx_tlv_hdr: TLV header of received packet 1021 * @nbuf: SKB 1022 * 1023 * In certain types of packets if peer_id is not correct then 1024 * driver may not be able find. Try finding peer by addr_2 of 1025 * received MPDU. If you find the peer then most likely sw_peer_id & 1026 * ast_idx is corrupted. 1027 * 1028 * Return: True if you find the peer by addr_2 of received MPDU else false 1029 */ 1030 static bool 1031 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, 1032 uint8_t pool_id, 1033 uint8_t *rx_tlv_hdr, 1034 qdf_nbuf_t nbuf) 1035 { 1036 struct dp_peer *peer = NULL; 1037 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr); 1038 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 1039 struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr; 1040 1041 if (!pdev) { 1042 dp_rx_err_debug("%pK: pdev is null for pool_id = %d", 1043 soc, pool_id); 1044 return false; 1045 } 1046 /* 1047 * WAR- In certain types of packets if peer_id is not correct then 1048 * driver may not be able find. Try finding peer by addr_2 of 1049 * received MPDU 1050 */ 1051 if (wh) 1052 peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0, 1053 DP_VDEV_ALL, DP_MOD_ID_RX_ERR); 1054 if (peer) { 1055 dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted"); 1056 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1057 QDF_TRACE_LEVEL_DEBUG); 1058 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id, 1059 1, qdf_nbuf_len(nbuf)); 1060 qdf_nbuf_free(nbuf); 1061 1062 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1063 return true; 1064 } 1065 return false; 1066 } 1067 1068 /** 1069 * dp_rx_check_pkt_len() - Check for pktlen validity 1070 * @soc: DP SOC context 1071 * @pkt_len: computed length of the pkt from caller in bytes 1072 * 1073 * Return: true if pktlen > RX_BUFFER_SIZE, else return false 1074 * 1075 */ 1076 static inline 1077 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len) 1078 { 1079 if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) { 1080 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len, 1081 1, pkt_len); 1082 return true; 1083 } else { 1084 return false; 1085 } 1086 } 1087 1088 #else 1089 static inline bool 1090 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, 1091 uint8_t pool_id, 1092 uint8_t *rx_tlv_hdr, 1093 qdf_nbuf_t nbuf) 1094 { 1095 return false; 1096 } 1097 1098 static inline 1099 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len) 1100 { 1101 return false; 1102 } 1103 1104 #endif 1105 1106 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1107 1108 /** 1109 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue 1110 * descriptor violation on either a 1111 * REO or WBM ring 1112 * 1113 * @soc: core DP main context 1114 * @nbuf: buffer pointer 1115 * @rx_tlv_hdr: start of rx tlv header 1116 * @pool_id: mac id 1117 * @peer: peer handle 1118 * 1119 * This function handles NULL queue descriptor violations arising out 1120 * a missing REO queue for a given peer or a given TID. This typically 1121 * may happen if a packet is received on a QOS enabled TID before the 1122 * ADDBA negotiation for that TID, when the TID queue is setup. Or 1123 * it may also happen for MC/BC frames if they are not routed to the 1124 * non-QOS TID queue, in the absence of any other default TID queue. 1125 * This error can show up both in a REO destination or WBM release ring. 1126 * 1127 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code 1128 * if nbuf could not be handled or dropped. 1129 */ 1130 static QDF_STATUS 1131 dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, 1132 uint8_t *rx_tlv_hdr, uint8_t pool_id, 1133 struct dp_peer *peer) 1134 { 1135 uint32_t pkt_len; 1136 uint16_t msdu_len; 1137 struct dp_vdev *vdev; 1138 uint8_t tid; 1139 qdf_ether_header_t *eh; 1140 struct hal_rx_msdu_metadata msdu_metadata; 1141 uint16_t sa_idx = 0; 1142 1143 qdf_nbuf_set_rx_chfrag_start(nbuf, 1144 hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1145 rx_tlv_hdr)); 1146 qdf_nbuf_set_rx_chfrag_end(nbuf, 1147 hal_rx_msdu_end_last_msdu_get(soc->hal_soc, 1148 rx_tlv_hdr)); 1149 qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1150 rx_tlv_hdr)); 1151 qdf_nbuf_set_da_valid(nbuf, 1152 hal_rx_msdu_end_da_is_valid_get(soc->hal_soc, 1153 rx_tlv_hdr)); 1154 qdf_nbuf_set_sa_valid(nbuf, 1155 hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, 1156 rx_tlv_hdr)); 1157 1158 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata); 1159 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1160 pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size; 1161 1162 if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) { 1163 if (dp_rx_check_pkt_len(soc, pkt_len)) 1164 goto drop_nbuf; 1165 1166 /* Set length in nbuf */ 1167 qdf_nbuf_set_pktlen( 1168 nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE)); 1169 qdf_assert_always(nbuf->data == rx_tlv_hdr); 1170 } 1171 1172 /* 1173 * Check if DMA completed -- msdu_done is the last bit 1174 * to be written 1175 */ 1176 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1177 1178 dp_err_rl("MSDU DONE failure"); 1179 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1180 QDF_TRACE_LEVEL_INFO); 1181 qdf_assert(0); 1182 } 1183 1184 if (!peer && 1185 dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id, 1186 rx_tlv_hdr, nbuf)) 1187 return QDF_STATUS_E_FAILURE; 1188 1189 if (!peer) { 1190 bool mpdu_done = false; 1191 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 1192 1193 if (!pdev) { 1194 dp_err_rl("pdev is null for pool_id = %d", pool_id); 1195 return QDF_STATUS_E_FAILURE; 1196 } 1197 1198 dp_err_rl("peer is NULL"); 1199 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 1200 qdf_nbuf_len(nbuf)); 1201 1202 /* QCN9000 has the support enabled */ 1203 if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) { 1204 mpdu_done = true; 1205 nbuf->next = NULL; 1206 /* Trigger invalid peer handler wrapper */ 1207 dp_rx_process_invalid_peer_wrapper(soc, 1208 nbuf, mpdu_done, pool_id); 1209 } else { 1210 mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id); 1211 /* Trigger invalid peer handler wrapper */ 1212 dp_rx_process_invalid_peer_wrapper(soc, 1213 pdev->invalid_peer_head_msdu, 1214 mpdu_done, pool_id); 1215 } 1216 1217 if (mpdu_done) { 1218 pdev->invalid_peer_head_msdu = NULL; 1219 pdev->invalid_peer_tail_msdu = NULL; 1220 } 1221 1222 return QDF_STATUS_E_FAILURE; 1223 } 1224 1225 vdev = peer->vdev; 1226 if (!vdev) { 1227 dp_err_rl("Null vdev!"); 1228 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1229 goto drop_nbuf; 1230 } 1231 1232 /* 1233 * Advance the packet start pointer by total size of 1234 * pre-header TLV's 1235 */ 1236 if (qdf_nbuf_is_frag(nbuf)) 1237 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1238 else 1239 qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad + 1240 soc->rx_pkt_tlv_size)); 1241 1242 dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1); 1243 1244 if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) { 1245 sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr); 1246 1247 if ((sa_idx < 0) || 1248 (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { 1249 DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1); 1250 goto drop_nbuf; 1251 } 1252 } 1253 1254 if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) { 1255 /* this is a looped back MCBC pkt, drop it */ 1256 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); 1257 goto drop_nbuf; 1258 } 1259 1260 /* 1261 * In qwrap mode if the received packet matches with any of the vdev 1262 * mac addresses, drop it. Donot receive multicast packets originated 1263 * from any proxysta. 1264 */ 1265 if (check_qwrap_multicast_loopback(vdev, nbuf)) { 1266 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); 1267 goto drop_nbuf; 1268 } 1269 1270 1271 if (qdf_unlikely((peer->nawds_enabled == true) && 1272 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1273 rx_tlv_hdr))) { 1274 dp_err_rl("free buffer for multicast packet"); 1275 DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); 1276 goto drop_nbuf; 1277 } 1278 1279 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) { 1280 dp_err_rl("mcast Policy Check Drop pkt"); 1281 goto drop_nbuf; 1282 } 1283 /* WDS Source Port Learning */ 1284 if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet && 1285 vdev->wds_enabled)) 1286 dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf, 1287 msdu_metadata); 1288 1289 if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) { 1290 tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr); 1291 if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) 1292 dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); 1293 /* IEEE80211_SEQ_MAX indicates invalid start_seq */ 1294 } 1295 1296 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 1297 qdf_nbuf_set_next(nbuf, NULL); 1298 dp_rx_deliver_raw(vdev, nbuf, peer); 1299 } else { 1300 qdf_nbuf_set_next(nbuf, NULL); 1301 DP_STATS_INC_PKT(peer, rx.to_stack, 1, 1302 qdf_nbuf_len(nbuf)); 1303 1304 /* 1305 * Update the protocol tag in SKB based on 1306 * CCE metadata 1307 */ 1308 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1309 EXCEPTION_DEST_RING_ID, 1310 true, true); 1311 1312 /* Update the flow tag in SKB based on FSE metadata */ 1313 dp_rx_update_flow_tag(soc, vdev, nbuf, 1314 rx_tlv_hdr, true); 1315 1316 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get( 1317 soc->hal_soc, rx_tlv_hdr) && 1318 (vdev->rx_decap_type == 1319 htt_cmn_pkt_type_ethernet))) { 1320 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1321 DP_STATS_INC_PKT(peer, rx.multicast, 1, 1322 qdf_nbuf_len(nbuf)); 1323 1324 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) 1325 DP_STATS_INC_PKT(peer, rx.bcast, 1, 1326 qdf_nbuf_len(nbuf)); 1327 } 1328 1329 qdf_nbuf_set_exc_frame(nbuf, 1); 1330 dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1331 } 1332 return QDF_STATUS_SUCCESS; 1333 1334 drop_nbuf: 1335 qdf_nbuf_free(nbuf); 1336 return QDF_STATUS_E_FAILURE; 1337 } 1338 1339 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1340 1341 /** 1342 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err 1343 * frames to OS or wifi parse errors. 1344 * @soc: core DP main context 1345 * @nbuf: buffer pointer 1346 * @rx_tlv_hdr: start of rx tlv header 1347 * @peer: peer reference 1348 * @err_code: rxdma err code 1349 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1350 * pool_id has same mapping) 1351 * 1352 * Return: None 1353 */ 1354 void 1355 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, 1356 uint8_t *rx_tlv_hdr, struct dp_peer *peer, 1357 uint8_t err_code, uint8_t mac_id) 1358 { 1359 uint32_t pkt_len, l2_hdr_offset; 1360 uint16_t msdu_len; 1361 struct dp_vdev *vdev; 1362 qdf_ether_header_t *eh; 1363 bool is_broadcast; 1364 1365 /* 1366 * Check if DMA completed -- msdu_done is the last bit 1367 * to be written 1368 */ 1369 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1370 1371 dp_err_rl("MSDU DONE failure"); 1372 1373 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1374 QDF_TRACE_LEVEL_INFO); 1375 qdf_assert(0); 1376 } 1377 1378 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, 1379 rx_tlv_hdr); 1380 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1381 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 1382 1383 if (dp_rx_check_pkt_len(soc, pkt_len)) { 1384 /* Drop & free packet */ 1385 qdf_nbuf_free(nbuf); 1386 return; 1387 } 1388 /* Set length in nbuf */ 1389 qdf_nbuf_set_pktlen(nbuf, pkt_len); 1390 1391 qdf_nbuf_set_next(nbuf, NULL); 1392 1393 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 1394 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 1395 1396 if (!peer) { 1397 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL"); 1398 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 1399 qdf_nbuf_len(nbuf)); 1400 /* Trigger invalid peer handler wrapper */ 1401 dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id); 1402 return; 1403 } 1404 1405 vdev = peer->vdev; 1406 if (!vdev) { 1407 dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc, 1408 vdev); 1409 /* Drop & free packet */ 1410 qdf_nbuf_free(nbuf); 1411 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1412 return; 1413 } 1414 1415 /* 1416 * Advance the packet start pointer by total size of 1417 * pre-header TLV's 1418 */ 1419 dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset); 1420 1421 if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) { 1422 uint8_t *pkt_type; 1423 1424 pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE); 1425 if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) { 1426 if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) == 1427 htons(QDF_LLC_STP)) { 1428 DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1); 1429 goto process_mesh; 1430 } else { 1431 goto process_rx; 1432 } 1433 } 1434 } 1435 if (vdev->rx_decap_type == htt_cmn_pkt_type_raw) 1436 goto process_mesh; 1437 1438 /* 1439 * WAPI cert AP sends rekey frames as unencrypted. 1440 * Thus RXDMA will report unencrypted frame error. 1441 * To pass WAPI cert case, SW needs to pass unencrypted 1442 * rekey frame to stack. 1443 */ 1444 if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { 1445 goto process_rx; 1446 } 1447 /* 1448 * In dynamic WEP case rekey frames are not encrypted 1449 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and 1450 * key install is already done 1451 */ 1452 if ((vdev->sec_type == cdp_sec_type_wep104) && 1453 (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))) 1454 goto process_rx; 1455 1456 process_mesh: 1457 1458 if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) { 1459 qdf_nbuf_free(nbuf); 1460 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1461 return; 1462 } 1463 1464 if (vdev->mesh_vdev) { 1465 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 1466 == QDF_STATUS_SUCCESS) { 1467 dp_rx_err_info("%pK: mesh pkt filtered", soc); 1468 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); 1469 1470 qdf_nbuf_free(nbuf); 1471 return; 1472 } 1473 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); 1474 } 1475 process_rx: 1476 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1477 rx_tlv_hdr) && 1478 (vdev->rx_decap_type == 1479 htt_cmn_pkt_type_ethernet))) { 1480 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1481 is_broadcast = (QDF_IS_ADDR_BROADCAST 1482 (eh->ether_dhost)) ? 1 : 0 ; 1483 DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf)); 1484 if (is_broadcast) { 1485 DP_STATS_INC_PKT(peer, rx.bcast, 1, 1486 qdf_nbuf_len(nbuf)); 1487 } 1488 } 1489 1490 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 1491 dp_rx_deliver_raw(vdev, nbuf, peer); 1492 } else { 1493 /* Update the protocol tag in SKB based on CCE metadata */ 1494 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1495 EXCEPTION_DEST_RING_ID, true, true); 1496 /* Update the flow tag in SKB based on FSE metadata */ 1497 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); 1498 DP_STATS_INC(peer, rx.to_stack.num, 1); 1499 qdf_nbuf_set_exc_frame(nbuf, 1); 1500 dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1501 } 1502 1503 return; 1504 } 1505 1506 /** 1507 * dp_rx_process_mic_error(): Function to pass mic error indication to umac 1508 * @soc: core DP main context 1509 * @nbuf: buffer pointer 1510 * @rx_tlv_hdr: start of rx tlv header 1511 * @peer: peer handle 1512 * 1513 * return: void 1514 */ 1515 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, 1516 uint8_t *rx_tlv_hdr, struct dp_peer *peer) 1517 { 1518 struct dp_vdev *vdev = NULL; 1519 struct dp_pdev *pdev = NULL; 1520 struct ol_if_ops *tops = NULL; 1521 uint16_t rx_seq, fragno; 1522 uint8_t is_raw; 1523 unsigned int tid; 1524 QDF_STATUS status; 1525 struct cdp_rx_mic_err_info mic_failure_info; 1526 1527 if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1528 rx_tlv_hdr)) 1529 return; 1530 1531 if (!peer) { 1532 dp_info_rl("peer not found"); 1533 goto fail; 1534 } 1535 1536 vdev = peer->vdev; 1537 if (!vdev) { 1538 dp_info_rl("VDEV not found"); 1539 goto fail; 1540 } 1541 1542 pdev = vdev->pdev; 1543 if (!pdev) { 1544 dp_info_rl("PDEV not found"); 1545 goto fail; 1546 } 1547 1548 is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf)); 1549 if (is_raw) { 1550 fragno = dp_rx_frag_get_mpdu_frag_number(soc, 1551 qdf_nbuf_data(nbuf)); 1552 /* Can get only last fragment */ 1553 if (fragno) { 1554 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 1555 qdf_nbuf_data(nbuf)); 1556 rx_seq = hal_rx_get_rx_sequence(soc->hal_soc, 1557 qdf_nbuf_data(nbuf)); 1558 1559 status = dp_rx_defrag_add_last_frag(soc, peer, 1560 tid, rx_seq, nbuf); 1561 dp_info_rl("Frag pkt seq# %d frag# %d consumed " 1562 "status %d !", rx_seq, fragno, status); 1563 return; 1564 } 1565 } 1566 1567 if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf), 1568 &mic_failure_info.da_mac_addr.bytes[0])) { 1569 dp_err_rl("Failed to get da_mac_addr"); 1570 goto fail; 1571 } 1572 1573 if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf), 1574 &mic_failure_info.ta_mac_addr.bytes[0])) { 1575 dp_err_rl("Failed to get ta_mac_addr"); 1576 goto fail; 1577 } 1578 1579 mic_failure_info.key_id = 0; 1580 mic_failure_info.multicast = 1581 IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes); 1582 qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE); 1583 mic_failure_info.frame_type = cdp_rx_frame_type_802_11; 1584 mic_failure_info.data = NULL; 1585 mic_failure_info.vdev_id = vdev->vdev_id; 1586 1587 tops = pdev->soc->cdp_soc.ol_ops; 1588 if (tops->rx_mic_error) 1589 tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id, 1590 &mic_failure_info); 1591 1592 fail: 1593 qdf_nbuf_free(nbuf); 1594 return; 1595 } 1596 1597 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1598 1599 #ifdef DP_RX_DESC_COOKIE_INVALIDATE 1600 /** 1601 * dp_rx_link_cookie_check() - Validate link desc cookie 1602 * @ring_desc: ring descriptor 1603 * 1604 * Return: qdf status 1605 */ 1606 static inline QDF_STATUS 1607 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) 1608 { 1609 if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc))) 1610 return QDF_STATUS_E_FAILURE; 1611 1612 return QDF_STATUS_SUCCESS; 1613 } 1614 1615 /** 1616 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie 1617 * @ring_desc: ring descriptor 1618 * 1619 * Return: None 1620 */ 1621 static inline void 1622 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) 1623 { 1624 HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc); 1625 } 1626 #else 1627 static inline QDF_STATUS 1628 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) 1629 { 1630 return QDF_STATUS_SUCCESS; 1631 } 1632 1633 static inline void 1634 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) 1635 { 1636 } 1637 #endif 1638 1639 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 1640 /** 1641 * dp_rx_err_ring_record_entry() - Record rx err ring history 1642 * @soc: Datapath soc structure 1643 * @paddr: paddr of the buffer in RX err ring 1644 * @sw_cookie: SW cookie of the buffer in RX err ring 1645 * @rbm: Return buffer manager of the buffer in RX err ring 1646 * 1647 * Returns: None 1648 */ 1649 static inline void 1650 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, 1651 uint32_t sw_cookie, uint8_t rbm) 1652 { 1653 struct dp_buf_info_record *record; 1654 uint32_t idx; 1655 1656 if (qdf_unlikely(!soc->rx_err_ring_history)) 1657 return; 1658 1659 idx = dp_history_get_next_index(&soc->rx_err_ring_history->index, 1660 DP_RX_ERR_HIST_MAX); 1661 1662 /* No NULL check needed for record since its an array */ 1663 record = &soc->rx_err_ring_history->entry[idx]; 1664 1665 record->timestamp = qdf_get_log_timestamp(); 1666 record->hbi.paddr = paddr; 1667 record->hbi.sw_cookie = sw_cookie; 1668 record->hbi.rbm = rbm; 1669 } 1670 #else 1671 static inline void 1672 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, 1673 uint32_t sw_cookie, uint8_t rbm) 1674 { 1675 } 1676 #endif 1677 1678 #ifdef HANDLE_RX_REROUTE_ERR 1679 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc, 1680 hal_ring_desc_t ring_desc) 1681 { 1682 int lmac_id = DP_INVALID_LMAC_ID; 1683 struct dp_rx_desc *rx_desc; 1684 struct hal_buf_info hbi; 1685 struct dp_pdev *pdev; 1686 1687 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 1688 1689 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie); 1690 1691 /* sanity */ 1692 if (!rx_desc) { 1693 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1); 1694 goto assert_return; 1695 } 1696 1697 if (!rx_desc->nbuf) 1698 goto assert_return; 1699 1700 dp_rx_err_ring_record_entry(soc, hbi.paddr, 1701 hbi.sw_cookie, 1702 hal_rx_ret_buf_manager_get(soc->hal_soc, 1703 ring_desc)); 1704 if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) { 1705 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 1706 rx_desc->in_err_state = 1; 1707 goto assert_return; 1708 } 1709 1710 /* After this point the rx_desc and nbuf are valid */ 1711 dp_ipa_rx_buf_smmu_mapping_lock(soc); 1712 qdf_assert_always(rx_desc->unmapped); 1713 dp_ipa_handle_rx_buf_smmu_mapping(soc, 1714 rx_desc->nbuf, 1715 RX_DATA_BUFFER_SIZE, 1716 false); 1717 qdf_nbuf_unmap_nbytes_single(soc->osdev, 1718 rx_desc->nbuf, 1719 QDF_DMA_FROM_DEVICE, 1720 RX_DATA_BUFFER_SIZE); 1721 rx_desc->unmapped = 1; 1722 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 1723 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 1724 rx_desc->pool_id); 1725 1726 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 1727 lmac_id = rx_desc->pool_id; 1728 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 1729 &pdev->free_list_tail, 1730 rx_desc); 1731 return lmac_id; 1732 1733 assert_return: 1734 qdf_assert(0); 1735 return lmac_id; 1736 } 1737 1738 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 1739 { 1740 int ret; 1741 uint64_t cur_time_stamp; 1742 1743 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1); 1744 1745 /* Recover if overall error count exceeds threshold */ 1746 if (soc->stats.rx.err.reo_err_msdu_buf_rcved > 1747 DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) { 1748 dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu", 1749 soc->stats.rx.err.reo_err_msdu_buf_rcved, 1750 soc->rx_route_err_start_pkt_ts); 1751 qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR); 1752 } 1753 1754 cur_time_stamp = qdf_get_log_timestamp_usecs(); 1755 if (!soc->rx_route_err_start_pkt_ts) 1756 soc->rx_route_err_start_pkt_ts = cur_time_stamp; 1757 1758 /* Recover if threshold number of packets received in threshold time */ 1759 if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) > 1760 DP_RX_ERR_ROUTE_TIMEOUT_US) { 1761 soc->rx_route_err_start_pkt_ts = cur_time_stamp; 1762 1763 if (soc->rx_route_err_in_window > 1764 DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) { 1765 qdf_trigger_self_recovery(NULL, 1766 QDF_RX_REG_PKT_ROUTE_ERR); 1767 dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu", 1768 soc->stats.rx.err.reo_err_msdu_buf_rcved, 1769 soc->rx_route_err_start_pkt_ts); 1770 } else { 1771 soc->rx_route_err_in_window = 1; 1772 } 1773 } else { 1774 soc->rx_route_err_in_window++; 1775 } 1776 1777 ret = dp_rx_err_handle_msdu_buf(soc, ring_desc); 1778 1779 return ret; 1780 } 1781 #else /* HANDLE_RX_REROUTE_ERR */ 1782 1783 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 1784 { 1785 qdf_assert_always(0); 1786 1787 return DP_INVALID_LMAC_ID; 1788 } 1789 #endif /* HANDLE_RX_REROUTE_ERR */ 1790 1791 uint32_t 1792 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 1793 hal_ring_handle_t hal_ring_hdl, uint32_t quota) 1794 { 1795 hal_ring_desc_t ring_desc; 1796 hal_soc_handle_t hal_soc; 1797 uint32_t count = 0; 1798 uint32_t rx_bufs_used = 0; 1799 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 1800 uint8_t mac_id = 0; 1801 uint8_t buf_type; 1802 uint8_t error; 1803 struct hal_rx_mpdu_desc_info mpdu_desc_info; 1804 struct hal_buf_info hbi; 1805 struct dp_pdev *dp_pdev; 1806 struct dp_srng *dp_rxdma_srng; 1807 struct rx_desc_pool *rx_desc_pool; 1808 void *link_desc_va; 1809 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 1810 uint16_t num_msdus; 1811 struct dp_rx_desc *rx_desc = NULL; 1812 QDF_STATUS status; 1813 bool ret; 1814 uint32_t error_code = 0; 1815 1816 /* Debug -- Remove later */ 1817 qdf_assert(soc && hal_ring_hdl); 1818 1819 hal_soc = soc->hal_soc; 1820 1821 /* Debug -- Remove later */ 1822 qdf_assert(hal_soc); 1823 1824 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 1825 1826 /* TODO */ 1827 /* 1828 * Need API to convert from hal_ring pointer to 1829 * Ring Type / Ring Id combo 1830 */ 1831 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 1832 dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc, 1833 hal_ring_hdl); 1834 goto done; 1835 } 1836 1837 while (qdf_likely(quota-- && (ring_desc = 1838 hal_srng_dst_peek(hal_soc, 1839 hal_ring_hdl)))) { 1840 1841 DP_STATS_INC(soc, rx.err_ring_pkts, 1); 1842 1843 error = hal_rx_err_status_get(hal_soc, ring_desc); 1844 1845 buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc); 1846 1847 /* Get the MPDU DESC info */ 1848 hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info); 1849 1850 if (mpdu_desc_info.msdu_count == 0) 1851 goto next_entry; 1852 1853 /* 1854 * For REO error ring, only MSDU LINK DESC is expected. 1855 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case. 1856 */ 1857 if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) { 1858 int lmac_id; 1859 1860 lmac_id = dp_rx_err_exception(soc, ring_desc); 1861 if (lmac_id >= 0) 1862 rx_bufs_reaped[lmac_id] += 1; 1863 goto next_entry; 1864 } 1865 1866 hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc, 1867 &hbi); 1868 /* 1869 * check for the magic number in the sw cookie 1870 */ 1871 qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) & 1872 soc->link_desc_id_start); 1873 1874 status = dp_rx_link_cookie_check(ring_desc); 1875 if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { 1876 DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1); 1877 break; 1878 } 1879 1880 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 1881 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); 1882 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 1883 &num_msdus); 1884 dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0], 1885 msdu_list.sw_cookie[0], 1886 msdu_list.rbm[0]); 1887 // TODO - BE- Check if the RBM is to be checked for all chips 1888 if (qdf_unlikely((msdu_list.rbm[0] != 1889 DP_WBM2SW_RBM(soc->wbm_sw0_bm_id)) && 1890 (msdu_list.rbm[0] != 1891 HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST) && 1892 (msdu_list.rbm[0] != 1893 DP_DEFRAG_RBM(soc->wbm_sw0_bm_id)))) { 1894 /* TODO */ 1895 /* Call appropriate handler */ 1896 if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) { 1897 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 1898 dp_rx_err_err("%pK: Invalid RBM %d", 1899 soc, msdu_list.rbm[0]); 1900 } 1901 1902 /* Return link descriptor through WBM ring (SW2WBM)*/ 1903 dp_rx_link_desc_return(soc, ring_desc, 1904 HAL_BM_ACTION_RELEASE_MSDU_LIST); 1905 goto next_entry; 1906 } 1907 1908 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, 1909 msdu_list.sw_cookie[0]); 1910 qdf_assert_always(rx_desc); 1911 1912 mac_id = rx_desc->pool_id; 1913 1914 if (mpdu_desc_info.bar_frame) { 1915 qdf_assert_always(mpdu_desc_info.msdu_count == 1); 1916 1917 dp_rx_bar_frame_handle(soc, 1918 ring_desc, 1919 rx_desc, 1920 &mpdu_desc_info); 1921 1922 rx_bufs_reaped[mac_id] += 1; 1923 goto next_entry; 1924 } 1925 1926 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { 1927 /* 1928 * We only handle one msdu per link desc for fragmented 1929 * case. We drop the msdus and release the link desc 1930 * back if there are more than one msdu in link desc. 1931 */ 1932 if (qdf_unlikely(num_msdus > 1)) { 1933 count = dp_rx_msdus_drop(soc, ring_desc, 1934 &mpdu_desc_info, 1935 &mac_id, quota); 1936 rx_bufs_reaped[mac_id] += count; 1937 goto next_entry; 1938 } 1939 1940 /* 1941 * this is a unlikely scenario where the host is reaping 1942 * a descriptor which it already reaped just a while ago 1943 * but is yet to replenish it back to HW. 1944 * In this case host will dump the last 128 descriptors 1945 * including the software descriptor rx_desc and assert. 1946 */ 1947 1948 if (qdf_unlikely(!rx_desc->in_use)) { 1949 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); 1950 dp_info_rl("Reaping rx_desc not in use!"); 1951 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 1952 ring_desc, rx_desc); 1953 /* ignore duplicate RX desc and continue */ 1954 /* Pop out the descriptor */ 1955 goto next_entry; 1956 } 1957 1958 ret = dp_rx_desc_paddr_sanity_check(rx_desc, 1959 msdu_list.paddr[0]); 1960 if (!ret) { 1961 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 1962 rx_desc->in_err_state = 1; 1963 goto next_entry; 1964 } 1965 1966 count = dp_rx_frag_handle(soc, 1967 ring_desc, &mpdu_desc_info, 1968 rx_desc, &mac_id, quota); 1969 1970 rx_bufs_reaped[mac_id] += count; 1971 DP_STATS_INC(soc, rx.rx_frags, 1); 1972 goto next_entry; 1973 } 1974 1975 /* 1976 * Expect REO errors to be handled after this point 1977 */ 1978 qdf_assert_always(error == HAL_REO_ERROR_DETECTED); 1979 1980 error_code = hal_rx_get_reo_error_code(hal_soc, ring_desc); 1981 1982 if (hal_rx_reo_is_pn_error(error_code)) { 1983 /* TOD0 */ 1984 DP_STATS_INC(soc, 1985 rx.err. 1986 reo_error[HAL_REO_ERR_PN_CHECK_FAILED], 1987 1); 1988 /* increment @pdev level */ 1989 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1990 if (dp_pdev) 1991 DP_STATS_INC(dp_pdev, err.reo_error, 1); 1992 count = dp_rx_pn_error_handle(soc, 1993 ring_desc, 1994 &mpdu_desc_info, &mac_id, 1995 quota); 1996 1997 rx_bufs_reaped[mac_id] += count; 1998 goto next_entry; 1999 } 2000 2001 if (hal_rx_reo_is_2k_jump(error_code)) { 2002 /* TOD0 */ 2003 DP_STATS_INC(soc, 2004 rx.err. 2005 reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP], 2006 1); 2007 /* increment @pdev level */ 2008 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2009 if (dp_pdev) 2010 DP_STATS_INC(dp_pdev, err.reo_error, 1); 2011 2012 count = dp_rx_reo_err_entry_process( 2013 soc, 2014 ring_desc, 2015 &mpdu_desc_info, 2016 link_desc_va, 2017 HAL_REO_ERR_REGULAR_FRAME_2K_JUMP); 2018 2019 rx_bufs_reaped[mac_id] += count; 2020 goto next_entry; 2021 } 2022 2023 if (hal_rx_reo_is_oor_error(error_code)) { 2024 DP_STATS_INC( 2025 soc, 2026 rx.err. 2027 reo_error[HAL_REO_ERR_REGULAR_FRAME_OOR], 2028 1); 2029 /* increment @pdev level */ 2030 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2031 if (dp_pdev) 2032 DP_STATS_INC(dp_pdev, err.reo_error, 1); 2033 count = dp_rx_reo_err_entry_process( 2034 soc, 2035 ring_desc, 2036 &mpdu_desc_info, 2037 link_desc_va, 2038 HAL_REO_ERR_REGULAR_FRAME_OOR); 2039 2040 rx_bufs_reaped[mac_id] += count; 2041 goto next_entry; 2042 } 2043 /* Assert if unexpected error type */ 2044 qdf_assert_always(0); 2045 next_entry: 2046 dp_rx_link_cookie_invalidate(ring_desc); 2047 hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2048 } 2049 2050 done: 2051 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 2052 2053 if (soc->rx.flags.defrag_timeout_check) { 2054 uint32_t now_ms = 2055 qdf_system_ticks_to_msecs(qdf_system_ticks()); 2056 2057 if (now_ms >= soc->rx.defrag.next_flush_ms) 2058 dp_rx_defrag_waitlist_flush(soc); 2059 } 2060 2061 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 2062 if (rx_bufs_reaped[mac_id]) { 2063 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2064 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 2065 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 2066 2067 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 2068 rx_desc_pool, 2069 rx_bufs_reaped[mac_id], 2070 &dp_pdev->free_list_head, 2071 &dp_pdev->free_list_tail); 2072 rx_bufs_used += rx_bufs_reaped[mac_id]; 2073 } 2074 } 2075 2076 return rx_bufs_used; /* Assume no scale factor for now */ 2077 } 2078 2079 #ifdef DROP_RXDMA_DECRYPT_ERR 2080 /** 2081 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled 2082 * 2083 * Return: true if rxdma decrypt err frames are handled and false otheriwse 2084 */ 2085 static inline bool dp_handle_rxdma_decrypt_err(void) 2086 { 2087 return false; 2088 } 2089 #else 2090 static inline bool dp_handle_rxdma_decrypt_err(void) 2091 { 2092 return true; 2093 } 2094 #endif 2095 2096 static inline bool 2097 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info) 2098 { 2099 /* 2100 * Currently Null Queue and Unencrypted error handlers has support for 2101 * SG. Other error handler do not deal with SG buffer. 2102 */ 2103 if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) && 2104 (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) || 2105 ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) && 2106 (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED))) 2107 return true; 2108 2109 return false; 2110 } 2111 2112 uint32_t 2113 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 2114 hal_ring_handle_t hal_ring_hdl, uint32_t quota) 2115 { 2116 hal_ring_desc_t ring_desc; 2117 hal_soc_handle_t hal_soc; 2118 struct dp_rx_desc *rx_desc; 2119 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; 2120 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; 2121 uint32_t rx_bufs_used = 0; 2122 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 2123 uint8_t buf_type; 2124 uint8_t mac_id; 2125 struct dp_pdev *dp_pdev; 2126 struct dp_srng *dp_rxdma_srng; 2127 struct rx_desc_pool *rx_desc_pool; 2128 uint8_t *rx_tlv_hdr; 2129 qdf_nbuf_t nbuf_head = NULL; 2130 qdf_nbuf_t nbuf_tail = NULL; 2131 qdf_nbuf_t nbuf, next; 2132 struct hal_wbm_err_desc_info wbm_err_info = { 0 }; 2133 uint8_t pool_id; 2134 uint8_t tid = 0; 2135 uint8_t msdu_continuation = 0; 2136 bool process_sg_buf = false; 2137 uint32_t wbm_err_src; 2138 struct hal_buf_info buf_info = {0}; 2139 2140 /* Debug -- Remove later */ 2141 qdf_assert(soc && hal_ring_hdl); 2142 2143 hal_soc = soc->hal_soc; 2144 2145 /* Debug -- Remove later */ 2146 qdf_assert(hal_soc); 2147 2148 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 2149 2150 /* TODO */ 2151 /* 2152 * Need API to convert from hal_ring pointer to 2153 * Ring Type / Ring Id combo 2154 */ 2155 dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", 2156 soc, hal_ring_hdl); 2157 goto done; 2158 } 2159 2160 while (qdf_likely(quota)) { 2161 ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2162 if (qdf_unlikely(!ring_desc)) 2163 break; 2164 2165 /* XXX */ 2166 buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc); 2167 2168 /* 2169 * For WBM ring, expect only MSDU buffers 2170 */ 2171 qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF); 2172 2173 wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc); 2174 qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) || 2175 (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO)); 2176 2177 /* 2178 * Check if the buffer is to be processed on this processor 2179 */ 2180 2181 /* only cookie and rbm will be valid in buf_info */ 2182 hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc, 2183 &buf_info); 2184 2185 if (qdf_unlikely(buf_info.rbm != 2186 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) { 2187 /* TODO */ 2188 /* Call appropriate handler */ 2189 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 2190 dp_rx_err_err("%pK: Invalid RBM %d", soc, 2191 buf_info.rbm); 2192 continue; 2193 } 2194 2195 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie); 2196 qdf_assert_always(rx_desc); 2197 2198 if (!dp_rx_desc_check_magic(rx_desc)) { 2199 dp_rx_err_err("%pk: Invalid rx_desc cookie=%d", 2200 soc, buf_info.sw_cookie); 2201 continue; 2202 } 2203 2204 /* 2205 * this is a unlikely scenario where the host is reaping 2206 * a descriptor which it already reaped just a while ago 2207 * but is yet to replenish it back to HW. 2208 * In this case host will dump the last 128 descriptors 2209 * including the software descriptor rx_desc and assert. 2210 */ 2211 if (qdf_unlikely(!rx_desc->in_use)) { 2212 DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1); 2213 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 2214 ring_desc, rx_desc); 2215 continue; 2216 } 2217 2218 hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc); 2219 nbuf = rx_desc->nbuf; 2220 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 2221 dp_ipa_rx_buf_smmu_mapping_lock(soc); 2222 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, 2223 rx_desc_pool->buf_size, 2224 false); 2225 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, 2226 QDF_DMA_FROM_DEVICE, 2227 rx_desc_pool->buf_size); 2228 rx_desc->unmapped = 1; 2229 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 2230 2231 if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support && 2232 dp_rx_is_sg_formation_required(&wbm_err_info))) { 2233 /* SG is detected from continuation bit */ 2234 msdu_continuation = 2235 hal_rx_wbm_err_msdu_continuation_get(hal_soc, 2236 ring_desc); 2237 if (msdu_continuation && 2238 !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) { 2239 /* Update length from first buffer in SG */ 2240 soc->wbm_sg_param.wbm_sg_desc_msdu_len = 2241 hal_rx_msdu_start_msdu_len_get( 2242 soc->hal_soc, 2243 qdf_nbuf_data(nbuf)); 2244 soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true; 2245 } 2246 2247 if (msdu_continuation) { 2248 /* MSDU continued packets */ 2249 qdf_nbuf_set_rx_chfrag_cont(nbuf, 1); 2250 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2251 soc->wbm_sg_param.wbm_sg_desc_msdu_len; 2252 } else { 2253 /* This is the terminal packet in SG */ 2254 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 2255 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 2256 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2257 soc->wbm_sg_param.wbm_sg_desc_msdu_len; 2258 process_sg_buf = true; 2259 } 2260 } 2261 2262 /* 2263 * save the wbm desc info in nbuf TLV. We will need this 2264 * info when we do the actual nbuf processing 2265 */ 2266 wbm_err_info.pool_id = rx_desc->pool_id; 2267 hal_rx_priv_info_set_in_tlv(soc->hal_soc, 2268 qdf_nbuf_data(nbuf), 2269 (uint8_t *)&wbm_err_info, 2270 sizeof(wbm_err_info)); 2271 2272 rx_bufs_reaped[rx_desc->pool_id]++; 2273 2274 if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) { 2275 DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head, 2276 soc->wbm_sg_param.wbm_sg_nbuf_tail, 2277 nbuf); 2278 if (process_sg_buf) { 2279 if (!dp_rx_buffer_pool_refill( 2280 soc, 2281 soc->wbm_sg_param.wbm_sg_nbuf_head, 2282 rx_desc->pool_id)) 2283 DP_RX_MERGE_TWO_LIST( 2284 nbuf_head, nbuf_tail, 2285 soc->wbm_sg_param.wbm_sg_nbuf_head, 2286 soc->wbm_sg_param.wbm_sg_nbuf_tail); 2287 dp_rx_wbm_sg_list_reset(soc); 2288 process_sg_buf = false; 2289 } 2290 } else if (!dp_rx_buffer_pool_refill(soc, nbuf, 2291 rx_desc->pool_id)) { 2292 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf); 2293 } 2294 2295 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 2296 &tail[rx_desc->pool_id], 2297 rx_desc); 2298 2299 /* 2300 * if continuation bit is set then we have MSDU spread 2301 * across multiple buffers, let us not decrement quota 2302 * till we reap all buffers of that MSDU. 2303 */ 2304 if (qdf_likely(!msdu_continuation)) 2305 quota -= 1; 2306 } 2307 done: 2308 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 2309 2310 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 2311 if (rx_bufs_reaped[mac_id]) { 2312 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 2313 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 2314 2315 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 2316 rx_desc_pool, rx_bufs_reaped[mac_id], 2317 &head[mac_id], &tail[mac_id]); 2318 rx_bufs_used += rx_bufs_reaped[mac_id]; 2319 } 2320 } 2321 2322 nbuf = nbuf_head; 2323 while (nbuf) { 2324 struct dp_peer *peer; 2325 uint16_t peer_id; 2326 uint8_t err_code; 2327 uint8_t *tlv_hdr; 2328 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2329 2330 /* 2331 * retrieve the wbm desc info from nbuf TLV, so we can 2332 * handle error cases appropriately 2333 */ 2334 hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr, 2335 (uint8_t *)&wbm_err_info, 2336 sizeof(wbm_err_info)); 2337 2338 peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 2339 rx_tlv_hdr); 2340 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 2341 2342 if (!peer) 2343 dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u", 2344 peer_id, wbm_err_info.wbm_err_src, 2345 wbm_err_info.reo_psh_rsn); 2346 2347 /* Set queue_mapping in nbuf to 0 */ 2348 dp_set_rx_queue(nbuf, 0); 2349 2350 next = nbuf->next; 2351 2352 /* 2353 * Form the SG for msdu continued buffers 2354 * QCN9000 has this support 2355 */ 2356 if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) { 2357 nbuf = dp_rx_sg_create(soc, nbuf); 2358 next = nbuf->next; 2359 /* 2360 * SG error handling is not done correctly, 2361 * drop SG frames for now. 2362 */ 2363 qdf_nbuf_free(nbuf); 2364 dp_info_rl("scattered msdu dropped"); 2365 nbuf = next; 2366 if (peer) 2367 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 2368 continue; 2369 } 2370 2371 if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { 2372 if (wbm_err_info.reo_psh_rsn 2373 == HAL_RX_WBM_REO_PSH_RSN_ERROR) { 2374 2375 DP_STATS_INC(soc, 2376 rx.err.reo_error 2377 [wbm_err_info.reo_err_code], 1); 2378 /* increment @pdev level */ 2379 pool_id = wbm_err_info.pool_id; 2380 dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 2381 if (dp_pdev) 2382 DP_STATS_INC(dp_pdev, err.reo_error, 2383 1); 2384 2385 switch (wbm_err_info.reo_err_code) { 2386 /* 2387 * Handling for packets which have NULL REO 2388 * queue descriptor 2389 */ 2390 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 2391 pool_id = wbm_err_info.pool_id; 2392 dp_rx_null_q_desc_handle(soc, nbuf, 2393 rx_tlv_hdr, 2394 pool_id, peer); 2395 break; 2396 /* TODO */ 2397 /* Add per error code accounting */ 2398 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 2399 pool_id = wbm_err_info.pool_id; 2400 2401 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 2402 rx_tlv_hdr)) { 2403 peer_id = 2404 hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 2405 rx_tlv_hdr); 2406 tid = 2407 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); 2408 } 2409 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2410 hal_rx_msdu_start_msdu_len_get( 2411 soc->hal_soc, rx_tlv_hdr); 2412 nbuf->next = NULL; 2413 dp_2k_jump_handle(soc, nbuf, 2414 rx_tlv_hdr, 2415 peer_id, tid); 2416 break; 2417 case HAL_REO_ERR_REGULAR_FRAME_OOR: 2418 if (peer) 2419 DP_STATS_INC(peer, 2420 rx.err.oor_err, 1); 2421 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 2422 rx_tlv_hdr)) { 2423 peer_id = 2424 hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 2425 rx_tlv_hdr); 2426 tid = 2427 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); 2428 } 2429 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2430 hal_rx_msdu_start_msdu_len_get( 2431 soc->hal_soc, rx_tlv_hdr); 2432 nbuf->next = NULL; 2433 dp_rx_oor_handle(soc, nbuf, 2434 peer_id, 2435 rx_tlv_hdr); 2436 break; 2437 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 2438 case HAL_REO_ERR_BAR_FRAME_OOR: 2439 if (peer) 2440 dp_rx_err_handle_bar(soc, 2441 peer, 2442 nbuf); 2443 qdf_nbuf_free(nbuf); 2444 break; 2445 2446 case HAL_REO_ERR_PN_CHECK_FAILED: 2447 case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET: 2448 if (peer) 2449 DP_STATS_INC(peer, 2450 rx.err.pn_err, 1); 2451 qdf_nbuf_free(nbuf); 2452 break; 2453 2454 default: 2455 dp_info_rl("Got pkt with REO ERROR: %d", 2456 wbm_err_info.reo_err_code); 2457 qdf_nbuf_free(nbuf); 2458 } 2459 } else if (wbm_err_info.reo_psh_rsn 2460 == HAL_RX_WBM_REO_PSH_RSN_ROUTE) { 2461 DP_STATS_INC(soc, rx.reo2rel_route_drop, 1); 2462 qdf_nbuf_free(nbuf); 2463 } 2464 } else if (wbm_err_info.wbm_err_src == 2465 HAL_RX_WBM_ERR_SRC_RXDMA) { 2466 if (wbm_err_info.rxdma_psh_rsn 2467 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 2468 DP_STATS_INC(soc, 2469 rx.err.rxdma_error 2470 [wbm_err_info.rxdma_err_code], 1); 2471 /* increment @pdev level */ 2472 pool_id = wbm_err_info.pool_id; 2473 dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 2474 if (dp_pdev) 2475 DP_STATS_INC(dp_pdev, 2476 err.rxdma_error, 1); 2477 2478 switch (wbm_err_info.rxdma_err_code) { 2479 case HAL_RXDMA_ERR_UNENCRYPTED: 2480 2481 case HAL_RXDMA_ERR_WIFI_PARSE: 2482 pool_id = wbm_err_info.pool_id; 2483 dp_rx_process_rxdma_err(soc, nbuf, 2484 rx_tlv_hdr, 2485 peer, 2486 wbm_err_info. 2487 rxdma_err_code, 2488 pool_id); 2489 break; 2490 2491 case HAL_RXDMA_ERR_TKIP_MIC: 2492 dp_rx_process_mic_error(soc, nbuf, 2493 rx_tlv_hdr, 2494 peer); 2495 if (peer) 2496 DP_STATS_INC(peer, rx.err.mic_err, 1); 2497 break; 2498 2499 case HAL_RXDMA_ERR_DECRYPT: 2500 2501 if (peer) { 2502 DP_STATS_INC(peer, rx.err. 2503 decrypt_err, 1); 2504 qdf_nbuf_free(nbuf); 2505 break; 2506 } 2507 2508 if (!dp_handle_rxdma_decrypt_err()) { 2509 qdf_nbuf_free(nbuf); 2510 break; 2511 } 2512 2513 pool_id = wbm_err_info.pool_id; 2514 err_code = wbm_err_info.rxdma_err_code; 2515 tlv_hdr = rx_tlv_hdr; 2516 dp_rx_process_rxdma_err(soc, nbuf, 2517 tlv_hdr, NULL, 2518 err_code, 2519 pool_id); 2520 break; 2521 2522 default: 2523 qdf_nbuf_free(nbuf); 2524 dp_err_rl("RXDMA error %d", 2525 wbm_err_info.rxdma_err_code); 2526 } 2527 } else if (wbm_err_info.rxdma_psh_rsn 2528 == HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) { 2529 DP_STATS_INC(soc, rx.rxdma2rel_route_drop, 1); 2530 qdf_nbuf_free(nbuf); 2531 } 2532 } else { 2533 /* Should not come here */ 2534 qdf_assert(0); 2535 } 2536 2537 if (peer) 2538 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 2539 2540 nbuf = next; 2541 } 2542 return rx_bufs_used; /* Assume no scale factor for now */ 2543 } 2544 2545 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2546 2547 /** 2548 * dup_desc_dbg() - dump and assert if duplicate rx desc found 2549 * 2550 * @soc: core DP main context 2551 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 2552 * @rx_desc: void pointer to rx descriptor 2553 * 2554 * Return: void 2555 */ 2556 static void dup_desc_dbg(struct dp_soc *soc, 2557 hal_rxdma_desc_t rxdma_dst_ring_desc, 2558 void *rx_desc) 2559 { 2560 DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1); 2561 dp_rx_dump_info_and_assert( 2562 soc, 2563 soc->rx_rel_ring.hal_srng, 2564 hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc), 2565 rx_desc); 2566 } 2567 2568 /** 2569 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs 2570 * 2571 * @soc: core DP main context 2572 * @mac_id: mac id which is one of 3 mac_ids 2573 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 2574 * @head: head of descs list to be freed 2575 * @tail: tail of decs list to be freed 2576 2577 * Return: number of msdu in MPDU to be popped 2578 */ 2579 static inline uint32_t 2580 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 2581 hal_rxdma_desc_t rxdma_dst_ring_desc, 2582 union dp_rx_desc_list_elem_t **head, 2583 union dp_rx_desc_list_elem_t **tail) 2584 { 2585 void *rx_msdu_link_desc; 2586 qdf_nbuf_t msdu; 2587 qdf_nbuf_t last; 2588 struct hal_rx_msdu_list msdu_list; 2589 uint16_t num_msdus; 2590 struct hal_buf_info buf_info; 2591 uint32_t rx_bufs_used = 0; 2592 uint32_t msdu_cnt; 2593 uint32_t i; 2594 uint8_t push_reason; 2595 uint8_t rxdma_error_code = 0; 2596 uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST; 2597 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2598 uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; 2599 hal_rxdma_desc_t ring_desc; 2600 struct rx_desc_pool *rx_desc_pool; 2601 2602 if (!pdev) { 2603 dp_rx_err_debug("%pK: pdev is null for mac_id = %d", 2604 soc, mac_id); 2605 return rx_bufs_used; 2606 } 2607 2608 msdu = 0; 2609 2610 last = NULL; 2611 2612 hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc, 2613 &buf_info, &msdu_cnt); 2614 2615 push_reason = 2616 hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc); 2617 if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 2618 rxdma_error_code = 2619 hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc); 2620 } 2621 2622 do { 2623 rx_msdu_link_desc = 2624 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 2625 2626 qdf_assert_always(rx_msdu_link_desc); 2627 2628 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, 2629 &msdu_list, &num_msdus); 2630 2631 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 2632 /* if the msdus belongs to NSS offloaded radio && 2633 * the rbm is not SW1_BM then return the msdu_link 2634 * descriptor without freeing the msdus (nbufs). let 2635 * these buffers be given to NSS completion ring for 2636 * NSS to free them. 2637 * else iterate through the msdu link desc list and 2638 * free each msdu in the list. 2639 */ 2640 if (msdu_list.rbm[0] != 2641 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) && 2642 wlan_cfg_get_dp_pdev_nss_enabled( 2643 pdev->wlan_cfg_ctx)) 2644 bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST; 2645 else { 2646 for (i = 0; i < num_msdus; i++) { 2647 struct dp_rx_desc *rx_desc = 2648 dp_rx_cookie_2_va_rxdma_buf(soc, 2649 msdu_list.sw_cookie[i]); 2650 qdf_assert_always(rx_desc); 2651 msdu = rx_desc->nbuf; 2652 /* 2653 * this is a unlikely scenario 2654 * where the host is reaping 2655 * a descriptor which 2656 * it already reaped just a while ago 2657 * but is yet to replenish 2658 * it back to HW. 2659 * In this case host will dump 2660 * the last 128 descriptors 2661 * including the software descriptor 2662 * rx_desc and assert. 2663 */ 2664 ring_desc = rxdma_dst_ring_desc; 2665 if (qdf_unlikely(!rx_desc->in_use)) { 2666 dup_desc_dbg(soc, 2667 ring_desc, 2668 rx_desc); 2669 continue; 2670 } 2671 2672 rx_desc_pool = &soc-> 2673 rx_desc_buf[rx_desc->pool_id]; 2674 dp_ipa_rx_buf_smmu_mapping_lock(soc); 2675 dp_ipa_handle_rx_buf_smmu_mapping( 2676 soc, msdu, 2677 rx_desc_pool->buf_size, 2678 false); 2679 qdf_nbuf_unmap_nbytes_single( 2680 soc->osdev, msdu, 2681 QDF_DMA_FROM_DEVICE, 2682 rx_desc_pool->buf_size); 2683 rx_desc->unmapped = 1; 2684 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 2685 2686 dp_rx_err_debug("%pK: msdu_nbuf=%pK ", 2687 soc, msdu); 2688 2689 dp_rx_buffer_pool_nbuf_free(soc, msdu, 2690 rx_desc->pool_id); 2691 rx_bufs_used++; 2692 dp_rx_add_to_free_desc_list(head, 2693 tail, rx_desc); 2694 } 2695 } 2696 } else { 2697 rxdma_error_code = HAL_RXDMA_ERR_WAR; 2698 } 2699 2700 /* 2701 * Store the current link buffer into to the local structure 2702 * to be used for release purpose. 2703 */ 2704 hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info, 2705 buf_info.paddr, buf_info.sw_cookie, 2706 buf_info.rbm); 2707 2708 hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc, 2709 &buf_info); 2710 dp_rx_link_desc_return_by_addr(soc, 2711 (hal_buff_addrinfo_t) 2712 rx_link_buf_info, 2713 bm_action); 2714 } while (buf_info.paddr); 2715 2716 DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1); 2717 if (pdev) 2718 DP_STATS_INC(pdev, err.rxdma_error, 1); 2719 2720 if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) { 2721 dp_rx_err_err("%pK: Packet received with Decrypt error", soc); 2722 } 2723 2724 return rx_bufs_used; 2725 } 2726 2727 uint32_t 2728 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 2729 uint32_t mac_id, uint32_t quota) 2730 { 2731 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2732 hal_rxdma_desc_t rxdma_dst_ring_desc; 2733 hal_soc_handle_t hal_soc; 2734 void *err_dst_srng; 2735 union dp_rx_desc_list_elem_t *head = NULL; 2736 union dp_rx_desc_list_elem_t *tail = NULL; 2737 struct dp_srng *dp_rxdma_srng; 2738 struct rx_desc_pool *rx_desc_pool; 2739 uint32_t work_done = 0; 2740 uint32_t rx_bufs_used = 0; 2741 2742 if (!pdev) 2743 return 0; 2744 2745 err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng; 2746 2747 if (!err_dst_srng) { 2748 dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK", 2749 soc, err_dst_srng); 2750 return 0; 2751 } 2752 2753 hal_soc = soc->hal_soc; 2754 2755 qdf_assert(hal_soc); 2756 2757 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) { 2758 dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK", 2759 soc, err_dst_srng); 2760 return 0; 2761 } 2762 2763 while (qdf_likely(quota-- && (rxdma_dst_ring_desc = 2764 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { 2765 2766 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, 2767 rxdma_dst_ring_desc, 2768 &head, &tail); 2769 } 2770 2771 dp_srng_access_end(int_ctx, soc, err_dst_srng); 2772 2773 if (rx_bufs_used) { 2774 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 2775 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 2776 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 2777 } else { 2778 dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id]; 2779 rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id]; 2780 } 2781 2782 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 2783 rx_desc_pool, rx_bufs_used, &head, &tail); 2784 2785 work_done += rx_bufs_used; 2786 } 2787 2788 return work_done; 2789 } 2790 2791 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2792 2793 static inline uint32_t 2794 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 2795 hal_rxdma_desc_t rxdma_dst_ring_desc, 2796 union dp_rx_desc_list_elem_t **head, 2797 union dp_rx_desc_list_elem_t **tail) 2798 { 2799 void *rx_msdu_link_desc; 2800 qdf_nbuf_t msdu; 2801 qdf_nbuf_t last; 2802 struct hal_rx_msdu_list msdu_list; 2803 uint16_t num_msdus; 2804 struct hal_buf_info buf_info; 2805 uint32_t rx_bufs_used = 0, msdu_cnt, i; 2806 uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; 2807 struct rx_desc_pool *rx_desc_pool; 2808 2809 msdu = 0; 2810 2811 last = NULL; 2812 2813 hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc, 2814 &buf_info, &msdu_cnt); 2815 2816 do { 2817 rx_msdu_link_desc = 2818 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 2819 2820 if (!rx_msdu_link_desc) { 2821 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1); 2822 break; 2823 } 2824 2825 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, 2826 &msdu_list, &num_msdus); 2827 2828 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 2829 for (i = 0; i < num_msdus; i++) { 2830 struct dp_rx_desc *rx_desc = 2831 dp_rx_cookie_2_va_rxdma_buf( 2832 soc, 2833 msdu_list.sw_cookie[i]); 2834 qdf_assert_always(rx_desc); 2835 rx_desc_pool = 2836 &soc->rx_desc_buf[rx_desc->pool_id]; 2837 msdu = rx_desc->nbuf; 2838 2839 dp_ipa_rx_buf_smmu_mapping_lock(soc); 2840 dp_ipa_handle_rx_buf_smmu_mapping( 2841 soc, msdu, 2842 rx_desc_pool->buf_size, 2843 false); 2844 2845 qdf_nbuf_unmap_nbytes_single( 2846 soc->osdev, 2847 msdu, 2848 QDF_DMA_FROM_DEVICE, 2849 rx_desc_pool->buf_size); 2850 rx_desc->unmapped = 1; 2851 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 2852 2853 dp_rx_buffer_pool_nbuf_free(soc, msdu, 2854 rx_desc->pool_id); 2855 rx_bufs_used++; 2856 dp_rx_add_to_free_desc_list(head, 2857 tail, rx_desc); 2858 } 2859 } 2860 2861 /* 2862 * Store the current link buffer into to the local structure 2863 * to be used for release purpose. 2864 */ 2865 hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info, 2866 buf_info.paddr, buf_info.sw_cookie, 2867 buf_info.rbm); 2868 2869 hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc, 2870 &buf_info); 2871 dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t) 2872 rx_link_buf_info, 2873 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 2874 } while (buf_info.paddr); 2875 2876 return rx_bufs_used; 2877 } 2878 2879 /* 2880 * 2881 * dp_handle_wbm_internal_error() - handles wbm_internal_error case 2882 * 2883 * @soc: core DP main context 2884 * @hal_desc: hal descriptor 2885 * @buf_type: indicates if the buffer is of type link disc or msdu 2886 * Return: None 2887 * 2888 * wbm_internal_error is seen in following scenarios : 2889 * 2890 * 1. Null pointers detected in WBM_RELEASE_RING descriptors 2891 * 2. Null pointers detected during delinking process 2892 * 2893 * Some null pointer cases: 2894 * 2895 * a. MSDU buffer pointer is NULL 2896 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag 2897 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL 2898 */ 2899 void 2900 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc, 2901 uint32_t buf_type) 2902 { 2903 struct hal_buf_info buf_info = {0}; 2904 struct dp_rx_desc *rx_desc = NULL; 2905 struct rx_desc_pool *rx_desc_pool; 2906 uint32_t rx_bufs_reaped = 0; 2907 union dp_rx_desc_list_elem_t *head = NULL; 2908 union dp_rx_desc_list_elem_t *tail = NULL; 2909 uint8_t pool_id; 2910 2911 hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info); 2912 2913 if (!buf_info.paddr) { 2914 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1); 2915 return; 2916 } 2917 2918 /* buffer_addr_info is the first element of ring_desc */ 2919 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc, 2920 &buf_info); 2921 pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie); 2922 2923 if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) { 2924 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1); 2925 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie); 2926 2927 if (rx_desc && rx_desc->nbuf) { 2928 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 2929 dp_ipa_rx_buf_smmu_mapping_lock(soc); 2930 dp_ipa_handle_rx_buf_smmu_mapping( 2931 soc, rx_desc->nbuf, 2932 rx_desc_pool->buf_size, 2933 false); 2934 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, 2935 QDF_DMA_FROM_DEVICE, 2936 rx_desc_pool->buf_size); 2937 rx_desc->unmapped = 1; 2938 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 2939 2940 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 2941 rx_desc->pool_id); 2942 dp_rx_add_to_free_desc_list(&head, 2943 &tail, 2944 rx_desc); 2945 2946 rx_bufs_reaped++; 2947 } 2948 } else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) { 2949 rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id, 2950 hal_desc, 2951 &head, &tail); 2952 } 2953 2954 if (rx_bufs_reaped) { 2955 struct rx_desc_pool *rx_desc_pool; 2956 struct dp_srng *dp_rxdma_srng; 2957 2958 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1); 2959 dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id]; 2960 rx_desc_pool = &soc->rx_desc_buf[pool_id]; 2961 2962 dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng, 2963 rx_desc_pool, 2964 rx_bufs_reaped, 2965 &head, &tail); 2966 } 2967 } 2968 2969 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2970