1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "dp_types.h" 21 #include "dp_rx.h" 22 #include "dp_tx.h" 23 #include "dp_peer.h" 24 #include "dp_internal.h" 25 #include "hal_api.h" 26 #include "qdf_trace.h" 27 #include "qdf_nbuf.h" 28 #include "dp_rx_defrag.h" 29 #include "dp_ipa.h" 30 #ifdef WIFI_MONITOR_SUPPORT 31 #include "dp_htt.h" 32 #include <dp_mon.h> 33 #endif 34 #ifdef FEATURE_WDS 35 #include "dp_txrx_wds.h" 36 #endif 37 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 38 #include "qdf_net_types.h" 39 #include "dp_rx_buffer_pool.h" 40 41 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params) 42 #define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params) 43 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params) 44 #define dp_rx_err_info(params...) \ 45 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params) 46 #define dp_rx_err_info_rl(params...) \ 47 __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params) 48 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params) 49 50 #ifndef QCA_HOST_MODE_WIFI_DISABLED 51 52 /* Max buffer in invalid peer SG list*/ 53 #define DP_MAX_INVALID_BUFFERS 10 54 55 /* Max regular Rx packet routing error */ 56 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20 57 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10 58 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */ 59 60 #ifdef FEATURE_MEC 61 bool dp_rx_mcast_echo_check(struct dp_soc *soc, 62 struct dp_peer *peer, 63 uint8_t *rx_tlv_hdr, 64 qdf_nbuf_t nbuf) 65 { 66 struct dp_vdev *vdev = peer->vdev; 67 struct dp_pdev *pdev = vdev->pdev; 68 struct dp_mec_entry *mecentry = NULL; 69 struct dp_ast_entry *ase = NULL; 70 uint16_t sa_idx = 0; 71 uint8_t *data; 72 /* 73 * Multicast Echo Check is required only if vdev is STA and 74 * received pkt is a multicast/broadcast pkt. otherwise 75 * skip the MEC check. 76 */ 77 if (vdev->opmode != wlan_op_mode_sta) 78 return false; 79 if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr)) 80 return false; 81 82 data = qdf_nbuf_data(nbuf); 83 84 /* 85 * if the received pkts src mac addr matches with vdev 86 * mac address then drop the pkt as it is looped back 87 */ 88 if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE], 89 vdev->mac_addr.raw, 90 QDF_MAC_ADDR_SIZE))) 91 return true; 92 93 /* 94 * In case of qwrap isolation mode, donot drop loopback packets. 95 * In isolation mode, all packets from the wired stations need to go 96 * to rootap and loop back to reach the wireless stations and 97 * vice-versa. 98 */ 99 if (qdf_unlikely(vdev->isolation_vdev)) 100 return false; 101 102 /* 103 * if the received pkts src mac addr matches with the 104 * wired PCs MAC addr which is behind the STA or with 105 * wireless STAs MAC addr which are behind the Repeater, 106 * then drop the pkt as it is looped back 107 */ 108 if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) { 109 sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr); 110 111 if ((sa_idx < 0) || 112 (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { 113 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 114 "invalid sa_idx: %d", sa_idx); 115 qdf_assert_always(0); 116 } 117 118 qdf_spin_lock_bh(&soc->ast_lock); 119 ase = soc->ast_table[sa_idx]; 120 121 /* 122 * this check was not needed since MEC is not dependent on AST, 123 * but if we dont have this check SON has some issues in 124 * dual backhaul scenario. in APS SON mode, client connected 125 * to RE 2G and sends multicast packets. the RE sends it to CAP 126 * over 5G backhaul. the CAP loopback it on 2G to RE. 127 * On receiving in 2G STA vap, we assume that client has roamed 128 * and kickout the client. 129 */ 130 if (ase && (ase->peer_id != peer->peer_id)) { 131 qdf_spin_unlock_bh(&soc->ast_lock); 132 goto drop; 133 } 134 135 qdf_spin_unlock_bh(&soc->ast_lock); 136 } 137 138 qdf_spin_lock_bh(&soc->mec_lock); 139 140 mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id, 141 &data[QDF_MAC_ADDR_SIZE]); 142 if (!mecentry) { 143 qdf_spin_unlock_bh(&soc->mec_lock); 144 return false; 145 } 146 147 qdf_spin_unlock_bh(&soc->mec_lock); 148 149 drop: 150 dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT, 151 soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE])); 152 153 return true; 154 } 155 #endif 156 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 157 158 void dp_rx_link_desc_refill_duplicate_check( 159 struct dp_soc *soc, 160 struct hal_buf_info *buf_info, 161 hal_buff_addrinfo_t ring_buf_info) 162 { 163 struct hal_buf_info current_link_desc_buf_info = { 0 }; 164 165 /* do duplicate link desc address check */ 166 hal_rx_buffer_addr_info_get_paddr(ring_buf_info, 167 ¤t_link_desc_buf_info); 168 169 /* 170 * TODO - Check if the hal soc api call can be removed 171 * since the cookie is just used for print. 172 * buffer_addr_info is the first element of ring_desc 173 */ 174 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 175 (uint32_t *)ring_buf_info, 176 ¤t_link_desc_buf_info); 177 178 if (qdf_unlikely(current_link_desc_buf_info.paddr == 179 buf_info->paddr)) { 180 dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x", 181 current_link_desc_buf_info.paddr, 182 current_link_desc_buf_info.sw_cookie); 183 DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1); 184 } 185 *buf_info = current_link_desc_buf_info; 186 } 187 188 /** 189 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to 190 * (WBM) by address 191 * 192 * @soc: core DP main context 193 * @link_desc_addr: link descriptor addr 194 * 195 * Return: QDF_STATUS 196 */ 197 QDF_STATUS 198 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, 199 hal_buff_addrinfo_t link_desc_addr, 200 uint8_t bm_action) 201 { 202 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; 203 hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng; 204 hal_soc_handle_t hal_soc = soc->hal_soc; 205 QDF_STATUS status = QDF_STATUS_E_FAILURE; 206 void *src_srng_desc; 207 208 if (!wbm_rel_srng) { 209 dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc); 210 return status; 211 } 212 213 /* do duplicate link desc address check */ 214 dp_rx_link_desc_refill_duplicate_check( 215 soc, 216 &soc->last_op_info.wbm_rel_link_desc, 217 link_desc_addr); 218 219 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { 220 221 /* TODO */ 222 /* 223 * Need API to convert from hal_ring pointer to 224 * Ring Type / Ring Id combo 225 */ 226 dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK", 227 soc, wbm_rel_srng); 228 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 229 goto done; 230 } 231 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); 232 if (qdf_likely(src_srng_desc)) { 233 /* Return link descriptor through WBM ring (SW2WBM)*/ 234 hal_rx_msdu_link_desc_set(hal_soc, 235 src_srng_desc, link_desc_addr, bm_action); 236 status = QDF_STATUS_SUCCESS; 237 } else { 238 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng; 239 240 DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1); 241 242 dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)", 243 srng->ring_id, 244 soc->stats.rx.err.hal_ring_access_full_fail); 245 dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x", 246 *srng->u.src_ring.hp_addr, 247 srng->u.src_ring.reap_hp, 248 *srng->u.src_ring.tp_addr, 249 srng->u.src_ring.cached_tp); 250 QDF_BUG(0); 251 } 252 done: 253 hal_srng_access_end(hal_soc, wbm_rel_srng); 254 return status; 255 256 } 257 258 qdf_export_symbol(dp_rx_link_desc_return_by_addr); 259 260 /** 261 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 262 * (WBM), following error handling 263 * 264 * @soc: core DP main context 265 * @ring_desc: opaque pointer to the REO error ring descriptor 266 * 267 * Return: QDF_STATUS 268 */ 269 QDF_STATUS 270 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc, 271 uint8_t bm_action) 272 { 273 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc); 274 275 return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action); 276 } 277 278 #ifndef QCA_HOST_MODE_WIFI_DISABLED 279 280 /** 281 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU 282 * 283 * @soc: core txrx main context 284 * @ring_desc: opaque pointer to the REO error ring descriptor 285 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 286 * @head: head of the local descriptor free-list 287 * @tail: tail of the local descriptor free-list 288 * @quota: No. of units (packets) that can be serviced in one shot. 289 * 290 * This function is used to drop all MSDU in an MPDU 291 * 292 * Return: uint32_t: No. of elements processed 293 */ 294 static uint32_t 295 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc, 296 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 297 uint8_t *mac_id, 298 uint32_t quota) 299 { 300 uint32_t rx_bufs_used = 0; 301 void *link_desc_va; 302 struct hal_buf_info buf_info; 303 struct dp_pdev *pdev; 304 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 305 int i; 306 uint8_t *rx_tlv_hdr; 307 uint32_t tid; 308 struct rx_desc_pool *rx_desc_pool; 309 struct dp_rx_desc *rx_desc; 310 /* First field in REO Dst ring Desc is buffer_addr_info */ 311 void *buf_addr_info = ring_desc; 312 struct buffer_addr_info cur_link_desc_addr_info = { 0 }; 313 struct buffer_addr_info next_link_desc_addr_info = { 0 }; 314 315 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info); 316 317 /* buffer_addr_info is the first element of ring_desc */ 318 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 319 (uint32_t *)ring_desc, 320 &buf_info); 321 322 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 323 if (!link_desc_va) { 324 dp_rx_err_debug("link desc va is null, soc %pk", soc); 325 return rx_bufs_used; 326 } 327 328 more_msdu_link_desc: 329 /* No UNMAP required -- this is "malloc_consistent" memory */ 330 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 331 &mpdu_desc_info->msdu_count); 332 333 for (i = 0; (i < mpdu_desc_info->msdu_count); i++) { 334 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 335 soc, msdu_list.sw_cookie[i]); 336 337 qdf_assert_always(rx_desc); 338 339 /* all buffers from a MSDU link link belong to same pdev */ 340 *mac_id = rx_desc->pool_id; 341 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 342 if (!pdev) { 343 dp_rx_err_debug("%pK: pdev is null for pool_id = %d", 344 soc, rx_desc->pool_id); 345 return rx_bufs_used; 346 } 347 348 if (!dp_rx_desc_check_magic(rx_desc)) { 349 dp_rx_err_err("%pK: Invalid rx_desc cookie=%d", 350 soc, msdu_list.sw_cookie[i]); 351 return rx_bufs_used; 352 } 353 354 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 355 dp_ipa_rx_buf_smmu_mapping_lock(soc); 356 dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, 357 rx_desc_pool->buf_size, 358 false); 359 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, 360 QDF_DMA_FROM_DEVICE, 361 rx_desc_pool->buf_size); 362 rx_desc->unmapped = 1; 363 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 364 365 rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf); 366 367 rx_bufs_used++; 368 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 369 rx_desc->rx_buf_start); 370 dp_rx_err_err("%pK: Packet received with PN error for tid :%d", 371 soc, tid); 372 373 rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf); 374 if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr)) 375 hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr); 376 377 /* Just free the buffers */ 378 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id); 379 380 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 381 &pdev->free_list_tail, rx_desc); 382 } 383 384 /* 385 * If the msdu's are spread across multiple link-descriptors, 386 * we cannot depend solely on the msdu_count(e.g., if msdu is 387 * spread across multiple buffers).Hence, it is 388 * necessary to check the next link_descriptor and release 389 * all the msdu's that are part of it. 390 */ 391 hal_rx_get_next_msdu_link_desc_buf_addr_info( 392 link_desc_va, 393 &next_link_desc_addr_info); 394 395 if (hal_rx_is_buf_addr_info_valid( 396 &next_link_desc_addr_info)) { 397 /* Clear the next link desc info for the current link_desc */ 398 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va); 399 400 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 401 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 402 hal_rx_buffer_addr_info_get_paddr( 403 &next_link_desc_addr_info, 404 &buf_info); 405 /* buffer_addr_info is the first element of ring_desc */ 406 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 407 (uint32_t *)&next_link_desc_addr_info, 408 &buf_info); 409 cur_link_desc_addr_info = next_link_desc_addr_info; 410 buf_addr_info = &cur_link_desc_addr_info; 411 412 link_desc_va = 413 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 414 415 goto more_msdu_link_desc; 416 } 417 quota--; 418 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 419 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 420 return rx_bufs_used; 421 } 422 423 /** 424 * dp_rx_pn_error_handle() - Handles PN check errors 425 * 426 * @soc: core txrx main context 427 * @ring_desc: opaque pointer to the REO error ring descriptor 428 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 429 * @head: head of the local descriptor free-list 430 * @tail: tail of the local descriptor free-list 431 * @quota: No. of units (packets) that can be serviced in one shot. 432 * 433 * This function implements PN error handling 434 * If the peer is configured to ignore the PN check errors 435 * or if DP feels, that this frame is still OK, the frame can be 436 * re-injected back to REO to use some of the other features 437 * of REO e.g. duplicate detection/routing to other cores 438 * 439 * Return: uint32_t: No. of elements processed 440 */ 441 static uint32_t 442 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc, 443 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 444 uint8_t *mac_id, 445 uint32_t quota) 446 { 447 uint16_t peer_id; 448 uint32_t rx_bufs_used = 0; 449 struct dp_peer *peer; 450 bool peer_pn_policy = false; 451 452 peer_id = DP_PEER_METADATA_PEER_ID_GET( 453 mpdu_desc_info->peer_meta_data); 454 455 456 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 457 458 if (qdf_likely(peer)) { 459 /* 460 * TODO: Check for peer specific policies & set peer_pn_policy 461 */ 462 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 463 "discard rx due to PN error for peer %pK "QDF_MAC_ADDR_FMT, 464 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 465 466 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 467 } 468 dp_rx_err_err("%pK: Packet received with PN error", soc); 469 470 /* No peer PN policy -- definitely drop */ 471 if (!peer_pn_policy) 472 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, 473 mpdu_desc_info, 474 mac_id, quota); 475 476 return rx_bufs_used; 477 } 478 479 /** 480 * dp_rx_oor_handle() - Handles the msdu which is OOR error 481 * 482 * @soc: core txrx main context 483 * @nbuf: pointer to msdu skb 484 * @peer_id: dp peer ID 485 * @rx_tlv_hdr: start of rx tlv header 486 * 487 * This function process the msdu delivered from REO2TCL 488 * ring with error type OOR 489 * 490 * Return: None 491 */ 492 static void 493 dp_rx_oor_handle(struct dp_soc *soc, 494 qdf_nbuf_t nbuf, 495 uint16_t peer_id, 496 uint8_t *rx_tlv_hdr) 497 { 498 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 499 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; 500 struct dp_peer *peer = NULL; 501 502 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 503 if (!peer) { 504 dp_info_rl("peer not found"); 505 goto free_nbuf; 506 } 507 508 if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask, 509 rx_tlv_hdr)) { 510 DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1); 511 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 512 return; 513 } 514 515 free_nbuf: 516 if (peer) 517 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 518 519 DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1); 520 qdf_nbuf_free(nbuf); 521 } 522 523 /** 524 * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet 525 * is a monotonous increment of packet number 526 * from the previous successfully re-ordered 527 * frame. 528 * @soc: Datapath SOC handle 529 * @ring_desc: REO ring descriptor 530 * @nbuf: Current packet 531 * 532 * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE 533 */ 534 static inline QDF_STATUS 535 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc, 536 qdf_nbuf_t nbuf) 537 { 538 uint64_t prev_pn, curr_pn; 539 540 hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn); 541 hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), &curr_pn); 542 543 if (curr_pn > prev_pn) 544 return QDF_STATUS_SUCCESS; 545 546 return QDF_STATUS_E_FAILURE; 547 } 548 549 #ifdef WLAN_SKIP_BAR_UPDATE 550 static 551 void dp_rx_err_handle_bar(struct dp_soc *soc, 552 struct dp_peer *peer, 553 qdf_nbuf_t nbuf) 554 { 555 dp_info_rl("BAR update to H.W is skipped"); 556 DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1); 557 } 558 #else 559 static 560 void dp_rx_err_handle_bar(struct dp_soc *soc, 561 struct dp_peer *peer, 562 qdf_nbuf_t nbuf) 563 { 564 uint8_t *rx_tlv_hdr; 565 unsigned char type, subtype; 566 uint16_t start_seq_num; 567 uint32_t tid; 568 QDF_STATUS status; 569 struct ieee80211_frame_bar *bar; 570 571 /* 572 * 1. Is this a BAR frame. If not Discard it. 573 * 2. If it is, get the peer id, tid, ssn 574 * 2a Do a tid update 575 */ 576 577 rx_tlv_hdr = qdf_nbuf_data(nbuf); 578 bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size); 579 580 type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 581 subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 582 583 if (!(type == IEEE80211_FC0_TYPE_CTL && 584 subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) { 585 dp_err_rl("Not a BAR frame!"); 586 return; 587 } 588 589 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr); 590 qdf_assert_always(tid < DP_MAX_TIDS); 591 592 start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT; 593 594 dp_info_rl("tid %u window_size %u start_seq_num %u", 595 tid, peer->rx_tid[tid].ba_win_size, start_seq_num); 596 597 status = dp_rx_tid_update_wifi3(peer, tid, 598 peer->rx_tid[tid].ba_win_size, 599 start_seq_num, 600 true); 601 if (status != QDF_STATUS_SUCCESS) { 602 dp_err_rl("failed to handle bar frame update rx tid"); 603 DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1); 604 } else { 605 DP_STATS_INC(soc, rx.err.ssn_update_count, 1); 606 } 607 } 608 #endif 609 610 /** 611 * _dp_rx_bar_frame_handle(): Core of the BAR frame handling 612 * @soc: Datapath SoC handle 613 * @nbuf: packet being processed 614 * @mpdu_desc_info: mpdu desc info for the current packet 615 * @tid: tid on which the packet arrived 616 * @err_status: Flag to indicate if REO encountered an error while routing this 617 * frame 618 * @error_code: REO error code 619 * 620 * Return: None 621 */ 622 static void 623 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, 624 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 625 uint32_t tid, uint8_t err_status, uint32_t error_code) 626 { 627 uint16_t peer_id; 628 struct dp_peer *peer; 629 630 peer_id = DP_PEER_METADATA_PEER_ID_GET(mpdu_desc_info->peer_meta_data); 631 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 632 if (!peer) 633 return; 634 635 dp_info("BAR frame: peer = " QDF_MAC_ADDR_FMT 636 " peer_id = %d" 637 " tid = %u" 638 " SSN = %d" 639 " error status = %d", 640 QDF_MAC_ADDR_REF(peer->mac_addr.raw), 641 peer->peer_id, 642 tid, 643 mpdu_desc_info->mpdu_seq, 644 err_status); 645 646 if (err_status == HAL_REO_ERROR_DETECTED) { 647 switch (error_code) { 648 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 649 /* fallthrough */ 650 case HAL_REO_ERR_BAR_FRAME_OOR: 651 dp_rx_err_handle_bar(soc, peer, nbuf); 652 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 653 break; 654 default: 655 DP_STATS_INC(soc, rx.bar_frame, 1); 656 } 657 } 658 659 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 660 } 661 662 /** 663 * dp_rx_reo_err_entry_process() - Handles for REO error entry processing 664 * 665 * @soc: core txrx main context 666 * @ring_desc: opaque pointer to the REO error ring descriptor 667 * @mpdu_desc_info: pointer to mpdu level description info 668 * @link_desc_va: pointer to msdu_link_desc virtual address 669 * @err_code: reo erro code fetched from ring entry 670 * 671 * Function to handle msdus fetched from msdu link desc, currently 672 * only support 2K jump, OOR error. 673 * 674 * Return: msdu count processed. 675 */ 676 static uint32_t 677 dp_rx_reo_err_entry_process(struct dp_soc *soc, 678 void *ring_desc, 679 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 680 void *link_desc_va, 681 enum hal_reo_error_code err_code) 682 { 683 uint32_t rx_bufs_used = 0; 684 struct dp_pdev *pdev; 685 int i; 686 uint8_t *rx_tlv_hdr_first; 687 uint8_t *rx_tlv_hdr_last; 688 uint32_t tid = DP_MAX_TIDS; 689 uint16_t peer_id; 690 struct dp_rx_desc *rx_desc; 691 struct rx_desc_pool *rx_desc_pool; 692 qdf_nbuf_t nbuf; 693 struct hal_buf_info buf_info; 694 struct hal_rx_msdu_list msdu_list; 695 uint16_t num_msdus; 696 struct buffer_addr_info cur_link_desc_addr_info = { 0 }; 697 struct buffer_addr_info next_link_desc_addr_info = { 0 }; 698 /* First field in REO Dst ring Desc is buffer_addr_info */ 699 void *buf_addr_info = ring_desc; 700 qdf_nbuf_t head_nbuf = NULL; 701 qdf_nbuf_t tail_nbuf = NULL; 702 uint16_t msdu_processed = 0; 703 QDF_STATUS status; 704 bool ret; 705 706 peer_id = DP_PEER_METADATA_PEER_ID_GET( 707 mpdu_desc_info->peer_meta_data); 708 709 more_msdu_link_desc: 710 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 711 &num_msdus); 712 for (i = 0; i < num_msdus; i++) { 713 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 714 soc, 715 msdu_list.sw_cookie[i]); 716 717 qdf_assert_always(rx_desc); 718 719 /* all buffers from a MSDU link belong to same pdev */ 720 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 721 722 nbuf = rx_desc->nbuf; 723 ret = dp_rx_desc_paddr_sanity_check(rx_desc, 724 msdu_list.paddr[i]); 725 if (!ret) { 726 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 727 rx_desc->in_err_state = 1; 728 continue; 729 } 730 731 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 732 dp_ipa_rx_buf_smmu_mapping_lock(soc); 733 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, 734 rx_desc_pool->buf_size, 735 false); 736 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, 737 QDF_DMA_FROM_DEVICE, 738 rx_desc_pool->buf_size); 739 rx_desc->unmapped = 1; 740 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 741 742 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len; 743 rx_bufs_used++; 744 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 745 &pdev->free_list_tail, rx_desc); 746 747 DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf); 748 749 if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags & 750 HAL_MSDU_F_MSDU_CONTINUATION)) 751 continue; 752 753 if (dp_rx_buffer_pool_refill(soc, head_nbuf, 754 rx_desc->pool_id)) { 755 /* MSDU queued back to the pool */ 756 goto process_next_msdu; 757 } 758 759 rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf); 760 rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf); 761 762 if (qdf_unlikely(head_nbuf != tail_nbuf)) { 763 nbuf = dp_rx_sg_create(soc, head_nbuf); 764 qdf_nbuf_set_is_frag(nbuf, 1); 765 DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1); 766 } 767 768 if (soc->features.pn_in_reo_dest) { 769 status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf); 770 if (QDF_IS_STATUS_ERROR(status)) { 771 DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail, 772 1); 773 qdf_nbuf_free(nbuf); 774 goto process_next_msdu; 775 } 776 777 hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, 778 qdf_nbuf_data(nbuf), 779 mpdu_desc_info); 780 peer_id = DP_PEER_METADATA_PEER_ID_GET( 781 mpdu_desc_info->peer_meta_data); 782 783 if (mpdu_desc_info->bar_frame) 784 _dp_rx_bar_frame_handle(soc, nbuf, 785 mpdu_desc_info, tid, 786 HAL_REO_ERROR_DETECTED, 787 err_code); 788 } 789 790 switch (err_code) { 791 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 792 /* 793 * only first msdu, mpdu start description tlv valid? 794 * and use it for following msdu. 795 */ 796 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 797 rx_tlv_hdr_last)) 798 tid = hal_rx_mpdu_start_tid_get( 799 soc->hal_soc, 800 rx_tlv_hdr_first); 801 802 dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last, 803 peer_id, tid); 804 break; 805 806 case HAL_REO_ERR_REGULAR_FRAME_OOR: 807 dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last); 808 break; 809 default: 810 dp_err_rl("Non-support error code %d", err_code); 811 qdf_nbuf_free(nbuf); 812 } 813 814 process_next_msdu: 815 msdu_processed++; 816 head_nbuf = NULL; 817 tail_nbuf = NULL; 818 } 819 820 /* 821 * If the msdu's are spread across multiple link-descriptors, 822 * we cannot depend solely on the msdu_count(e.g., if msdu is 823 * spread across multiple buffers).Hence, it is 824 * necessary to check the next link_descriptor and release 825 * all the msdu's that are part of it. 826 */ 827 hal_rx_get_next_msdu_link_desc_buf_addr_info( 828 link_desc_va, 829 &next_link_desc_addr_info); 830 831 if (hal_rx_is_buf_addr_info_valid( 832 &next_link_desc_addr_info)) { 833 /* Clear the next link desc info for the current link_desc */ 834 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va); 835 dp_rx_link_desc_return_by_addr( 836 soc, 837 buf_addr_info, 838 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 839 840 hal_rx_buffer_addr_info_get_paddr( 841 &next_link_desc_addr_info, 842 &buf_info); 843 /* buffer_addr_info is the first element of ring_desc */ 844 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 845 (uint32_t *)&next_link_desc_addr_info, 846 &buf_info); 847 link_desc_va = 848 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 849 cur_link_desc_addr_info = next_link_desc_addr_info; 850 buf_addr_info = &cur_link_desc_addr_info; 851 852 goto more_msdu_link_desc; 853 } 854 855 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 856 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 857 if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count)) 858 DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1); 859 860 return rx_bufs_used; 861 } 862 863 #ifdef DP_INVALID_PEER_ASSERT 864 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \ 865 do { \ 866 qdf_assert_always(!(head)); \ 867 qdf_assert_always(!(tail)); \ 868 } while (0) 869 #else 870 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */ 871 #endif 872 873 /** 874 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu 875 * to pdev invalid peer list 876 * 877 * @soc: core DP main context 878 * @nbuf: Buffer pointer 879 * @rx_tlv_hdr: start of rx tlv header 880 * @mac_id: mac id 881 * 882 * Return: bool: true for last msdu of mpdu 883 */ 884 static bool 885 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, 886 uint8_t *rx_tlv_hdr, uint8_t mac_id) 887 { 888 bool mpdu_done = false; 889 qdf_nbuf_t curr_nbuf = NULL; 890 qdf_nbuf_t tmp_nbuf = NULL; 891 892 /* TODO: Currently only single radio is supported, hence 893 * pdev hard coded to '0' index 894 */ 895 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 896 897 if (!dp_pdev) { 898 dp_rx_err_debug("%pK: pdev is null for mac_id = %d", soc, mac_id); 899 return mpdu_done; 900 } 901 /* if invalid peer SG list has max values free the buffers in list 902 * and treat current buffer as start of list 903 * 904 * current logic to detect the last buffer from attn_tlv is not reliable 905 * in OFDMA UL scenario hence add max buffers check to avoid list pile 906 * up 907 */ 908 if (!dp_pdev->first_nbuf || 909 (dp_pdev->invalid_peer_head_msdu && 910 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST 911 (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) { 912 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 913 dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc, 914 rx_tlv_hdr); 915 dp_pdev->first_nbuf = true; 916 917 /* If the new nbuf received is the first msdu of the 918 * amsdu and there are msdus in the invalid peer msdu 919 * list, then let us free all the msdus of the invalid 920 * peer msdu list. 921 * This scenario can happen when we start receiving 922 * new a-msdu even before the previous a-msdu is completely 923 * received. 924 */ 925 curr_nbuf = dp_pdev->invalid_peer_head_msdu; 926 while (curr_nbuf) { 927 tmp_nbuf = curr_nbuf->next; 928 qdf_nbuf_free(curr_nbuf); 929 curr_nbuf = tmp_nbuf; 930 } 931 932 dp_pdev->invalid_peer_head_msdu = NULL; 933 dp_pdev->invalid_peer_tail_msdu = NULL; 934 935 dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr); 936 } 937 938 if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(soc->hal_soc, 939 rx_tlv_hdr) && 940 hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 941 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 942 qdf_assert_always(dp_pdev->first_nbuf == true); 943 dp_pdev->first_nbuf = false; 944 mpdu_done = true; 945 } 946 947 /* 948 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu 949 * should be NULL here, add the checking for debugging purpose 950 * in case some corner case. 951 */ 952 DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu, 953 dp_pdev->invalid_peer_tail_msdu); 954 DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu, 955 dp_pdev->invalid_peer_tail_msdu, 956 nbuf); 957 958 return mpdu_done; 959 } 960 961 /** 962 * dp_rx_bar_frame_handle() - Function to handle err BAR frames 963 * @soc: core DP main context 964 * @ring_desc: Hal ring desc 965 * @rx_desc: dp rx desc 966 * @mpdu_desc_info: mpdu desc info 967 * 968 * Handle the error BAR frames received. Ensure the SOC level 969 * stats are updated based on the REO error code. The BAR frames 970 * are further processed by updating the Rx tids with the start 971 * sequence number (SSN) and BA window size. Desc is returned 972 * to the free desc list 973 * 974 * Return: none 975 */ 976 static void 977 dp_rx_bar_frame_handle(struct dp_soc *soc, 978 hal_ring_desc_t ring_desc, 979 struct dp_rx_desc *rx_desc, 980 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 981 uint8_t err_status, 982 uint32_t err_code) 983 { 984 qdf_nbuf_t nbuf; 985 struct dp_pdev *pdev; 986 struct rx_desc_pool *rx_desc_pool; 987 uint8_t *rx_tlv_hdr; 988 uint32_t tid; 989 990 nbuf = rx_desc->nbuf; 991 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 992 dp_ipa_rx_buf_smmu_mapping_lock(soc); 993 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, 994 rx_desc_pool->buf_size, 995 false); 996 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, 997 QDF_DMA_FROM_DEVICE, 998 rx_desc_pool->buf_size); 999 rx_desc->unmapped = 1; 1000 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 1001 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1002 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 1003 rx_tlv_hdr); 1004 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 1005 1006 _dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status, 1007 err_code); 1008 dp_rx_link_desc_return(soc, ring_desc, 1009 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 1010 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 1011 rx_desc->pool_id); 1012 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 1013 &pdev->free_list_tail, 1014 rx_desc); 1015 } 1016 1017 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1018 1019 /** 1020 * dp_2k_jump_handle() - Function to handle 2k jump exception 1021 * on WBM ring 1022 * 1023 * @soc: core DP main context 1024 * @nbuf: buffer pointer 1025 * @rx_tlv_hdr: start of rx tlv header 1026 * @peer_id: peer id of first msdu 1027 * @tid: Tid for which exception occurred 1028 * 1029 * This function handles 2k jump violations arising out 1030 * of receiving aggregates in non BA case. This typically 1031 * may happen if aggregates are received on a QOS enabled TID 1032 * while Rx window size is still initialized to value of 2. Or 1033 * it may also happen if negotiated window size is 1 but peer 1034 * sends aggregates. 1035 * 1036 */ 1037 1038 void 1039 dp_2k_jump_handle(struct dp_soc *soc, 1040 qdf_nbuf_t nbuf, 1041 uint8_t *rx_tlv_hdr, 1042 uint16_t peer_id, 1043 uint8_t tid) 1044 { 1045 struct dp_peer *peer = NULL; 1046 struct dp_rx_tid *rx_tid = NULL; 1047 uint32_t frame_mask = FRAME_MASK_IPV4_ARP; 1048 1049 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 1050 if (!peer) { 1051 dp_rx_err_info_rl("%pK: peer not found", soc); 1052 goto free_nbuf; 1053 } 1054 1055 if (tid >= DP_MAX_TIDS) { 1056 dp_info_rl("invalid tid"); 1057 goto nbuf_deliver; 1058 } 1059 1060 rx_tid = &peer->rx_tid[tid]; 1061 qdf_spin_lock_bh(&rx_tid->tid_lock); 1062 1063 /* only if BA session is active, allow send Delba */ 1064 if (rx_tid->ba_status != DP_RX_BA_ACTIVE) { 1065 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1066 goto nbuf_deliver; 1067 } 1068 1069 if (!rx_tid->delba_tx_status) { 1070 rx_tid->delba_tx_retry++; 1071 rx_tid->delba_tx_status = 1; 1072 rx_tid->delba_rcode = 1073 IEEE80211_REASON_QOS_SETUP_REQUIRED; 1074 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1075 if (soc->cdp_soc.ol_ops->send_delba) { 1076 DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1); 1077 soc->cdp_soc.ol_ops->send_delba( 1078 peer->vdev->pdev->soc->ctrl_psoc, 1079 peer->vdev->vdev_id, 1080 peer->mac_addr.raw, 1081 tid, 1082 rx_tid->delba_rcode, 1083 CDP_DELBA_2K_JUMP); 1084 } 1085 } else { 1086 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1087 } 1088 1089 nbuf_deliver: 1090 if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask, 1091 rx_tlv_hdr)) { 1092 DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1); 1093 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1094 return; 1095 } 1096 1097 free_nbuf: 1098 if (peer) 1099 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1100 DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1); 1101 qdf_nbuf_free(nbuf); 1102 } 1103 1104 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ 1105 defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_WCN7850) 1106 /** 1107 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception 1108 * @soc: pointer to dp_soc struct 1109 * @pool_id: Pool id to find dp_pdev 1110 * @rx_tlv_hdr: TLV header of received packet 1111 * @nbuf: SKB 1112 * 1113 * In certain types of packets if peer_id is not correct then 1114 * driver may not be able find. Try finding peer by addr_2 of 1115 * received MPDU. If you find the peer then most likely sw_peer_id & 1116 * ast_idx is corrupted. 1117 * 1118 * Return: True if you find the peer by addr_2 of received MPDU else false 1119 */ 1120 static bool 1121 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, 1122 uint8_t pool_id, 1123 uint8_t *rx_tlv_hdr, 1124 qdf_nbuf_t nbuf) 1125 { 1126 struct dp_peer *peer = NULL; 1127 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr); 1128 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 1129 struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr; 1130 1131 if (!pdev) { 1132 dp_rx_err_debug("%pK: pdev is null for pool_id = %d", 1133 soc, pool_id); 1134 return false; 1135 } 1136 /* 1137 * WAR- In certain types of packets if peer_id is not correct then 1138 * driver may not be able find. Try finding peer by addr_2 of 1139 * received MPDU 1140 */ 1141 if (wh) 1142 peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0, 1143 DP_VDEV_ALL, DP_MOD_ID_RX_ERR); 1144 if (peer) { 1145 dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted"); 1146 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1147 QDF_TRACE_LEVEL_DEBUG); 1148 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id, 1149 1, qdf_nbuf_len(nbuf)); 1150 qdf_nbuf_free(nbuf); 1151 1152 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1153 return true; 1154 } 1155 return false; 1156 } 1157 1158 /** 1159 * dp_rx_check_pkt_len() - Check for pktlen validity 1160 * @soc: DP SOC context 1161 * @pkt_len: computed length of the pkt from caller in bytes 1162 * 1163 * Return: true if pktlen > RX_BUFFER_SIZE, else return false 1164 * 1165 */ 1166 static inline 1167 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len) 1168 { 1169 if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) { 1170 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len, 1171 1, pkt_len); 1172 return true; 1173 } else { 1174 return false; 1175 } 1176 } 1177 1178 #else 1179 static inline bool 1180 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, 1181 uint8_t pool_id, 1182 uint8_t *rx_tlv_hdr, 1183 qdf_nbuf_t nbuf) 1184 { 1185 return false; 1186 } 1187 1188 static inline 1189 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len) 1190 { 1191 return false; 1192 } 1193 1194 #endif 1195 1196 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1197 1198 /** 1199 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue 1200 * descriptor violation on either a 1201 * REO or WBM ring 1202 * 1203 * @soc: core DP main context 1204 * @nbuf: buffer pointer 1205 * @rx_tlv_hdr: start of rx tlv header 1206 * @pool_id: mac id 1207 * @peer: peer handle 1208 * 1209 * This function handles NULL queue descriptor violations arising out 1210 * a missing REO queue for a given peer or a given TID. This typically 1211 * may happen if a packet is received on a QOS enabled TID before the 1212 * ADDBA negotiation for that TID, when the TID queue is setup. Or 1213 * it may also happen for MC/BC frames if they are not routed to the 1214 * non-QOS TID queue, in the absence of any other default TID queue. 1215 * This error can show up both in a REO destination or WBM release ring. 1216 * 1217 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code 1218 * if nbuf could not be handled or dropped. 1219 */ 1220 static QDF_STATUS 1221 dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, 1222 uint8_t *rx_tlv_hdr, uint8_t pool_id, 1223 struct dp_peer *peer) 1224 { 1225 uint32_t pkt_len; 1226 uint16_t msdu_len; 1227 struct dp_vdev *vdev; 1228 uint8_t tid; 1229 qdf_ether_header_t *eh; 1230 struct hal_rx_msdu_metadata msdu_metadata; 1231 uint16_t sa_idx = 0; 1232 bool is_eapol; 1233 1234 qdf_nbuf_set_rx_chfrag_start(nbuf, 1235 hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1236 rx_tlv_hdr)); 1237 qdf_nbuf_set_rx_chfrag_end(nbuf, 1238 hal_rx_msdu_end_last_msdu_get(soc->hal_soc, 1239 rx_tlv_hdr)); 1240 qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1241 rx_tlv_hdr)); 1242 qdf_nbuf_set_da_valid(nbuf, 1243 hal_rx_msdu_end_da_is_valid_get(soc->hal_soc, 1244 rx_tlv_hdr)); 1245 qdf_nbuf_set_sa_valid(nbuf, 1246 hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, 1247 rx_tlv_hdr)); 1248 1249 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata); 1250 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1251 pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size; 1252 1253 if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) { 1254 if (dp_rx_check_pkt_len(soc, pkt_len)) 1255 goto drop_nbuf; 1256 1257 /* Set length in nbuf */ 1258 qdf_nbuf_set_pktlen( 1259 nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE)); 1260 qdf_assert_always(nbuf->data == rx_tlv_hdr); 1261 } 1262 1263 /* 1264 * Check if DMA completed -- msdu_done is the last bit 1265 * to be written 1266 */ 1267 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1268 1269 dp_err_rl("MSDU DONE failure"); 1270 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1271 QDF_TRACE_LEVEL_INFO); 1272 qdf_assert(0); 1273 } 1274 1275 if (!peer && 1276 dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id, 1277 rx_tlv_hdr, nbuf)) 1278 return QDF_STATUS_E_FAILURE; 1279 1280 if (!peer) { 1281 bool mpdu_done = false; 1282 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 1283 1284 if (!pdev) { 1285 dp_err_rl("pdev is null for pool_id = %d", pool_id); 1286 return QDF_STATUS_E_FAILURE; 1287 } 1288 1289 dp_err_rl("peer is NULL"); 1290 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 1291 qdf_nbuf_len(nbuf)); 1292 1293 /* QCN9000 has the support enabled */ 1294 if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) { 1295 mpdu_done = true; 1296 nbuf->next = NULL; 1297 /* Trigger invalid peer handler wrapper */ 1298 dp_rx_process_invalid_peer_wrapper(soc, 1299 nbuf, mpdu_done, pool_id); 1300 } else { 1301 mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id); 1302 /* Trigger invalid peer handler wrapper */ 1303 dp_rx_process_invalid_peer_wrapper(soc, 1304 pdev->invalid_peer_head_msdu, 1305 mpdu_done, pool_id); 1306 } 1307 1308 if (mpdu_done) { 1309 pdev->invalid_peer_head_msdu = NULL; 1310 pdev->invalid_peer_tail_msdu = NULL; 1311 } 1312 1313 return QDF_STATUS_E_FAILURE; 1314 } 1315 1316 vdev = peer->vdev; 1317 if (!vdev) { 1318 dp_err_rl("Null vdev!"); 1319 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1320 goto drop_nbuf; 1321 } 1322 1323 /* 1324 * Advance the packet start pointer by total size of 1325 * pre-header TLV's 1326 */ 1327 if (qdf_nbuf_is_frag(nbuf)) 1328 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1329 else 1330 qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad + 1331 soc->rx_pkt_tlv_size)); 1332 1333 dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1); 1334 1335 if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) { 1336 sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr); 1337 1338 if ((sa_idx < 0) || 1339 (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { 1340 DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1); 1341 goto drop_nbuf; 1342 } 1343 } 1344 1345 if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) { 1346 /* this is a looped back MCBC pkt, drop it */ 1347 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); 1348 goto drop_nbuf; 1349 } 1350 1351 /* 1352 * In qwrap mode if the received packet matches with any of the vdev 1353 * mac addresses, drop it. Donot receive multicast packets originated 1354 * from any proxysta. 1355 */ 1356 if (check_qwrap_multicast_loopback(vdev, nbuf)) { 1357 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); 1358 goto drop_nbuf; 1359 } 1360 1361 1362 if (qdf_unlikely((peer->nawds_enabled == true) && 1363 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1364 rx_tlv_hdr))) { 1365 dp_err_rl("free buffer for multicast packet"); 1366 DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); 1367 goto drop_nbuf; 1368 } 1369 1370 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) { 1371 dp_err_rl("mcast Policy Check Drop pkt"); 1372 goto drop_nbuf; 1373 } 1374 /* WDS Source Port Learning */ 1375 if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet && 1376 vdev->wds_enabled)) 1377 dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf, 1378 msdu_metadata); 1379 1380 if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) { 1381 tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr); 1382 if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) 1383 dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); 1384 /* IEEE80211_SEQ_MAX indicates invalid start_seq */ 1385 } 1386 1387 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1388 1389 if (!peer->authorize) { 1390 is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) || 1391 qdf_nbuf_is_ipv4_wapi_pkt(nbuf); 1392 1393 if (is_eapol) { 1394 if (qdf_mem_cmp(eh->ether_dhost, 1395 &vdev->mac_addr.raw[0], 1396 QDF_MAC_ADDR_SIZE)) 1397 goto drop_nbuf; 1398 } else { 1399 goto drop_nbuf; 1400 } 1401 } 1402 1403 /* 1404 * Drop packets in this path if cce_match is found. Packets will come 1405 * in following path depending on whether tidQ is setup. 1406 * 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and 1407 * cce_match = 1 1408 * Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already 1409 * dropped. 1410 * 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and 1411 * cce_match = 1 1412 * These packets need to be dropped and should not get delivered 1413 * to stack. 1414 */ 1415 if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr))) { 1416 goto drop_nbuf; 1417 } 1418 1419 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 1420 qdf_nbuf_set_next(nbuf, NULL); 1421 dp_rx_deliver_raw(vdev, nbuf, peer); 1422 } else { 1423 qdf_nbuf_set_next(nbuf, NULL); 1424 DP_STATS_INC_PKT(peer, rx.to_stack, 1, 1425 qdf_nbuf_len(nbuf)); 1426 1427 /* 1428 * Update the protocol tag in SKB based on 1429 * CCE metadata 1430 */ 1431 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1432 EXCEPTION_DEST_RING_ID, 1433 true, true); 1434 1435 /* Update the flow tag in SKB based on FSE metadata */ 1436 dp_rx_update_flow_tag(soc, vdev, nbuf, 1437 rx_tlv_hdr, true); 1438 1439 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get( 1440 soc->hal_soc, rx_tlv_hdr) && 1441 (vdev->rx_decap_type == 1442 htt_cmn_pkt_type_ethernet))) { 1443 DP_STATS_INC_PKT(peer, rx.multicast, 1, 1444 qdf_nbuf_len(nbuf)); 1445 1446 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) 1447 DP_STATS_INC_PKT(peer, rx.bcast, 1, 1448 qdf_nbuf_len(nbuf)); 1449 } 1450 1451 qdf_nbuf_set_exc_frame(nbuf, 1); 1452 dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1453 } 1454 return QDF_STATUS_SUCCESS; 1455 1456 drop_nbuf: 1457 qdf_nbuf_free(nbuf); 1458 return QDF_STATUS_E_FAILURE; 1459 } 1460 1461 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1462 1463 /** 1464 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err 1465 * frames to OS or wifi parse errors. 1466 * @soc: core DP main context 1467 * @nbuf: buffer pointer 1468 * @rx_tlv_hdr: start of rx tlv header 1469 * @peer: peer reference 1470 * @err_code: rxdma err code 1471 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1472 * pool_id has same mapping) 1473 * 1474 * Return: None 1475 */ 1476 void 1477 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, 1478 uint8_t *rx_tlv_hdr, struct dp_peer *peer, 1479 uint8_t err_code, uint8_t mac_id) 1480 { 1481 uint32_t pkt_len, l2_hdr_offset; 1482 uint16_t msdu_len; 1483 struct dp_vdev *vdev; 1484 qdf_ether_header_t *eh; 1485 bool is_broadcast; 1486 1487 /* 1488 * Check if DMA completed -- msdu_done is the last bit 1489 * to be written 1490 */ 1491 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1492 1493 dp_err_rl("MSDU DONE failure"); 1494 1495 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1496 QDF_TRACE_LEVEL_INFO); 1497 qdf_assert(0); 1498 } 1499 1500 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, 1501 rx_tlv_hdr); 1502 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1503 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 1504 1505 if (dp_rx_check_pkt_len(soc, pkt_len)) { 1506 /* Drop & free packet */ 1507 qdf_nbuf_free(nbuf); 1508 return; 1509 } 1510 /* Set length in nbuf */ 1511 qdf_nbuf_set_pktlen(nbuf, pkt_len); 1512 1513 qdf_nbuf_set_next(nbuf, NULL); 1514 1515 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 1516 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 1517 1518 if (!peer) { 1519 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL"); 1520 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 1521 qdf_nbuf_len(nbuf)); 1522 /* Trigger invalid peer handler wrapper */ 1523 dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id); 1524 return; 1525 } 1526 1527 vdev = peer->vdev; 1528 if (!vdev) { 1529 dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc, 1530 vdev); 1531 /* Drop & free packet */ 1532 qdf_nbuf_free(nbuf); 1533 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1534 return; 1535 } 1536 1537 /* 1538 * Advance the packet start pointer by total size of 1539 * pre-header TLV's 1540 */ 1541 dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset); 1542 1543 if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) { 1544 uint8_t *pkt_type; 1545 1546 pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE); 1547 if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) { 1548 if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) == 1549 htons(QDF_LLC_STP)) { 1550 DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1); 1551 goto process_mesh; 1552 } else { 1553 goto process_rx; 1554 } 1555 } 1556 } 1557 if (vdev->rx_decap_type == htt_cmn_pkt_type_raw) 1558 goto process_mesh; 1559 1560 /* 1561 * WAPI cert AP sends rekey frames as unencrypted. 1562 * Thus RXDMA will report unencrypted frame error. 1563 * To pass WAPI cert case, SW needs to pass unencrypted 1564 * rekey frame to stack. 1565 */ 1566 if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { 1567 goto process_rx; 1568 } 1569 /* 1570 * In dynamic WEP case rekey frames are not encrypted 1571 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and 1572 * key install is already done 1573 */ 1574 if ((vdev->sec_type == cdp_sec_type_wep104) && 1575 (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))) 1576 goto process_rx; 1577 1578 process_mesh: 1579 1580 if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) { 1581 qdf_nbuf_free(nbuf); 1582 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1583 return; 1584 } 1585 1586 if (vdev->mesh_vdev) { 1587 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 1588 == QDF_STATUS_SUCCESS) { 1589 dp_rx_err_info("%pK: mesh pkt filtered", soc); 1590 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); 1591 1592 qdf_nbuf_free(nbuf); 1593 return; 1594 } 1595 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); 1596 } 1597 process_rx: 1598 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1599 rx_tlv_hdr) && 1600 (vdev->rx_decap_type == 1601 htt_cmn_pkt_type_ethernet))) { 1602 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1603 is_broadcast = (QDF_IS_ADDR_BROADCAST 1604 (eh->ether_dhost)) ? 1 : 0 ; 1605 DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf)); 1606 if (is_broadcast) { 1607 DP_STATS_INC_PKT(peer, rx.bcast, 1, 1608 qdf_nbuf_len(nbuf)); 1609 } 1610 } 1611 1612 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 1613 dp_rx_deliver_raw(vdev, nbuf, peer); 1614 } else { 1615 /* Update the protocol tag in SKB based on CCE metadata */ 1616 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1617 EXCEPTION_DEST_RING_ID, true, true); 1618 /* Update the flow tag in SKB based on FSE metadata */ 1619 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); 1620 DP_STATS_INC(peer, rx.to_stack.num, 1); 1621 qdf_nbuf_set_exc_frame(nbuf, 1); 1622 dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1623 } 1624 1625 return; 1626 } 1627 1628 /** 1629 * dp_rx_process_mic_error(): Function to pass mic error indication to umac 1630 * @soc: core DP main context 1631 * @nbuf: buffer pointer 1632 * @rx_tlv_hdr: start of rx tlv header 1633 * @peer: peer handle 1634 * 1635 * return: void 1636 */ 1637 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, 1638 uint8_t *rx_tlv_hdr, struct dp_peer *peer) 1639 { 1640 struct dp_vdev *vdev = NULL; 1641 struct dp_pdev *pdev = NULL; 1642 struct ol_if_ops *tops = NULL; 1643 uint16_t rx_seq, fragno; 1644 uint8_t is_raw; 1645 unsigned int tid; 1646 QDF_STATUS status; 1647 struct cdp_rx_mic_err_info mic_failure_info; 1648 1649 if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1650 rx_tlv_hdr)) 1651 return; 1652 1653 if (!peer) { 1654 dp_info_rl("peer not found"); 1655 goto fail; 1656 } 1657 1658 vdev = peer->vdev; 1659 if (!vdev) { 1660 dp_info_rl("VDEV not found"); 1661 goto fail; 1662 } 1663 1664 pdev = vdev->pdev; 1665 if (!pdev) { 1666 dp_info_rl("PDEV not found"); 1667 goto fail; 1668 } 1669 1670 is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf)); 1671 if (is_raw) { 1672 fragno = dp_rx_frag_get_mpdu_frag_number(soc, 1673 qdf_nbuf_data(nbuf)); 1674 /* Can get only last fragment */ 1675 if (fragno) { 1676 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 1677 qdf_nbuf_data(nbuf)); 1678 rx_seq = hal_rx_get_rx_sequence(soc->hal_soc, 1679 qdf_nbuf_data(nbuf)); 1680 1681 status = dp_rx_defrag_add_last_frag(soc, peer, 1682 tid, rx_seq, nbuf); 1683 dp_info_rl("Frag pkt seq# %d frag# %d consumed " 1684 "status %d !", rx_seq, fragno, status); 1685 return; 1686 } 1687 } 1688 1689 if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf), 1690 &mic_failure_info.da_mac_addr.bytes[0])) { 1691 dp_err_rl("Failed to get da_mac_addr"); 1692 goto fail; 1693 } 1694 1695 if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf), 1696 &mic_failure_info.ta_mac_addr.bytes[0])) { 1697 dp_err_rl("Failed to get ta_mac_addr"); 1698 goto fail; 1699 } 1700 1701 mic_failure_info.key_id = 0; 1702 mic_failure_info.multicast = 1703 IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes); 1704 qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE); 1705 mic_failure_info.frame_type = cdp_rx_frame_type_802_11; 1706 mic_failure_info.data = NULL; 1707 mic_failure_info.vdev_id = vdev->vdev_id; 1708 1709 tops = pdev->soc->cdp_soc.ol_ops; 1710 if (tops->rx_mic_error) 1711 tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id, 1712 &mic_failure_info); 1713 1714 fail: 1715 qdf_nbuf_free(nbuf); 1716 return; 1717 } 1718 1719 /* 1720 * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack 1721 * @soc: DP soc 1722 * @vdv: DP vdev handle 1723 * @peer: pointer to the peer object 1724 * @nbuf: skb list head 1725 * @tail: skb list tail 1726 * @is_eapol: eapol pkt check 1727 * 1728 * Return: None 1729 */ 1730 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 1731 static inline void 1732 dp_rx_deliver_to_osif_stack(struct dp_soc *soc, 1733 struct dp_vdev *vdev, 1734 struct dp_peer *peer, 1735 qdf_nbuf_t nbuf, 1736 qdf_nbuf_t tail, 1737 bool is_eapol) 1738 { 1739 if (is_eapol && soc->eapol_over_control_port) 1740 dp_rx_eapol_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1741 else 1742 dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1743 } 1744 #else 1745 static inline void 1746 dp_rx_deliver_to_osif_stack(struct dp_soc *soc, 1747 struct dp_vdev *vdev, 1748 struct dp_peer *peer, 1749 qdf_nbuf_t nbuf, 1750 qdf_nbuf_t tail, 1751 bool is_eapol) 1752 { 1753 dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1754 } 1755 #endif 1756 1757 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG 1758 /** 1759 * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack 1760 * Free any other packet which comes in 1761 * this path. 1762 * 1763 * @soc: core DP main context 1764 * @nbuf: buffer pointer 1765 * @peer: peer handle 1766 * @rx_tlv_hdr: start of rx tlv header 1767 * @err_src: rxdma/reo 1768 * 1769 * This function indicates EAPOL frame received in wbm error ring to stack. 1770 * Any other frame should be dropped. 1771 * 1772 * Return: SUCCESS if delivered to stack 1773 */ 1774 static void 1775 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf, 1776 struct dp_peer *peer, uint8_t *rx_tlv_hdr, 1777 enum hal_rx_wbm_error_source err_src) 1778 { 1779 uint32_t pkt_len; 1780 uint16_t msdu_len; 1781 struct dp_vdev *vdev; 1782 struct hal_rx_msdu_metadata msdu_metadata; 1783 bool is_eapol; 1784 1785 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata); 1786 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1787 pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size; 1788 1789 if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) { 1790 if (dp_rx_check_pkt_len(soc, pkt_len)) 1791 goto drop_nbuf; 1792 1793 /* Set length in nbuf */ 1794 qdf_nbuf_set_pktlen( 1795 nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE)); 1796 qdf_assert_always(nbuf->data == rx_tlv_hdr); 1797 } 1798 1799 /* 1800 * Check if DMA completed -- msdu_done is the last bit 1801 * to be written 1802 */ 1803 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1804 dp_err_rl("MSDU DONE failure"); 1805 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1806 QDF_TRACE_LEVEL_INFO); 1807 qdf_assert(0); 1808 } 1809 1810 if (!peer) 1811 goto drop_nbuf; 1812 1813 vdev = peer->vdev; 1814 if (!vdev) { 1815 dp_err_rl("Null vdev!"); 1816 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1817 goto drop_nbuf; 1818 } 1819 1820 /* 1821 * Advance the packet start pointer by total size of 1822 * pre-header TLV's 1823 */ 1824 if (qdf_nbuf_is_frag(nbuf)) 1825 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1826 else 1827 qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad + 1828 soc->rx_pkt_tlv_size)); 1829 1830 dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1); 1831 1832 /* 1833 * Indicate EAPOL frame to stack only when vap mac address 1834 * matches the destination address. 1835 */ 1836 is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf); 1837 if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { 1838 qdf_ether_header_t *eh = 1839 (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1840 if (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0], 1841 QDF_MAC_ADDR_SIZE) == 0) { 1842 /* 1843 * Update the protocol tag in SKB based on 1844 * CCE metadata. 1845 */ 1846 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1847 EXCEPTION_DEST_RING_ID, 1848 true, true); 1849 /* Update the flow tag in SKB based on FSE metadata */ 1850 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, 1851 true); 1852 DP_STATS_INC(peer, rx.to_stack.num, 1); 1853 qdf_nbuf_set_exc_frame(nbuf, 1); 1854 qdf_nbuf_set_next(nbuf, NULL); 1855 1856 dp_rx_deliver_to_osif_stack(soc, vdev, peer, nbuf, 1857 NULL, is_eapol); 1858 1859 return; 1860 } 1861 } 1862 1863 drop_nbuf: 1864 1865 DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1, 1866 err_src == HAL_RX_WBM_ERR_SRC_REO); 1867 DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1, 1868 err_src == HAL_RX_WBM_ERR_SRC_RXDMA); 1869 1870 qdf_nbuf_free(nbuf); 1871 } 1872 #else 1873 1874 static void 1875 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf, 1876 struct dp_peer *peer, uint8_t *rx_tlv_hdr, 1877 enum hal_rx_wbm_error_source err_src) 1878 { 1879 DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1, 1880 err_src == HAL_RX_WBM_ERR_SRC_REO); 1881 DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1, 1882 err_src == HAL_RX_WBM_ERR_SRC_RXDMA); 1883 1884 qdf_nbuf_free(nbuf); 1885 } 1886 #endif 1887 1888 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1889 1890 #ifdef DP_RX_DESC_COOKIE_INVALIDATE 1891 /** 1892 * dp_rx_link_cookie_check() - Validate link desc cookie 1893 * @ring_desc: ring descriptor 1894 * 1895 * Return: qdf status 1896 */ 1897 static inline QDF_STATUS 1898 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) 1899 { 1900 if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc))) 1901 return QDF_STATUS_E_FAILURE; 1902 1903 return QDF_STATUS_SUCCESS; 1904 } 1905 1906 /** 1907 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie 1908 * @ring_desc: ring descriptor 1909 * 1910 * Return: None 1911 */ 1912 static inline void 1913 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) 1914 { 1915 HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc); 1916 } 1917 #else 1918 static inline QDF_STATUS 1919 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) 1920 { 1921 return QDF_STATUS_SUCCESS; 1922 } 1923 1924 static inline void 1925 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) 1926 { 1927 } 1928 #endif 1929 1930 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 1931 /** 1932 * dp_rx_err_ring_record_entry() - Record rx err ring history 1933 * @soc: Datapath soc structure 1934 * @paddr: paddr of the buffer in RX err ring 1935 * @sw_cookie: SW cookie of the buffer in RX err ring 1936 * @rbm: Return buffer manager of the buffer in RX err ring 1937 * 1938 * Returns: None 1939 */ 1940 static inline void 1941 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, 1942 uint32_t sw_cookie, uint8_t rbm) 1943 { 1944 struct dp_buf_info_record *record; 1945 uint32_t idx; 1946 1947 if (qdf_unlikely(!soc->rx_err_ring_history)) 1948 return; 1949 1950 idx = dp_history_get_next_index(&soc->rx_err_ring_history->index, 1951 DP_RX_ERR_HIST_MAX); 1952 1953 /* No NULL check needed for record since its an array */ 1954 record = &soc->rx_err_ring_history->entry[idx]; 1955 1956 record->timestamp = qdf_get_log_timestamp(); 1957 record->hbi.paddr = paddr; 1958 record->hbi.sw_cookie = sw_cookie; 1959 record->hbi.rbm = rbm; 1960 } 1961 #else 1962 static inline void 1963 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, 1964 uint32_t sw_cookie, uint8_t rbm) 1965 { 1966 } 1967 #endif 1968 1969 #ifdef HANDLE_RX_REROUTE_ERR 1970 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc, 1971 hal_ring_desc_t ring_desc) 1972 { 1973 int lmac_id = DP_INVALID_LMAC_ID; 1974 struct dp_rx_desc *rx_desc; 1975 struct hal_buf_info hbi; 1976 struct dp_pdev *pdev; 1977 1978 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 1979 1980 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie); 1981 1982 /* sanity */ 1983 if (!rx_desc) { 1984 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1); 1985 goto assert_return; 1986 } 1987 1988 if (!rx_desc->nbuf) 1989 goto assert_return; 1990 1991 dp_rx_err_ring_record_entry(soc, hbi.paddr, 1992 hbi.sw_cookie, 1993 hal_rx_ret_buf_manager_get(soc->hal_soc, 1994 ring_desc)); 1995 if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) { 1996 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 1997 rx_desc->in_err_state = 1; 1998 goto assert_return; 1999 } 2000 2001 /* After this point the rx_desc and nbuf are valid */ 2002 dp_ipa_rx_buf_smmu_mapping_lock(soc); 2003 qdf_assert_always(!rx_desc->unmapped); 2004 dp_ipa_handle_rx_buf_smmu_mapping(soc, 2005 rx_desc->nbuf, 2006 RX_DATA_BUFFER_SIZE, 2007 false); 2008 qdf_nbuf_unmap_nbytes_single(soc->osdev, 2009 rx_desc->nbuf, 2010 QDF_DMA_FROM_DEVICE, 2011 RX_DATA_BUFFER_SIZE); 2012 rx_desc->unmapped = 1; 2013 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 2014 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 2015 rx_desc->pool_id); 2016 2017 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 2018 lmac_id = rx_desc->pool_id; 2019 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 2020 &pdev->free_list_tail, 2021 rx_desc); 2022 return lmac_id; 2023 2024 assert_return: 2025 qdf_assert(0); 2026 return lmac_id; 2027 } 2028 2029 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 2030 { 2031 int ret; 2032 uint64_t cur_time_stamp; 2033 2034 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1); 2035 2036 /* Recover if overall error count exceeds threshold */ 2037 if (soc->stats.rx.err.reo_err_msdu_buf_rcved > 2038 DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) { 2039 dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu", 2040 soc->stats.rx.err.reo_err_msdu_buf_rcved, 2041 soc->rx_route_err_start_pkt_ts); 2042 qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR); 2043 } 2044 2045 cur_time_stamp = qdf_get_log_timestamp_usecs(); 2046 if (!soc->rx_route_err_start_pkt_ts) 2047 soc->rx_route_err_start_pkt_ts = cur_time_stamp; 2048 2049 /* Recover if threshold number of packets received in threshold time */ 2050 if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) > 2051 DP_RX_ERR_ROUTE_TIMEOUT_US) { 2052 soc->rx_route_err_start_pkt_ts = cur_time_stamp; 2053 2054 if (soc->rx_route_err_in_window > 2055 DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) { 2056 qdf_trigger_self_recovery(NULL, 2057 QDF_RX_REG_PKT_ROUTE_ERR); 2058 dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu", 2059 soc->stats.rx.err.reo_err_msdu_buf_rcved, 2060 soc->rx_route_err_start_pkt_ts); 2061 } else { 2062 soc->rx_route_err_in_window = 1; 2063 } 2064 } else { 2065 soc->rx_route_err_in_window++; 2066 } 2067 2068 ret = dp_rx_err_handle_msdu_buf(soc, ring_desc); 2069 2070 return ret; 2071 } 2072 #else /* HANDLE_RX_REROUTE_ERR */ 2073 2074 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 2075 { 2076 qdf_assert_always(0); 2077 2078 return DP_INVALID_LMAC_ID; 2079 } 2080 #endif /* HANDLE_RX_REROUTE_ERR */ 2081 2082 /** 2083 * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed 2084 * for this frame received in REO error ring. 2085 * @soc: Datapath SOC handle 2086 * @error: REO error detected or not 2087 * @error_code: Error code in case of REO error 2088 * 2089 * Return: true if pn check if needed in software, 2090 * false, if pn check if not needed. 2091 */ 2092 static inline bool 2093 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error, 2094 uint32_t error_code) 2095 { 2096 return (soc->features.pn_in_reo_dest && 2097 (error == HAL_REO_ERROR_DETECTED && 2098 (hal_rx_reo_is_2k_jump(error_code) || 2099 hal_rx_reo_is_oor_error(error_code) || 2100 hal_rx_reo_is_bar_oor_2k_jump(error_code)))); 2101 } 2102 2103 uint32_t 2104 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 2105 hal_ring_handle_t hal_ring_hdl, uint32_t quota) 2106 { 2107 hal_ring_desc_t ring_desc; 2108 hal_soc_handle_t hal_soc; 2109 uint32_t count = 0; 2110 uint32_t rx_bufs_used = 0; 2111 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 2112 uint8_t mac_id = 0; 2113 uint8_t buf_type; 2114 uint8_t err_status; 2115 struct hal_rx_mpdu_desc_info mpdu_desc_info; 2116 struct hal_buf_info hbi; 2117 struct dp_pdev *dp_pdev; 2118 struct dp_srng *dp_rxdma_srng; 2119 struct rx_desc_pool *rx_desc_pool; 2120 void *link_desc_va; 2121 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 2122 uint16_t num_msdus; 2123 struct dp_rx_desc *rx_desc = NULL; 2124 QDF_STATUS status; 2125 bool ret; 2126 uint32_t error_code = 0; 2127 bool sw_pn_check_needed; 2128 2129 /* Debug -- Remove later */ 2130 qdf_assert(soc && hal_ring_hdl); 2131 2132 hal_soc = soc->hal_soc; 2133 2134 /* Debug -- Remove later */ 2135 qdf_assert(hal_soc); 2136 2137 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 2138 2139 /* TODO */ 2140 /* 2141 * Need API to convert from hal_ring pointer to 2142 * Ring Type / Ring Id combo 2143 */ 2144 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 2145 dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc, 2146 hal_ring_hdl); 2147 goto done; 2148 } 2149 2150 while (qdf_likely(quota-- && (ring_desc = 2151 hal_srng_dst_peek(hal_soc, 2152 hal_ring_hdl)))) { 2153 2154 DP_STATS_INC(soc, rx.err_ring_pkts, 1); 2155 err_status = hal_rx_err_status_get(hal_soc, ring_desc); 2156 buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc); 2157 2158 if (err_status == HAL_REO_ERROR_DETECTED) 2159 error_code = hal_rx_get_reo_error_code(hal_soc, 2160 ring_desc); 2161 2162 qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0); 2163 sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc, 2164 err_status, 2165 error_code); 2166 if (!sw_pn_check_needed) { 2167 /* 2168 * MPDU desc info will be present in the REO desc 2169 * only in the below scenarios 2170 * 1) pn_in_dest_disabled: always 2171 * 2) pn_in_dest enabled: All cases except 2k-jup 2172 * and OOR errors 2173 */ 2174 hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, 2175 &mpdu_desc_info); 2176 } 2177 2178 if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0) 2179 goto next_entry; 2180 2181 /* 2182 * For REO error ring, only MSDU LINK DESC is expected. 2183 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case. 2184 */ 2185 if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) { 2186 int lmac_id; 2187 2188 lmac_id = dp_rx_err_exception(soc, ring_desc); 2189 if (lmac_id >= 0) 2190 rx_bufs_reaped[lmac_id] += 1; 2191 goto next_entry; 2192 } 2193 2194 hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc, 2195 &hbi); 2196 /* 2197 * check for the magic number in the sw cookie 2198 */ 2199 qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) & 2200 soc->link_desc_id_start); 2201 2202 status = dp_rx_link_cookie_check(ring_desc); 2203 if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { 2204 DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1); 2205 break; 2206 } 2207 2208 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2209 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); 2210 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 2211 &num_msdus); 2212 dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0], 2213 msdu_list.sw_cookie[0], 2214 msdu_list.rbm[0]); 2215 // TODO - BE- Check if the RBM is to be checked for all chips 2216 if (qdf_unlikely((msdu_list.rbm[0] != 2217 DP_WBM2SW_RBM(soc->wbm_sw0_bm_id)) && 2218 (msdu_list.rbm[0] != 2219 HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST) && 2220 (msdu_list.rbm[0] != 2221 DP_DEFRAG_RBM(soc->wbm_sw0_bm_id)))) { 2222 /* TODO */ 2223 /* Call appropriate handler */ 2224 if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) { 2225 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 2226 dp_rx_err_err("%pK: Invalid RBM %d", 2227 soc, msdu_list.rbm[0]); 2228 } 2229 2230 /* Return link descriptor through WBM ring (SW2WBM)*/ 2231 dp_rx_link_desc_return(soc, ring_desc, 2232 HAL_BM_ACTION_RELEASE_MSDU_LIST); 2233 goto next_entry; 2234 } 2235 2236 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 2237 soc, 2238 msdu_list.sw_cookie[0]); 2239 qdf_assert_always(rx_desc); 2240 2241 mac_id = rx_desc->pool_id; 2242 2243 if (sw_pn_check_needed) { 2244 goto process_reo_error_code; 2245 } 2246 2247 if (mpdu_desc_info.bar_frame) { 2248 qdf_assert_always(mpdu_desc_info.msdu_count == 1); 2249 2250 dp_rx_bar_frame_handle(soc, ring_desc, rx_desc, 2251 &mpdu_desc_info, err_status, 2252 error_code); 2253 2254 rx_bufs_reaped[mac_id] += 1; 2255 goto next_entry; 2256 } 2257 2258 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { 2259 /* 2260 * We only handle one msdu per link desc for fragmented 2261 * case. We drop the msdus and release the link desc 2262 * back if there are more than one msdu in link desc. 2263 */ 2264 if (qdf_unlikely(num_msdus > 1)) { 2265 count = dp_rx_msdus_drop(soc, ring_desc, 2266 &mpdu_desc_info, 2267 &mac_id, quota); 2268 rx_bufs_reaped[mac_id] += count; 2269 goto next_entry; 2270 } 2271 2272 /* 2273 * this is a unlikely scenario where the host is reaping 2274 * a descriptor which it already reaped just a while ago 2275 * but is yet to replenish it back to HW. 2276 * In this case host will dump the last 128 descriptors 2277 * including the software descriptor rx_desc and assert. 2278 */ 2279 2280 if (qdf_unlikely(!rx_desc->in_use)) { 2281 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); 2282 dp_info_rl("Reaping rx_desc not in use!"); 2283 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 2284 ring_desc, rx_desc); 2285 /* ignore duplicate RX desc and continue */ 2286 /* Pop out the descriptor */ 2287 goto next_entry; 2288 } 2289 2290 ret = dp_rx_desc_paddr_sanity_check(rx_desc, 2291 msdu_list.paddr[0]); 2292 if (!ret) { 2293 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 2294 rx_desc->in_err_state = 1; 2295 goto next_entry; 2296 } 2297 2298 count = dp_rx_frag_handle(soc, 2299 ring_desc, &mpdu_desc_info, 2300 rx_desc, &mac_id, quota); 2301 2302 rx_bufs_reaped[mac_id] += count; 2303 DP_STATS_INC(soc, rx.rx_frags, 1); 2304 goto next_entry; 2305 } 2306 2307 process_reo_error_code: 2308 /* 2309 * Expect REO errors to be handled after this point 2310 */ 2311 qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED); 2312 2313 dp_info_rl("Got pkt with REO ERROR: %d", error_code); 2314 2315 switch (error_code) { 2316 case HAL_REO_ERR_PN_CHECK_FAILED: 2317 case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET: 2318 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2319 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2320 if (dp_pdev) 2321 DP_STATS_INC(dp_pdev, err.reo_error, 1); 2322 count = dp_rx_pn_error_handle(soc, 2323 ring_desc, 2324 &mpdu_desc_info, &mac_id, 2325 quota); 2326 2327 rx_bufs_reaped[mac_id] += count; 2328 break; 2329 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 2330 case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET: 2331 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 2332 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2333 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2334 if (dp_pdev) 2335 DP_STATS_INC(dp_pdev, err.reo_error, 1); 2336 2337 count = dp_rx_reo_err_entry_process( 2338 soc, 2339 ring_desc, 2340 &mpdu_desc_info, 2341 link_desc_va, 2342 HAL_REO_ERR_REGULAR_FRAME_2K_JUMP); 2343 2344 rx_bufs_reaped[mac_id] += count; 2345 break; 2346 2347 case HAL_REO_ERR_REGULAR_FRAME_OOR: 2348 case HAL_REO_ERR_BAR_FRAME_OOR: 2349 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2350 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2351 if (dp_pdev) 2352 DP_STATS_INC(dp_pdev, err.reo_error, 1); 2353 count = dp_rx_reo_err_entry_process( 2354 soc, 2355 ring_desc, 2356 &mpdu_desc_info, 2357 link_desc_va, 2358 HAL_REO_ERR_REGULAR_FRAME_OOR); 2359 2360 rx_bufs_reaped[mac_id] += count; 2361 break; 2362 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 2363 case HAL_REO_ERR_QUEUE_DESC_INVALID: 2364 case HAL_REO_ERR_AMPDU_IN_NON_BA: 2365 case HAL_REO_ERR_NON_BA_DUPLICATE: 2366 case HAL_REO_ERR_BA_DUPLICATE: 2367 case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION: 2368 case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN: 2369 case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET: 2370 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2371 count = dp_rx_msdus_drop(soc, ring_desc, 2372 &mpdu_desc_info, 2373 &mac_id, quota); 2374 rx_bufs_reaped[mac_id] += count; 2375 break; 2376 default: 2377 /* Assert if unexpected error type */ 2378 qdf_assert_always(0); 2379 } 2380 next_entry: 2381 dp_rx_link_cookie_invalidate(ring_desc); 2382 hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2383 } 2384 2385 done: 2386 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 2387 2388 if (soc->rx.flags.defrag_timeout_check) { 2389 uint32_t now_ms = 2390 qdf_system_ticks_to_msecs(qdf_system_ticks()); 2391 2392 if (now_ms >= soc->rx.defrag.next_flush_ms) 2393 dp_rx_defrag_waitlist_flush(soc); 2394 } 2395 2396 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 2397 if (rx_bufs_reaped[mac_id]) { 2398 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2399 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 2400 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 2401 2402 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 2403 rx_desc_pool, 2404 rx_bufs_reaped[mac_id], 2405 &dp_pdev->free_list_head, 2406 &dp_pdev->free_list_tail); 2407 rx_bufs_used += rx_bufs_reaped[mac_id]; 2408 } 2409 } 2410 2411 return rx_bufs_used; /* Assume no scale factor for now */ 2412 } 2413 2414 #ifdef DROP_RXDMA_DECRYPT_ERR 2415 /** 2416 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled 2417 * 2418 * Return: true if rxdma decrypt err frames are handled and false otheriwse 2419 */ 2420 static inline bool dp_handle_rxdma_decrypt_err(void) 2421 { 2422 return false; 2423 } 2424 #else 2425 static inline bool dp_handle_rxdma_decrypt_err(void) 2426 { 2427 return true; 2428 } 2429 #endif 2430 2431 static inline bool 2432 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info) 2433 { 2434 /* 2435 * Currently Null Queue and Unencrypted error handlers has support for 2436 * SG. Other error handler do not deal with SG buffer. 2437 */ 2438 if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) && 2439 (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) || 2440 ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) && 2441 (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED))) 2442 return true; 2443 2444 return false; 2445 } 2446 2447 uint32_t 2448 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 2449 hal_ring_handle_t hal_ring_hdl, uint32_t quota) 2450 { 2451 hal_ring_desc_t ring_desc; 2452 hal_soc_handle_t hal_soc; 2453 struct dp_rx_desc *rx_desc; 2454 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; 2455 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; 2456 uint32_t rx_bufs_used = 0; 2457 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 2458 uint8_t buf_type; 2459 uint8_t mac_id; 2460 struct dp_pdev *dp_pdev; 2461 struct dp_srng *dp_rxdma_srng; 2462 struct rx_desc_pool *rx_desc_pool; 2463 uint8_t *rx_tlv_hdr; 2464 qdf_nbuf_t nbuf_head = NULL; 2465 qdf_nbuf_t nbuf_tail = NULL; 2466 qdf_nbuf_t nbuf, next; 2467 struct hal_wbm_err_desc_info wbm_err_info = { 0 }; 2468 uint8_t pool_id; 2469 uint8_t tid = 0; 2470 uint8_t msdu_continuation = 0; 2471 bool process_sg_buf = false; 2472 uint32_t wbm_err_src; 2473 2474 /* Debug -- Remove later */ 2475 qdf_assert(soc && hal_ring_hdl); 2476 2477 hal_soc = soc->hal_soc; 2478 2479 /* Debug -- Remove later */ 2480 qdf_assert(hal_soc); 2481 2482 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 2483 2484 /* TODO */ 2485 /* 2486 * Need API to convert from hal_ring pointer to 2487 * Ring Type / Ring Id combo 2488 */ 2489 dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", 2490 soc, hal_ring_hdl); 2491 goto done; 2492 } 2493 2494 while (qdf_likely(quota)) { 2495 ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2496 if (qdf_unlikely(!ring_desc)) 2497 break; 2498 2499 /* XXX */ 2500 buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc); 2501 2502 /* 2503 * For WBM ring, expect only MSDU buffers 2504 */ 2505 qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF); 2506 2507 wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc); 2508 qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) || 2509 (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO)); 2510 2511 if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc, 2512 ring_desc, 2513 &rx_desc)) { 2514 dp_rx_err_err("get rx desc from hal_desc failed"); 2515 continue; 2516 } 2517 2518 qdf_assert_always(rx_desc); 2519 2520 if (!dp_rx_desc_check_magic(rx_desc)) { 2521 dp_rx_err_err("%pk: Invalid rx_desc %pk", 2522 soc, rx_desc); 2523 continue; 2524 } 2525 2526 /* 2527 * this is a unlikely scenario where the host is reaping 2528 * a descriptor which it already reaped just a while ago 2529 * but is yet to replenish it back to HW. 2530 * In this case host will dump the last 128 descriptors 2531 * including the software descriptor rx_desc and assert. 2532 */ 2533 if (qdf_unlikely(!rx_desc->in_use)) { 2534 DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1); 2535 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 2536 ring_desc, rx_desc); 2537 continue; 2538 } 2539 2540 hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc); 2541 nbuf = rx_desc->nbuf; 2542 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 2543 dp_ipa_rx_buf_smmu_mapping_lock(soc); 2544 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, 2545 rx_desc_pool->buf_size, 2546 false); 2547 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, 2548 QDF_DMA_FROM_DEVICE, 2549 rx_desc_pool->buf_size); 2550 rx_desc->unmapped = 1; 2551 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 2552 2553 if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support && 2554 dp_rx_is_sg_formation_required(&wbm_err_info))) { 2555 /* SG is detected from continuation bit */ 2556 msdu_continuation = 2557 hal_rx_wbm_err_msdu_continuation_get(hal_soc, 2558 ring_desc); 2559 if (msdu_continuation && 2560 !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) { 2561 /* Update length from first buffer in SG */ 2562 soc->wbm_sg_param.wbm_sg_desc_msdu_len = 2563 hal_rx_msdu_start_msdu_len_get( 2564 soc->hal_soc, 2565 qdf_nbuf_data(nbuf)); 2566 soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true; 2567 } 2568 2569 if (msdu_continuation) { 2570 /* MSDU continued packets */ 2571 qdf_nbuf_set_rx_chfrag_cont(nbuf, 1); 2572 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2573 soc->wbm_sg_param.wbm_sg_desc_msdu_len; 2574 } else { 2575 /* This is the terminal packet in SG */ 2576 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 2577 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 2578 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2579 soc->wbm_sg_param.wbm_sg_desc_msdu_len; 2580 process_sg_buf = true; 2581 } 2582 } 2583 2584 /* 2585 * save the wbm desc info in nbuf TLV. We will need this 2586 * info when we do the actual nbuf processing 2587 */ 2588 wbm_err_info.pool_id = rx_desc->pool_id; 2589 hal_rx_priv_info_set_in_tlv(soc->hal_soc, 2590 qdf_nbuf_data(nbuf), 2591 (uint8_t *)&wbm_err_info, 2592 sizeof(wbm_err_info)); 2593 2594 rx_bufs_reaped[rx_desc->pool_id]++; 2595 2596 if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) { 2597 DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head, 2598 soc->wbm_sg_param.wbm_sg_nbuf_tail, 2599 nbuf); 2600 if (process_sg_buf) { 2601 if (!dp_rx_buffer_pool_refill( 2602 soc, 2603 soc->wbm_sg_param.wbm_sg_nbuf_head, 2604 rx_desc->pool_id)) 2605 DP_RX_MERGE_TWO_LIST( 2606 nbuf_head, nbuf_tail, 2607 soc->wbm_sg_param.wbm_sg_nbuf_head, 2608 soc->wbm_sg_param.wbm_sg_nbuf_tail); 2609 dp_rx_wbm_sg_list_reset(soc); 2610 process_sg_buf = false; 2611 } 2612 } else if (!dp_rx_buffer_pool_refill(soc, nbuf, 2613 rx_desc->pool_id)) { 2614 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf); 2615 } 2616 2617 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 2618 &tail[rx_desc->pool_id], 2619 rx_desc); 2620 2621 /* 2622 * if continuation bit is set then we have MSDU spread 2623 * across multiple buffers, let us not decrement quota 2624 * till we reap all buffers of that MSDU. 2625 */ 2626 if (qdf_likely(!msdu_continuation)) 2627 quota -= 1; 2628 } 2629 done: 2630 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 2631 2632 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 2633 if (rx_bufs_reaped[mac_id]) { 2634 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 2635 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 2636 2637 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 2638 rx_desc_pool, rx_bufs_reaped[mac_id], 2639 &head[mac_id], &tail[mac_id]); 2640 rx_bufs_used += rx_bufs_reaped[mac_id]; 2641 } 2642 } 2643 2644 nbuf = nbuf_head; 2645 while (nbuf) { 2646 struct dp_peer *peer; 2647 uint16_t peer_id; 2648 uint8_t err_code; 2649 uint8_t *tlv_hdr; 2650 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2651 2652 /* 2653 * retrieve the wbm desc info from nbuf TLV, so we can 2654 * handle error cases appropriately 2655 */ 2656 hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr, 2657 (uint8_t *)&wbm_err_info, 2658 sizeof(wbm_err_info)); 2659 2660 peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 2661 rx_tlv_hdr); 2662 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 2663 2664 if (!peer) 2665 dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u", 2666 peer_id, wbm_err_info.wbm_err_src, 2667 wbm_err_info.reo_psh_rsn); 2668 2669 /* Set queue_mapping in nbuf to 0 */ 2670 dp_set_rx_queue(nbuf, 0); 2671 2672 next = nbuf->next; 2673 2674 /* 2675 * Form the SG for msdu continued buffers 2676 * QCN9000 has this support 2677 */ 2678 if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) { 2679 nbuf = dp_rx_sg_create(soc, nbuf); 2680 next = nbuf->next; 2681 /* 2682 * SG error handling is not done correctly, 2683 * drop SG frames for now. 2684 */ 2685 qdf_nbuf_free(nbuf); 2686 dp_info_rl("scattered msdu dropped"); 2687 nbuf = next; 2688 if (peer) 2689 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 2690 continue; 2691 } 2692 2693 if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { 2694 if (wbm_err_info.reo_psh_rsn 2695 == HAL_RX_WBM_REO_PSH_RSN_ERROR) { 2696 2697 DP_STATS_INC(soc, 2698 rx.err.reo_error 2699 [wbm_err_info.reo_err_code], 1); 2700 /* increment @pdev level */ 2701 pool_id = wbm_err_info.pool_id; 2702 dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 2703 if (dp_pdev) 2704 DP_STATS_INC(dp_pdev, err.reo_error, 2705 1); 2706 2707 switch (wbm_err_info.reo_err_code) { 2708 /* 2709 * Handling for packets which have NULL REO 2710 * queue descriptor 2711 */ 2712 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 2713 pool_id = wbm_err_info.pool_id; 2714 dp_rx_null_q_desc_handle(soc, nbuf, 2715 rx_tlv_hdr, 2716 pool_id, peer); 2717 break; 2718 /* TODO */ 2719 /* Add per error code accounting */ 2720 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 2721 pool_id = wbm_err_info.pool_id; 2722 2723 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 2724 rx_tlv_hdr)) { 2725 peer_id = 2726 hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 2727 rx_tlv_hdr); 2728 tid = 2729 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); 2730 } 2731 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2732 hal_rx_msdu_start_msdu_len_get( 2733 soc->hal_soc, rx_tlv_hdr); 2734 nbuf->next = NULL; 2735 dp_2k_jump_handle(soc, nbuf, 2736 rx_tlv_hdr, 2737 peer_id, tid); 2738 break; 2739 case HAL_REO_ERR_REGULAR_FRAME_OOR: 2740 if (peer) 2741 DP_STATS_INC(peer, 2742 rx.err.oor_err, 1); 2743 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 2744 rx_tlv_hdr)) { 2745 peer_id = 2746 hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 2747 rx_tlv_hdr); 2748 tid = 2749 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); 2750 } 2751 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2752 hal_rx_msdu_start_msdu_len_get( 2753 soc->hal_soc, rx_tlv_hdr); 2754 nbuf->next = NULL; 2755 dp_rx_oor_handle(soc, nbuf, 2756 peer_id, 2757 rx_tlv_hdr); 2758 break; 2759 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 2760 case HAL_REO_ERR_BAR_FRAME_OOR: 2761 if (peer) 2762 dp_rx_err_handle_bar(soc, 2763 peer, 2764 nbuf); 2765 qdf_nbuf_free(nbuf); 2766 break; 2767 2768 case HAL_REO_ERR_PN_CHECK_FAILED: 2769 case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET: 2770 if (peer) 2771 DP_STATS_INC(peer, 2772 rx.err.pn_err, 1); 2773 qdf_nbuf_free(nbuf); 2774 break; 2775 2776 default: 2777 dp_info_rl("Got pkt with REO ERROR: %d", 2778 wbm_err_info.reo_err_code); 2779 qdf_nbuf_free(nbuf); 2780 } 2781 } else if (wbm_err_info.reo_psh_rsn 2782 == HAL_RX_WBM_REO_PSH_RSN_ROUTE) { 2783 dp_rx_err_route_hdl(soc, nbuf, peer, 2784 rx_tlv_hdr, 2785 HAL_RX_WBM_ERR_SRC_REO); 2786 } else { 2787 /* should not enter here */ 2788 dp_rx_err_alert("invalid reo push reason %u", 2789 wbm_err_info.reo_psh_rsn); 2790 qdf_nbuf_free(nbuf); 2791 qdf_assert_always(0); 2792 } 2793 } else if (wbm_err_info.wbm_err_src == 2794 HAL_RX_WBM_ERR_SRC_RXDMA) { 2795 if (wbm_err_info.rxdma_psh_rsn 2796 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 2797 DP_STATS_INC(soc, 2798 rx.err.rxdma_error 2799 [wbm_err_info.rxdma_err_code], 1); 2800 /* increment @pdev level */ 2801 pool_id = wbm_err_info.pool_id; 2802 dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 2803 if (dp_pdev) 2804 DP_STATS_INC(dp_pdev, 2805 err.rxdma_error, 1); 2806 2807 switch (wbm_err_info.rxdma_err_code) { 2808 case HAL_RXDMA_ERR_UNENCRYPTED: 2809 2810 case HAL_RXDMA_ERR_WIFI_PARSE: 2811 pool_id = wbm_err_info.pool_id; 2812 dp_rx_process_rxdma_err(soc, nbuf, 2813 rx_tlv_hdr, 2814 peer, 2815 wbm_err_info. 2816 rxdma_err_code, 2817 pool_id); 2818 break; 2819 2820 case HAL_RXDMA_ERR_TKIP_MIC: 2821 dp_rx_process_mic_error(soc, nbuf, 2822 rx_tlv_hdr, 2823 peer); 2824 if (peer) 2825 DP_STATS_INC(peer, rx.err.mic_err, 1); 2826 break; 2827 2828 case HAL_RXDMA_ERR_DECRYPT: 2829 2830 if (peer) { 2831 DP_STATS_INC(peer, rx.err. 2832 decrypt_err, 1); 2833 qdf_nbuf_free(nbuf); 2834 break; 2835 } 2836 2837 if (!dp_handle_rxdma_decrypt_err()) { 2838 qdf_nbuf_free(nbuf); 2839 break; 2840 } 2841 2842 pool_id = wbm_err_info.pool_id; 2843 err_code = wbm_err_info.rxdma_err_code; 2844 tlv_hdr = rx_tlv_hdr; 2845 dp_rx_process_rxdma_err(soc, nbuf, 2846 tlv_hdr, NULL, 2847 err_code, 2848 pool_id); 2849 break; 2850 2851 default: 2852 qdf_nbuf_free(nbuf); 2853 dp_err_rl("RXDMA error %d", 2854 wbm_err_info.rxdma_err_code); 2855 } 2856 } else if (wbm_err_info.rxdma_psh_rsn 2857 == HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) { 2858 dp_rx_err_route_hdl(soc, nbuf, peer, 2859 rx_tlv_hdr, 2860 HAL_RX_WBM_ERR_SRC_RXDMA); 2861 } else if (wbm_err_info.rxdma_psh_rsn 2862 == HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) { 2863 dp_rx_err_err("rxdma push reason %u", 2864 wbm_err_info.rxdma_psh_rsn); 2865 DP_STATS_INC(soc, rx.err.rx_flush_count, 1); 2866 qdf_nbuf_free(nbuf); 2867 } else { 2868 /* should not enter here */ 2869 dp_rx_err_alert("invalid rxdma push reason %u", 2870 wbm_err_info.rxdma_psh_rsn); 2871 qdf_nbuf_free(nbuf); 2872 qdf_assert_always(0); 2873 } 2874 } else { 2875 /* Should not come here */ 2876 qdf_assert(0); 2877 } 2878 2879 if (peer) 2880 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 2881 2882 nbuf = next; 2883 } 2884 return rx_bufs_used; /* Assume no scale factor for now */ 2885 } 2886 2887 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2888 2889 /** 2890 * dup_desc_dbg() - dump and assert if duplicate rx desc found 2891 * 2892 * @soc: core DP main context 2893 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 2894 * @rx_desc: void pointer to rx descriptor 2895 * 2896 * Return: void 2897 */ 2898 static void dup_desc_dbg(struct dp_soc *soc, 2899 hal_rxdma_desc_t rxdma_dst_ring_desc, 2900 void *rx_desc) 2901 { 2902 DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1); 2903 dp_rx_dump_info_and_assert( 2904 soc, 2905 soc->rx_rel_ring.hal_srng, 2906 hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc), 2907 rx_desc); 2908 } 2909 2910 /** 2911 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs 2912 * 2913 * @soc: core DP main context 2914 * @mac_id: mac id which is one of 3 mac_ids 2915 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 2916 * @head: head of descs list to be freed 2917 * @tail: tail of decs list to be freed 2918 2919 * Return: number of msdu in MPDU to be popped 2920 */ 2921 static inline uint32_t 2922 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 2923 hal_rxdma_desc_t rxdma_dst_ring_desc, 2924 union dp_rx_desc_list_elem_t **head, 2925 union dp_rx_desc_list_elem_t **tail) 2926 { 2927 void *rx_msdu_link_desc; 2928 qdf_nbuf_t msdu; 2929 qdf_nbuf_t last; 2930 struct hal_rx_msdu_list msdu_list; 2931 uint16_t num_msdus; 2932 struct hal_buf_info buf_info; 2933 uint32_t rx_bufs_used = 0; 2934 uint32_t msdu_cnt; 2935 uint32_t i; 2936 uint8_t push_reason; 2937 uint8_t rxdma_error_code = 0; 2938 uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST; 2939 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2940 uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; 2941 hal_rxdma_desc_t ring_desc; 2942 struct rx_desc_pool *rx_desc_pool; 2943 2944 if (!pdev) { 2945 dp_rx_err_debug("%pK: pdev is null for mac_id = %d", 2946 soc, mac_id); 2947 return rx_bufs_used; 2948 } 2949 2950 msdu = 0; 2951 2952 last = NULL; 2953 2954 hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc, 2955 &buf_info, &msdu_cnt); 2956 2957 push_reason = 2958 hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc); 2959 if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 2960 rxdma_error_code = 2961 hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc); 2962 } 2963 2964 do { 2965 rx_msdu_link_desc = 2966 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 2967 2968 qdf_assert_always(rx_msdu_link_desc); 2969 2970 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, 2971 &msdu_list, &num_msdus); 2972 2973 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 2974 /* if the msdus belongs to NSS offloaded radio && 2975 * the rbm is not SW1_BM then return the msdu_link 2976 * descriptor without freeing the msdus (nbufs). let 2977 * these buffers be given to NSS completion ring for 2978 * NSS to free them. 2979 * else iterate through the msdu link desc list and 2980 * free each msdu in the list. 2981 */ 2982 if (msdu_list.rbm[0] != 2983 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) && 2984 wlan_cfg_get_dp_pdev_nss_enabled( 2985 pdev->wlan_cfg_ctx)) 2986 bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST; 2987 else { 2988 for (i = 0; i < num_msdus; i++) { 2989 struct dp_rx_desc *rx_desc = 2990 soc->arch_ops. 2991 dp_rx_desc_cookie_2_va( 2992 soc, 2993 msdu_list.sw_cookie[i]); 2994 qdf_assert_always(rx_desc); 2995 msdu = rx_desc->nbuf; 2996 /* 2997 * this is a unlikely scenario 2998 * where the host is reaping 2999 * a descriptor which 3000 * it already reaped just a while ago 3001 * but is yet to replenish 3002 * it back to HW. 3003 * In this case host will dump 3004 * the last 128 descriptors 3005 * including the software descriptor 3006 * rx_desc and assert. 3007 */ 3008 ring_desc = rxdma_dst_ring_desc; 3009 if (qdf_unlikely(!rx_desc->in_use)) { 3010 dup_desc_dbg(soc, 3011 ring_desc, 3012 rx_desc); 3013 continue; 3014 } 3015 3016 rx_desc_pool = &soc-> 3017 rx_desc_buf[rx_desc->pool_id]; 3018 dp_ipa_rx_buf_smmu_mapping_lock(soc); 3019 dp_ipa_handle_rx_buf_smmu_mapping( 3020 soc, msdu, 3021 rx_desc_pool->buf_size, 3022 false); 3023 qdf_nbuf_unmap_nbytes_single( 3024 soc->osdev, msdu, 3025 QDF_DMA_FROM_DEVICE, 3026 rx_desc_pool->buf_size); 3027 rx_desc->unmapped = 1; 3028 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 3029 3030 dp_rx_err_debug("%pK: msdu_nbuf=%pK ", 3031 soc, msdu); 3032 3033 dp_rx_buffer_pool_nbuf_free(soc, msdu, 3034 rx_desc->pool_id); 3035 rx_bufs_used++; 3036 dp_rx_add_to_free_desc_list(head, 3037 tail, rx_desc); 3038 } 3039 } 3040 } else { 3041 rxdma_error_code = HAL_RXDMA_ERR_WAR; 3042 } 3043 3044 /* 3045 * Store the current link buffer into to the local structure 3046 * to be used for release purpose. 3047 */ 3048 hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info, 3049 buf_info.paddr, buf_info.sw_cookie, 3050 buf_info.rbm); 3051 3052 hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc, 3053 &buf_info); 3054 dp_rx_link_desc_return_by_addr(soc, 3055 (hal_buff_addrinfo_t) 3056 rx_link_buf_info, 3057 bm_action); 3058 } while (buf_info.paddr); 3059 3060 DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1); 3061 if (pdev) 3062 DP_STATS_INC(pdev, err.rxdma_error, 1); 3063 3064 if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) { 3065 dp_rx_err_err("%pK: Packet received with Decrypt error", soc); 3066 } 3067 3068 return rx_bufs_used; 3069 } 3070 3071 uint32_t 3072 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 3073 uint32_t mac_id, uint32_t quota) 3074 { 3075 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 3076 hal_rxdma_desc_t rxdma_dst_ring_desc; 3077 hal_soc_handle_t hal_soc; 3078 void *err_dst_srng; 3079 union dp_rx_desc_list_elem_t *head = NULL; 3080 union dp_rx_desc_list_elem_t *tail = NULL; 3081 struct dp_srng *dp_rxdma_srng; 3082 struct rx_desc_pool *rx_desc_pool; 3083 uint32_t work_done = 0; 3084 uint32_t rx_bufs_used = 0; 3085 3086 if (!pdev) 3087 return 0; 3088 3089 err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng; 3090 3091 if (!err_dst_srng) { 3092 dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK", 3093 soc, err_dst_srng); 3094 return 0; 3095 } 3096 3097 hal_soc = soc->hal_soc; 3098 3099 qdf_assert(hal_soc); 3100 3101 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) { 3102 dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK", 3103 soc, err_dst_srng); 3104 return 0; 3105 } 3106 3107 while (qdf_likely(quota-- && (rxdma_dst_ring_desc = 3108 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { 3109 3110 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, 3111 rxdma_dst_ring_desc, 3112 &head, &tail); 3113 } 3114 3115 dp_srng_access_end(int_ctx, soc, err_dst_srng); 3116 3117 if (rx_bufs_used) { 3118 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3119 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 3120 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 3121 } else { 3122 dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id]; 3123 rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id]; 3124 } 3125 3126 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 3127 rx_desc_pool, rx_bufs_used, &head, &tail); 3128 3129 work_done += rx_bufs_used; 3130 } 3131 3132 return work_done; 3133 } 3134 3135 #ifndef QCA_HOST_MODE_WIFI_DISABLED 3136 3137 static inline uint32_t 3138 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 3139 hal_rxdma_desc_t rxdma_dst_ring_desc, 3140 union dp_rx_desc_list_elem_t **head, 3141 union dp_rx_desc_list_elem_t **tail) 3142 { 3143 void *rx_msdu_link_desc; 3144 qdf_nbuf_t msdu; 3145 qdf_nbuf_t last; 3146 struct hal_rx_msdu_list msdu_list; 3147 uint16_t num_msdus; 3148 struct hal_buf_info buf_info; 3149 uint32_t rx_bufs_used = 0, msdu_cnt, i; 3150 uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; 3151 struct rx_desc_pool *rx_desc_pool; 3152 3153 msdu = 0; 3154 3155 last = NULL; 3156 3157 hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc, 3158 &buf_info, &msdu_cnt); 3159 3160 do { 3161 rx_msdu_link_desc = 3162 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 3163 3164 if (!rx_msdu_link_desc) { 3165 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1); 3166 break; 3167 } 3168 3169 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, 3170 &msdu_list, &num_msdus); 3171 3172 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 3173 for (i = 0; i < num_msdus; i++) { 3174 struct dp_rx_desc *rx_desc = 3175 soc->arch_ops.dp_rx_desc_cookie_2_va( 3176 soc, 3177 msdu_list.sw_cookie[i]); 3178 qdf_assert_always(rx_desc); 3179 rx_desc_pool = 3180 &soc->rx_desc_buf[rx_desc->pool_id]; 3181 msdu = rx_desc->nbuf; 3182 3183 dp_ipa_rx_buf_smmu_mapping_lock(soc); 3184 dp_ipa_handle_rx_buf_smmu_mapping( 3185 soc, msdu, 3186 rx_desc_pool->buf_size, 3187 false); 3188 3189 qdf_nbuf_unmap_nbytes_single( 3190 soc->osdev, 3191 msdu, 3192 QDF_DMA_FROM_DEVICE, 3193 rx_desc_pool->buf_size); 3194 rx_desc->unmapped = 1; 3195 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 3196 3197 dp_rx_buffer_pool_nbuf_free(soc, msdu, 3198 rx_desc->pool_id); 3199 rx_bufs_used++; 3200 dp_rx_add_to_free_desc_list(head, 3201 tail, rx_desc); 3202 } 3203 } 3204 3205 /* 3206 * Store the current link buffer into to the local structure 3207 * to be used for release purpose. 3208 */ 3209 hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info, 3210 buf_info.paddr, buf_info.sw_cookie, 3211 buf_info.rbm); 3212 3213 hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc, 3214 &buf_info); 3215 dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t) 3216 rx_link_buf_info, 3217 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 3218 } while (buf_info.paddr); 3219 3220 return rx_bufs_used; 3221 } 3222 3223 /* 3224 * 3225 * dp_handle_wbm_internal_error() - handles wbm_internal_error case 3226 * 3227 * @soc: core DP main context 3228 * @hal_desc: hal descriptor 3229 * @buf_type: indicates if the buffer is of type link disc or msdu 3230 * Return: None 3231 * 3232 * wbm_internal_error is seen in following scenarios : 3233 * 3234 * 1. Null pointers detected in WBM_RELEASE_RING descriptors 3235 * 2. Null pointers detected during delinking process 3236 * 3237 * Some null pointer cases: 3238 * 3239 * a. MSDU buffer pointer is NULL 3240 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag 3241 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL 3242 */ 3243 void 3244 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc, 3245 uint32_t buf_type) 3246 { 3247 struct hal_buf_info buf_info = {0}; 3248 struct dp_rx_desc *rx_desc = NULL; 3249 struct rx_desc_pool *rx_desc_pool; 3250 uint32_t rx_bufs_reaped = 0; 3251 union dp_rx_desc_list_elem_t *head = NULL; 3252 union dp_rx_desc_list_elem_t *tail = NULL; 3253 uint8_t pool_id; 3254 3255 hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info); 3256 3257 if (!buf_info.paddr) { 3258 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1); 3259 return; 3260 } 3261 3262 /* buffer_addr_info is the first element of ring_desc */ 3263 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc, 3264 &buf_info); 3265 pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie); 3266 3267 if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) { 3268 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1); 3269 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 3270 soc, 3271 buf_info.sw_cookie); 3272 3273 if (rx_desc && rx_desc->nbuf) { 3274 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 3275 dp_ipa_rx_buf_smmu_mapping_lock(soc); 3276 dp_ipa_handle_rx_buf_smmu_mapping( 3277 soc, rx_desc->nbuf, 3278 rx_desc_pool->buf_size, 3279 false); 3280 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, 3281 QDF_DMA_FROM_DEVICE, 3282 rx_desc_pool->buf_size); 3283 rx_desc->unmapped = 1; 3284 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 3285 3286 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 3287 rx_desc->pool_id); 3288 dp_rx_add_to_free_desc_list(&head, 3289 &tail, 3290 rx_desc); 3291 3292 rx_bufs_reaped++; 3293 } 3294 } else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) { 3295 rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id, 3296 hal_desc, 3297 &head, &tail); 3298 } 3299 3300 if (rx_bufs_reaped) { 3301 struct rx_desc_pool *rx_desc_pool; 3302 struct dp_srng *dp_rxdma_srng; 3303 3304 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1); 3305 dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id]; 3306 rx_desc_pool = &soc->rx_desc_buf[pool_id]; 3307 3308 dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng, 3309 rx_desc_pool, 3310 rx_bufs_reaped, 3311 &head, &tail); 3312 } 3313 } 3314 3315 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 3316