1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "dp_types.h" 21 #include "dp_rx.h" 22 #include "dp_tx.h" 23 #include "dp_peer.h" 24 #include "dp_internal.h" 25 #include "hal_api.h" 26 #include "qdf_trace.h" 27 #include "qdf_nbuf.h" 28 #include "dp_rx_defrag.h" 29 #include "dp_ipa.h" 30 #ifdef WIFI_MONITOR_SUPPORT 31 #include "dp_htt.h" 32 #include <dp_mon.h> 33 #endif 34 #ifdef FEATURE_WDS 35 #include "dp_txrx_wds.h" 36 #endif 37 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 38 #include "qdf_net_types.h" 39 #include "dp_rx_buffer_pool.h" 40 41 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params) 42 #define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params) 43 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params) 44 #define dp_rx_err_info(params...) \ 45 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params) 46 #define dp_rx_err_info_rl(params...) \ 47 __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params) 48 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params) 49 50 #ifndef QCA_HOST_MODE_WIFI_DISABLED 51 52 /* Max buffer in invalid peer SG list*/ 53 #define DP_MAX_INVALID_BUFFERS 10 54 55 /* Max regular Rx packet routing error */ 56 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20 57 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10 58 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */ 59 60 #ifdef FEATURE_MEC 61 bool dp_rx_mcast_echo_check(struct dp_soc *soc, 62 struct dp_peer *peer, 63 uint8_t *rx_tlv_hdr, 64 qdf_nbuf_t nbuf) 65 { 66 struct dp_vdev *vdev = peer->vdev; 67 struct dp_pdev *pdev = vdev->pdev; 68 struct dp_mec_entry *mecentry = NULL; 69 struct dp_ast_entry *ase = NULL; 70 uint16_t sa_idx = 0; 71 uint8_t *data; 72 /* 73 * Multicast Echo Check is required only if vdev is STA and 74 * received pkt is a multicast/broadcast pkt. otherwise 75 * skip the MEC check. 76 */ 77 if (vdev->opmode != wlan_op_mode_sta) 78 return false; 79 if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr)) 80 return false; 81 82 data = qdf_nbuf_data(nbuf); 83 84 /* 85 * if the received pkts src mac addr matches with vdev 86 * mac address then drop the pkt as it is looped back 87 */ 88 if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE], 89 vdev->mac_addr.raw, 90 QDF_MAC_ADDR_SIZE))) 91 return true; 92 93 /* 94 * In case of qwrap isolation mode, donot drop loopback packets. 95 * In isolation mode, all packets from the wired stations need to go 96 * to rootap and loop back to reach the wireless stations and 97 * vice-versa. 98 */ 99 if (qdf_unlikely(vdev->isolation_vdev)) 100 return false; 101 102 /* 103 * if the received pkts src mac addr matches with the 104 * wired PCs MAC addr which is behind the STA or with 105 * wireless STAs MAC addr which are behind the Repeater, 106 * then drop the pkt as it is looped back 107 */ 108 if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) { 109 sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr); 110 111 if ((sa_idx < 0) || 112 (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { 113 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 114 "invalid sa_idx: %d", sa_idx); 115 qdf_assert_always(0); 116 } 117 118 qdf_spin_lock_bh(&soc->ast_lock); 119 ase = soc->ast_table[sa_idx]; 120 121 /* 122 * this check was not needed since MEC is not dependent on AST, 123 * but if we dont have this check SON has some issues in 124 * dual backhaul scenario. in APS SON mode, client connected 125 * to RE 2G and sends multicast packets. the RE sends it to CAP 126 * over 5G backhaul. the CAP loopback it on 2G to RE. 127 * On receiving in 2G STA vap, we assume that client has roamed 128 * and kickout the client. 129 */ 130 if (ase && (ase->peer_id != peer->peer_id)) { 131 qdf_spin_unlock_bh(&soc->ast_lock); 132 goto drop; 133 } 134 135 qdf_spin_unlock_bh(&soc->ast_lock); 136 } 137 138 qdf_spin_lock_bh(&soc->mec_lock); 139 140 mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id, 141 &data[QDF_MAC_ADDR_SIZE]); 142 if (!mecentry) { 143 qdf_spin_unlock_bh(&soc->mec_lock); 144 return false; 145 } 146 147 qdf_spin_unlock_bh(&soc->mec_lock); 148 149 drop: 150 dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT, 151 soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE])); 152 153 return true; 154 } 155 #endif 156 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 157 158 void dp_rx_link_desc_refill_duplicate_check( 159 struct dp_soc *soc, 160 struct hal_buf_info *buf_info, 161 hal_buff_addrinfo_t ring_buf_info) 162 { 163 struct hal_buf_info current_link_desc_buf_info = { 0 }; 164 165 /* do duplicate link desc address check */ 166 hal_rx_buffer_addr_info_get_paddr(ring_buf_info, 167 ¤t_link_desc_buf_info); 168 169 /* 170 * TODO - Check if the hal soc api call can be removed 171 * since the cookie is just used for print. 172 * buffer_addr_info is the first element of ring_desc 173 */ 174 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 175 (uint32_t *)ring_buf_info, 176 ¤t_link_desc_buf_info); 177 178 if (qdf_unlikely(current_link_desc_buf_info.paddr == 179 buf_info->paddr)) { 180 dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x", 181 current_link_desc_buf_info.paddr, 182 current_link_desc_buf_info.sw_cookie); 183 DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1); 184 } 185 *buf_info = current_link_desc_buf_info; 186 } 187 188 /** 189 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to 190 * (WBM) by address 191 * 192 * @soc: core DP main context 193 * @link_desc_addr: link descriptor addr 194 * 195 * Return: QDF_STATUS 196 */ 197 QDF_STATUS 198 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, 199 hal_buff_addrinfo_t link_desc_addr, 200 uint8_t bm_action) 201 { 202 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; 203 hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng; 204 hal_soc_handle_t hal_soc = soc->hal_soc; 205 QDF_STATUS status = QDF_STATUS_E_FAILURE; 206 void *src_srng_desc; 207 208 if (!wbm_rel_srng) { 209 dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc); 210 return status; 211 } 212 213 /* do duplicate link desc address check */ 214 dp_rx_link_desc_refill_duplicate_check( 215 soc, 216 &soc->last_op_info.wbm_rel_link_desc, 217 link_desc_addr); 218 219 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { 220 221 /* TODO */ 222 /* 223 * Need API to convert from hal_ring pointer to 224 * Ring Type / Ring Id combo 225 */ 226 dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK", 227 soc, wbm_rel_srng); 228 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 229 goto done; 230 } 231 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); 232 if (qdf_likely(src_srng_desc)) { 233 /* Return link descriptor through WBM ring (SW2WBM)*/ 234 hal_rx_msdu_link_desc_set(hal_soc, 235 src_srng_desc, link_desc_addr, bm_action); 236 status = QDF_STATUS_SUCCESS; 237 } else { 238 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng; 239 240 DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1); 241 242 dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)", 243 srng->ring_id, 244 soc->stats.rx.err.hal_ring_access_full_fail); 245 dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x", 246 *srng->u.src_ring.hp_addr, 247 srng->u.src_ring.reap_hp, 248 *srng->u.src_ring.tp_addr, 249 srng->u.src_ring.cached_tp); 250 QDF_BUG(0); 251 } 252 done: 253 hal_srng_access_end(hal_soc, wbm_rel_srng); 254 return status; 255 256 } 257 258 qdf_export_symbol(dp_rx_link_desc_return_by_addr); 259 260 /** 261 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 262 * (WBM), following error handling 263 * 264 * @soc: core DP main context 265 * @ring_desc: opaque pointer to the REO error ring descriptor 266 * 267 * Return: QDF_STATUS 268 */ 269 QDF_STATUS 270 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc, 271 uint8_t bm_action) 272 { 273 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc); 274 275 return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action); 276 } 277 278 #ifndef QCA_HOST_MODE_WIFI_DISABLED 279 280 /** 281 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU 282 * 283 * @soc: core txrx main context 284 * @ring_desc: opaque pointer to the REO error ring descriptor 285 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 286 * @head: head of the local descriptor free-list 287 * @tail: tail of the local descriptor free-list 288 * @quota: No. of units (packets) that can be serviced in one shot. 289 * 290 * This function is used to drop all MSDU in an MPDU 291 * 292 * Return: uint32_t: No. of elements processed 293 */ 294 static uint32_t 295 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc, 296 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 297 uint8_t *mac_id, 298 uint32_t quota) 299 { 300 uint32_t rx_bufs_used = 0; 301 void *link_desc_va; 302 struct hal_buf_info buf_info; 303 struct dp_pdev *pdev; 304 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 305 int i; 306 uint8_t *rx_tlv_hdr; 307 uint32_t tid; 308 struct rx_desc_pool *rx_desc_pool; 309 struct dp_rx_desc *rx_desc; 310 /* First field in REO Dst ring Desc is buffer_addr_info */ 311 void *buf_addr_info = ring_desc; 312 struct buffer_addr_info cur_link_desc_addr_info = { 0 }; 313 struct buffer_addr_info next_link_desc_addr_info = { 0 }; 314 315 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info); 316 317 /* buffer_addr_info is the first element of ring_desc */ 318 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 319 (uint32_t *)ring_desc, 320 &buf_info); 321 322 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 323 if (!link_desc_va) { 324 dp_rx_err_debug("link desc va is null, soc %pk", soc); 325 return rx_bufs_used; 326 } 327 328 more_msdu_link_desc: 329 /* No UNMAP required -- this is "malloc_consistent" memory */ 330 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 331 &mpdu_desc_info->msdu_count); 332 333 for (i = 0; (i < mpdu_desc_info->msdu_count); i++) { 334 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 335 soc, msdu_list.sw_cookie[i]); 336 337 qdf_assert_always(rx_desc); 338 339 /* all buffers from a MSDU link link belong to same pdev */ 340 *mac_id = rx_desc->pool_id; 341 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 342 if (!pdev) { 343 dp_rx_err_debug("%pK: pdev is null for pool_id = %d", 344 soc, rx_desc->pool_id); 345 return rx_bufs_used; 346 } 347 348 if (!dp_rx_desc_check_magic(rx_desc)) { 349 dp_rx_err_err("%pK: Invalid rx_desc cookie=%d", 350 soc, msdu_list.sw_cookie[i]); 351 return rx_bufs_used; 352 } 353 354 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 355 dp_ipa_rx_buf_smmu_mapping_lock(soc); 356 dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, 357 rx_desc_pool->buf_size, 358 false); 359 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, 360 QDF_DMA_FROM_DEVICE, 361 rx_desc_pool->buf_size); 362 rx_desc->unmapped = 1; 363 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 364 365 rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf); 366 367 rx_bufs_used++; 368 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 369 rx_desc->rx_buf_start); 370 dp_rx_err_err("%pK: Packet received with PN error for tid :%d", 371 soc, tid); 372 373 rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf); 374 if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr)) 375 hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr); 376 377 /* Just free the buffers */ 378 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id); 379 380 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 381 &pdev->free_list_tail, rx_desc); 382 } 383 384 /* 385 * If the msdu's are spread across multiple link-descriptors, 386 * we cannot depend solely on the msdu_count(e.g., if msdu is 387 * spread across multiple buffers).Hence, it is 388 * necessary to check the next link_descriptor and release 389 * all the msdu's that are part of it. 390 */ 391 hal_rx_get_next_msdu_link_desc_buf_addr_info( 392 link_desc_va, 393 &next_link_desc_addr_info); 394 395 if (hal_rx_is_buf_addr_info_valid( 396 &next_link_desc_addr_info)) { 397 /* Clear the next link desc info for the current link_desc */ 398 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va); 399 400 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 401 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 402 hal_rx_buffer_addr_info_get_paddr( 403 &next_link_desc_addr_info, 404 &buf_info); 405 /* buffer_addr_info is the first element of ring_desc */ 406 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 407 (uint32_t *)&next_link_desc_addr_info, 408 &buf_info); 409 cur_link_desc_addr_info = next_link_desc_addr_info; 410 buf_addr_info = &cur_link_desc_addr_info; 411 412 link_desc_va = 413 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 414 415 goto more_msdu_link_desc; 416 } 417 quota--; 418 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 419 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 420 return rx_bufs_used; 421 } 422 423 /** 424 * dp_rx_pn_error_handle() - Handles PN check errors 425 * 426 * @soc: core txrx main context 427 * @ring_desc: opaque pointer to the REO error ring descriptor 428 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 429 * @head: head of the local descriptor free-list 430 * @tail: tail of the local descriptor free-list 431 * @quota: No. of units (packets) that can be serviced in one shot. 432 * 433 * This function implements PN error handling 434 * If the peer is configured to ignore the PN check errors 435 * or if DP feels, that this frame is still OK, the frame can be 436 * re-injected back to REO to use some of the other features 437 * of REO e.g. duplicate detection/routing to other cores 438 * 439 * Return: uint32_t: No. of elements processed 440 */ 441 static uint32_t 442 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc, 443 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 444 uint8_t *mac_id, 445 uint32_t quota) 446 { 447 uint16_t peer_id; 448 uint32_t rx_bufs_used = 0; 449 struct dp_peer *peer; 450 bool peer_pn_policy = false; 451 452 peer_id = DP_PEER_METADATA_PEER_ID_GET( 453 mpdu_desc_info->peer_meta_data); 454 455 456 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 457 458 if (qdf_likely(peer)) { 459 /* 460 * TODO: Check for peer specific policies & set peer_pn_policy 461 */ 462 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 463 "discard rx due to PN error for peer %pK "QDF_MAC_ADDR_FMT, 464 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 465 466 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 467 } 468 dp_rx_err_err("%pK: Packet received with PN error", soc); 469 470 /* No peer PN policy -- definitely drop */ 471 if (!peer_pn_policy) 472 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, 473 mpdu_desc_info, 474 mac_id, quota); 475 476 return rx_bufs_used; 477 } 478 479 /** 480 * dp_rx_oor_handle() - Handles the msdu which is OOR error 481 * 482 * @soc: core txrx main context 483 * @nbuf: pointer to msdu skb 484 * @peer_id: dp peer ID 485 * @rx_tlv_hdr: start of rx tlv header 486 * 487 * This function process the msdu delivered from REO2TCL 488 * ring with error type OOR 489 * 490 * Return: None 491 */ 492 static void 493 dp_rx_oor_handle(struct dp_soc *soc, 494 qdf_nbuf_t nbuf, 495 uint16_t peer_id, 496 uint8_t *rx_tlv_hdr) 497 { 498 uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | 499 FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; 500 struct dp_peer *peer = NULL; 501 502 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 503 if (!peer) { 504 dp_info_rl("peer not found"); 505 goto free_nbuf; 506 } 507 508 if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask, 509 rx_tlv_hdr)) { 510 DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1); 511 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 512 return; 513 } 514 515 free_nbuf: 516 if (peer) 517 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 518 519 DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1); 520 qdf_nbuf_free(nbuf); 521 } 522 523 /** 524 * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet 525 * is a monotonous increment of packet number 526 * from the previous successfully re-ordered 527 * frame. 528 * @soc: Datapath SOC handle 529 * @ring_desc: REO ring descriptor 530 * @nbuf: Current packet 531 * 532 * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE 533 */ 534 static inline QDF_STATUS 535 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc, 536 qdf_nbuf_t nbuf) 537 { 538 uint64_t prev_pn, curr_pn; 539 540 hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn); 541 hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), &curr_pn); 542 543 if (curr_pn > prev_pn) 544 return QDF_STATUS_SUCCESS; 545 546 return QDF_STATUS_E_FAILURE; 547 } 548 549 #ifdef WLAN_SKIP_BAR_UPDATE 550 static 551 void dp_rx_err_handle_bar(struct dp_soc *soc, 552 struct dp_peer *peer, 553 qdf_nbuf_t nbuf) 554 { 555 dp_info_rl("BAR update to H.W is skipped"); 556 DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1); 557 } 558 #else 559 static 560 void dp_rx_err_handle_bar(struct dp_soc *soc, 561 struct dp_peer *peer, 562 qdf_nbuf_t nbuf) 563 { 564 uint8_t *rx_tlv_hdr; 565 unsigned char type, subtype; 566 uint16_t start_seq_num; 567 uint32_t tid; 568 QDF_STATUS status; 569 struct ieee80211_frame_bar *bar; 570 571 /* 572 * 1. Is this a BAR frame. If not Discard it. 573 * 2. If it is, get the peer id, tid, ssn 574 * 2a Do a tid update 575 */ 576 577 rx_tlv_hdr = qdf_nbuf_data(nbuf); 578 bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size); 579 580 type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 581 subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 582 583 if (!(type == IEEE80211_FC0_TYPE_CTL && 584 subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) { 585 dp_err_rl("Not a BAR frame!"); 586 return; 587 } 588 589 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr); 590 qdf_assert_always(tid < DP_MAX_TIDS); 591 592 start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT; 593 594 dp_info_rl("tid %u window_size %u start_seq_num %u", 595 tid, peer->rx_tid[tid].ba_win_size, start_seq_num); 596 597 status = dp_rx_tid_update_wifi3(peer, tid, 598 peer->rx_tid[tid].ba_win_size, 599 start_seq_num, 600 true); 601 if (status != QDF_STATUS_SUCCESS) { 602 dp_err_rl("failed to handle bar frame update rx tid"); 603 DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1); 604 } else { 605 DP_STATS_INC(soc, rx.err.ssn_update_count, 1); 606 } 607 } 608 #endif 609 610 /** 611 * _dp_rx_bar_frame_handle(): Core of the BAR frame handling 612 * @soc: Datapath SoC handle 613 * @nbuf: packet being processed 614 * @mpdu_desc_info: mpdu desc info for the current packet 615 * @tid: tid on which the packet arrived 616 * @err_status: Flag to indicate if REO encountered an error while routing this 617 * frame 618 * @error_code: REO error code 619 * 620 * Return: None 621 */ 622 static void 623 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, 624 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 625 uint32_t tid, uint8_t err_status, uint32_t error_code) 626 { 627 uint16_t peer_id; 628 struct dp_peer *peer; 629 630 peer_id = DP_PEER_METADATA_PEER_ID_GET(mpdu_desc_info->peer_meta_data); 631 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 632 if (!peer) 633 return; 634 635 dp_info("BAR frame: peer = " QDF_MAC_ADDR_FMT 636 " peer_id = %d" 637 " tid = %u" 638 " SSN = %d" 639 " error status = %d", 640 QDF_MAC_ADDR_REF(peer->mac_addr.raw), 641 peer->peer_id, 642 tid, 643 mpdu_desc_info->mpdu_seq, 644 err_status); 645 646 if (err_status == HAL_REO_ERROR_DETECTED) { 647 switch (error_code) { 648 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 649 /* fallthrough */ 650 case HAL_REO_ERR_BAR_FRAME_OOR: 651 dp_rx_err_handle_bar(soc, peer, nbuf); 652 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 653 break; 654 default: 655 DP_STATS_INC(soc, rx.bar_frame, 1); 656 } 657 } 658 659 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 660 } 661 662 /** 663 * dp_rx_reo_err_entry_process() - Handles for REO error entry processing 664 * 665 * @soc: core txrx main context 666 * @ring_desc: opaque pointer to the REO error ring descriptor 667 * @mpdu_desc_info: pointer to mpdu level description info 668 * @link_desc_va: pointer to msdu_link_desc virtual address 669 * @err_code: reo erro code fetched from ring entry 670 * 671 * Function to handle msdus fetched from msdu link desc, currently 672 * only support 2K jump, OOR error. 673 * 674 * Return: msdu count processed. 675 */ 676 static uint32_t 677 dp_rx_reo_err_entry_process(struct dp_soc *soc, 678 void *ring_desc, 679 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 680 void *link_desc_va, 681 enum hal_reo_error_code err_code) 682 { 683 uint32_t rx_bufs_used = 0; 684 struct dp_pdev *pdev; 685 int i; 686 uint8_t *rx_tlv_hdr_first; 687 uint8_t *rx_tlv_hdr_last; 688 uint32_t tid = DP_MAX_TIDS; 689 uint16_t peer_id; 690 struct dp_rx_desc *rx_desc; 691 struct rx_desc_pool *rx_desc_pool; 692 qdf_nbuf_t nbuf; 693 struct hal_buf_info buf_info; 694 struct hal_rx_msdu_list msdu_list; 695 uint16_t num_msdus; 696 struct buffer_addr_info cur_link_desc_addr_info = { 0 }; 697 struct buffer_addr_info next_link_desc_addr_info = { 0 }; 698 /* First field in REO Dst ring Desc is buffer_addr_info */ 699 void *buf_addr_info = ring_desc; 700 qdf_nbuf_t head_nbuf = NULL; 701 qdf_nbuf_t tail_nbuf = NULL; 702 uint16_t msdu_processed = 0; 703 QDF_STATUS status; 704 bool ret; 705 706 peer_id = DP_PEER_METADATA_PEER_ID_GET( 707 mpdu_desc_info->peer_meta_data); 708 709 more_msdu_link_desc: 710 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 711 &num_msdus); 712 for (i = 0; i < num_msdus; i++) { 713 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 714 soc, 715 msdu_list.sw_cookie[i]); 716 717 qdf_assert_always(rx_desc); 718 719 /* all buffers from a MSDU link belong to same pdev */ 720 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 721 722 nbuf = rx_desc->nbuf; 723 ret = dp_rx_desc_paddr_sanity_check(rx_desc, 724 msdu_list.paddr[i]); 725 if (!ret) { 726 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 727 rx_desc->in_err_state = 1; 728 continue; 729 } 730 731 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 732 dp_ipa_rx_buf_smmu_mapping_lock(soc); 733 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, 734 rx_desc_pool->buf_size, 735 false); 736 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, 737 QDF_DMA_FROM_DEVICE, 738 rx_desc_pool->buf_size); 739 rx_desc->unmapped = 1; 740 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 741 742 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len; 743 rx_bufs_used++; 744 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 745 &pdev->free_list_tail, rx_desc); 746 747 DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf); 748 749 if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags & 750 HAL_MSDU_F_MSDU_CONTINUATION)) 751 continue; 752 753 if (dp_rx_buffer_pool_refill(soc, head_nbuf, 754 rx_desc->pool_id)) { 755 /* MSDU queued back to the pool */ 756 goto process_next_msdu; 757 } 758 759 rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf); 760 rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf); 761 762 if (qdf_unlikely(head_nbuf != tail_nbuf)) { 763 nbuf = dp_rx_sg_create(soc, head_nbuf); 764 qdf_nbuf_set_is_frag(nbuf, 1); 765 DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1); 766 } 767 768 if (soc->features.pn_in_reo_dest) { 769 status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf); 770 if (QDF_IS_STATUS_ERROR(status)) { 771 DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail, 772 1); 773 qdf_nbuf_free(nbuf); 774 goto process_next_msdu; 775 } 776 777 hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, 778 qdf_nbuf_data(nbuf), 779 mpdu_desc_info); 780 peer_id = DP_PEER_METADATA_PEER_ID_GET( 781 mpdu_desc_info->peer_meta_data); 782 783 if (mpdu_desc_info->bar_frame) 784 _dp_rx_bar_frame_handle(soc, nbuf, 785 mpdu_desc_info, tid, 786 HAL_REO_ERROR_DETECTED, 787 err_code); 788 } 789 790 switch (err_code) { 791 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 792 /* 793 * only first msdu, mpdu start description tlv valid? 794 * and use it for following msdu. 795 */ 796 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 797 rx_tlv_hdr_last)) 798 tid = hal_rx_mpdu_start_tid_get( 799 soc->hal_soc, 800 rx_tlv_hdr_first); 801 802 dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last, 803 peer_id, tid); 804 break; 805 806 case HAL_REO_ERR_REGULAR_FRAME_OOR: 807 dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last); 808 break; 809 default: 810 dp_err_rl("Non-support error code %d", err_code); 811 qdf_nbuf_free(nbuf); 812 } 813 814 process_next_msdu: 815 msdu_processed++; 816 head_nbuf = NULL; 817 tail_nbuf = NULL; 818 } 819 820 /* 821 * If the msdu's are spread across multiple link-descriptors, 822 * we cannot depend solely on the msdu_count(e.g., if msdu is 823 * spread across multiple buffers).Hence, it is 824 * necessary to check the next link_descriptor and release 825 * all the msdu's that are part of it. 826 */ 827 hal_rx_get_next_msdu_link_desc_buf_addr_info( 828 link_desc_va, 829 &next_link_desc_addr_info); 830 831 if (hal_rx_is_buf_addr_info_valid( 832 &next_link_desc_addr_info)) { 833 /* Clear the next link desc info for the current link_desc */ 834 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va); 835 dp_rx_link_desc_return_by_addr( 836 soc, 837 buf_addr_info, 838 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 839 840 hal_rx_buffer_addr_info_get_paddr( 841 &next_link_desc_addr_info, 842 &buf_info); 843 /* buffer_addr_info is the first element of ring_desc */ 844 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 845 (uint32_t *)&next_link_desc_addr_info, 846 &buf_info); 847 link_desc_va = 848 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 849 cur_link_desc_addr_info = next_link_desc_addr_info; 850 buf_addr_info = &cur_link_desc_addr_info; 851 852 goto more_msdu_link_desc; 853 } 854 855 dp_rx_link_desc_return_by_addr(soc, buf_addr_info, 856 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 857 if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count)) 858 DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1); 859 860 return rx_bufs_used; 861 } 862 863 #ifdef DP_INVALID_PEER_ASSERT 864 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \ 865 do { \ 866 qdf_assert_always(!(head)); \ 867 qdf_assert_always(!(tail)); \ 868 } while (0) 869 #else 870 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */ 871 #endif 872 873 /** 874 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu 875 * to pdev invalid peer list 876 * 877 * @soc: core DP main context 878 * @nbuf: Buffer pointer 879 * @rx_tlv_hdr: start of rx tlv header 880 * @mac_id: mac id 881 * 882 * Return: bool: true for last msdu of mpdu 883 */ 884 static bool 885 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, 886 uint8_t *rx_tlv_hdr, uint8_t mac_id) 887 { 888 bool mpdu_done = false; 889 qdf_nbuf_t curr_nbuf = NULL; 890 qdf_nbuf_t tmp_nbuf = NULL; 891 892 /* TODO: Currently only single radio is supported, hence 893 * pdev hard coded to '0' index 894 */ 895 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 896 897 if (!dp_pdev) { 898 dp_rx_err_debug("%pK: pdev is null for mac_id = %d", soc, mac_id); 899 return mpdu_done; 900 } 901 /* if invalid peer SG list has max values free the buffers in list 902 * and treat current buffer as start of list 903 * 904 * current logic to detect the last buffer from attn_tlv is not reliable 905 * in OFDMA UL scenario hence add max buffers check to avoid list pile 906 * up 907 */ 908 if (!dp_pdev->first_nbuf || 909 (dp_pdev->invalid_peer_head_msdu && 910 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST 911 (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) { 912 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 913 dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc, 914 rx_tlv_hdr); 915 dp_pdev->first_nbuf = true; 916 917 /* If the new nbuf received is the first msdu of the 918 * amsdu and there are msdus in the invalid peer msdu 919 * list, then let us free all the msdus of the invalid 920 * peer msdu list. 921 * This scenario can happen when we start receiving 922 * new a-msdu even before the previous a-msdu is completely 923 * received. 924 */ 925 curr_nbuf = dp_pdev->invalid_peer_head_msdu; 926 while (curr_nbuf) { 927 tmp_nbuf = curr_nbuf->next; 928 qdf_nbuf_free(curr_nbuf); 929 curr_nbuf = tmp_nbuf; 930 } 931 932 dp_pdev->invalid_peer_head_msdu = NULL; 933 dp_pdev->invalid_peer_tail_msdu = NULL; 934 935 dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr); 936 } 937 938 if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(soc->hal_soc, 939 rx_tlv_hdr) && 940 hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 941 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 942 qdf_assert_always(dp_pdev->first_nbuf == true); 943 dp_pdev->first_nbuf = false; 944 mpdu_done = true; 945 } 946 947 /* 948 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu 949 * should be NULL here, add the checking for debugging purpose 950 * in case some corner case. 951 */ 952 DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu, 953 dp_pdev->invalid_peer_tail_msdu); 954 DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu, 955 dp_pdev->invalid_peer_tail_msdu, 956 nbuf); 957 958 return mpdu_done; 959 } 960 961 /** 962 * dp_rx_bar_frame_handle() - Function to handle err BAR frames 963 * @soc: core DP main context 964 * @ring_desc: Hal ring desc 965 * @rx_desc: dp rx desc 966 * @mpdu_desc_info: mpdu desc info 967 * 968 * Handle the error BAR frames received. Ensure the SOC level 969 * stats are updated based on the REO error code. The BAR frames 970 * are further processed by updating the Rx tids with the start 971 * sequence number (SSN) and BA window size. Desc is returned 972 * to the free desc list 973 * 974 * Return: none 975 */ 976 static void 977 dp_rx_bar_frame_handle(struct dp_soc *soc, 978 hal_ring_desc_t ring_desc, 979 struct dp_rx_desc *rx_desc, 980 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 981 uint8_t err_status, 982 uint32_t err_code) 983 { 984 qdf_nbuf_t nbuf; 985 struct dp_pdev *pdev; 986 struct rx_desc_pool *rx_desc_pool; 987 uint8_t *rx_tlv_hdr; 988 uint32_t tid; 989 990 nbuf = rx_desc->nbuf; 991 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 992 dp_ipa_rx_buf_smmu_mapping_lock(soc); 993 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, 994 rx_desc_pool->buf_size, 995 false); 996 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, 997 QDF_DMA_FROM_DEVICE, 998 rx_desc_pool->buf_size); 999 rx_desc->unmapped = 1; 1000 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 1001 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1002 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 1003 rx_tlv_hdr); 1004 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 1005 1006 _dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status, 1007 err_code); 1008 dp_rx_link_desc_return(soc, ring_desc, 1009 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 1010 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 1011 rx_desc->pool_id); 1012 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 1013 &pdev->free_list_tail, 1014 rx_desc); 1015 } 1016 1017 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1018 1019 /** 1020 * dp_2k_jump_handle() - Function to handle 2k jump exception 1021 * on WBM ring 1022 * 1023 * @soc: core DP main context 1024 * @nbuf: buffer pointer 1025 * @rx_tlv_hdr: start of rx tlv header 1026 * @peer_id: peer id of first msdu 1027 * @tid: Tid for which exception occurred 1028 * 1029 * This function handles 2k jump violations arising out 1030 * of receiving aggregates in non BA case. This typically 1031 * may happen if aggregates are received on a QOS enabled TID 1032 * while Rx window size is still initialized to value of 2. Or 1033 * it may also happen if negotiated window size is 1 but peer 1034 * sends aggregates. 1035 * 1036 */ 1037 1038 void 1039 dp_2k_jump_handle(struct dp_soc *soc, 1040 qdf_nbuf_t nbuf, 1041 uint8_t *rx_tlv_hdr, 1042 uint16_t peer_id, 1043 uint8_t tid) 1044 { 1045 struct dp_peer *peer = NULL; 1046 struct dp_rx_tid *rx_tid = NULL; 1047 uint32_t frame_mask = FRAME_MASK_IPV4_ARP; 1048 1049 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 1050 if (!peer) { 1051 dp_rx_err_info_rl("%pK: peer not found", soc); 1052 goto free_nbuf; 1053 } 1054 1055 if (tid >= DP_MAX_TIDS) { 1056 dp_info_rl("invalid tid"); 1057 goto nbuf_deliver; 1058 } 1059 1060 rx_tid = &peer->rx_tid[tid]; 1061 qdf_spin_lock_bh(&rx_tid->tid_lock); 1062 1063 /* only if BA session is active, allow send Delba */ 1064 if (rx_tid->ba_status != DP_RX_BA_ACTIVE) { 1065 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1066 goto nbuf_deliver; 1067 } 1068 1069 if (!rx_tid->delba_tx_status) { 1070 rx_tid->delba_tx_retry++; 1071 rx_tid->delba_tx_status = 1; 1072 rx_tid->delba_rcode = 1073 IEEE80211_REASON_QOS_SETUP_REQUIRED; 1074 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1075 if (soc->cdp_soc.ol_ops->send_delba) { 1076 DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1); 1077 soc->cdp_soc.ol_ops->send_delba( 1078 peer->vdev->pdev->soc->ctrl_psoc, 1079 peer->vdev->vdev_id, 1080 peer->mac_addr.raw, 1081 tid, 1082 rx_tid->delba_rcode, 1083 CDP_DELBA_2K_JUMP); 1084 } 1085 } else { 1086 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1087 } 1088 1089 nbuf_deliver: 1090 if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask, 1091 rx_tlv_hdr)) { 1092 DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1); 1093 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1094 return; 1095 } 1096 1097 free_nbuf: 1098 if (peer) 1099 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1100 DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1); 1101 qdf_nbuf_free(nbuf); 1102 } 1103 1104 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ 1105 defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_WCN7850) 1106 /** 1107 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception 1108 * @soc: pointer to dp_soc struct 1109 * @pool_id: Pool id to find dp_pdev 1110 * @rx_tlv_hdr: TLV header of received packet 1111 * @nbuf: SKB 1112 * 1113 * In certain types of packets if peer_id is not correct then 1114 * driver may not be able find. Try finding peer by addr_2 of 1115 * received MPDU. If you find the peer then most likely sw_peer_id & 1116 * ast_idx is corrupted. 1117 * 1118 * Return: True if you find the peer by addr_2 of received MPDU else false 1119 */ 1120 static bool 1121 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, 1122 uint8_t pool_id, 1123 uint8_t *rx_tlv_hdr, 1124 qdf_nbuf_t nbuf) 1125 { 1126 struct dp_peer *peer = NULL; 1127 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr); 1128 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 1129 struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr; 1130 1131 if (!pdev) { 1132 dp_rx_err_debug("%pK: pdev is null for pool_id = %d", 1133 soc, pool_id); 1134 return false; 1135 } 1136 /* 1137 * WAR- In certain types of packets if peer_id is not correct then 1138 * driver may not be able find. Try finding peer by addr_2 of 1139 * received MPDU 1140 */ 1141 if (wh) 1142 peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0, 1143 DP_VDEV_ALL, DP_MOD_ID_RX_ERR); 1144 if (peer) { 1145 dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted"); 1146 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1147 QDF_TRACE_LEVEL_DEBUG); 1148 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id, 1149 1, qdf_nbuf_len(nbuf)); 1150 qdf_nbuf_free(nbuf); 1151 1152 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 1153 return true; 1154 } 1155 return false; 1156 } 1157 1158 /** 1159 * dp_rx_check_pkt_len() - Check for pktlen validity 1160 * @soc: DP SOC context 1161 * @pkt_len: computed length of the pkt from caller in bytes 1162 * 1163 * Return: true if pktlen > RX_BUFFER_SIZE, else return false 1164 * 1165 */ 1166 static inline 1167 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len) 1168 { 1169 if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) { 1170 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len, 1171 1, pkt_len); 1172 return true; 1173 } else { 1174 return false; 1175 } 1176 } 1177 1178 #else 1179 static inline bool 1180 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, 1181 uint8_t pool_id, 1182 uint8_t *rx_tlv_hdr, 1183 qdf_nbuf_t nbuf) 1184 { 1185 return false; 1186 } 1187 1188 static inline 1189 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len) 1190 { 1191 return false; 1192 } 1193 1194 #endif 1195 1196 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1197 1198 /** 1199 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue 1200 * descriptor violation on either a 1201 * REO or WBM ring 1202 * 1203 * @soc: core DP main context 1204 * @nbuf: buffer pointer 1205 * @rx_tlv_hdr: start of rx tlv header 1206 * @pool_id: mac id 1207 * @peer: peer handle 1208 * 1209 * This function handles NULL queue descriptor violations arising out 1210 * a missing REO queue for a given peer or a given TID. This typically 1211 * may happen if a packet is received on a QOS enabled TID before the 1212 * ADDBA negotiation for that TID, when the TID queue is setup. Or 1213 * it may also happen for MC/BC frames if they are not routed to the 1214 * non-QOS TID queue, in the absence of any other default TID queue. 1215 * This error can show up both in a REO destination or WBM release ring. 1216 * 1217 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code 1218 * if nbuf could not be handled or dropped. 1219 */ 1220 static QDF_STATUS 1221 dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, 1222 uint8_t *rx_tlv_hdr, uint8_t pool_id, 1223 struct dp_peer *peer) 1224 { 1225 uint32_t pkt_len; 1226 uint16_t msdu_len; 1227 struct dp_vdev *vdev; 1228 uint8_t tid; 1229 qdf_ether_header_t *eh; 1230 struct hal_rx_msdu_metadata msdu_metadata; 1231 uint16_t sa_idx = 0; 1232 bool is_eapol; 1233 1234 qdf_nbuf_set_rx_chfrag_start(nbuf, 1235 hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1236 rx_tlv_hdr)); 1237 qdf_nbuf_set_rx_chfrag_end(nbuf, 1238 hal_rx_msdu_end_last_msdu_get(soc->hal_soc, 1239 rx_tlv_hdr)); 1240 qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1241 rx_tlv_hdr)); 1242 qdf_nbuf_set_da_valid(nbuf, 1243 hal_rx_msdu_end_da_is_valid_get(soc->hal_soc, 1244 rx_tlv_hdr)); 1245 qdf_nbuf_set_sa_valid(nbuf, 1246 hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, 1247 rx_tlv_hdr)); 1248 1249 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata); 1250 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1251 pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size; 1252 1253 if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) { 1254 if (dp_rx_check_pkt_len(soc, pkt_len)) 1255 goto drop_nbuf; 1256 1257 /* Set length in nbuf */ 1258 qdf_nbuf_set_pktlen( 1259 nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE)); 1260 qdf_assert_always(nbuf->data == rx_tlv_hdr); 1261 } 1262 1263 /* 1264 * Check if DMA completed -- msdu_done is the last bit 1265 * to be written 1266 */ 1267 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1268 1269 dp_err_rl("MSDU DONE failure"); 1270 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1271 QDF_TRACE_LEVEL_INFO); 1272 qdf_assert(0); 1273 } 1274 1275 if (!peer && 1276 dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id, 1277 rx_tlv_hdr, nbuf)) 1278 return QDF_STATUS_E_FAILURE; 1279 1280 if (!peer) { 1281 bool mpdu_done = false; 1282 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 1283 1284 if (!pdev) { 1285 dp_err_rl("pdev is null for pool_id = %d", pool_id); 1286 return QDF_STATUS_E_FAILURE; 1287 } 1288 1289 dp_err_rl("peer is NULL"); 1290 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 1291 qdf_nbuf_len(nbuf)); 1292 1293 /* QCN9000 has the support enabled */ 1294 if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) { 1295 mpdu_done = true; 1296 nbuf->next = NULL; 1297 /* Trigger invalid peer handler wrapper */ 1298 dp_rx_process_invalid_peer_wrapper(soc, 1299 nbuf, mpdu_done, pool_id); 1300 } else { 1301 mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id); 1302 /* Trigger invalid peer handler wrapper */ 1303 dp_rx_process_invalid_peer_wrapper(soc, 1304 pdev->invalid_peer_head_msdu, 1305 mpdu_done, pool_id); 1306 } 1307 1308 if (mpdu_done) { 1309 pdev->invalid_peer_head_msdu = NULL; 1310 pdev->invalid_peer_tail_msdu = NULL; 1311 } 1312 1313 return QDF_STATUS_E_FAILURE; 1314 } 1315 1316 vdev = peer->vdev; 1317 if (!vdev) { 1318 dp_err_rl("Null vdev!"); 1319 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1320 goto drop_nbuf; 1321 } 1322 1323 /* 1324 * Advance the packet start pointer by total size of 1325 * pre-header TLV's 1326 */ 1327 if (qdf_nbuf_is_frag(nbuf)) 1328 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1329 else 1330 qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad + 1331 soc->rx_pkt_tlv_size)); 1332 1333 dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1); 1334 1335 if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) { 1336 sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr); 1337 1338 if ((sa_idx < 0) || 1339 (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { 1340 DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1); 1341 goto drop_nbuf; 1342 } 1343 } 1344 1345 if ((!soc->mec_fw_offload) && 1346 dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) { 1347 /* this is a looped back MCBC pkt, drop it */ 1348 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, 1349 qdf_nbuf_len(nbuf)); 1350 goto drop_nbuf; 1351 } 1352 1353 /* 1354 * In qwrap mode if the received packet matches with any of the vdev 1355 * mac addresses, drop it. Donot receive multicast packets originated 1356 * from any proxysta. 1357 */ 1358 if (check_qwrap_multicast_loopback(vdev, nbuf)) { 1359 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); 1360 goto drop_nbuf; 1361 } 1362 1363 1364 if (qdf_unlikely((peer->nawds_enabled == true) && 1365 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1366 rx_tlv_hdr))) { 1367 dp_err_rl("free buffer for multicast packet"); 1368 DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); 1369 goto drop_nbuf; 1370 } 1371 1372 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) { 1373 dp_err_rl("mcast Policy Check Drop pkt"); 1374 goto drop_nbuf; 1375 } 1376 /* WDS Source Port Learning */ 1377 if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet && 1378 vdev->wds_enabled)) 1379 dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf, 1380 msdu_metadata); 1381 1382 if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) { 1383 tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr); 1384 if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) 1385 dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); 1386 /* IEEE80211_SEQ_MAX indicates invalid start_seq */ 1387 } 1388 1389 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1390 1391 if (!peer->authorize) { 1392 is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) || 1393 qdf_nbuf_is_ipv4_wapi_pkt(nbuf); 1394 1395 if (is_eapol) { 1396 if (qdf_mem_cmp(eh->ether_dhost, 1397 &vdev->mac_addr.raw[0], 1398 QDF_MAC_ADDR_SIZE)) 1399 goto drop_nbuf; 1400 } else { 1401 goto drop_nbuf; 1402 } 1403 } 1404 1405 /* 1406 * Drop packets in this path if cce_match is found. Packets will come 1407 * in following path depending on whether tidQ is setup. 1408 * 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and 1409 * cce_match = 1 1410 * Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already 1411 * dropped. 1412 * 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and 1413 * cce_match = 1 1414 * These packets need to be dropped and should not get delivered 1415 * to stack. 1416 */ 1417 if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr))) { 1418 goto drop_nbuf; 1419 } 1420 1421 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 1422 qdf_nbuf_set_next(nbuf, NULL); 1423 dp_rx_deliver_raw(vdev, nbuf, peer); 1424 } else { 1425 qdf_nbuf_set_next(nbuf, NULL); 1426 DP_STATS_INC_PKT(peer, rx.to_stack, 1, 1427 qdf_nbuf_len(nbuf)); 1428 1429 /* 1430 * Update the protocol tag in SKB based on 1431 * CCE metadata 1432 */ 1433 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1434 EXCEPTION_DEST_RING_ID, 1435 true, true); 1436 1437 /* Update the flow tag in SKB based on FSE metadata */ 1438 dp_rx_update_flow_tag(soc, vdev, nbuf, 1439 rx_tlv_hdr, true); 1440 1441 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get( 1442 soc->hal_soc, rx_tlv_hdr) && 1443 (vdev->rx_decap_type == 1444 htt_cmn_pkt_type_ethernet))) { 1445 DP_STATS_INC_PKT(peer, rx.multicast, 1, 1446 qdf_nbuf_len(nbuf)); 1447 1448 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) 1449 DP_STATS_INC_PKT(peer, rx.bcast, 1, 1450 qdf_nbuf_len(nbuf)); 1451 } 1452 1453 qdf_nbuf_set_exc_frame(nbuf, 1); 1454 dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1455 } 1456 return QDF_STATUS_SUCCESS; 1457 1458 drop_nbuf: 1459 qdf_nbuf_free(nbuf); 1460 return QDF_STATUS_E_FAILURE; 1461 } 1462 1463 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1464 1465 /** 1466 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err 1467 * frames to OS or wifi parse errors. 1468 * @soc: core DP main context 1469 * @nbuf: buffer pointer 1470 * @rx_tlv_hdr: start of rx tlv header 1471 * @peer: peer reference 1472 * @err_code: rxdma err code 1473 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1474 * pool_id has same mapping) 1475 * 1476 * Return: None 1477 */ 1478 void 1479 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, 1480 uint8_t *rx_tlv_hdr, struct dp_peer *peer, 1481 uint8_t err_code, uint8_t mac_id) 1482 { 1483 uint32_t pkt_len, l2_hdr_offset; 1484 uint16_t msdu_len; 1485 struct dp_vdev *vdev; 1486 qdf_ether_header_t *eh; 1487 bool is_broadcast; 1488 1489 /* 1490 * Check if DMA completed -- msdu_done is the last bit 1491 * to be written 1492 */ 1493 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1494 1495 dp_err_rl("MSDU DONE failure"); 1496 1497 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1498 QDF_TRACE_LEVEL_INFO); 1499 qdf_assert(0); 1500 } 1501 1502 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, 1503 rx_tlv_hdr); 1504 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1505 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size; 1506 1507 if (dp_rx_check_pkt_len(soc, pkt_len)) { 1508 /* Drop & free packet */ 1509 qdf_nbuf_free(nbuf); 1510 return; 1511 } 1512 /* Set length in nbuf */ 1513 qdf_nbuf_set_pktlen(nbuf, pkt_len); 1514 1515 qdf_nbuf_set_next(nbuf, NULL); 1516 1517 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 1518 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 1519 1520 if (!peer) { 1521 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL"); 1522 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 1523 qdf_nbuf_len(nbuf)); 1524 /* Trigger invalid peer handler wrapper */ 1525 dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id); 1526 return; 1527 } 1528 1529 vdev = peer->vdev; 1530 if (!vdev) { 1531 dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc, 1532 vdev); 1533 /* Drop & free packet */ 1534 qdf_nbuf_free(nbuf); 1535 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1536 return; 1537 } 1538 1539 /* 1540 * Advance the packet start pointer by total size of 1541 * pre-header TLV's 1542 */ 1543 dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset); 1544 1545 if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) { 1546 uint8_t *pkt_type; 1547 1548 pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE); 1549 if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) { 1550 if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) == 1551 htons(QDF_LLC_STP)) { 1552 DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1); 1553 goto process_mesh; 1554 } else { 1555 goto process_rx; 1556 } 1557 } 1558 } 1559 if (vdev->rx_decap_type == htt_cmn_pkt_type_raw) 1560 goto process_mesh; 1561 1562 /* 1563 * WAPI cert AP sends rekey frames as unencrypted. 1564 * Thus RXDMA will report unencrypted frame error. 1565 * To pass WAPI cert case, SW needs to pass unencrypted 1566 * rekey frame to stack. 1567 */ 1568 if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { 1569 goto process_rx; 1570 } 1571 /* 1572 * In dynamic WEP case rekey frames are not encrypted 1573 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and 1574 * key install is already done 1575 */ 1576 if ((vdev->sec_type == cdp_sec_type_wep104) && 1577 (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))) 1578 goto process_rx; 1579 1580 process_mesh: 1581 1582 if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) { 1583 qdf_nbuf_free(nbuf); 1584 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1585 return; 1586 } 1587 1588 if (vdev->mesh_vdev) { 1589 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 1590 == QDF_STATUS_SUCCESS) { 1591 dp_rx_err_info("%pK: mesh pkt filtered", soc); 1592 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); 1593 1594 qdf_nbuf_free(nbuf); 1595 return; 1596 } 1597 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); 1598 } 1599 process_rx: 1600 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, 1601 rx_tlv_hdr) && 1602 (vdev->rx_decap_type == 1603 htt_cmn_pkt_type_ethernet))) { 1604 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1605 is_broadcast = (QDF_IS_ADDR_BROADCAST 1606 (eh->ether_dhost)) ? 1 : 0 ; 1607 DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf)); 1608 if (is_broadcast) { 1609 DP_STATS_INC_PKT(peer, rx.bcast, 1, 1610 qdf_nbuf_len(nbuf)); 1611 } 1612 } 1613 1614 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 1615 dp_rx_deliver_raw(vdev, nbuf, peer); 1616 } else { 1617 /* Update the protocol tag in SKB based on CCE metadata */ 1618 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1619 EXCEPTION_DEST_RING_ID, true, true); 1620 /* Update the flow tag in SKB based on FSE metadata */ 1621 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); 1622 DP_STATS_INC(peer, rx.to_stack.num, 1); 1623 qdf_nbuf_set_exc_frame(nbuf, 1); 1624 dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1625 } 1626 1627 return; 1628 } 1629 1630 /** 1631 * dp_rx_process_mic_error(): Function to pass mic error indication to umac 1632 * @soc: core DP main context 1633 * @nbuf: buffer pointer 1634 * @rx_tlv_hdr: start of rx tlv header 1635 * @peer: peer handle 1636 * 1637 * return: void 1638 */ 1639 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, 1640 uint8_t *rx_tlv_hdr, struct dp_peer *peer) 1641 { 1642 struct dp_vdev *vdev = NULL; 1643 struct dp_pdev *pdev = NULL; 1644 struct ol_if_ops *tops = NULL; 1645 uint16_t rx_seq, fragno; 1646 uint8_t is_raw; 1647 unsigned int tid; 1648 QDF_STATUS status; 1649 struct cdp_rx_mic_err_info mic_failure_info; 1650 1651 if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1652 rx_tlv_hdr)) 1653 return; 1654 1655 if (!peer) { 1656 dp_info_rl("peer not found"); 1657 goto fail; 1658 } 1659 1660 vdev = peer->vdev; 1661 if (!vdev) { 1662 dp_info_rl("VDEV not found"); 1663 goto fail; 1664 } 1665 1666 pdev = vdev->pdev; 1667 if (!pdev) { 1668 dp_info_rl("PDEV not found"); 1669 goto fail; 1670 } 1671 1672 is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf)); 1673 if (is_raw) { 1674 fragno = dp_rx_frag_get_mpdu_frag_number(soc, 1675 qdf_nbuf_data(nbuf)); 1676 /* Can get only last fragment */ 1677 if (fragno) { 1678 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 1679 qdf_nbuf_data(nbuf)); 1680 rx_seq = hal_rx_get_rx_sequence(soc->hal_soc, 1681 qdf_nbuf_data(nbuf)); 1682 1683 status = dp_rx_defrag_add_last_frag(soc, peer, 1684 tid, rx_seq, nbuf); 1685 dp_info_rl("Frag pkt seq# %d frag# %d consumed " 1686 "status %d !", rx_seq, fragno, status); 1687 return; 1688 } 1689 } 1690 1691 if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf), 1692 &mic_failure_info.da_mac_addr.bytes[0])) { 1693 dp_err_rl("Failed to get da_mac_addr"); 1694 goto fail; 1695 } 1696 1697 if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf), 1698 &mic_failure_info.ta_mac_addr.bytes[0])) { 1699 dp_err_rl("Failed to get ta_mac_addr"); 1700 goto fail; 1701 } 1702 1703 mic_failure_info.key_id = 0; 1704 mic_failure_info.multicast = 1705 IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes); 1706 qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE); 1707 mic_failure_info.frame_type = cdp_rx_frame_type_802_11; 1708 mic_failure_info.data = NULL; 1709 mic_failure_info.vdev_id = vdev->vdev_id; 1710 1711 tops = pdev->soc->cdp_soc.ol_ops; 1712 if (tops->rx_mic_error) 1713 tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id, 1714 &mic_failure_info); 1715 1716 fail: 1717 qdf_nbuf_free(nbuf); 1718 return; 1719 } 1720 1721 /* 1722 * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack 1723 * @soc: DP soc 1724 * @vdv: DP vdev handle 1725 * @peer: pointer to the peer object 1726 * @nbuf: skb list head 1727 * @tail: skb list tail 1728 * @is_eapol: eapol pkt check 1729 * 1730 * Return: None 1731 */ 1732 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 1733 static inline void 1734 dp_rx_deliver_to_osif_stack(struct dp_soc *soc, 1735 struct dp_vdev *vdev, 1736 struct dp_peer *peer, 1737 qdf_nbuf_t nbuf, 1738 qdf_nbuf_t tail, 1739 bool is_eapol) 1740 { 1741 if (is_eapol && soc->eapol_over_control_port) 1742 dp_rx_eapol_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1743 else 1744 dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1745 } 1746 #else 1747 static inline void 1748 dp_rx_deliver_to_osif_stack(struct dp_soc *soc, 1749 struct dp_vdev *vdev, 1750 struct dp_peer *peer, 1751 qdf_nbuf_t nbuf, 1752 qdf_nbuf_t tail, 1753 bool is_eapol) 1754 { 1755 dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); 1756 } 1757 #endif 1758 1759 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG 1760 /** 1761 * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack 1762 * Free any other packet which comes in 1763 * this path. 1764 * 1765 * @soc: core DP main context 1766 * @nbuf: buffer pointer 1767 * @peer: peer handle 1768 * @rx_tlv_hdr: start of rx tlv header 1769 * @err_src: rxdma/reo 1770 * 1771 * This function indicates EAPOL frame received in wbm error ring to stack. 1772 * Any other frame should be dropped. 1773 * 1774 * Return: SUCCESS if delivered to stack 1775 */ 1776 static void 1777 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf, 1778 struct dp_peer *peer, uint8_t *rx_tlv_hdr, 1779 enum hal_rx_wbm_error_source err_src) 1780 { 1781 uint32_t pkt_len; 1782 uint16_t msdu_len; 1783 struct dp_vdev *vdev; 1784 struct hal_rx_msdu_metadata msdu_metadata; 1785 bool is_eapol; 1786 1787 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata); 1788 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr); 1789 pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size; 1790 1791 if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) { 1792 if (dp_rx_check_pkt_len(soc, pkt_len)) 1793 goto drop_nbuf; 1794 1795 /* Set length in nbuf */ 1796 qdf_nbuf_set_pktlen( 1797 nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE)); 1798 qdf_assert_always(nbuf->data == rx_tlv_hdr); 1799 } 1800 1801 /* 1802 * Check if DMA completed -- msdu_done is the last bit 1803 * to be written 1804 */ 1805 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) { 1806 dp_err_rl("MSDU DONE failure"); 1807 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 1808 QDF_TRACE_LEVEL_INFO); 1809 qdf_assert(0); 1810 } 1811 1812 if (!peer) 1813 goto drop_nbuf; 1814 1815 vdev = peer->vdev; 1816 if (!vdev) { 1817 dp_err_rl("Null vdev!"); 1818 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1819 goto drop_nbuf; 1820 } 1821 1822 /* 1823 * Advance the packet start pointer by total size of 1824 * pre-header TLV's 1825 */ 1826 if (qdf_nbuf_is_frag(nbuf)) 1827 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); 1828 else 1829 qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad + 1830 soc->rx_pkt_tlv_size)); 1831 1832 dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1); 1833 1834 /* 1835 * Indicate EAPOL frame to stack only when vap mac address 1836 * matches the destination address. 1837 */ 1838 is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf); 1839 if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { 1840 qdf_ether_header_t *eh = 1841 (qdf_ether_header_t *)qdf_nbuf_data(nbuf); 1842 if (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0], 1843 QDF_MAC_ADDR_SIZE) == 0) { 1844 /* 1845 * Update the protocol tag in SKB based on 1846 * CCE metadata. 1847 */ 1848 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, 1849 EXCEPTION_DEST_RING_ID, 1850 true, true); 1851 /* Update the flow tag in SKB based on FSE metadata */ 1852 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, 1853 true); 1854 DP_STATS_INC(peer, rx.to_stack.num, 1); 1855 qdf_nbuf_set_exc_frame(nbuf, 1); 1856 qdf_nbuf_set_next(nbuf, NULL); 1857 1858 dp_rx_deliver_to_osif_stack(soc, vdev, peer, nbuf, 1859 NULL, is_eapol); 1860 1861 return; 1862 } 1863 } 1864 1865 drop_nbuf: 1866 1867 DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1, 1868 err_src == HAL_RX_WBM_ERR_SRC_REO); 1869 DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1, 1870 err_src == HAL_RX_WBM_ERR_SRC_RXDMA); 1871 1872 qdf_nbuf_free(nbuf); 1873 } 1874 #else 1875 1876 static void 1877 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf, 1878 struct dp_peer *peer, uint8_t *rx_tlv_hdr, 1879 enum hal_rx_wbm_error_source err_src) 1880 { 1881 DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1, 1882 err_src == HAL_RX_WBM_ERR_SRC_REO); 1883 DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1, 1884 err_src == HAL_RX_WBM_ERR_SRC_RXDMA); 1885 1886 qdf_nbuf_free(nbuf); 1887 } 1888 #endif 1889 1890 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1891 1892 #ifdef DP_RX_DESC_COOKIE_INVALIDATE 1893 /** 1894 * dp_rx_link_cookie_check() - Validate link desc cookie 1895 * @ring_desc: ring descriptor 1896 * 1897 * Return: qdf status 1898 */ 1899 static inline QDF_STATUS 1900 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) 1901 { 1902 if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc))) 1903 return QDF_STATUS_E_FAILURE; 1904 1905 return QDF_STATUS_SUCCESS; 1906 } 1907 1908 /** 1909 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie 1910 * @ring_desc: ring descriptor 1911 * 1912 * Return: None 1913 */ 1914 static inline void 1915 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) 1916 { 1917 HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc); 1918 } 1919 #else 1920 static inline QDF_STATUS 1921 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) 1922 { 1923 return QDF_STATUS_SUCCESS; 1924 } 1925 1926 static inline void 1927 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) 1928 { 1929 } 1930 #endif 1931 1932 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 1933 /** 1934 * dp_rx_err_ring_record_entry() - Record rx err ring history 1935 * @soc: Datapath soc structure 1936 * @paddr: paddr of the buffer in RX err ring 1937 * @sw_cookie: SW cookie of the buffer in RX err ring 1938 * @rbm: Return buffer manager of the buffer in RX err ring 1939 * 1940 * Returns: None 1941 */ 1942 static inline void 1943 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, 1944 uint32_t sw_cookie, uint8_t rbm) 1945 { 1946 struct dp_buf_info_record *record; 1947 uint32_t idx; 1948 1949 if (qdf_unlikely(!soc->rx_err_ring_history)) 1950 return; 1951 1952 idx = dp_history_get_next_index(&soc->rx_err_ring_history->index, 1953 DP_RX_ERR_HIST_MAX); 1954 1955 /* No NULL check needed for record since its an array */ 1956 record = &soc->rx_err_ring_history->entry[idx]; 1957 1958 record->timestamp = qdf_get_log_timestamp(); 1959 record->hbi.paddr = paddr; 1960 record->hbi.sw_cookie = sw_cookie; 1961 record->hbi.rbm = rbm; 1962 } 1963 #else 1964 static inline void 1965 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, 1966 uint32_t sw_cookie, uint8_t rbm) 1967 { 1968 } 1969 #endif 1970 1971 #ifdef HANDLE_RX_REROUTE_ERR 1972 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc, 1973 hal_ring_desc_t ring_desc) 1974 { 1975 int lmac_id = DP_INVALID_LMAC_ID; 1976 struct dp_rx_desc *rx_desc; 1977 struct hal_buf_info hbi; 1978 struct dp_pdev *pdev; 1979 1980 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 1981 1982 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie); 1983 1984 /* sanity */ 1985 if (!rx_desc) { 1986 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1); 1987 goto assert_return; 1988 } 1989 1990 if (!rx_desc->nbuf) 1991 goto assert_return; 1992 1993 dp_rx_err_ring_record_entry(soc, hbi.paddr, 1994 hbi.sw_cookie, 1995 hal_rx_ret_buf_manager_get(soc->hal_soc, 1996 ring_desc)); 1997 if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) { 1998 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 1999 rx_desc->in_err_state = 1; 2000 goto assert_return; 2001 } 2002 2003 /* After this point the rx_desc and nbuf are valid */ 2004 dp_ipa_rx_buf_smmu_mapping_lock(soc); 2005 qdf_assert_always(!rx_desc->unmapped); 2006 dp_ipa_handle_rx_buf_smmu_mapping(soc, 2007 rx_desc->nbuf, 2008 RX_DATA_BUFFER_SIZE, 2009 false); 2010 qdf_nbuf_unmap_nbytes_single(soc->osdev, 2011 rx_desc->nbuf, 2012 QDF_DMA_FROM_DEVICE, 2013 RX_DATA_BUFFER_SIZE); 2014 rx_desc->unmapped = 1; 2015 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 2016 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 2017 rx_desc->pool_id); 2018 2019 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); 2020 lmac_id = rx_desc->pool_id; 2021 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 2022 &pdev->free_list_tail, 2023 rx_desc); 2024 return lmac_id; 2025 2026 assert_return: 2027 qdf_assert(0); 2028 return lmac_id; 2029 } 2030 2031 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 2032 { 2033 int ret; 2034 uint64_t cur_time_stamp; 2035 2036 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1); 2037 2038 /* Recover if overall error count exceeds threshold */ 2039 if (soc->stats.rx.err.reo_err_msdu_buf_rcved > 2040 DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) { 2041 dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu", 2042 soc->stats.rx.err.reo_err_msdu_buf_rcved, 2043 soc->rx_route_err_start_pkt_ts); 2044 qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR); 2045 } 2046 2047 cur_time_stamp = qdf_get_log_timestamp_usecs(); 2048 if (!soc->rx_route_err_start_pkt_ts) 2049 soc->rx_route_err_start_pkt_ts = cur_time_stamp; 2050 2051 /* Recover if threshold number of packets received in threshold time */ 2052 if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) > 2053 DP_RX_ERR_ROUTE_TIMEOUT_US) { 2054 soc->rx_route_err_start_pkt_ts = cur_time_stamp; 2055 2056 if (soc->rx_route_err_in_window > 2057 DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) { 2058 qdf_trigger_self_recovery(NULL, 2059 QDF_RX_REG_PKT_ROUTE_ERR); 2060 dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu", 2061 soc->stats.rx.err.reo_err_msdu_buf_rcved, 2062 soc->rx_route_err_start_pkt_ts); 2063 } else { 2064 soc->rx_route_err_in_window = 1; 2065 } 2066 } else { 2067 soc->rx_route_err_in_window++; 2068 } 2069 2070 ret = dp_rx_err_handle_msdu_buf(soc, ring_desc); 2071 2072 return ret; 2073 } 2074 #else /* HANDLE_RX_REROUTE_ERR */ 2075 2076 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc) 2077 { 2078 qdf_assert_always(0); 2079 2080 return DP_INVALID_LMAC_ID; 2081 } 2082 #endif /* HANDLE_RX_REROUTE_ERR */ 2083 2084 /** 2085 * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed 2086 * for this frame received in REO error ring. 2087 * @soc: Datapath SOC handle 2088 * @error: REO error detected or not 2089 * @error_code: Error code in case of REO error 2090 * 2091 * Return: true if pn check if needed in software, 2092 * false, if pn check if not needed. 2093 */ 2094 static inline bool 2095 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error, 2096 uint32_t error_code) 2097 { 2098 return (soc->features.pn_in_reo_dest && 2099 (error == HAL_REO_ERROR_DETECTED && 2100 (hal_rx_reo_is_2k_jump(error_code) || 2101 hal_rx_reo_is_oor_error(error_code) || 2102 hal_rx_reo_is_bar_oor_2k_jump(error_code)))); 2103 } 2104 2105 uint32_t 2106 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 2107 hal_ring_handle_t hal_ring_hdl, uint32_t quota) 2108 { 2109 hal_ring_desc_t ring_desc; 2110 hal_soc_handle_t hal_soc; 2111 uint32_t count = 0; 2112 uint32_t rx_bufs_used = 0; 2113 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 2114 uint8_t mac_id = 0; 2115 uint8_t buf_type; 2116 uint8_t err_status; 2117 struct hal_rx_mpdu_desc_info mpdu_desc_info; 2118 struct hal_buf_info hbi; 2119 struct dp_pdev *dp_pdev; 2120 struct dp_srng *dp_rxdma_srng; 2121 struct rx_desc_pool *rx_desc_pool; 2122 void *link_desc_va; 2123 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 2124 uint16_t num_msdus; 2125 struct dp_rx_desc *rx_desc = NULL; 2126 QDF_STATUS status; 2127 bool ret; 2128 uint32_t error_code = 0; 2129 bool sw_pn_check_needed; 2130 2131 /* Debug -- Remove later */ 2132 qdf_assert(soc && hal_ring_hdl); 2133 2134 hal_soc = soc->hal_soc; 2135 2136 /* Debug -- Remove later */ 2137 qdf_assert(hal_soc); 2138 2139 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 2140 2141 /* TODO */ 2142 /* 2143 * Need API to convert from hal_ring pointer to 2144 * Ring Type / Ring Id combo 2145 */ 2146 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 2147 dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc, 2148 hal_ring_hdl); 2149 goto done; 2150 } 2151 2152 while (qdf_likely(quota-- && (ring_desc = 2153 hal_srng_dst_peek(hal_soc, 2154 hal_ring_hdl)))) { 2155 2156 DP_STATS_INC(soc, rx.err_ring_pkts, 1); 2157 err_status = hal_rx_err_status_get(hal_soc, ring_desc); 2158 buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc); 2159 2160 if (err_status == HAL_REO_ERROR_DETECTED) 2161 error_code = hal_rx_get_reo_error_code(hal_soc, 2162 ring_desc); 2163 2164 qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0); 2165 sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc, 2166 err_status, 2167 error_code); 2168 if (!sw_pn_check_needed) { 2169 /* 2170 * MPDU desc info will be present in the REO desc 2171 * only in the below scenarios 2172 * 1) pn_in_dest_disabled: always 2173 * 2) pn_in_dest enabled: All cases except 2k-jup 2174 * and OOR errors 2175 */ 2176 hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, 2177 &mpdu_desc_info); 2178 } 2179 2180 if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0) 2181 goto next_entry; 2182 2183 /* 2184 * For REO error ring, only MSDU LINK DESC is expected. 2185 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case. 2186 */ 2187 if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) { 2188 int lmac_id; 2189 2190 lmac_id = dp_rx_err_exception(soc, ring_desc); 2191 if (lmac_id >= 0) 2192 rx_bufs_reaped[lmac_id] += 1; 2193 goto next_entry; 2194 } 2195 2196 hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc, 2197 &hbi); 2198 /* 2199 * check for the magic number in the sw cookie 2200 */ 2201 qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) & 2202 soc->link_desc_id_start); 2203 2204 status = dp_rx_link_cookie_check(ring_desc); 2205 if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { 2206 DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1); 2207 break; 2208 } 2209 2210 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi); 2211 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); 2212 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 2213 &num_msdus); 2214 dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0], 2215 msdu_list.sw_cookie[0], 2216 msdu_list.rbm[0]); 2217 // TODO - BE- Check if the RBM is to be checked for all chips 2218 if (qdf_unlikely((msdu_list.rbm[0] != 2219 DP_WBM2SW_RBM(soc->wbm_sw0_bm_id)) && 2220 (msdu_list.rbm[0] != 2221 HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST) && 2222 (msdu_list.rbm[0] != 2223 DP_DEFRAG_RBM(soc->wbm_sw0_bm_id)))) { 2224 /* TODO */ 2225 /* Call appropriate handler */ 2226 if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) { 2227 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 2228 dp_rx_err_err("%pK: Invalid RBM %d", 2229 soc, msdu_list.rbm[0]); 2230 } 2231 2232 /* Return link descriptor through WBM ring (SW2WBM)*/ 2233 dp_rx_link_desc_return(soc, ring_desc, 2234 HAL_BM_ACTION_RELEASE_MSDU_LIST); 2235 goto next_entry; 2236 } 2237 2238 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 2239 soc, 2240 msdu_list.sw_cookie[0]); 2241 qdf_assert_always(rx_desc); 2242 2243 mac_id = rx_desc->pool_id; 2244 2245 if (sw_pn_check_needed) { 2246 goto process_reo_error_code; 2247 } 2248 2249 if (mpdu_desc_info.bar_frame) { 2250 qdf_assert_always(mpdu_desc_info.msdu_count == 1); 2251 2252 dp_rx_bar_frame_handle(soc, ring_desc, rx_desc, 2253 &mpdu_desc_info, err_status, 2254 error_code); 2255 2256 rx_bufs_reaped[mac_id] += 1; 2257 goto next_entry; 2258 } 2259 2260 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { 2261 /* 2262 * We only handle one msdu per link desc for fragmented 2263 * case. We drop the msdus and release the link desc 2264 * back if there are more than one msdu in link desc. 2265 */ 2266 if (qdf_unlikely(num_msdus > 1)) { 2267 count = dp_rx_msdus_drop(soc, ring_desc, 2268 &mpdu_desc_info, 2269 &mac_id, quota); 2270 rx_bufs_reaped[mac_id] += count; 2271 goto next_entry; 2272 } 2273 2274 /* 2275 * this is a unlikely scenario where the host is reaping 2276 * a descriptor which it already reaped just a while ago 2277 * but is yet to replenish it back to HW. 2278 * In this case host will dump the last 128 descriptors 2279 * including the software descriptor rx_desc and assert. 2280 */ 2281 2282 if (qdf_unlikely(!rx_desc->in_use)) { 2283 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); 2284 dp_info_rl("Reaping rx_desc not in use!"); 2285 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 2286 ring_desc, rx_desc); 2287 /* ignore duplicate RX desc and continue */ 2288 /* Pop out the descriptor */ 2289 goto next_entry; 2290 } 2291 2292 ret = dp_rx_desc_paddr_sanity_check(rx_desc, 2293 msdu_list.paddr[0]); 2294 if (!ret) { 2295 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 2296 rx_desc->in_err_state = 1; 2297 goto next_entry; 2298 } 2299 2300 count = dp_rx_frag_handle(soc, 2301 ring_desc, &mpdu_desc_info, 2302 rx_desc, &mac_id, quota); 2303 2304 rx_bufs_reaped[mac_id] += count; 2305 DP_STATS_INC(soc, rx.rx_frags, 1); 2306 goto next_entry; 2307 } 2308 2309 process_reo_error_code: 2310 /* 2311 * Expect REO errors to be handled after this point 2312 */ 2313 qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED); 2314 2315 dp_info_rl("Got pkt with REO ERROR: %d", error_code); 2316 2317 switch (error_code) { 2318 case HAL_REO_ERR_PN_CHECK_FAILED: 2319 case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET: 2320 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2321 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2322 if (dp_pdev) 2323 DP_STATS_INC(dp_pdev, err.reo_error, 1); 2324 count = dp_rx_pn_error_handle(soc, 2325 ring_desc, 2326 &mpdu_desc_info, &mac_id, 2327 quota); 2328 2329 rx_bufs_reaped[mac_id] += count; 2330 break; 2331 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 2332 case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET: 2333 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 2334 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2335 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2336 if (dp_pdev) 2337 DP_STATS_INC(dp_pdev, err.reo_error, 1); 2338 2339 count = dp_rx_reo_err_entry_process( 2340 soc, 2341 ring_desc, 2342 &mpdu_desc_info, 2343 link_desc_va, 2344 HAL_REO_ERR_REGULAR_FRAME_2K_JUMP); 2345 2346 rx_bufs_reaped[mac_id] += count; 2347 break; 2348 2349 case HAL_REO_ERR_REGULAR_FRAME_OOR: 2350 case HAL_REO_ERR_BAR_FRAME_OOR: 2351 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2352 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2353 if (dp_pdev) 2354 DP_STATS_INC(dp_pdev, err.reo_error, 1); 2355 count = dp_rx_reo_err_entry_process( 2356 soc, 2357 ring_desc, 2358 &mpdu_desc_info, 2359 link_desc_va, 2360 HAL_REO_ERR_REGULAR_FRAME_OOR); 2361 2362 rx_bufs_reaped[mac_id] += count; 2363 break; 2364 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 2365 case HAL_REO_ERR_QUEUE_DESC_INVALID: 2366 case HAL_REO_ERR_AMPDU_IN_NON_BA: 2367 case HAL_REO_ERR_NON_BA_DUPLICATE: 2368 case HAL_REO_ERR_BA_DUPLICATE: 2369 case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION: 2370 case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN: 2371 case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET: 2372 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1); 2373 count = dp_rx_msdus_drop(soc, ring_desc, 2374 &mpdu_desc_info, 2375 &mac_id, quota); 2376 rx_bufs_reaped[mac_id] += count; 2377 break; 2378 default: 2379 /* Assert if unexpected error type */ 2380 qdf_assert_always(0); 2381 } 2382 next_entry: 2383 dp_rx_link_cookie_invalidate(ring_desc); 2384 hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2385 } 2386 2387 done: 2388 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 2389 2390 if (soc->rx.flags.defrag_timeout_check) { 2391 uint32_t now_ms = 2392 qdf_system_ticks_to_msecs(qdf_system_ticks()); 2393 2394 if (now_ms >= soc->rx.defrag.next_flush_ms) 2395 dp_rx_defrag_waitlist_flush(soc); 2396 } 2397 2398 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 2399 if (rx_bufs_reaped[mac_id]) { 2400 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2401 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 2402 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 2403 2404 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 2405 rx_desc_pool, 2406 rx_bufs_reaped[mac_id], 2407 &dp_pdev->free_list_head, 2408 &dp_pdev->free_list_tail); 2409 rx_bufs_used += rx_bufs_reaped[mac_id]; 2410 } 2411 } 2412 2413 return rx_bufs_used; /* Assume no scale factor for now */ 2414 } 2415 2416 #ifdef DROP_RXDMA_DECRYPT_ERR 2417 /** 2418 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled 2419 * 2420 * Return: true if rxdma decrypt err frames are handled and false otheriwse 2421 */ 2422 static inline bool dp_handle_rxdma_decrypt_err(void) 2423 { 2424 return false; 2425 } 2426 #else 2427 static inline bool dp_handle_rxdma_decrypt_err(void) 2428 { 2429 return true; 2430 } 2431 #endif 2432 2433 static inline bool 2434 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info) 2435 { 2436 /* 2437 * Currently Null Queue and Unencrypted error handlers has support for 2438 * SG. Other error handler do not deal with SG buffer. 2439 */ 2440 if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) && 2441 (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) || 2442 ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) && 2443 (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED))) 2444 return true; 2445 2446 return false; 2447 } 2448 2449 uint32_t 2450 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 2451 hal_ring_handle_t hal_ring_hdl, uint32_t quota) 2452 { 2453 hal_ring_desc_t ring_desc; 2454 hal_soc_handle_t hal_soc; 2455 struct dp_rx_desc *rx_desc; 2456 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; 2457 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; 2458 uint32_t rx_bufs_used = 0; 2459 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 2460 uint8_t buf_type; 2461 uint8_t mac_id; 2462 struct dp_pdev *dp_pdev; 2463 struct dp_srng *dp_rxdma_srng; 2464 struct rx_desc_pool *rx_desc_pool; 2465 uint8_t *rx_tlv_hdr; 2466 qdf_nbuf_t nbuf_head = NULL; 2467 qdf_nbuf_t nbuf_tail = NULL; 2468 qdf_nbuf_t nbuf, next; 2469 struct hal_wbm_err_desc_info wbm_err_info = { 0 }; 2470 uint8_t pool_id; 2471 uint8_t tid = 0; 2472 uint8_t msdu_continuation = 0; 2473 bool process_sg_buf = false; 2474 uint32_t wbm_err_src; 2475 2476 /* Debug -- Remove later */ 2477 qdf_assert(soc && hal_ring_hdl); 2478 2479 hal_soc = soc->hal_soc; 2480 2481 /* Debug -- Remove later */ 2482 qdf_assert(hal_soc); 2483 2484 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 2485 2486 /* TODO */ 2487 /* 2488 * Need API to convert from hal_ring pointer to 2489 * Ring Type / Ring Id combo 2490 */ 2491 dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", 2492 soc, hal_ring_hdl); 2493 goto done; 2494 } 2495 2496 while (qdf_likely(quota)) { 2497 ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2498 if (qdf_unlikely(!ring_desc)) 2499 break; 2500 2501 /* XXX */ 2502 buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc); 2503 2504 /* 2505 * For WBM ring, expect only MSDU buffers 2506 */ 2507 qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF); 2508 2509 wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc); 2510 qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) || 2511 (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO)); 2512 2513 if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc, 2514 ring_desc, 2515 &rx_desc)) { 2516 dp_rx_err_err("get rx desc from hal_desc failed"); 2517 continue; 2518 } 2519 2520 qdf_assert_always(rx_desc); 2521 2522 if (!dp_rx_desc_check_magic(rx_desc)) { 2523 dp_rx_err_err("%pk: Invalid rx_desc %pk", 2524 soc, rx_desc); 2525 continue; 2526 } 2527 2528 /* 2529 * this is a unlikely scenario where the host is reaping 2530 * a descriptor which it already reaped just a while ago 2531 * but is yet to replenish it back to HW. 2532 * In this case host will dump the last 128 descriptors 2533 * including the software descriptor rx_desc and assert. 2534 */ 2535 if (qdf_unlikely(!rx_desc->in_use)) { 2536 DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1); 2537 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 2538 ring_desc, rx_desc); 2539 continue; 2540 } 2541 2542 hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc); 2543 nbuf = rx_desc->nbuf; 2544 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 2545 dp_ipa_rx_buf_smmu_mapping_lock(soc); 2546 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, 2547 rx_desc_pool->buf_size, 2548 false); 2549 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, 2550 QDF_DMA_FROM_DEVICE, 2551 rx_desc_pool->buf_size); 2552 rx_desc->unmapped = 1; 2553 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 2554 2555 if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support && 2556 dp_rx_is_sg_formation_required(&wbm_err_info))) { 2557 /* SG is detected from continuation bit */ 2558 msdu_continuation = 2559 hal_rx_wbm_err_msdu_continuation_get(hal_soc, 2560 ring_desc); 2561 if (msdu_continuation && 2562 !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) { 2563 /* Update length from first buffer in SG */ 2564 soc->wbm_sg_param.wbm_sg_desc_msdu_len = 2565 hal_rx_msdu_start_msdu_len_get( 2566 soc->hal_soc, 2567 qdf_nbuf_data(nbuf)); 2568 soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true; 2569 } 2570 2571 if (msdu_continuation) { 2572 /* MSDU continued packets */ 2573 qdf_nbuf_set_rx_chfrag_cont(nbuf, 1); 2574 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2575 soc->wbm_sg_param.wbm_sg_desc_msdu_len; 2576 } else { 2577 /* This is the terminal packet in SG */ 2578 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 2579 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 2580 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2581 soc->wbm_sg_param.wbm_sg_desc_msdu_len; 2582 process_sg_buf = true; 2583 } 2584 } 2585 2586 /* 2587 * save the wbm desc info in nbuf TLV. We will need this 2588 * info when we do the actual nbuf processing 2589 */ 2590 wbm_err_info.pool_id = rx_desc->pool_id; 2591 hal_rx_priv_info_set_in_tlv(soc->hal_soc, 2592 qdf_nbuf_data(nbuf), 2593 (uint8_t *)&wbm_err_info, 2594 sizeof(wbm_err_info)); 2595 2596 rx_bufs_reaped[rx_desc->pool_id]++; 2597 2598 if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) { 2599 DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head, 2600 soc->wbm_sg_param.wbm_sg_nbuf_tail, 2601 nbuf); 2602 if (process_sg_buf) { 2603 if (!dp_rx_buffer_pool_refill( 2604 soc, 2605 soc->wbm_sg_param.wbm_sg_nbuf_head, 2606 rx_desc->pool_id)) 2607 DP_RX_MERGE_TWO_LIST( 2608 nbuf_head, nbuf_tail, 2609 soc->wbm_sg_param.wbm_sg_nbuf_head, 2610 soc->wbm_sg_param.wbm_sg_nbuf_tail); 2611 dp_rx_wbm_sg_list_reset(soc); 2612 process_sg_buf = false; 2613 } 2614 } else if (!dp_rx_buffer_pool_refill(soc, nbuf, 2615 rx_desc->pool_id)) { 2616 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf); 2617 } 2618 2619 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 2620 &tail[rx_desc->pool_id], 2621 rx_desc); 2622 2623 /* 2624 * if continuation bit is set then we have MSDU spread 2625 * across multiple buffers, let us not decrement quota 2626 * till we reap all buffers of that MSDU. 2627 */ 2628 if (qdf_likely(!msdu_continuation)) 2629 quota -= 1; 2630 } 2631 done: 2632 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 2633 2634 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 2635 if (rx_bufs_reaped[mac_id]) { 2636 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 2637 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 2638 2639 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 2640 rx_desc_pool, rx_bufs_reaped[mac_id], 2641 &head[mac_id], &tail[mac_id]); 2642 rx_bufs_used += rx_bufs_reaped[mac_id]; 2643 } 2644 } 2645 2646 nbuf = nbuf_head; 2647 while (nbuf) { 2648 struct dp_peer *peer; 2649 uint16_t peer_id; 2650 uint8_t err_code; 2651 uint8_t *tlv_hdr; 2652 rx_tlv_hdr = qdf_nbuf_data(nbuf); 2653 2654 /* 2655 * retrieve the wbm desc info from nbuf TLV, so we can 2656 * handle error cases appropriately 2657 */ 2658 hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr, 2659 (uint8_t *)&wbm_err_info, 2660 sizeof(wbm_err_info)); 2661 2662 peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 2663 rx_tlv_hdr); 2664 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR); 2665 2666 if (!peer) 2667 dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u", 2668 peer_id, wbm_err_info.wbm_err_src, 2669 wbm_err_info.reo_psh_rsn); 2670 2671 /* Set queue_mapping in nbuf to 0 */ 2672 dp_set_rx_queue(nbuf, 0); 2673 2674 next = nbuf->next; 2675 2676 /* 2677 * Form the SG for msdu continued buffers 2678 * QCN9000 has this support 2679 */ 2680 if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) { 2681 nbuf = dp_rx_sg_create(soc, nbuf); 2682 next = nbuf->next; 2683 /* 2684 * SG error handling is not done correctly, 2685 * drop SG frames for now. 2686 */ 2687 qdf_nbuf_free(nbuf); 2688 dp_info_rl("scattered msdu dropped"); 2689 nbuf = next; 2690 if (peer) 2691 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 2692 continue; 2693 } 2694 2695 if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { 2696 if (wbm_err_info.reo_psh_rsn 2697 == HAL_RX_WBM_REO_PSH_RSN_ERROR) { 2698 2699 DP_STATS_INC(soc, 2700 rx.err.reo_error 2701 [wbm_err_info.reo_err_code], 1); 2702 /* increment @pdev level */ 2703 pool_id = wbm_err_info.pool_id; 2704 dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 2705 if (dp_pdev) 2706 DP_STATS_INC(dp_pdev, err.reo_error, 2707 1); 2708 2709 switch (wbm_err_info.reo_err_code) { 2710 /* 2711 * Handling for packets which have NULL REO 2712 * queue descriptor 2713 */ 2714 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 2715 pool_id = wbm_err_info.pool_id; 2716 dp_rx_null_q_desc_handle(soc, nbuf, 2717 rx_tlv_hdr, 2718 pool_id, peer); 2719 break; 2720 /* TODO */ 2721 /* Add per error code accounting */ 2722 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 2723 pool_id = wbm_err_info.pool_id; 2724 2725 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 2726 rx_tlv_hdr)) { 2727 peer_id = 2728 hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 2729 rx_tlv_hdr); 2730 tid = 2731 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); 2732 } 2733 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2734 hal_rx_msdu_start_msdu_len_get( 2735 soc->hal_soc, rx_tlv_hdr); 2736 nbuf->next = NULL; 2737 dp_2k_jump_handle(soc, nbuf, 2738 rx_tlv_hdr, 2739 peer_id, tid); 2740 break; 2741 case HAL_REO_ERR_REGULAR_FRAME_OOR: 2742 if (peer) 2743 DP_STATS_INC(peer, 2744 rx.err.oor_err, 1); 2745 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 2746 rx_tlv_hdr)) { 2747 peer_id = 2748 hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 2749 rx_tlv_hdr); 2750 tid = 2751 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); 2752 } 2753 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = 2754 hal_rx_msdu_start_msdu_len_get( 2755 soc->hal_soc, rx_tlv_hdr); 2756 nbuf->next = NULL; 2757 dp_rx_oor_handle(soc, nbuf, 2758 peer_id, 2759 rx_tlv_hdr); 2760 break; 2761 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 2762 case HAL_REO_ERR_BAR_FRAME_OOR: 2763 if (peer) 2764 dp_rx_err_handle_bar(soc, 2765 peer, 2766 nbuf); 2767 qdf_nbuf_free(nbuf); 2768 break; 2769 2770 case HAL_REO_ERR_PN_CHECK_FAILED: 2771 case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET: 2772 if (peer) 2773 DP_STATS_INC(peer, 2774 rx.err.pn_err, 1); 2775 qdf_nbuf_free(nbuf); 2776 break; 2777 2778 default: 2779 dp_info_rl("Got pkt with REO ERROR: %d", 2780 wbm_err_info.reo_err_code); 2781 qdf_nbuf_free(nbuf); 2782 } 2783 } else if (wbm_err_info.reo_psh_rsn 2784 == HAL_RX_WBM_REO_PSH_RSN_ROUTE) { 2785 dp_rx_err_route_hdl(soc, nbuf, peer, 2786 rx_tlv_hdr, 2787 HAL_RX_WBM_ERR_SRC_REO); 2788 } else { 2789 /* should not enter here */ 2790 dp_rx_err_alert("invalid reo push reason %u", 2791 wbm_err_info.reo_psh_rsn); 2792 qdf_nbuf_free(nbuf); 2793 qdf_assert_always(0); 2794 } 2795 } else if (wbm_err_info.wbm_err_src == 2796 HAL_RX_WBM_ERR_SRC_RXDMA) { 2797 if (wbm_err_info.rxdma_psh_rsn 2798 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 2799 DP_STATS_INC(soc, 2800 rx.err.rxdma_error 2801 [wbm_err_info.rxdma_err_code], 1); 2802 /* increment @pdev level */ 2803 pool_id = wbm_err_info.pool_id; 2804 dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 2805 if (dp_pdev) 2806 DP_STATS_INC(dp_pdev, 2807 err.rxdma_error, 1); 2808 2809 switch (wbm_err_info.rxdma_err_code) { 2810 case HAL_RXDMA_ERR_UNENCRYPTED: 2811 2812 case HAL_RXDMA_ERR_WIFI_PARSE: 2813 pool_id = wbm_err_info.pool_id; 2814 dp_rx_process_rxdma_err(soc, nbuf, 2815 rx_tlv_hdr, 2816 peer, 2817 wbm_err_info. 2818 rxdma_err_code, 2819 pool_id); 2820 break; 2821 2822 case HAL_RXDMA_ERR_TKIP_MIC: 2823 dp_rx_process_mic_error(soc, nbuf, 2824 rx_tlv_hdr, 2825 peer); 2826 if (peer) 2827 DP_STATS_INC(peer, rx.err.mic_err, 1); 2828 break; 2829 2830 case HAL_RXDMA_ERR_DECRYPT: 2831 2832 if (peer) { 2833 DP_STATS_INC(peer, rx.err. 2834 decrypt_err, 1); 2835 qdf_nbuf_free(nbuf); 2836 break; 2837 } 2838 2839 if (!dp_handle_rxdma_decrypt_err()) { 2840 qdf_nbuf_free(nbuf); 2841 break; 2842 } 2843 2844 pool_id = wbm_err_info.pool_id; 2845 err_code = wbm_err_info.rxdma_err_code; 2846 tlv_hdr = rx_tlv_hdr; 2847 dp_rx_process_rxdma_err(soc, nbuf, 2848 tlv_hdr, NULL, 2849 err_code, 2850 pool_id); 2851 break; 2852 case HAL_RXDMA_MULTICAST_ECHO: 2853 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, 2854 qdf_nbuf_len(nbuf)); 2855 qdf_nbuf_free(nbuf); 2856 break; 2857 default: 2858 qdf_nbuf_free(nbuf); 2859 dp_err_rl("RXDMA error %d", 2860 wbm_err_info.rxdma_err_code); 2861 } 2862 } else if (wbm_err_info.rxdma_psh_rsn 2863 == HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) { 2864 dp_rx_err_route_hdl(soc, nbuf, peer, 2865 rx_tlv_hdr, 2866 HAL_RX_WBM_ERR_SRC_RXDMA); 2867 } else if (wbm_err_info.rxdma_psh_rsn 2868 == HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) { 2869 dp_rx_err_err("rxdma push reason %u", 2870 wbm_err_info.rxdma_psh_rsn); 2871 DP_STATS_INC(soc, rx.err.rx_flush_count, 1); 2872 qdf_nbuf_free(nbuf); 2873 } else { 2874 /* should not enter here */ 2875 dp_rx_err_alert("invalid rxdma push reason %u", 2876 wbm_err_info.rxdma_psh_rsn); 2877 qdf_nbuf_free(nbuf); 2878 qdf_assert_always(0); 2879 } 2880 } else { 2881 /* Should not come here */ 2882 qdf_assert(0); 2883 } 2884 2885 if (peer) 2886 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR); 2887 2888 nbuf = next; 2889 } 2890 return rx_bufs_used; /* Assume no scale factor for now */ 2891 } 2892 2893 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2894 2895 /** 2896 * dup_desc_dbg() - dump and assert if duplicate rx desc found 2897 * 2898 * @soc: core DP main context 2899 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 2900 * @rx_desc: void pointer to rx descriptor 2901 * 2902 * Return: void 2903 */ 2904 static void dup_desc_dbg(struct dp_soc *soc, 2905 hal_rxdma_desc_t rxdma_dst_ring_desc, 2906 void *rx_desc) 2907 { 2908 DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1); 2909 dp_rx_dump_info_and_assert( 2910 soc, 2911 soc->rx_rel_ring.hal_srng, 2912 hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc), 2913 rx_desc); 2914 } 2915 2916 /** 2917 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs 2918 * 2919 * @soc: core DP main context 2920 * @mac_id: mac id which is one of 3 mac_ids 2921 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 2922 * @head: head of descs list to be freed 2923 * @tail: tail of decs list to be freed 2924 2925 * Return: number of msdu in MPDU to be popped 2926 */ 2927 static inline uint32_t 2928 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 2929 hal_rxdma_desc_t rxdma_dst_ring_desc, 2930 union dp_rx_desc_list_elem_t **head, 2931 union dp_rx_desc_list_elem_t **tail) 2932 { 2933 void *rx_msdu_link_desc; 2934 qdf_nbuf_t msdu; 2935 qdf_nbuf_t last; 2936 struct hal_rx_msdu_list msdu_list; 2937 uint16_t num_msdus; 2938 struct hal_buf_info buf_info; 2939 uint32_t rx_bufs_used = 0; 2940 uint32_t msdu_cnt; 2941 uint32_t i; 2942 uint8_t push_reason; 2943 uint8_t rxdma_error_code = 0; 2944 uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST; 2945 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2946 uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; 2947 hal_rxdma_desc_t ring_desc; 2948 struct rx_desc_pool *rx_desc_pool; 2949 2950 if (!pdev) { 2951 dp_rx_err_debug("%pK: pdev is null for mac_id = %d", 2952 soc, mac_id); 2953 return rx_bufs_used; 2954 } 2955 2956 msdu = 0; 2957 2958 last = NULL; 2959 2960 hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc, 2961 &buf_info, &msdu_cnt); 2962 2963 push_reason = 2964 hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc); 2965 if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 2966 rxdma_error_code = 2967 hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc); 2968 } 2969 2970 do { 2971 rx_msdu_link_desc = 2972 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 2973 2974 qdf_assert_always(rx_msdu_link_desc); 2975 2976 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, 2977 &msdu_list, &num_msdus); 2978 2979 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 2980 /* if the msdus belongs to NSS offloaded radio && 2981 * the rbm is not SW1_BM then return the msdu_link 2982 * descriptor without freeing the msdus (nbufs). let 2983 * these buffers be given to NSS completion ring for 2984 * NSS to free them. 2985 * else iterate through the msdu link desc list and 2986 * free each msdu in the list. 2987 */ 2988 if (msdu_list.rbm[0] != 2989 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) && 2990 wlan_cfg_get_dp_pdev_nss_enabled( 2991 pdev->wlan_cfg_ctx)) 2992 bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST; 2993 else { 2994 for (i = 0; i < num_msdus; i++) { 2995 struct dp_rx_desc *rx_desc = 2996 soc->arch_ops. 2997 dp_rx_desc_cookie_2_va( 2998 soc, 2999 msdu_list.sw_cookie[i]); 3000 qdf_assert_always(rx_desc); 3001 msdu = rx_desc->nbuf; 3002 /* 3003 * this is a unlikely scenario 3004 * where the host is reaping 3005 * a descriptor which 3006 * it already reaped just a while ago 3007 * but is yet to replenish 3008 * it back to HW. 3009 * In this case host will dump 3010 * the last 128 descriptors 3011 * including the software descriptor 3012 * rx_desc and assert. 3013 */ 3014 ring_desc = rxdma_dst_ring_desc; 3015 if (qdf_unlikely(!rx_desc->in_use)) { 3016 dup_desc_dbg(soc, 3017 ring_desc, 3018 rx_desc); 3019 continue; 3020 } 3021 3022 rx_desc_pool = &soc-> 3023 rx_desc_buf[rx_desc->pool_id]; 3024 dp_ipa_rx_buf_smmu_mapping_lock(soc); 3025 dp_ipa_handle_rx_buf_smmu_mapping( 3026 soc, msdu, 3027 rx_desc_pool->buf_size, 3028 false); 3029 qdf_nbuf_unmap_nbytes_single( 3030 soc->osdev, msdu, 3031 QDF_DMA_FROM_DEVICE, 3032 rx_desc_pool->buf_size); 3033 rx_desc->unmapped = 1; 3034 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 3035 3036 dp_rx_err_debug("%pK: msdu_nbuf=%pK ", 3037 soc, msdu); 3038 3039 dp_rx_buffer_pool_nbuf_free(soc, msdu, 3040 rx_desc->pool_id); 3041 rx_bufs_used++; 3042 dp_rx_add_to_free_desc_list(head, 3043 tail, rx_desc); 3044 } 3045 } 3046 } else { 3047 rxdma_error_code = HAL_RXDMA_ERR_WAR; 3048 } 3049 3050 /* 3051 * Store the current link buffer into to the local structure 3052 * to be used for release purpose. 3053 */ 3054 hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info, 3055 buf_info.paddr, buf_info.sw_cookie, 3056 buf_info.rbm); 3057 3058 hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc, 3059 &buf_info); 3060 dp_rx_link_desc_return_by_addr(soc, 3061 (hal_buff_addrinfo_t) 3062 rx_link_buf_info, 3063 bm_action); 3064 } while (buf_info.paddr); 3065 3066 DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1); 3067 if (pdev) 3068 DP_STATS_INC(pdev, err.rxdma_error, 1); 3069 3070 if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) { 3071 dp_rx_err_err("%pK: Packet received with Decrypt error", soc); 3072 } 3073 3074 return rx_bufs_used; 3075 } 3076 3077 uint32_t 3078 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 3079 uint32_t mac_id, uint32_t quota) 3080 { 3081 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 3082 hal_rxdma_desc_t rxdma_dst_ring_desc; 3083 hal_soc_handle_t hal_soc; 3084 void *err_dst_srng; 3085 union dp_rx_desc_list_elem_t *head = NULL; 3086 union dp_rx_desc_list_elem_t *tail = NULL; 3087 struct dp_srng *dp_rxdma_srng; 3088 struct rx_desc_pool *rx_desc_pool; 3089 uint32_t work_done = 0; 3090 uint32_t rx_bufs_used = 0; 3091 3092 if (!pdev) 3093 return 0; 3094 3095 err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng; 3096 3097 if (!err_dst_srng) { 3098 dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK", 3099 soc, err_dst_srng); 3100 return 0; 3101 } 3102 3103 hal_soc = soc->hal_soc; 3104 3105 qdf_assert(hal_soc); 3106 3107 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) { 3108 dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK", 3109 soc, err_dst_srng); 3110 return 0; 3111 } 3112 3113 while (qdf_likely(quota-- && (rxdma_dst_ring_desc = 3114 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { 3115 3116 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, 3117 rxdma_dst_ring_desc, 3118 &head, &tail); 3119 } 3120 3121 dp_srng_access_end(int_ctx, soc, err_dst_srng); 3122 3123 if (rx_bufs_used) { 3124 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3125 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 3126 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 3127 } else { 3128 dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id]; 3129 rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id]; 3130 } 3131 3132 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 3133 rx_desc_pool, rx_bufs_used, &head, &tail); 3134 3135 work_done += rx_bufs_used; 3136 } 3137 3138 return work_done; 3139 } 3140 3141 #ifndef QCA_HOST_MODE_WIFI_DISABLED 3142 3143 static inline uint32_t 3144 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 3145 hal_rxdma_desc_t rxdma_dst_ring_desc, 3146 union dp_rx_desc_list_elem_t **head, 3147 union dp_rx_desc_list_elem_t **tail) 3148 { 3149 void *rx_msdu_link_desc; 3150 qdf_nbuf_t msdu; 3151 qdf_nbuf_t last; 3152 struct hal_rx_msdu_list msdu_list; 3153 uint16_t num_msdus; 3154 struct hal_buf_info buf_info; 3155 uint32_t rx_bufs_used = 0, msdu_cnt, i; 3156 uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; 3157 struct rx_desc_pool *rx_desc_pool; 3158 3159 msdu = 0; 3160 3161 last = NULL; 3162 3163 hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc, 3164 &buf_info, &msdu_cnt); 3165 3166 do { 3167 rx_msdu_link_desc = 3168 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 3169 3170 if (!rx_msdu_link_desc) { 3171 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1); 3172 break; 3173 } 3174 3175 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, 3176 &msdu_list, &num_msdus); 3177 3178 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 3179 for (i = 0; i < num_msdus; i++) { 3180 struct dp_rx_desc *rx_desc = 3181 soc->arch_ops.dp_rx_desc_cookie_2_va( 3182 soc, 3183 msdu_list.sw_cookie[i]); 3184 qdf_assert_always(rx_desc); 3185 rx_desc_pool = 3186 &soc->rx_desc_buf[rx_desc->pool_id]; 3187 msdu = rx_desc->nbuf; 3188 3189 dp_ipa_rx_buf_smmu_mapping_lock(soc); 3190 dp_ipa_handle_rx_buf_smmu_mapping( 3191 soc, msdu, 3192 rx_desc_pool->buf_size, 3193 false); 3194 3195 qdf_nbuf_unmap_nbytes_single( 3196 soc->osdev, 3197 msdu, 3198 QDF_DMA_FROM_DEVICE, 3199 rx_desc_pool->buf_size); 3200 rx_desc->unmapped = 1; 3201 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 3202 3203 dp_rx_buffer_pool_nbuf_free(soc, msdu, 3204 rx_desc->pool_id); 3205 rx_bufs_used++; 3206 dp_rx_add_to_free_desc_list(head, 3207 tail, rx_desc); 3208 } 3209 } 3210 3211 /* 3212 * Store the current link buffer into to the local structure 3213 * to be used for release purpose. 3214 */ 3215 hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info, 3216 buf_info.paddr, buf_info.sw_cookie, 3217 buf_info.rbm); 3218 3219 hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc, 3220 &buf_info); 3221 dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t) 3222 rx_link_buf_info, 3223 HAL_BM_ACTION_PUT_IN_IDLE_LIST); 3224 } while (buf_info.paddr); 3225 3226 return rx_bufs_used; 3227 } 3228 3229 /* 3230 * 3231 * dp_handle_wbm_internal_error() - handles wbm_internal_error case 3232 * 3233 * @soc: core DP main context 3234 * @hal_desc: hal descriptor 3235 * @buf_type: indicates if the buffer is of type link disc or msdu 3236 * Return: None 3237 * 3238 * wbm_internal_error is seen in following scenarios : 3239 * 3240 * 1. Null pointers detected in WBM_RELEASE_RING descriptors 3241 * 2. Null pointers detected during delinking process 3242 * 3243 * Some null pointer cases: 3244 * 3245 * a. MSDU buffer pointer is NULL 3246 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag 3247 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL 3248 */ 3249 void 3250 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc, 3251 uint32_t buf_type) 3252 { 3253 struct hal_buf_info buf_info = {0}; 3254 struct dp_rx_desc *rx_desc = NULL; 3255 struct rx_desc_pool *rx_desc_pool; 3256 uint32_t rx_bufs_reaped = 0; 3257 union dp_rx_desc_list_elem_t *head = NULL; 3258 union dp_rx_desc_list_elem_t *tail = NULL; 3259 uint8_t pool_id; 3260 3261 hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info); 3262 3263 if (!buf_info.paddr) { 3264 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1); 3265 return; 3266 } 3267 3268 /* buffer_addr_info is the first element of ring_desc */ 3269 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc, 3270 &buf_info); 3271 pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie); 3272 3273 if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) { 3274 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1); 3275 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va( 3276 soc, 3277 buf_info.sw_cookie); 3278 3279 if (rx_desc && rx_desc->nbuf) { 3280 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 3281 dp_ipa_rx_buf_smmu_mapping_lock(soc); 3282 dp_ipa_handle_rx_buf_smmu_mapping( 3283 soc, rx_desc->nbuf, 3284 rx_desc_pool->buf_size, 3285 false); 3286 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, 3287 QDF_DMA_FROM_DEVICE, 3288 rx_desc_pool->buf_size); 3289 rx_desc->unmapped = 1; 3290 dp_ipa_rx_buf_smmu_mapping_unlock(soc); 3291 3292 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, 3293 rx_desc->pool_id); 3294 dp_rx_add_to_free_desc_list(&head, 3295 &tail, 3296 rx_desc); 3297 3298 rx_bufs_reaped++; 3299 } 3300 } else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) { 3301 rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id, 3302 hal_desc, 3303 &head, &tail); 3304 } 3305 3306 if (rx_bufs_reaped) { 3307 struct rx_desc_pool *rx_desc_pool; 3308 struct dp_srng *dp_rxdma_srng; 3309 3310 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1); 3311 dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id]; 3312 rx_desc_pool = &soc->rx_desc_buf[pool_id]; 3313 3314 dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng, 3315 rx_desc_pool, 3316 rx_bufs_reaped, 3317 &head, &tail); 3318 } 3319 } 3320 3321 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 3322