/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/* Max number of buffers in the invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loopback
 *			      on the same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise,
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * If the received pkt's source MAC address matches the vdev
	 * MAC address, drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's source MAC address matches the MAC address
	 * of a wired PC behind the STA, or of a wireless STA behind the
	 * repeater, drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA, and without
			 * this event we don't know what the STA's sa_idx is.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In scenarios like this, where sa_idx is valid but
			 * the AST entry is not yet in ast_table, use the API
			 * below to get the AST entry for the STA's own MAC
			 * address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[QDF_MAC_ADDR_SIZE]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[QDF_MAC_ADDR_SIZE],
				  vdev->pdev->pdev_id, ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[QDF_MAC_ADDR_SIZE]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put in the idle list or release the MSDU list to WBM
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

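/*
 * Illustrative usage (a sketch, not a new call site): the error paths in
 * this file return a consumed link descriptor to the WBM idle list via
 *
 *	dp_rx_link_desc_return_by_addr(soc, link_desc_addr,
 *				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 *
 * and use HAL_BM_ACTION_RELEASE_MSDU_LIST instead when the attached MSDU
 * buffers must also be released by WBM (e.g. NSS-owned buffers or an
 * unexpected return buffer manager).
 */
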
/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put in the idle list or release the MSDU list to WBM
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: filled with the mac id (pool id) that owns the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs of an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link belong to the same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = soc->pdev_list[rx_desc->pool_id];

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		qdf_nbuf_unmap_single(soc->osdev,
				      rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid: %d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: filled with the mac id (pool id) that owns the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: filled with the mac id (pool id) that owns the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate a DELBA/DEAUTH frame
 * For case B) the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     uint8_t *mac_id, uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				mac_id, quota);
}

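/*
 * Note: as the code stands, both dp_rx_pn_error_handle() and
 * dp_rx_2k_jump_handle() above resolve to dp_rx_msdus_drop(); the
 * re-injection to REO and the DELBA generation described in their
 * headers are not taken on this path (DELBA for 2k jumps is issued from
 * dp_2k_jump_handle() on the WBM path further below).
 */
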
#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to the pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	/* If the invalid peer SG list has reached its max size, free the
	 * buffers in the list and treat the current buffer as the start of
	 * a new list.
	 *
	 * The current logic to detect the last buffer from attn_tlv is not
	 * reliable in OFDMA UL scenarios, hence add a max-buffers check to
	 * avoid the list piling up.
	 */
	if (!dp_pdev->first_nbuf ||
	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
		(dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_hw_desc_get_ppduid_get(soc->hal_soc,
								 rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; keep the check for debugging in case of
	 * corner cases.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}

static
void dp_rx_wbm_err_handle_bar(struct dp_soc *soc,
			      struct dp_peer *peer,
			      qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid, ssn
	 * 2a. Do a tid update
	 */

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr +
					     sizeof(struct rx_pkt_tlvs));

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	dp_rx_tid_update_wifi3(peer, tid,
			       peer->rx_tid[tid].ba_win_size,
			       start_seq_num);
}

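/*
 * For reference: the start sequence number above is recovered from the
 * 16-bit BAR sequence-control field as
 *
 *	ssn = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
 *
 * i.e. the 4 fragment-number bits are shifted out, leaving the 12-bit
 * starting sequence number used for the rx tid update.
 */
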
/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in the non-BA case. This typically
 * happens if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to a value of 2. It
 * may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(!rx_tid)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);

	/*
	 * If a BA session is created and a non-aggregate packet lands
	 * here, the issue is a sequence number mismatch.
	 * Proceed with delba even in that case.
	 */
	if (rx_tid->ppdu_id_2k != ppdu_id &&
	    rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba)
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
	return;
}

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * For certain types of packets the peer_id may be incorrect, and the
 * driver may then fail to find the peer. Try finding the peer by addr_2
 * of the received MPDU; if found, the sw_peer_id and ast_idx were most
 * likely corrupted.
 *
 * Return: true if the peer was found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = soc->pdev_list[pool_id];
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/*
	 * WAR: for certain types of packets the peer_id may be incorrect
	 * and the driver may fail to find the peer. Try finding the peer
	 * by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}
	return false;
}

/**
 * dp_rx_null_q_check_pkt_len_exception() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_BUFFER_SIZE, else false
 *
 */
static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}

#endif

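/*
 * Example (a sketch of the target split above): on QCA6390/QCA6490 an
 * oversized msdu_len reported through a corrupted TLV is caught before
 * the pktlen is set, e.g. in dp_rx_null_q_desc_handle() below:
 *
 *	if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
 *		goto drop_nbuf;
 *
 * On other targets the stub versions compile the two checks away.
 */
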
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out of
 * a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, i.e. before the TID queue is set up.
 * It may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination ring and in a WBM
 * release ring.
 *
 * Return: QDF_STATUS_SUCCESS if the nbuf was handled successfully, or a
 *	   QDF error status if the nbuf could not be handled and was
 *	   dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
								    rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(nbuf,
				    qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc,
						   pdev->invalid_peer_head_msdu,
						   mpdu_done, pool_id);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode, if the received packet matches with any of the
	 * vdev mac addresses, drop it. Do not receive multicast packets
	 * originating from any proxy STA.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (vdev->osif_rx) {
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf,
					      rx_tlv_hdr, true);

			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					 soc->hal_soc, rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}

			vdev->osif_rx(vdev->osif_vdev, nbuf);
		} else {
			dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			goto drop_nbuf;
		}
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}

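/*
 * Call-path sketch: dp_rx_wbm_err_process() below routes REO error code
 * HAL_REO_ERR_QUEUE_DESC_ADDR_0 into dp_rx_null_q_desc_handle(), so a
 * frame that arrives before its TID queue is set up is either delivered
 * to the stack (with the MEC/WDS/tagging checks applied above) or
 * dropped and accounted against the soc/peer error counters.
 */
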
/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *			       or wifi parse error frames to the OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code, uint8_t mac_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
			    htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In the dynamic WEP case, rekey frames are not encrypted, similar
	 * to WAPI. Allow EAPOL when 802.1x + WEP is enabled and key install
	 * is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!peer) {
		dp_info_rl("peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_info_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_info_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed status %d !",
				   rx_seq, fragno, status);
			return;
		}
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	qdf_nbuf_free(nbuf);
	return;
}

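/*
 * Usage sketch (assumed caller, inferred from the signature below): the
 * REO exception ring is drained from the per-context interrupt handler,
 * roughly as
 *
 *	work = dp_rx_err_process(int_ctx, soc,
 *				 soc->reo_exception_ring.hal_srng, budget);
 *
 * The ring field name here is an assumption for illustration; the real
 * handle is whatever ring the interrupt code registered.
 */
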
da_mac_addr"); 1141 goto fail; 1142 } 1143 1144 if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf), 1145 &mic_failure_info.ta_mac_addr.bytes[0])) { 1146 dp_err_rl("Failed to get ta_mac_addr"); 1147 goto fail; 1148 } 1149 1150 mic_failure_info.key_id = 0; 1151 mic_failure_info.multicast = 1152 IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes); 1153 qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE); 1154 mic_failure_info.frame_type = cdp_rx_frame_type_802_11; 1155 mic_failure_info.data = NULL; 1156 mic_failure_info.vdev_id = vdev->vdev_id; 1157 1158 tops = pdev->soc->cdp_soc.ol_ops; 1159 if (tops->rx_mic_error) 1160 tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id, 1161 &mic_failure_info); 1162 1163 fail: 1164 qdf_nbuf_free(nbuf); 1165 return; 1166 } 1167 1168 uint32_t 1169 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 1170 hal_ring_handle_t hal_ring_hdl, uint32_t quota) 1171 { 1172 hal_ring_desc_t ring_desc; 1173 hal_soc_handle_t hal_soc; 1174 uint32_t count = 0; 1175 uint32_t rx_bufs_used = 0; 1176 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 1177 uint8_t mac_id = 0; 1178 uint8_t buf_type; 1179 uint8_t error, rbm; 1180 struct hal_rx_mpdu_desc_info mpdu_desc_info; 1181 struct hal_buf_info hbi; 1182 struct dp_pdev *dp_pdev; 1183 struct dp_srng *dp_rxdma_srng; 1184 struct rx_desc_pool *rx_desc_pool; 1185 uint32_t cookie = 0; 1186 void *link_desc_va; 1187 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 1188 uint16_t num_msdus; 1189 struct dp_rx_desc *rx_desc = NULL; 1190 1191 /* Debug -- Remove later */ 1192 qdf_assert(soc && hal_ring_hdl); 1193 1194 hal_soc = soc->hal_soc; 1195 1196 /* Debug -- Remove later */ 1197 qdf_assert(hal_soc); 1198 1199 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { 1200 1201 /* TODO */ 1202 /* 1203 * Need API to convert from hal_ring pointer to 1204 * Ring Type / Ring Id combo 1205 */ 1206 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 1207 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1208 FL("HAL RING Access Failed -- %pK"), hal_ring_hdl); 1209 goto done; 1210 } 1211 1212 while (qdf_likely(quota-- && (ring_desc = 1213 hal_srng_dst_get_next(hal_soc, 1214 hal_ring_hdl)))) { 1215 1216 DP_STATS_INC(soc, rx.err_ring_pkts, 1); 1217 1218 error = HAL_RX_ERROR_STATUS_GET(ring_desc); 1219 1220 qdf_assert(error == HAL_REO_ERROR_DETECTED); 1221 1222 buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc); 1223 /* 1224 * For REO error ring, expect only MSDU LINK DESC 1225 */ 1226 qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE); 1227 1228 cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); 1229 /* 1230 * check for the magic number in the sw cookie 1231 */ 1232 qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) & 1233 LINK_DESC_ID_START); 1234 1235 /* 1236 * Check if the buffer is to be processed on this processor 1237 */ 1238 rbm = hal_rx_ret_buf_manager_get(ring_desc); 1239 1240 hal_rx_reo_buf_paddr_get(ring_desc, &hbi); 1241 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); 1242 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 1243 &num_msdus); 1244 1245 if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) && 1246 (msdu_list.rbm[0] != 1247 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) { 1248 /* TODO */ 1249 /* Call appropriate handler */ 1250 if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) { 1251 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 1252 QDF_TRACE(QDF_MODULE_ID_DP, 1253 QDF_TRACE_LEVEL_ERROR, 1254 FL("Invalid RBM %d"), 1255 msdu_list.rbm[0]); 1256 } 1257 1258 /* Return link 
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for the
			 * fragmented case. We drop the msdus and release the
			 * link desc back if there is more than one msdu in
			 * the link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc, &mpdu_desc_info,
						     &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

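/*
 * dp_rx_wbm_err_process() below works in two passes: it first reaps the
 * WBM error release ring under SRNG access, stashing the per-descriptor
 * WBM error info into each nbuf's TLV area and collecting the nbufs in
 * a local list; then, after replenishing the rx buffers, it walks that
 * list and dispatches each nbuf by (error source, push reason, error
 * code) without touching the ring again.
 */
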
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				      hal_srng_dst_get_next(hal_soc,
							    hal_ring_hdl)))) {
		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case the host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);

		/*
		 * Save the wbm desc info in the nbuf TLV; we will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t e_code;
		uint8_t *tlv_hdr;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
							   rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		if (!peer)
			dp_info_rl("peer is null peer_id %u err_src %u err_rsn %u",
				   peer_id, wbm_err_info.wbm_err_src,
				   wbm_err_info.reo_psh_rsn);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
										 rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc,
									  rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					if (peer)
						dp_rx_wbm_err_handle_bar(soc,
									 peer,
									 nbuf);
					break;

				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err_info.reo_err_code);
					break;
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:

				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.
								rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer, rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					pool_id = wbm_err_info.pool_id;
					e_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					if (peer) {
						DP_STATS_INC(peer, rx.err.
							     decrypt_err, 1);
					} else {
						dp_rx_process_rxdma_err(soc,
									nbuf,
									tlv_hdr,
									NULL,
									e_code,
									pool_id);
						nbuf = next;
						continue;
					}

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}

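/*
 * The MPDU-pop helpers below walk a chain of MSDU link descriptors:
 * each iteration frees (or hands off) the MSDUs referenced by the
 * current link buffer, saves that buffer's address info, follows
 * hal_rx_mon_next_link_desc_get() to the next link buffer, and returns
 * the saved one to WBM; the loop ends when the next paddr reads back
 * as zero.
 */
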
/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio and
			 * the rbm is not SW3_BM, return the msdu_link
			 * descriptor without freeing the msdus (nbufs); let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else, iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * This is an unlikely scenario
					 * where the host is reaping
					 * a descriptor which
					 * it already reaped just a while ago
					 * but is yet to replenish
					 * it back to HW.
					 * In this case host will dump
					 * the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purposes.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

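/*
 * Usage sketch (assumed caller): the RXDMA error destination ring is
 * serviced per mac from the interrupt path, roughly as
 *
 *	work_done = dp_rxdma_err_process(int_ctx, soc, mac_id, budget);
 *
 * Note that each reaped ring entry may pop several MSDUs, so the value
 * returned counts replenished rx buffers rather than ring entries.
 */
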
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : RXDMA error destination ring not initialized -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : RXDMA error destination ring access failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}

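/**
 * dp_wbm_int_err_mpdu_pop() - pop and free the MSDUs of an MPDU whose
 *			       link descriptor was released due to a WBM
 *			       internal error
 *
 * @soc: core DP main context
 * @mac_id: mac id owning the buffer pool
 * @rxdma_dst_ring_desc: hal descriptor carrying the link buffer address
 * @head: head of the local descriptor free-list to append to
 * @tail: tail of the local descriptor free-list to append to
 *
 * Return: number of rx buffers reaped
 */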
static inline uint32_t
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0, msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc,
				     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC],
				     1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc =
					dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				msdu = rx_desc->nbuf;

				qdf_nbuf_unmap_single(soc->osdev, msdu,
						      QDF_DMA_FROM_DEVICE);

				qdf_nbuf_free(msdu);
				rx_bufs_used++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purposes.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);

	return rx_bufs_used;
}

/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * wbm_internal_error is seen in the following scenarios:
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 *
 * Return: None
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_pdev *dp_pdev;
	struct dp_rx_desc *rx_desc = NULL;
	uint32_t rx_buf_cookie;
	uint32_t rx_bufs_reaped = 0;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER],
			     1);
		return;
	}

	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF],
			     1);
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		if (rx_desc && rx_desc->nbuf) {
			qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
					      QDF_DMA_FROM_DEVICE);

			rx_desc->unmapped = 1;

			qdf_nbuf_free(rx_desc->nbuf);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);

			rx_bufs_reaped++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
							 hal_desc,
							 &head, &tail);
	}

	if (rx_bufs_reaped) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED],
			     1);
		dp_pdev = soc->pdev_list[pool_id];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped,
					&head, &tail);
	}
}