/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/* Max number of buffers in the invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loopback
 *			      on the same vap or a different vap
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, then drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kinds of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[QDF_MAC_ADDR_SIZE]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[QDF_MAC_ADDR_SIZE],
				  vdev->pdev->pdev_id, ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[QDF_MAC_ADDR_SIZE]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
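
/*
 * Illustrative sketch (kept out of the build with the #if 0 guard): the
 * core of the multicast echo check above is comparing the source MAC of
 * the received 802.3 frame (bytes 6..11 of the payload) against the
 * local vdev MAC. The fragment below mirrors that comparison in plain C;
 * `mec_is_looped_back` and its parameters are hypothetical names used
 * only for this example.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define EX_MAC_ADDR_SIZE 6

static bool mec_is_looped_back(const uint8_t *frame,
			       const uint8_t *vdev_mac)
{
	/* Source MAC follows the 6-byte destination MAC */
	const uint8_t *src_mac = frame + EX_MAC_ADDR_SIZE;

	return memcmp(src_mac, vdev_mac, EX_MAC_ADDR_SIZE) == 0;
}
#endif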

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
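
/*
 * Note on bm_action (a summary of how this file uses it, not a
 * definitive statement of the HAL contract): callers that have already
 * freed or consumed the MSDUs pass HAL_BM_ACTION_PUT_IN_IDLE_LIST so
 * that WBM only recycles the link descriptor, while callers that want
 * WBM to release the still-attached MSDU buffers as well (e.g. the NSS
 * offload path or the invalid-RBM handling below) pass
 * HAL_BM_ACTION_RELEASE_MSDU_LIST.
 */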

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id (filled with the pool id of the dropped descriptors)
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from a MSDU link belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		qdf_nbuf_unmap_single(soc->osdev,
				      rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id (filled with the pool id of the dropped descriptors)
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK %pM",
			  peer, peer->mac_addr.raw);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id (filled with the pool id of the dropped descriptors)
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     uint8_t *mac_id, uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				mac_id, quota);
}

#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	/* If the invalid peer SG list has reached its max size, free the
	 * buffers in the list and treat the current buffer as the start of
	 * a new list.
	 *
	 * The current logic to detect the last buffer from attn_tlv is not
	 * reliable in the OFDMA UL scenario, hence add a max buffers check
	 * to avoid the list piling up.
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
	     (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_hw_desc_get_ppduid_get(soc->hal_soc,
								 rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; the check is added for debugging purposes,
	 * in case of some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
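
/*
 * Illustrative sketch (kept out of the build with #if 0): the
 * DP_RX_LIST_APPEND usage above maintains a singly linked nbuf chain
 * through head/tail pointers so that append stays O(1). The plain-C
 * fragment below mirrors that bookkeeping with a hypothetical
 * `struct ex_node` type; it is an assumption-level model of the macro,
 * not the macro itself.
 */
#if 0
#include <stddef.h>

struct ex_node {
	struct ex_node *next;
};

static void ex_list_append(struct ex_node **head, struct ex_node **tail,
			   struct ex_node *elem)
{
	elem->next = NULL;
	if (!*head)		/* empty list: elem becomes head and tail */
		*head = elem;
	else			/* non-empty: link after current tail */
		(*tail)->next = elem;
	*tail = elem;
}
#endif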

static
void dp_rx_wbm_err_handle_bar(struct dp_soc *soc,
			      struct dp_peer *peer,
			      qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid, ssn.
	 * 2a. Do a tid update.
	 */

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + SIZE_OF_DATA_RX_TLV);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	dp_rx_tid_update_wifi3(peer, tid,
			       peer->rx_tid[tid].ba_win_size,
			       start_seq_num);
}
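
/*
 * Illustrative sketch (#if 0, excluded from the build): the BAR handler
 * above recovers the 12-bit starting sequence number from the 16-bit
 * little-endian sequence-control field, whose low 4 bits carry the
 * fragment number. The standalone fragment below shows the same bit
 * math; the `ex_` names are hypothetical, and IEEE80211_SEQ_SEQ_SHIFT
 * is assumed to be 4, per 802.11 framing.
 */
#if 0
#include <stdint.h>

#define EX_SEQ_SEQ_SHIFT  4      /* sequence number starts at bit 4 */
#define EX_SEQ_FRAG_MASK  0x000f /* low 4 bits: fragment number */

static uint16_t ex_seq_ctrl_to_ssn(uint16_t seq_ctrl)
{
	/* seq_ctrl must already be in host order, as le16toh()
	 * guarantees in the handler above.
	 */
	return seq_ctrl >> EX_SEQ_SEQ_SHIFT;	/* 0..4095 */
}

static uint16_t ex_seq_ctrl_to_frag(uint16_t seq_ctrl)
{
	return seq_ctrl & EX_SEQ_FRAG_MASK;	/* 0..15 */
}
#endif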

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while Rx window size is still initialized to value of 2. Or
 * it may also happen if negotiated window size is 1 but peer
 * sends aggregates.
 *
 * Return: void
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(!rx_tid)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);

	/*
	 * If a BA session is created and a non-aggregate packet lands
	 * here, then the issue is a sequence number mismatch.
	 * Proceed with delba even in that case.
	 */
	if (rx_tid->ppdu_id_2k != ppdu_id &&
	    rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba)
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
}
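
/*
 * Reading note (a summary of the control flow above, not additional
 * driver logic): when no BA session is active, the first exception from
 * a new PPDU only records ppdu_id_2k and drops the nbuf; a repeated
 * exception from the same PPDU, or any exception while a BA session is
 * active, falls through to the DELBA path. There, delba_tx_status acts
 * as a one-shot latch so that send_delba fires once, with further
 * exceptions just dropped while the flag stays set (it is presumably
 * cleared elsewhere once the DELBA exchange completes).
 */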

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * the driver may not be able to find the peer. Try finding the peer
 * by addr_2 of the received MPDU. If the peer is found that way, then
 * most likely sw_peer_id & ast_idx are corrupted.
 *
 * Return: True if the peer is found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/*
	 * WAR: In certain types of packets if peer_id is not correct,
	 * the driver may not be able to find the peer. Try finding the
	 * peer by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}
	return false;
}

/**
 * dp_rx_check_pkt_len() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_DATA_BUFFER_SIZE, else return false
 */
static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}

#endif
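
/*
 * Illustrative sketch (#if 0, not compiled): the handlers below compute
 * the total frame length as the MSDU length from the TLVs plus the L3
 * header pad plus the RX TLV area, then bound it against the RX buffer
 * size before touching the nbuf. The fragment below models that
 * arithmetic with hypothetical example values; the real sizes come from
 * RX_PKT_TLVS_LEN and RX_DATA_BUFFER_SIZE, which are platform-defined.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define EX_RX_PKT_TLVS_LEN     384   /* assumed TLV area size */
#define EX_RX_DATA_BUFFER_SIZE 2048  /* assumed RX buffer size */

static bool ex_pkt_len_invalid(uint16_t msdu_len, uint32_t l3_hdr_pad)
{
	uint32_t pkt_len = msdu_len + l3_hdr_pad + EX_RX_PKT_TLVS_LEN;

	/* A frame longer than the RX buffer cannot be valid here,
	 * because the non-fragmented path assumes a single buffer.
	 */
	return pkt_len > EX_RX_DATA_BUFFER_SIZE;
}
#endif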

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
 *	   if nbuf could not be handled or dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;
	struct hal_rx_msdu_metadata msdu_metadata;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
								    rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc,
						   pdev->invalid_peer_head_msdu,
						   mpdu_done, pool_id);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}

		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  RX_PKT_TLVS_LEN));

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode, if the received packet matches any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf,
					msdu_metadata);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (vdev->osif_rx) {
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf,
					      rx_tlv_hdr, true);

			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					 soc->hal_soc, rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}

			vdev->osif_rx(vdev->osif_vdev, nbuf);
		} else {
			dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			goto drop_nbuf;
		}
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *			       or wifi parse error frames to the OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code, uint8_t mac_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}
	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
			    htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In dynamic WEP case rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled and
	 * key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
	}
}
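
/*
 * Illustrative sketch (#if 0, not compiled): the HAL_RXDMA_ERR_WIFI_PARSE
 * branch above peeks at the EtherType right after the two MAC addresses
 * and, when it sees an 802.1Q tag (0x8100), looks past the VLAN header
 * for an STP LLC frame. The standalone fragment below mirrors the 802.1Q
 * detection; names prefixed `ex_`/`EX_` are hypothetical stand-ins for
 * QDF_ETH_TYPE_8021Q and DP_SKIP_VLAN.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons */

#define EX_MAC_ADDR_SIZE   6
#define EX_ETH_TYPE_8021Q  0x8100

static bool ex_frame_is_vlan_tagged(const uint8_t *eth_frame)
{
	uint16_t ethertype;

	/* The EtherType sits right after DA + SA */
	memcpy(&ethertype, eth_frame + 2 * EX_MAC_ADDR_SIZE,
	       sizeof(ethertype));
	return ethertype == htons(EX_ETH_TYPE_8021Q);
}
#endif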

/**
 * dp_rx_process_mic_error() - Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!peer) {
		dp_info_rl("peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_info_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_info_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
				   "status %d !", rx_seq, fragno, status);
			return;
		}
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	qdf_nbuf_free(nbuf);
}

uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc,
						      hal_ring_hdl)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST) &&
				 (msdu_list.rbm[0] != DP_DEFRAG_RBM))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for fragmented
			 * case. We drop the msdus and release the link desc
			 * back if there are more than one msdu in link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc,
						     &mpdu_desc_info,
						     &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef DROP_RXDMA_DECRYPT_ERR
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return false;
}
#else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return true;
}
#endif

uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc,
						      hal_ring_hdl)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);
We will need this 1495 * info when we do the actual nbuf processing 1496 */ 1497 hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc); 1498 wbm_err_info.pool_id = rx_desc->pool_id; 1499 hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf), 1500 &wbm_err_info); 1501 1502 rx_bufs_reaped[rx_desc->pool_id]++; 1503 1504 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf); 1505 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 1506 &tail[rx_desc->pool_id], 1507 rx_desc); 1508 } 1509 done: 1510 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 1511 1512 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 1513 if (rx_bufs_reaped[mac_id]) { 1514 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1515 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; 1516 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1517 1518 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 1519 rx_desc_pool, rx_bufs_reaped[mac_id], 1520 &head[mac_id], &tail[mac_id]); 1521 rx_bufs_used += rx_bufs_reaped[mac_id]; 1522 } 1523 } 1524 1525 nbuf = nbuf_head; 1526 while (nbuf) { 1527 struct dp_peer *peer; 1528 uint16_t peer_id; 1529 uint8_t err_code; 1530 uint8_t *tlv_hdr; 1531 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1532 1533 /* 1534 * retrieve the wbm desc info from nbuf TLV, so we can 1535 * handle error cases appropriately 1536 */ 1537 hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info); 1538 1539 peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 1540 rx_tlv_hdr); 1541 peer = dp_peer_find_by_id(soc, peer_id); 1542 1543 if (!peer) 1544 dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u", 1545 peer_id, wbm_err_info.wbm_err_src, 1546 wbm_err_info.reo_psh_rsn); 1547 1548 /* Set queue_mapping in nbuf to 0 */ 1549 dp_set_rx_queue(nbuf, 0); 1550 1551 next = nbuf->next; 1552 1553 if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { 1554 if (wbm_err_info.reo_psh_rsn 1555 == HAL_RX_WBM_REO_PSH_RSN_ERROR) { 1556 1557 DP_STATS_INC(soc, 1558 rx.err.reo_error 1559 [wbm_err_info.reo_err_code], 1); 1560 /* increment @pdev level */ 1561 pool_id = wbm_err_info.pool_id; 1562 dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); 1563 if (dp_pdev) 1564 DP_STATS_INC(dp_pdev, err.reo_error, 1565 1); 1566 1567 switch (wbm_err_info.reo_err_code) { 1568 /* 1569 * Handling for packets which have NULL REO 1570 * queue descriptor 1571 */ 1572 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 1573 pool_id = wbm_err_info.pool_id; 1574 dp_rx_null_q_desc_handle(soc, nbuf, 1575 rx_tlv_hdr, 1576 pool_id, peer); 1577 nbuf = next; 1578 if (peer) 1579 dp_peer_unref_del_find_by_id( 1580 peer); 1581 continue; 1582 /* TODO */ 1583 /* Add per error code accounting */ 1584 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 1585 pool_id = wbm_err_info.pool_id; 1586 1587 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, 1588 rx_tlv_hdr)) { 1589 peer_id = 1590 hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, 1591 rx_tlv_hdr); 1592 tid = 1593 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); 1594 } 1595 dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr, 1596 peer_id, tid); 1597 nbuf = next; 1598 if (peer) 1599 dp_peer_unref_del_find_by_id( 1600 peer); 1601 continue; 1602 case HAL_REO_ERR_BAR_FRAME_2K_JUMP: 1603 case HAL_REO_ERR_BAR_FRAME_OOR: 1604 if (peer) 1605 dp_rx_wbm_err_handle_bar(soc, 1606 peer, 1607 nbuf); 1608 break; 1609 1610 default: 1611 dp_info_rl("Got pkt with REO ERROR: %d", 1612 wbm_err_info.reo_err_code); 1613 break; 1614 } 1615 } 1616 } else if (wbm_err_info.wbm_err_src == 1617 HAL_RX_WBM_ERR_SRC_RXDMA) { 1618 if (wbm_err_info.rxdma_psh_rsn 1619 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc,
								  pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:

				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.
								rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (!dp_handle_rxdma_decrypt_err()) {
						if (peer)
							DP_STATS_INC(peer,
								     rx.err.decrypt_err, 1);
						break;
					}

					pool_id = wbm_err_info.pool_id;
					err_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, peer,
								err_code,
								pool_id);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer, rx.err.
							     decrypt_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}
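
/*
 * Illustrative sketch (#if 0, not compiled): dp_rx_wbm_err_process()
 * above runs in two passes. The reap loop stashes the per-descriptor
 * WBM error info into the nbuf's TLV headroom
 * (hal_rx_wbm_err_info_set_in_tlv) so the SRNG can be released early,
 * and the second loop reads it back
 * (hal_rx_wbm_err_info_get_from_tlv) to dispatch each error. The
 * fragment below models that stash/restore round trip in plain C with
 * a hypothetical ex_err_info record written into a buffer's headroom.
 */
#if 0
#include <stdint.h>
#include <string.h>

struct ex_err_info {
	uint8_t err_src;
	uint8_t err_code;
	uint8_t pool_id;
};

/* Write the error record at the start of the buffer headroom... */
static void ex_err_info_stash(uint8_t *headroom,
			      const struct ex_err_info *info)
{
	memcpy(headroom, info, sizeof(*info));
}

/* ...and read it back later, once the ring is no longer held. */
static void ex_err_info_restore(const uint8_t *headroom,
				struct ex_err_info *info)
{
	memcpy(info, headroom, sizeof(*info));
}
#endif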

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW1_BM, then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). Let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * This is an unlikely scenario
					 * where the host is reaping
					 * a descriptor which
					 * it already reaped just a while ago
					 * but is yet to replenish
					 * it back to HW.
					 * In this case host will dump
					 * the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}
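
/*
 * Illustrative sketch (#if 0, not compiled): both MPDU-pop routines in
 * this file walk a chain of MSDU link descriptors, returning each one
 * to WBM and following hal_rx_mon_next_link_desc_get() until the next
 * buffer's physical address reads back as zero. The fragment below
 * models that termination condition with a hypothetical ex_link type.
 */
#if 0
#include <stdint.h>

struct ex_link {
	uint64_t paddr;		/* 0 terminates the chain */
	struct ex_link *next;
};

static unsigned int ex_walk_link_chain(struct ex_link *link)
{
	unsigned int visited = 0;

	while (link && link->paddr) {
		visited++;	/* process/release this link descriptor */
		link = link->next;
	}
	return visited;
}
#endif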

uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init \
			  Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access \
			  Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}

static inline uint32_t
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0, msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc =
					dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				msdu = rx_desc->nbuf;

				qdf_nbuf_unmap_single(soc->osdev, msdu,
						      QDF_DMA_FROM_DEVICE);

				qdf_nbuf_free(msdu);
				rx_bufs_used++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);

	return rx_bufs_used;
}

/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * wbm_internal_error is seen in following scenarios :
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 *
 * Return: None
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_pdev *dp_pdev;
	struct dp_rx_desc *rx_desc = NULL;
	uint32_t rx_buf_cookie;
	uint32_t rx_bufs_reaped = 0;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
		return;
	}

	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		if (rx_desc && rx_desc->nbuf) {
			qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
					      QDF_DMA_FROM_DEVICE);

			rx_desc->unmapped = 1;

			qdf_nbuf_free(rx_desc->nbuf);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);

			rx_bufs_reaped++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
							 hal_desc,
							 &head, &tail);
	}

	if (rx_bufs_reaped) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped,
					&head, &tail);
	}
}