/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/* Max number of buffers in the invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
 *			      back on the same vap or a different vap
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;
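	/*
	 * Note: &data[QDF_MAC_ADDR_SIZE] is the source address of the
	 * (ethernet-decapped) frame: the 6-byte DA is immediately
	 * followed by the 6-byte SA, so SA starts at offset 6.
	 */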
	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to
	 * go to the rootap and loop back to reach the wireless stations,
	 * and vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In this kind of scenario where sa is valid but
			 * ast is not in ast_table, we use the below API to
			 * get the AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[QDF_MAC_ADDR_SIZE]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);
	}

	if (ase) {

		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[QDF_MAC_ADDR_SIZE],
				  vdev->pdev->pdev_id, ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[QDF_MAC_ADDR_SIZE]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *				      HW (WBM) by address
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: WBM action (e.g. HAL_BM_ACTION_PUT_IN_IDLE_LIST)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
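		/*
		 * Ring full means SW2WBM release ring entries are not
		 * being consumed; dump head/tail pointers to aid
		 * debugging of the stalled ring.
		 */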
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr,
			  srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: WBM action (e.g. HAL_BM_ACTION_PUT_IN_IDLE_LIST)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id of the pdev owning the buffers (filled in)
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link desc belong to the same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = soc->pdev_list[rx_desc->pool_id];

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		qdf_nbuf_unmap_single(soc->osdev,
				      rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}
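	/*
	 * All MSDUs in the MPDU have been freed; the MPDU link
	 * descriptor itself is handed back to WBM below so HW can
	 * reuse the idle-list entry.
	 */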
	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id of the pdev owning the buffers (filled in)
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK "
			  "(%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}
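/*
 * Note: peer_pn_policy is never set today, so every PN-error MPDU takes
 * the dp_rx_msdus_drop() path; the re-injection described above is a
 * placeholder until per-peer PN policies are wired up.
 */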
/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id of the pdev owning the buffers (filled in)
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU suddenly jumps by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate a DELBA/DEAUTH
 * frame. For case B) the frame is normally dropped, no more action is
 * taken.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     uint8_t *mac_id, uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				mac_id, quota);
}

#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of an mpdu
 *			 to the pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	/* If the invalid peer SG list is already at its max, free the
	 * buffers in the list and treat the current buffer as the start
	 * of a new list.
	 *
	 * The current logic to detect the last buffer from attn_tlv is
	 * not reliable in OFDMA UL scenarios, hence add a max buffers
	 * check to avoid list pile-up.
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
	     (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_hw_desc_get_ppduid_get(soc->hal_soc,
								 rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));

	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}
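	/*
	 * rx_chfrag_start/end on the nbuf mark the first and last
	 * buffers of the chained MPDU, so the invalid peer handler can
	 * tell when a complete MPDU has been accumulated.
	 */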
	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; keep the check for debugging, in case
	 * some corner case is missed.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}

static
void dp_rx_wbm_err_handle_bar(struct dp_soc *soc,
			      struct dp_peer *peer,
			      qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid and ssn.
	 * 2a. Do a tid update.
	 */

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr +
					     sizeof(struct rx_pkt_tlvs));

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	dp_rx_tid_update_wifi3(peer, tid,
			       peer->rx_tid[tid].ba_win_size,
			       start_seq_num);
}

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in a non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to a value of 2,
 * or if the negotiated window size is 1 but the peer sends
 * aggregates.
 *
 * Return: None
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(!rx_tid)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);
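	/*
	 * A 2k-jump on a fresh PPDU with no active BA session is simply
	 * recorded and dropped below; a repeat on the same PPDU, or a
	 * jump while a BA session is active, falls through to the DELBA
	 * path.
	 */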
	/*
	 * If a BA session is created and a non-aggregate packet is
	 * landing here, then the issue is a sequence number mismatch.
	 * Proceed with delba even in that case.
	 */
	if (rx_tid->ppdu_id_2k != ppdu_id &&
	    rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba)
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
	return;
}

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * the driver may not be able to find the peer. Try finding the peer
 * by addr_2 of the received MPDU. If the peer is found, then most
 * likely sw_peer_id & ast_idx are corrupted.
 *
 * Return: true if the peer was found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = soc->pdev_list[pool_id];
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
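	/*
	 * i_addr2 in the 802.11 header is the transmitter address, so
	 * it identifies the sending peer even when sw_peer_id in the
	 * RX TLVs is corrupted.
	 */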
	/*
	 * WAR: in certain types of packets if peer_id is not correct
	 * then the driver may not be able to find the peer; try finding
	 * the peer by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}
	return false;
}

/**
 * dp_rx_null_q_check_pkt_len_exception() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pkt_len > RX_BUFFER_SIZE, else false
 */
static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}

#endif
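/*
 * On targets other than QCA6390/QCA6490 the two WARs above are compiled
 * out; the stubs simply report "no exception" so that the caller below
 * stays target-agnostic.
 */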
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, which is when the TID queue is set up.
 * It may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination ring and in a WBM
 * release ring.
 *
 * Return: QDF_STATUS_SUCCESS if the nbuf was handled successfully, else
 *	   a QDF error status if the nbuf could not be handled and was
 *	   dropped
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
								    rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(nbuf,
				    qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc,
						   pdev->invalid_peer_head_msdu,
						   mpdu_done, pool_id);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}

		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));
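	/*
	 * Buffer layout at this point (ethernet decap):
	 * | RX_PKT_TLVS | l2 hdr pad | 802.3 header | payload |
	 * For fragments only the TLVs are stripped here; the l2 padding
	 * is presumably accounted for later in the defrag path.
	 */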
	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		/* IEEE80211_SEQ_MAX indicates invalid start_seq */
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (vdev->osif_rx) {
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf,
					      rx_tlv_hdr, true);

			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					 soc->hal_soc, rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}

			vdev->osif_rx(vdev->osif_vdev, nbuf);

		} else {
			dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			goto drop_nbuf;
		}
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *			       or wifi parse error frames to the OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code, uint8_t mac_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
			    htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report an unencrypted frame error.
	 * To pass the WAPI cert case, SW needs to pass unencrypted
	 * rekey frames to the stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In the dynamic WEP case rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled
	 * and key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!peer) {
		dp_err_rl("peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_err_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
				   "status %d !", rx_seq, fragno, status);
			return;
		}
	}
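	/*
	 * A non-zero fragment number means this MPDU was fragmented;
	 * the (last) fragment is handed to the defrag path above to
	 * complete reassembly instead of raising a MIC failure here.
	 */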
	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	qdf_nbuf_free(nbuf);
	return;
}

uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				      hal_srng_dst_get_next(hal_soc,
							    hal_ring_hdl)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST) &&
				 (msdu_list.rbm[0] != DP_DEFRAG_RBM))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}
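			/*
			 * For NSS offloaded radios, buffers owned by a
			 * different return buffer manager are expected
			 * here, so the invalid_rbm bump above is
			 * suppressed; the link descriptor is still
			 * returned to WBM below.
			 */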
			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for the
			 * fragmented case. We drop the msdus and release
			 * the link desc back if there is more than one
			 * msdu in the link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc, &mpdu_desc_info,
						     &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
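/*
 * Summary of the REO exception routing above: fragmented MPDUs go to
 * dp_rx_frag_handle(), PN-check failures to dp_rx_pn_error_handle() and
 * 2k-jumps to dp_rx_2k_jump_handle(); today the latter two both end up
 * dropping the MSDUs via dp_rx_msdus_drop().
 */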
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				      hal_srng_dst_get_next(hal_soc,
							    hal_ring_hdl)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);
		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t e_code;
		uint8_t *tlv_hdr;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
							   rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		if (!peer)
			dp_err_rl("peer is null! peer_id %u err_src %u err_rsn %u",
				  peer_id, wbm_err_info.wbm_err_src,
				  wbm_err_info.reo_psh_rsn);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
										 rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc,
									  rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					if (peer)
						dp_rx_wbm_err_handle_bar(soc,
									 peer,
									 nbuf);
					break;

				default:
					dp_err_rl("Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					break;
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:

				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					pool_id = wbm_err_info.pool_id;
					e_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.decrypt_err,
							     1);
					} else {
						dp_rx_process_rxdma_err(soc,
									nbuf,
									tlv_hdr,
									NULL,
									e_code,
									pool_id);
						nbuf = next;
						continue;
					}

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio
			 * and the rbm is not SW3_BM, return the msdu_link
			 * descriptor without freeing the msdus (nbufs); let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * this is an unlikely scenario
					 * where the host is reaping
					 * a descriptor which
					 * it already reaped just a while ago
					 * but is yet to replenish
					 * it back to HW.
					 * In this case host will dump
					 * the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}
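		/*
		 * buf_info must be copied out before
		 * hal_rx_mon_next_link_desc_get() overwrites it with the
		 * next link in the chain; the saved copy is what gets
		 * released back to WBM.
		 */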
		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
						rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init "
			  "Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access "
			  "Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}
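/*
 * dp_wbm_int_err_mpdu_pop() below is a trimmed-down variant of
 * dp_rx_err_mpdu_pop(): it walks the MSDU link descriptor chain for a
 * WBM internal error, unconditionally frees the nbufs and returns every
 * link descriptor to the WBM idle list.
 */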
static inline uint32_t
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0, msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc,
				     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC],
				     1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc =
					dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				msdu = rx_desc->nbuf;

				qdf_nbuf_unmap_single(soc->osdev, msdu,
						      QDF_DMA_FROM_DEVICE);

				qdf_nbuf_free(msdu);
				rx_bufs_used++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
						rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);

	return rx_bufs_used;
}

/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * wbm_internal_error is seen in the following scenarios:
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 *
 * Return: None
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_pdev *dp_pdev;
	struct dp_rx_desc *rx_desc = NULL;
	uint32_t rx_buf_cookie;
	uint32_t rx_bufs_reaped = 0;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER],
			     1);
		return;
	}

	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF],
			     1);
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		if (rx_desc && rx_desc->nbuf) {
			qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
					      QDF_DMA_FROM_DEVICE);

			rx_desc->unmapped = 1;

			qdf_nbuf_free(rx_desc->nbuf);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);

			rx_bufs_reaped++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
							 hal_desc,
							 &head, &tail);
	}

	if (rx_bufs_reaped) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED],
			     1);
		dp_pdev = soc->pdev_list[pool_id];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped,
					&head, &tail);
	}
}