/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/* Max number of buffers in the invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loopback
 *			      on the same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations, and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/* If the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, then drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kinds of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * the AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[QDF_MAC_ADDR_SIZE]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[QDF_MAC_ADDR_SIZE], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[QDF_MAC_ADDR_SIZE]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
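/*
 * Illustrative usage sketch for the MEC check above (it mirrors the
 * actual call site in dp_rx_null_q_desc_handle() later in this file);
 * shown only to document the expected caller pattern, not compiled in:
 *
 *	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
 *		// looped back MCBC pkt: account the drop and free it
 *		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
 *		qdf_nbuf_free(nbuf);
 *	}
 */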
/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle list or release to msdu list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_link_desc_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		/* Ring is full: dump head/tail pointer state for debugging */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle list or release to msdu list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
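/*
 * Note on @bm_action (based on the call sites in this file): callers
 * pass HAL_BM_ACTION_PUT_IN_IDLE_LIST once the host has consumed or
 * freed the MSDU buffers itself, and HAL_BM_ACTION_RELEASE_MSDU_LIST
 * when WBM should release the MSDU buffers as well, e.g. when they
 * belong to an NSS offloaded radio and are freed through the NSS
 * completion ring instead (see dp_rx_err_mpdu_pop() below).
 */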
/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: filled with the pool id (mac id) of the reaped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link descriptor belong to the
		 * same pdev
		 */
		*mac_id = rx_desc->pool_id;
		pdev = soc->pdev_list[rx_desc->pool_id];

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		qdf_nbuf_unmap_single(soc->osdev,
				      rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}
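/*
 * Minimal caller sketch for dp_rx_msdus_drop() (this is how
 * dp_rx_err_process() below consumes it): the returned count is
 * accumulated per mac id so that the reaped buffers can be replenished
 * back to the RXDMA refill ring in one shot after the reap loop:
 *
 *	count = dp_rx_msdus_drop(soc, ring_desc, &mpdu_desc_info,
 *				 &mac_id, quota);
 *	rx_bufs_reaped[mac_id] += count;
 */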
/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: filled with the pool id (mac id) of the reaped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: filled with the pool id (mac id) of the reaped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are two cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     uint8_t *mac_id, uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				mac_id, quota);
}

#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif
/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to the pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	/* If the invalid peer SG list has reached its max size, free the
	 * buffers in the list and treat the current buffer as the start
	 * of a new list.
	 *
	 * The current logic to detect the last buffer from attn_tlv is
	 * not reliable in OFDMA UL scenarios, hence add a max buffers
	 * check to avoid list pile-up.
	 */
	if (!dp_pdev->first_nbuf ||
	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
		(dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_hw_desc_get_ppduid_get(soc->hal_soc,
								 rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; keep the check for debugging purposes
	 * to catch corner cases.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
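/*
 * Expected caller pattern for dp_rx_chain_msdus() (see the call site
 * in dp_rx_null_q_desc_handle() below): chain the msdu onto the pdev
 * invalid peer list, then hand the whole list to the invalid peer
 * handler once the complete MPDU has been seen:
 *
 *	mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
 *	dp_rx_process_invalid_peer_wrapper(soc,
 *			pdev->invalid_peer_head_msdu, mpdu_done, pool_id);
 */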
/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to a value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 * Return: None
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(!rx_tid)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);

	/*
	 * If a BA session is created and a non-aggregate packet lands
	 * here, then the issue is a sequence number mismatch.
	 * Proceed with delba even in that case.
	 */
	if (rx_tid->ppdu_id_2k != ppdu_id &&
	    rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba)
			soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
							peer->ctrl_peer,
							peer->mac_addr.raw,
							tid,
							peer->vdev->ctrl_vdev,
							rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
}
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * the driver may not be able to find the peer. Try finding the peer
 * by addr_2 of the received MPDU. If the peer is found that way, then
 * most likely the sw_peer_id & ast_idx are corrupted.
 *
 * Return: true if the peer is found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	uint8_t local_id;
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = soc->pdev_list[pool_id];
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/*
	 * WAR - In certain types of packets if peer_id is not correct
	 * then the driver may not be able to find the peer. Try finding
	 * the peer by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2, &local_id);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}
	return false;
}

/**
 * dp_rx_null_q_check_pkt_len_exception() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_BUFFER_SIZE, else return false
 */
static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}

#endif
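/*
 * On targets other than the ones listed above, both helpers compile to
 * stubs that return false, so dp_rx_null_q_desc_handle() below simply
 * falls through to its generic invalid-peer and length handling.
 */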
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out of
 * a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, which is when the TID queue is set
 * up. Or it may also happen for MC/BC frames if they are not routed to
 * the non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
 *	   if nbuf could not be handled or dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
								    rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(nbuf,
				    qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc,
						   pdev->invalid_peer_head_msdu,
						   mpdu_done, pool_id);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode, if the received packet matches any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets
	 * originating from any proxy STA.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (vdev->osif_rx) {
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf,
					      rx_tlv_hdr, true);

			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					 soc->hal_soc, rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}

			vdev->osif_rx(vdev->osif_vdev, nbuf);
		} else {
			dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			goto drop_nbuf;
		}
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}
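/*
 * dp_rx_null_q_desc_handle() is reached from dp_rx_wbm_err_process()
 * further below, for REO error code HAL_REO_ERR_QUEUE_DESC_ADDR_0,
 * roughly:
 *
 *	case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
 *		dp_rx_null_q_desc_handle(soc, nbuf, rx_tlv_hdr,
 *					 pool_id, peer);
 */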
/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *			       frames to OS or wifi parse errors.
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code, uint8_t mac_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		/* Look past the two MAC addresses for the ether type;
		 * STP frames carried in a VLAN tag are routed to the
		 * mesh path, everything else VLAN-tagged goes to the
		 * regular rx path.
		 */
		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
			    htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In dynamic WEP case rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and
	 * key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}
}
/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!peer) {
		dp_err_rl("peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_err_rl("PDEV not found");
		goto fail;
	}

	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
	/* Can get only last fragment */
	if (fragno) {
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						qdf_nbuf_data(nbuf));
		rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
						qdf_nbuf_data(nbuf));

		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);
		dp_info_rl("Frag pkt seq# %d frag# %d consumed status %d !",
			   rx_seq, fragno, status);
		return;
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, &mic_failure_info);

fail:
	qdf_nbuf_free(nbuf);
}
uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc,
						      hal_ring_hdl)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
					LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for fragmented
			 * case. We drop the msdus and release the link desc
			 * back if there are more than one msdu in link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}
		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc, &mpdu_desc_info,
						     &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
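/*
 * Hypothetical invocation sketch (the actual call site lives in the
 * interrupt service path outside this file, so the ring and budget
 * names here are assumptions): the handler is handed the REO exception
 * ring and a budget, and reports how many buffers it consumed:
 *
 *	work_done = dp_rx_err_process(int_ctx, soc,
 *				      soc->reo_exception_ring.hal_srng,
 *				      budget);
 */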
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc,
						      hal_ring_hdl)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);

		/*
		 * Save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t e_code;
		uint8_t *tlv_hdr;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
							   rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		/*
		 * Retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
										 rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				default:
					dp_err_rl("Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					break;
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:

				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.
								rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					pool_id = wbm_err_info.pool_id;
					e_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					if (peer) {
						DP_STATS_INC(peer, rx.err.
							     decrypt_err, 1);
					} else {
						dp_rx_process_rxdma_err(soc,
									nbuf,
									tlv_hdr,
									NULL,
									e_code,
									pool_id);
						nbuf = next;
						continue;
					}

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}
/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	hal_rxdma_desc_t ring_desc;

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW1_BM, then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). Let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * This is an unlikely scenario
					 * where the host is reaping
					 * a descriptor which
					 * it already reaped just a while ago
					 * but is yet to replenish
					 * it back to HW.
					 * In this case host will dump
					 * the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}
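/*
 * dp_wbm_int_err_mpdu_pop() below is a slimmed-down variant of
 * dp_rx_err_mpdu_pop(): it walks the same MSDU link descriptor chain,
 * but unconditionally frees the nbufs and always returns the link
 * descriptors to the idle list, since it services wbm_internal_error
 * rather than per-msdu RXDMA error codes.
 */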
static inline uint32_t
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc =
					dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				msdu = rx_desc->nbuf;

				qdf_nbuf_unmap_single(soc->osdev, msdu,
						      QDF_DMA_FROM_DEVICE);

				qdf_nbuf_free(msdu);
				rx_bufs_used++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	return rx_bufs_used;
}

/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link descriptor or msdu
 *
 * wbm_internal_error is seen in the following scenarios:
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 *
 * Return: None
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_pdev *dp_pdev;
	struct dp_rx_desc *rx_desc = NULL;
	uint32_t rx_buf_cookie;
	uint32_t rx_bufs_reaped = 0;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
		return;
	}

	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		if (rx_desc && rx_desc->nbuf) {
			qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
					      QDF_DMA_FROM_DEVICE);

			rx_desc->unmapped = 1;

			qdf_nbuf_free(rx_desc->nbuf);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);

			rx_bufs_reaped++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
							 hal_desc,
							 &head, &tail);
	}

	if (rx_bufs_reaped) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
		dp_pdev = soc->pdev_list[pool_id];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped,
					&head, &tail);
	}
}