/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h> /* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loopback
 *                            on the same vap or a different vap
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
                                          struct dp_peer *peer,
                                          uint8_t *rx_tlv_hdr,
                                          qdf_nbuf_t nbuf)
{
    struct dp_vdev *vdev = peer->vdev;
    struct dp_ast_entry *ase;
    uint16_t sa_idx = 0;
    uint8_t *data;

    /*
     * The Multicast Echo Check is required only if the vdev is a STA
     * and the received pkt is a multicast/broadcast pkt; otherwise
     * skip the MEC check.
     */
    if (vdev->opmode != wlan_op_mode_sta)
        return false;

    if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
        return false;

    data = qdf_nbuf_data(nbuf);
    /*
     * If the received packet's source MAC address matches the vdev
     * MAC address, drop the packet: it is looped back.
     */
    if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
                      vdev->mac_addr.raw,
                      QDF_MAC_ADDR_SIZE)))
        return true;

    /*
     * In qwrap isolation mode, do not drop loopback packets.
     * In isolation mode, all packets from the wired stations need to go
     * to the root AP and loop back to reach the wireless stations, and
     * vice-versa.
     */
    if (qdf_unlikely(vdev->isolation_vdev))
        return false;

    /*
     * If the received packet's source MAC address matches that of a
     * wired PC behind the STA, or of a wireless STA behind the
     * repeater, drop the packet: it is looped back.
     */
    qdf_spin_lock_bh(&soc->ast_lock);
    if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
        sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

        if (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                      "invalid sa_idx: %d", sa_idx);
            qdf_assert_always(0);
        }

        ase = soc->ast_table[sa_idx];
        if (!ase) {
            /* We do not get a peer map event for the STA, and
             * without that event we do not know the STA's sa_idx.
             * For this reason the AST may not yet be associated
             * with any index position in ast_table.
             * In scenarios where sa is valid but the AST is not in
             * ast_table, use the API below to get the AST entry
             * for the STA's own mac_address.
             */
            ase = dp_peer_ast_list_find(soc, peer,
                                        &data[QDF_MAC_ADDR_SIZE]);
            if (ase) {
                ase->ast_idx = sa_idx;
                soc->ast_table[sa_idx] = ase;
                ase->is_mapped = TRUE;
            }
        }
    } else {
        ase = dp_peer_ast_hash_find_by_pdevid(soc,
                                              &data[QDF_MAC_ADDR_SIZE],
                                              vdev->pdev->pdev_id);
    }

    if (ase) {
        if (ase->pdev_id != vdev->pdev->pdev_id) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_DP,
                      QDF_TRACE_LEVEL_INFO,
                      "Detected DBDC Root AP %pM, %d %d",
                      &data[QDF_MAC_ADDR_SIZE],
                      vdev->pdev->pdev_id, ase->pdev_id);
            return false;
        }

        if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
            (ase->peer != peer)) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_DP,
                      QDF_TRACE_LEVEL_INFO,
                      "received pkt with same src mac %pM",
                      &data[QDF_MAC_ADDR_SIZE]);

            return true;
        }
    }
    qdf_spin_unlock_bh(&soc->ast_lock);
    return false;
}
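
/*
 * Illustrative note (not part of the data path): the echo check above
 * assumes the nbuf has already been decapsulated to an 802.3-style
 * header, so the source MAC sits QDF_MAC_ADDR_SIZE bytes into the
 * frame:
 *
 *     uint8_t *data = qdf_nbuf_data(nbuf);
 *     uint8_t *da = &data[0];                  destination MAC
 *     uint8_t *sa = &data[QDF_MAC_ADDR_SIZE];  source MAC
 *
 * A multicast frame is treated as looped back when sa matches either
 * the vdev's own MAC or an AST entry learned behind this STA.
 */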

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *                                    HW (WBM) by address
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
                               uint8_t bm_action)
{
    struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
    void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
    void *hal_soc = soc->hal_soc;
    QDF_STATUS status = QDF_STATUS_E_FAILURE;
    void *src_srng_desc;

    if (!wbm_rel_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "WBM RELEASE RING not initialized");
        return status;
    }

    if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
                  wbm_rel_srng);
        DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
        goto done;
    }
    src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
    if (qdf_likely(src_srng_desc)) {
        /* Return link descriptor through WBM ring (SW2WBM) */
        hal_rx_msdu_link_desc_set(hal_soc,
                                  src_srng_desc, link_desc_addr,
                                  bm_action);
        status = QDF_STATUS_SUCCESS;
    } else {
        struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
                  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
                  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
    }
done:
    hal_srng_access_end(hal_soc, wbm_rel_srng);
    return status;
}

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *                            (WBM) as part of error handling
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
    void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

    return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
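
/*
 * Usage sketch for the two helpers above: callers in this file pick
 * the bm_action based on who should reclaim the MSDU buffers, e.g.
 *
 *     dp_rx_link_desc_return(soc, ring_desc,
 *                            HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 *     dp_rx_link_desc_return(soc, ring_desc,
 *                            HAL_BM_ACTION_RELEASE_MSDU_LIST);
 *
 * Broadly, PUT_IN_IDLE_LIST hands only the link descriptor back to the
 * WBM idle list (SW has dealt with the MSDU buffers itself), while
 * RELEASE_MSDU_LIST asks WBM to release the attached MSDU buffers too.
 */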

/**
 * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: set to the MAC id (pool id) the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
                                 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                                 uint8_t *mac_id,
                                 uint32_t quota)
{
    uint32_t rx_bufs_used = 0;
    void *link_desc_va;
    struct hal_buf_info buf_info;
    struct dp_pdev *pdev;
    struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
    int i;
    uint8_t *rx_tlv_hdr;
    uint32_t tid;

    hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

    link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

    /* No UNMAP required -- this is "malloc_consistent" memory */
    hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
                         &mpdu_desc_info->msdu_count);

    for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
        struct dp_rx_desc *rx_desc =
            dp_rx_cookie_2_va_rxdma_buf(soc,
                                        msdu_list.sw_cookie[i]);

        qdf_assert_always(rx_desc);

        /* all buffers from an MSDU link belong to the same pdev */
        *mac_id = rx_desc->pool_id;
        pdev = soc->pdev_list[rx_desc->pool_id];

        if (!dp_rx_desc_check_magic(rx_desc)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid rx_desc cookie=%d"),
                      msdu_list.sw_cookie[i]);
            return rx_bufs_used;
        }

        qdf_nbuf_unmap_single(soc->osdev,
                              rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

        rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

        rx_bufs_used++;
        tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
                                        rx_desc->rx_buf_start);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Packet received with PN error for tid :%d", tid);

        rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
        if (hal_rx_encryption_info_valid(rx_tlv_hdr))
            hal_rx_print_pn(rx_tlv_hdr);

        /* Just free the buffers */
        qdf_nbuf_free(rx_desc->nbuf);

        dp_rx_add_to_free_desc_list(&pdev->free_list_head,
                                    &pdev->free_list_tail, rx_desc);
    }

    /* Return link descriptor through WBM ring (SW2WBM) */
    dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

    return rx_bufs_used;
}
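
/*
 * Worked example of the quota interaction above: with quota = 3 and an
 * MPDU carrying 5 MSDUs, the loop frees only the first 3 nbufs and
 * returns 3; the link descriptor is nevertheless returned to the WBM
 * idle list, and callers simply add the return value to their
 * reaped-buffer counts for replenishment.
 */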

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: set to the MAC id (pool id) the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore PN check errors, or if DP
 * decides that this frame is still OK, the frame can be re-injected
 * back to REO to use some of the other REO features, e.g. duplicate
 * detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
                      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                      uint8_t *mac_id,
                      uint32_t quota)
{
    uint16_t peer_id;
    uint32_t rx_bufs_used = 0;
    struct dp_peer *peer;
    bool peer_pn_policy = false;

    peer_id = DP_PEER_METADATA_PEER_ID_GET(
                        mpdu_desc_info->peer_meta_data);

    peer = dp_peer_find_by_id(soc, peer_id);

    if (qdf_likely(peer)) {
        /*
         * TODO: Check for peer specific policies & set peer_pn_policy
         */
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "discard rx due to PN error for peer %pK "
                  "(%02x:%02x:%02x:%02x:%02x:%02x)",
                  peer,
                  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
                  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
                  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

        dp_peer_unref_del_find_by_id(peer);
    }
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "Packet received with PN error");

    /* No peer PN policy -- definitely drop */
    if (!peer_pn_policy)
        rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
                                        mpdu_desc_info,
                                        mac_id, quota);

    return rx_bufs_used;
}
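
/*
 * Sketch of the intended per-peer policy hook (an assumption -- the
 * check above is still a TODO): a peer-level flag would gate the drop,
 * e.g.
 *
 *     peer_pn_policy = peer->ignore_pn_errors;    hypothetical field
 *
 * so that frames from peers configured to tolerate PN errors could be
 * re-injected to REO instead of being dropped via dp_rx_msdus_drop().
 */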

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: set to the MAC id (pool id) the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling for the case where the
 * sequence number of an MPDU suddenly jumps by 2K. Today there are two
 * cases that need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate a DELBA/DEAUTH
 * frame; for case B) the frame is normally dropped and no further
 * action is taken.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
                     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                     uint8_t *mac_id, uint32_t quota)
{
    return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
                            mac_id, quota);
}

#ifdef CONFIG_MCL
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
    do {                                \
        qdf_assert_always(!(head));     \
        qdf_assert_always(!(tail));     \
    } while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *                       to pdev invalid peer list
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
                  uint8_t mac_id)
{
    bool mpdu_done = false;
    qdf_nbuf_t curr_nbuf = NULL;
    qdf_nbuf_t tmp_nbuf = NULL;

    /* TODO: Currently only single radio is supported, hence
     * pdev hard coded to '0' index
     */
    struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

    if (!dp_pdev->first_nbuf) {
        qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
        dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
        dp_pdev->first_nbuf = true;

        /* If the new nbuf received is the first msdu of the
         * A-MSDU and there are msdus in the invalid peer msdu
         * list, free all the msdus of the invalid peer msdu list.
         * This scenario can happen when we start receiving a new
         * A-MSDU even before the previous A-MSDU is completely
         * received.
         */
        curr_nbuf = dp_pdev->invalid_peer_head_msdu;
        while (curr_nbuf) {
            tmp_nbuf = curr_nbuf->next;
            qdf_nbuf_free(curr_nbuf);
            curr_nbuf = tmp_nbuf;
        }

        dp_pdev->invalid_peer_head_msdu = NULL;
        dp_pdev->invalid_peer_tail_msdu = NULL;
        hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
                                           &(dp_pdev->ppdu_info.rx_status));
    }

    if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
        hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
        qdf_assert_always(dp_pdev->first_nbuf == true);
        dp_pdev->first_nbuf = false;
        mpdu_done = true;
    }

    /*
     * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
     * should be NULL here; keep the check for debugging in case of
     * corner cases.
     */
    DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
                                    dp_pdev->invalid_peer_tail_msdu);
    DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
                      dp_pdev->invalid_peer_tail_msdu,
                      nbuf);

    return mpdu_done;
}
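
/*
 * Lifecycle example for the chaining above: the first MSDU of an MPDU
 * sets rx_chfrag_start, latches dp_pdev->ppdu_id and flushes any stale
 * invalid-peer list; every MSDU is then appended to the list; the MSDU
 * whose TLVs report both a matching phy_ppdu_id and msdu_done sets
 * rx_chfrag_end and makes this function return true, at which point
 * the caller hands the whole chain to the invalid peer handler.
 */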
499 * 500 */ 501 502 void 503 dp_2k_jump_handle(struct dp_soc *soc, 504 qdf_nbuf_t nbuf, 505 uint8_t *rx_tlv_hdr, 506 uint16_t peer_id, 507 uint8_t tid) 508 { 509 uint32_t ppdu_id; 510 struct dp_peer *peer = NULL; 511 struct dp_rx_tid *rx_tid = NULL; 512 513 peer = dp_peer_find_by_id(soc, peer_id); 514 if (!peer || peer->delete_in_progress) { 515 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 516 "peer not found"); 517 goto free_nbuf; 518 } 519 rx_tid = &peer->rx_tid[tid]; 520 if (qdf_unlikely(!rx_tid)) { 521 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 522 "rx_tid is NULL!!"); 523 goto free_nbuf; 524 } 525 qdf_spin_lock_bh(&rx_tid->tid_lock); 526 ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr); 527 528 /* 529 * If BA session is created and a non-aggregate packet is 530 * landing here then the issue is with sequence number mismatch. 531 * Proceed with delba even in that case 532 */ 533 if (rx_tid->ppdu_id_2k != ppdu_id && 534 rx_tid->ba_status != DP_RX_BA_ACTIVE) { 535 rx_tid->ppdu_id_2k = ppdu_id; 536 qdf_spin_unlock_bh(&rx_tid->tid_lock); 537 goto free_nbuf; 538 } 539 if (!rx_tid->delba_tx_status) { 540 rx_tid->delba_tx_retry++; 541 rx_tid->delba_tx_status = 1; 542 rx_tid->delba_rcode = 543 IEEE80211_REASON_QOS_SETUP_REQUIRED; 544 qdf_spin_unlock_bh(&rx_tid->tid_lock); 545 soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev, 546 peer->ctrl_peer, 547 peer->mac_addr.raw, 548 tid, 549 peer->vdev->ctrl_vdev, 550 rx_tid->delba_rcode); 551 } else { 552 qdf_spin_unlock_bh(&rx_tid->tid_lock); 553 } 554 555 free_nbuf: 556 if (peer) 557 dp_peer_unref_del_find_by_id(peer); 558 qdf_nbuf_free(nbuf); 559 return; 560 } 561 562 #ifdef QCA_WIFI_QCA6390 563 /** 564 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception 565 * @soc: pointer to dp_soc struct 566 * @pool_id: Pool id to find dp_pdev 567 * @rx_tlv_hdr: TLV header of received packet 568 * @nbuf: SKB 569 * 570 * In certain types of packets if peer_id is not correct then 571 * driver may not be able find. Try finding peer by addr_2 of 572 * received MPDU. If you find the peer then most likely sw_peer_id & 573 * ast_idx is corrupted. 574 * 575 * Return: True if you find the peer by addr_2 of received MPDU else false 576 */ 577 static bool 578 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, 579 uint8_t pool_id, 580 uint8_t *rx_tlv_hdr, 581 qdf_nbuf_t nbuf) 582 { 583 uint8_t local_id; 584 struct dp_peer *peer = NULL; 585 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr); 586 struct dp_pdev *pdev = soc->pdev_list[pool_id]; 587 struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr; 588 589 /* 590 * WAR- In certain types of packets if peer_id is not correct then 591 * driver may not be able find. 

#ifdef QCA_WIFI_QCA6390
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * For certain types of packets, if the peer_id is not correct, the
 * driver may not be able to find the peer. Try finding the peer by
 * addr_2 of the received MPDU. If the peer is found this way, the
 * sw_peer_id and ast_idx in the descriptor are most likely corrupted.
 *
 * Return: true if the peer was found by addr_2 of the received MPDU,
 *         else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
                                              uint8_t pool_id,
                                              uint8_t *rx_tlv_hdr,
                                              qdf_nbuf_t nbuf)
{
    uint8_t local_id;
    struct dp_peer *peer = NULL;
    uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
    struct dp_pdev *pdev = soc->pdev_list[pool_id];
    struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

    /*
     * WAR: for certain types of packets, if the peer_id is not
     * correct, the driver may not be able to find the peer. Try
     * finding the peer by addr_2 of the received MPDU.
     */
    if (wh)
        peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
                                    wh->i_addr2, &local_id);
    if (peer) {
        dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_DEBUG);
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
                         1, qdf_nbuf_len(nbuf));
        qdf_nbuf_free(nbuf);

        return true;
    }
    return false;
}

/**
 * dp_rx_null_q_check_pkt_len_exception() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_BUFFER_SIZE, else false
 */
static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
    if (qdf_unlikely(pkt_len > RX_BUFFER_SIZE)) {
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
                         1, pkt_len);
        return true;
    } else {
        return false;
    }
}

#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
                                              uint8_t pool_id,
                                              uint8_t *rx_tlv_hdr,
                                              qdf_nbuf_t nbuf)
{
    return false;
}

static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
    return false;
}

#endif
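
/*
 * Example for the length check above (the 2kB figure is illustrative;
 * the real RX_BUFFER_SIZE comes from the target configuration): with a
 * 2kB RX buffer, a TLV-reported msdu_len of 3000 gives
 *
 *     pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN > 2048
 *
 * so the frame cannot fit in the single mapped buffer; it is counted
 * under rx.err.rx_invalid_pkt_len and dropped rather than being
 * clamped to the buffer size and delivered.
 */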

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *                              descriptor violation on either a
 *                              REO or WBM ring
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This
 * typically happens if a packet is received on a QoS-enabled TID
 * before the ADDBA negotiation for that TID, which is when the TID
 * queue is set up. It may also happen for MC/BC frames if they are not
 * routed to the non-QoS TID queue, in the absence of any other default
 * TID queue. This error can show up both in a REO destination ring or
 * a WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully; QDF status
 *         code if nbuf could not be handled or was dropped
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
                         uint8_t *rx_tlv_hdr, uint8_t pool_id,
                         struct dp_peer *peer)
{
    uint32_t pkt_len, l2_hdr_offset;
    uint16_t msdu_len;
    struct dp_vdev *vdev;
    uint8_t tid;
    qdf_ether_header_t *eh;

    qdf_nbuf_set_rx_chfrag_start(nbuf,
                                 hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
    qdf_nbuf_set_rx_chfrag_end(nbuf,
                               hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));
    qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr));
    qdf_nbuf_set_da_valid(nbuf,
                          hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr));
    qdf_nbuf_set_sa_valid(nbuf,
                          hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr));

    l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
    msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
    pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

    if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
        if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
            goto drop_nbuf;

        /* Set length in nbuf */
        qdf_nbuf_set_pktlen(nbuf,
                            qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
        qdf_assert_always(nbuf->data == rx_tlv_hdr);
    }

    /*
     * Check if DMA completed -- msdu_done is the last bit
     * to be written
     */
    if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        dp_err_rl("MSDU DONE failure");
        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_INFO);
        qdf_assert(0);
    }

    if (!peer &&
        dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
                                                      rx_tlv_hdr, nbuf))
        return QDF_STATUS_E_FAILURE;

    if (!peer) {
        bool mpdu_done = false;
        struct dp_pdev *pdev = soc->pdev_list[pool_id];

        dp_err_rl("peer is NULL");
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
                         qdf_nbuf_len(nbuf));

        mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
        /* Trigger invalid peer handler wrapper */
        dp_rx_process_invalid_peer_wrapper(soc,
                                           pdev->invalid_peer_head_msdu,
                                           mpdu_done);

        if (mpdu_done) {
            pdev->invalid_peer_head_msdu = NULL;
            pdev->invalid_peer_tail_msdu = NULL;
        }
        return QDF_STATUS_E_FAILURE;
    }

    vdev = peer->vdev;
    if (!vdev) {
        dp_err_rl("Null vdev!");
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        goto drop_nbuf;
    }

    /*
     * Advance the packet start pointer by the total size of
     * the pre-header TLVs
     */
    if (qdf_nbuf_is_frag(nbuf))
        qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
    else
        qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

    if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
        /* this is a looped back MCBC pkt, drop it */
        DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
        goto drop_nbuf;
    }

    /*
     * In qwrap mode, if the received packet matches any of the vdev
     * MAC addresses, drop it. Do not receive multicast packets
     * originating from any proxy STA.
     */
    if (check_qwrap_multicast_loopback(vdev, nbuf)) {
        DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
        goto drop_nbuf;
    }

    if (qdf_unlikely((peer->nawds_enabled == true) &&
                     hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
        dp_err_rl("free buffer for multicast packet");
        DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
        goto drop_nbuf;
    }

    if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
        dp_err_rl("mcast Policy Check Drop pkt");
        goto drop_nbuf;
    }

    /* WDS Source Port Learning */
    if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
                   vdev->wds_enabled))
        dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

    if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
        /* TODO: Assuming that qos_control_valid also indicates
         * unicast. Should we check this?
         */
        tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
        if (peer && !peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
            /* IEEE80211_SEQ_MAX indicates invalid start_seq */
            dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
        }
    }

    if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
        qdf_nbuf_set_next(nbuf, NULL);
        dp_rx_deliver_raw(vdev, nbuf, peer);
    } else {
        if (vdev->osif_rx) {
            qdf_nbuf_set_next(nbuf, NULL);
            DP_STATS_INC_PKT(peer, rx.to_stack, 1,
                             qdf_nbuf_len(nbuf));

            /*
             * Update the protocol tag in SKB based on
             * CCE metadata
             */
            dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
                                      EXCEPTION_DEST_RING_ID,
                                      true, true);

            if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
                                        rx_tlv_hdr) &&
                             (vdev->rx_decap_type ==
                              htt_cmn_pkt_type_ethernet))) {
                eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

                DP_STATS_INC_PKT(peer, rx.multicast, 1,
                                 qdf_nbuf_len(nbuf));
                if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
                    DP_STATS_INC_PKT(peer, rx.bcast, 1,
                                     qdf_nbuf_len(nbuf));
                }
            }

            vdev->osif_rx(vdev->osif_vdev, nbuf);
        } else {
            dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
            DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
            goto drop_nbuf;
        }
    }
    return QDF_STATUS_SUCCESS;

drop_nbuf:
    qdf_nbuf_free(nbuf);
    return QDF_STATUS_E_FAILURE;
}
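
/*
 * Summary of the decision ladder above, in order: verify msdu_done;
 * try the invalid-peer-id WAR; with no peer, chain MSDUs onto the pdev
 * invalid peer list; with a peer, drop looped-back MEC/qwrap frames,
 * NAWDS multicast and WDS policy failures; set up the TID queue on the
 * first QoS data frame if needed; finally deliver via
 * dp_rx_deliver_raw() or vdev->osif_rx().
 */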

/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *                             frames to OS or wifi parse errors.
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
                        uint8_t *rx_tlv_hdr, struct dp_peer *peer,
                        uint8_t err_code)
{
    uint32_t pkt_len, l2_hdr_offset;
    uint16_t msdu_len;
    struct dp_vdev *vdev;
    qdf_ether_header_t *eh;
    bool is_broadcast;

    /*
     * Check if DMA completed -- msdu_done is the last bit
     * to be written
     */
    if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("MSDU DONE failure"));

        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_INFO);
        qdf_assert(0);
    }

    l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
    msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
    pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

    /* Set length in nbuf */
    qdf_nbuf_set_pktlen(nbuf, pkt_len);

    qdf_nbuf_set_next(nbuf, NULL);

    qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
    qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

    if (!peer) {
        QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
                         qdf_nbuf_len(nbuf));
        /* Trigger invalid peer handler wrapper */
        dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
        return;
    }

    vdev = peer->vdev;
    if (!vdev) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("INVALID vdev %pK OR osif_rx"), vdev);
        /* Drop & free packet */
        qdf_nbuf_free(nbuf);
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        return;
    }

    /*
     * Advance the packet start pointer by the total size of
     * the pre-header TLVs
     */
    qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

    if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
        uint8_t *pkt_type;

        pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
        if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q) &&
            *(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
                                                htons(QDF_LLC_STP)) {
            DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
            goto process_mesh;
        } else {
            DP_STATS_INC(vdev->pdev, dropped.wifi_parse, 1);
            qdf_nbuf_free(nbuf);
            return;
        }
    }

    if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
        goto process_mesh;

    /*
     * A WAPI cert AP sends rekey frames as unencrypted, so RXDMA
     * will report an unencrypted frame error. To pass the WAPI cert
     * case, SW needs to pass the unencrypted rekey frame to the
     * stack.
     */
    if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
        qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);
        goto process_rx;
    }
    /*
     * In the dynamic WEP case rekey frames are not encrypted,
     * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled
     * and key install is already done.
     */
    if ((vdev->sec_type == cdp_sec_type_wep104) &&
        (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
        goto process_rx;

process_mesh:

    if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
        qdf_nbuf_free(nbuf);
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        return;
    }

    if (vdev->mesh_vdev) {
        if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
                        == QDF_STATUS_SUCCESS) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
                      FL("mesh pkt filtered"));
            DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

            qdf_nbuf_free(nbuf);
            return;
        }
        dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
    }
process_rx:
    if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
                     (vdev->rx_decap_type ==
                      htt_cmn_pkt_type_ethernet))) {
        eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
        is_broadcast = (QDF_IS_ADDR_BROADCAST
                        (eh->ether_dhost)) ? 1 : 0;
        DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
        if (is_broadcast) {
            DP_STATS_INC_PKT(peer, rx.bcast, 1,
                             qdf_nbuf_len(nbuf));
        }
    }

    if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
        dp_rx_deliver_raw(vdev, nbuf, peer);
    } else {
        /* Update the protocol tag in SKB based on CCE metadata */
        dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
                                  EXCEPTION_DEST_RING_ID, true, true);
        DP_STATS_INC(peer, rx.to_stack.num, 1);
        vdev->osif_rx(vdev->osif_vdev, nbuf);
    }
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
                             uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
    struct dp_vdev *vdev = NULL;
    struct dp_pdev *pdev = NULL;
    struct ol_if_ops *tops = NULL;
    struct ieee80211_frame *wh;
    uint8_t *rx_pkt_hdr;
    uint16_t rx_seq, fragno;
    unsigned int tid;
    QDF_STATUS status;

    if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
        return;

    rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
    wh = (struct ieee80211_frame *)rx_pkt_hdr;

    if (!peer) {
        dp_err_rl("peer not found");
        goto fail;
    }

    vdev = peer->vdev;
    if (!vdev) {
        dp_err_rl("VDEV not found");
        goto fail;
    }

    pdev = vdev->pdev;
    if (!pdev) {
        dp_err_rl("PDEV not found");
        goto fail;
    }

    tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
    rx_seq = (((*(uint16_t *)wh->i_seq) &
               IEEE80211_SEQ_SEQ_MASK) >>
              IEEE80211_SEQ_SEQ_SHIFT);

    fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

    /* Can get only last fragment */
    if (fragno) {
        status = dp_rx_defrag_add_last_frag(soc, peer,
                                            tid, rx_seq, nbuf);
        dp_info_rl("Frag pkt seq# %d frag# %d consumed status %d !",
                   rx_seq, fragno, status);
        return;
    }

    tops = pdev->soc->cdp_soc.ol_ops;
    if (tops->rx_mic_error)
        tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
    qdf_nbuf_free(nbuf);
}
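
/*
 * Worked example for the header parsing above: a frame with sequence
 * control 0x01a4 yields rx_seq = (0x01a4 & IEEE80211_SEQ_SEQ_MASK) >>
 * IEEE80211_SEQ_SEQ_SHIFT = 26 and fragno = 4, so the frame is treated
 * as a trailing fragment and fed to dp_rx_defrag_add_last_frag()
 * instead of raising rx_mic_error to umac.
 */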

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and the top level
 * demultiplexer for all the frames routed to the REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
    void *hal_soc;
    void *ring_desc;
    uint32_t count = 0;
    uint32_t rx_bufs_used = 0;
    uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
    uint8_t mac_id = 0;
    uint8_t buf_type;
    uint8_t error, rbm;
    struct hal_rx_mpdu_desc_info mpdu_desc_info;
    struct hal_buf_info hbi;
    struct dp_pdev *dp_pdev;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    uint32_t cookie = 0;
    void *link_desc_va;
    struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
    uint16_t num_msdus;
    struct dp_rx_desc *rx_desc = NULL;

    /* Debug -- Remove later */
    qdf_assert(soc && hal_ring);

    hal_soc = soc->hal_soc;

    /* Debug -- Remove later */
    qdf_assert(hal_soc);

    if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access Failed -- %pK"), hal_ring);
        goto done;
    }

    while (qdf_likely(quota-- && (ring_desc =
                hal_srng_dst_get_next(hal_soc, hal_ring)))) {

        DP_STATS_INC(soc, rx.err_ring_pkts, 1);

        error = HAL_RX_ERROR_STATUS_GET(ring_desc);

        qdf_assert(error == HAL_REO_ERROR_DETECTED);

        buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
        /*
         * For the REO error ring, expect only MSDU LINK DESC
         */
        qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

        cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
        /*
         * check for the magic number in the sw cookie
         */
        qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
                          LINK_DESC_ID_START);

        /*
         * Check if the buffer is to be processed on this processor
         */
        rbm = hal_rx_ret_buf_manager_get(ring_desc);

        hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
        link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
        hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
                             &num_msdus);

        if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
                         (msdu_list.rbm[0] !=
                          HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
            /* TODO */
            /* Call appropriate handler */
            if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
                DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_ERROR,
                          FL("Invalid RBM %d"),
                          msdu_list.rbm[0]);
            }

            /* Return link descriptor through WBM ring (SW2WBM) */
            dp_rx_link_desc_return(soc, ring_desc,
                                   HAL_BM_ACTION_RELEASE_MSDU_LIST);
            continue;
        }

        rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
                                              msdu_list.sw_cookie[0]);
        qdf_assert_always(rx_desc);

        mac_id = rx_desc->pool_id;

        /* Get the MPDU DESC info */
        hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

        if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
            /*
             * We only handle one msdu per link desc for the
             * fragmented case. We drop the msdus and release
             * the link desc back if there is more than one
             * msdu in the link desc.
             */
            if (qdf_unlikely(num_msdus > 1)) {
                count = dp_rx_msdus_drop(soc, ring_desc,
                                         &mpdu_desc_info,
                                         &mac_id, quota);
                rx_bufs_reaped[mac_id] += count;
                continue;
            }

            count = dp_rx_frag_handle(soc,
                                      ring_desc, &mpdu_desc_info,
                                      rx_desc, &mac_id, quota);

            rx_bufs_reaped[mac_id] += count;
            DP_STATS_INC(soc, rx.rx_frags, 1);
            continue;
        }

        if (hal_rx_reo_is_pn_error(ring_desc)) {
            /* TODO */
            DP_STATS_INC(soc,
                         rx.err.
                         reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
                         1);
            count = dp_rx_pn_error_handle(soc,
                                          ring_desc,
                                          &mpdu_desc_info, &mac_id,
                                          quota);

            rx_bufs_reaped[mac_id] += count;
            continue;
        }

        if (hal_rx_reo_is_2k_jump(ring_desc)) {
            /* TODO */
            DP_STATS_INC(soc,
                         rx.err.
                         reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
                         1);

            count = dp_rx_2k_jump_handle(soc,
                                         ring_desc, &mpdu_desc_info,
                                         &mac_id, quota);

            rx_bufs_reaped[mac_id] += count;
            continue;
        }
    }

done:
    hal_srng_access_end(hal_soc, hal_ring);

    if (soc->rx.flags.defrag_timeout_check) {
        uint32_t now_ms =
            qdf_system_ticks_to_msecs(qdf_system_ticks());

        if (now_ms >= soc->rx.defrag.next_flush_ms)
            dp_rx_defrag_waitlist_flush(soc);
    }

    for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
        if (rx_bufs_reaped[mac_id]) {
            dp_pdev = soc->pdev_list[mac_id];
            dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
            rx_desc_pool = &soc->rx_desc_buf[mac_id];

            dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                    rx_desc_pool,
                                    rx_bufs_reaped[mac_id],
                                    &dp_pdev->free_list_head,
                                    &dp_pdev->free_list_tail);
            rx_bufs_used += rx_bufs_reaped[mac_id];
        }
    }

    return rx_bufs_used; /* Assume no scale factor for now */
}
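
/*
 * Usage sketch (an assumption -- the actual caller lives outside this
 * file): the REO exception ring is typically drained from the DP
 * interrupt service path with the remaining budget, e.g.
 *
 *     work_done = dp_rx_err_process(soc,
 *                                   soc->reo_exception_ring.hal_srng,
 *                                   remaining_quota);
 *
 * where the return value is subtracted from the budget before
 * servicing the WBM release ring via dp_rx_wbm_err_process().
 */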

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and the top level
 * demultiplexer for all the frames routed to the WBM2HOST sw release
 * ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
    void *hal_soc;
    void *ring_desc;
    struct dp_rx_desc *rx_desc;
    union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
    union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
    uint32_t rx_bufs_used = 0;
    uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
    uint8_t buf_type, rbm;
    uint32_t rx_buf_cookie;
    uint8_t mac_id;
    struct dp_pdev *dp_pdev;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    uint8_t *rx_tlv_hdr;
    qdf_nbuf_t nbuf_head = NULL;
    qdf_nbuf_t nbuf_tail = NULL;
    qdf_nbuf_t nbuf, next;
    struct hal_wbm_err_desc_info wbm_err_info = { 0 };
    uint8_t pool_id;
    uint8_t tid = 0;

    /* Debug -- Remove later */
    qdf_assert(soc && hal_ring);

    hal_soc = soc->hal_soc;

    /* Debug -- Remove later */
    qdf_assert(hal_soc);

    if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access Failed -- %pK"), hal_ring);
        goto done;
    }

    while (qdf_likely(quota-- && (ring_desc =
                hal_srng_dst_get_next(hal_soc, hal_ring)))) {

        /* XXX */
        buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

        /*
         * For the WBM ring, expect only MSDU buffers
         */
        qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

        qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
                    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
                   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
                    == HAL_RX_WBM_ERR_SRC_REO));

        /*
         * Check if the buffer is to be processed on this processor
         */
        rbm = hal_rx_ret_buf_manager_get(ring_desc);

        if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
            /* TODO */
            /* Call appropriate handler */
            DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid RBM %d"), rbm);
            continue;
        }

        rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

        rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
        qdf_assert_always(rx_desc);

        if (!dp_rx_desc_check_magic(rx_desc)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid rx_desc cookie=%d"),
                      rx_buf_cookie);
            continue;
        }

        /*
         * This is an unlikely scenario where the host is reaping
         * a descriptor which it already reaped just a while ago
         * but is yet to replenish it back to HW.
         * In this case the host will dump the last 128 descriptors
         * including the software descriptor rx_desc and assert.
         */
        if (qdf_unlikely(!rx_desc->in_use)) {
            DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
            dp_rx_dump_info_and_assert(soc, hal_ring,
                                       ring_desc, rx_desc);
        }

        nbuf = rx_desc->nbuf;
        qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);

        /*
         * Save the wbm desc info in the nbuf TLV. We will need
         * this info when we do the actual nbuf processing.
         */
        hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
        wbm_err_info.pool_id = rx_desc->pool_id;
        hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
                                       &wbm_err_info);

        rx_bufs_reaped[rx_desc->pool_id]++;

        DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
        dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
                                    &tail[rx_desc->pool_id],
                                    rx_desc);
    }
done:
    hal_srng_access_end(hal_soc, hal_ring);

    for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
        if (rx_bufs_reaped[mac_id]) {
            dp_pdev = soc->pdev_list[mac_id];
            dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
            rx_desc_pool = &soc->rx_desc_buf[mac_id];

            dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                    rx_desc_pool, rx_bufs_reaped[mac_id],
                                    &head[mac_id], &tail[mac_id]);
            rx_bufs_used += rx_bufs_reaped[mac_id];
        }
    }

    nbuf = nbuf_head;
    while (nbuf) {
        struct dp_peer *peer;
        uint16_t peer_id;

        rx_tlv_hdr = qdf_nbuf_data(nbuf);

        peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
        peer = dp_peer_find_by_id(soc, peer_id);

        /*
         * Retrieve the wbm desc info from the nbuf TLV, so we can
         * handle error cases appropriately
         */
        hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

        /* Set queue_mapping in nbuf to 0 */
        dp_set_rx_queue(nbuf, 0);

        next = nbuf->next;

        if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
            if (wbm_err_info.reo_psh_rsn
                == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

                DP_STATS_INC(soc,
                             rx.err.reo_error
                             [wbm_err_info.reo_err_code], 1);

                switch (wbm_err_info.reo_err_code) {
                /*
                 * Handling for packets which have NULL REO
                 * queue descriptor
                 */
                case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
                    pool_id = wbm_err_info.pool_id;
                    dp_rx_null_q_desc_handle(soc, nbuf,
                                             rx_tlv_hdr,
                                             pool_id, peer);
                    nbuf = next;
                    if (peer)
                        dp_peer_unref_del_find_by_id(peer);
                    continue;
                /* TODO */
                /* Add per error code accounting */
                case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
                    pool_id = wbm_err_info.pool_id;

                    if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
                        peer_id =
                        hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
                        tid =
                        hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
                    }
                    dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
                                      peer_id, tid);
                    nbuf = next;
                    if (peer)
                        dp_peer_unref_del_find_by_id(peer);
                    continue;
                default:
                    dp_err_rl("Got pkt with REO ERROR: %d",
                              wbm_err_info.reo_err_code);
                    break;
                }
            }
        } else if (wbm_err_info.wbm_err_src ==
                   HAL_RX_WBM_ERR_SRC_RXDMA) {
            if (wbm_err_info.rxdma_psh_rsn
                == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
                DP_STATS_INC(soc,
                             rx.err.rxdma_error
                             [wbm_err_info.rxdma_err_code], 1);

                switch (wbm_err_info.rxdma_err_code) {
                case HAL_RXDMA_ERR_UNENCRYPTED:

                case HAL_RXDMA_ERR_WIFI_PARSE:
                    dp_rx_process_rxdma_err(soc, nbuf,
                                            rx_tlv_hdr, peer,
                                            wbm_err_info.rxdma_err_code);
                    nbuf = next;
                    if (peer)
                        dp_peer_unref_del_find_by_id(peer);
                    continue;

                case HAL_RXDMA_ERR_TKIP_MIC:
                    dp_rx_process_mic_error(soc, nbuf,
                                            rx_tlv_hdr,
                                            peer);
                    nbuf = next;
                    if (peer) {
                        DP_STATS_INC(peer, rx.err.mic_err, 1);
                        dp_peer_unref_del_find_by_id(peer);
                    }
                    continue;

                case HAL_RXDMA_ERR_DECRYPT:
                    if (peer)
                        DP_STATS_INC(peer, rx.err.decrypt_err, 1);
                    QDF_TRACE(QDF_MODULE_ID_DP,
                              QDF_TRACE_LEVEL_DEBUG,
                              "Packet received with Decrypt error");
                    break;

                default:
                    dp_err_rl("RXDMA error %d",
                              wbm_err_info.rxdma_err_code);
                }
            }
        } else {
            /* Should not come here */
            qdf_assert(0);
        }

        if (peer)
            dp_peer_unref_del_find_by_id(peer);

        hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_DEBUG);
        qdf_nbuf_free(nbuf);
        nbuf = next;
    }
    return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dup_desc_dbg() - dump and assert if a duplicate rx desc is found
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
                         void *rxdma_dst_ring_desc,
                         void *rx_desc)
{
    DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
    dp_rx_dump_info_and_assert(soc,
                               soc->rx_rel_ring.hal_srng,
                               rxdma_dst_ring_desc,
                               rx_desc);
}
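
/*
 * Note on when dup_desc_dbg() fires (broadly): rx_desc->in_use is set
 * while a buffer is posted to HW and cleared once the host reaps it,
 * so seeing in_use == 0 for a cookie coming out of the RXDMA error
 * ring means the same descriptor was reaped twice before being
 * replenished. The duplicate is counted in rx.err.hal_rxdma_err_dup
 * and the descriptor state is dumped for post-mortem analysis.
 */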

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
                   void *rxdma_dst_ring_desc,
                   union dp_rx_desc_list_elem_t **head,
                   union dp_rx_desc_list_elem_t **tail)
{
    void *rx_msdu_link_desc;
    qdf_nbuf_t msdu;
    qdf_nbuf_t last;
    struct hal_rx_msdu_list msdu_list;
    uint16_t num_msdus;
    struct hal_buf_info buf_info;
    void *p_buf_addr_info;
    void *p_last_buf_addr_info;
    uint32_t rx_bufs_used = 0;
    uint32_t msdu_cnt;
    uint32_t i;
    uint8_t push_reason;
    uint8_t rxdma_error_code = 0;
    uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
    struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
    void *ring_desc;

    msdu = NULL;
    last = NULL;

    hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
                                 &p_last_buf_addr_info, &msdu_cnt);

    push_reason =
        hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
    if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
        rxdma_error_code =
            hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
    }

    do {
        rx_msdu_link_desc =
            dp_rx_cookie_2_link_desc_va(soc, &buf_info);

        qdf_assert(rx_msdu_link_desc);

        hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
                             &msdu_list, &num_msdus);

        if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
            /* If the msdus belong to an NSS offloaded radio
             * and the rbm is not SW1_BM, return the msdu_link
             * descriptor without freeing the msdus (nbufs): let
             * these buffers be given to the NSS completion ring
             * for NSS to free them.
             * Else iterate through the msdu link desc list and
             * free each msdu in the list.
             */
            if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
                wlan_cfg_get_dp_pdev_nss_enabled(
                                        pdev->wlan_cfg_ctx))
                bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
            else {
                for (i = 0; i < num_msdus; i++) {
                    struct dp_rx_desc *rx_desc =
                        dp_rx_cookie_2_va_rxdma_buf(soc,
                                msdu_list.sw_cookie[i]);
                    qdf_assert_always(rx_desc);
                    msdu = rx_desc->nbuf;
                    /*
                     * This is an unlikely scenario where the
                     * host is reaping a descriptor which it
                     * already reaped just a while ago but is
                     * yet to replenish it back to HW.
                     * In this case the host will dump the last
                     * 128 descriptors, including the software
                     * descriptor rx_desc, and assert.
                     */
                    ring_desc = rxdma_dst_ring_desc;
                    if (qdf_unlikely(!rx_desc->in_use)) {
                        dup_desc_dbg(soc,
                                     ring_desc,
                                     rx_desc);
                        continue;
                    }

                    qdf_nbuf_unmap_single(soc->osdev, msdu,
                                          QDF_DMA_FROM_DEVICE);

                    QDF_TRACE(QDF_MODULE_ID_DP,
                              QDF_TRACE_LEVEL_DEBUG,
                              "[%s][%d] msdu_nbuf=%pK ",
                              __func__, __LINE__, msdu);

                    qdf_nbuf_free(msdu);
                    rx_bufs_used++;
                    dp_rx_add_to_free_desc_list(head,
                                                tail, rx_desc);
                }
            }
        } else {
            rxdma_error_code = HAL_RXDMA_ERR_WAR;
        }

        hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
                                      &p_buf_addr_info);

        dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
        p_last_buf_addr_info = p_buf_addr_info;

    } while (buf_info.paddr);

    DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

    if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Packet received with Decrypt error");
    }

    return rx_bufs_used;
}
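
/*
 * Illustrative walk of the loop above (the 6-MSDU capacity per link
 * descriptor is an assumption for this example): an MPDU of 9 MSDUs
 * spans two chained link descriptors. Pass one frees 6 nbufs and
 * returns the first descriptor through dp_rx_link_desc_return(); pass
 * two frees the remaining 3; the loop ends when
 * hal_rx_mon_next_link_desc_get() reports buf_info.paddr == 0, and the
 * function returns rx_bufs_used = 9 for replenishment.
 */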

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
    struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
    int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
    void *hal_soc;
    void *rxdma_dst_ring_desc;
    void *err_dst_srng;
    union dp_rx_desc_list_elem_t *head = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    uint32_t work_done = 0;
    uint32_t rx_bufs_used = 0;

    if (!pdev)
        return 0;

    err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

    if (!err_dst_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
                  __func__, __LINE__, err_dst_srng);
        return 0;
    }

    hal_soc = soc->hal_soc;

    qdf_assert(hal_soc);

    if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s %d : HAL Monitor Destination Ring access Failed -- %pK",
                  __func__, __LINE__, err_dst_srng);
        return 0;
    }

    while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
            hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

        rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
                                           rxdma_dst_ring_desc,
                                           &head, &tail);
    }

    hal_srng_access_end(hal_soc, err_dst_srng);

    if (rx_bufs_used) {
        dp_rxdma_srng = &pdev->rx_refill_buf_ring;
        rx_desc_pool = &soc->rx_desc_buf[mac_id];

        dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                rx_desc_pool, rx_bufs_used,
                                &head, &tail);

        work_done += rx_bufs_used;
    }

    return work_done;
}