/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - validate (and consume) the rx descriptor magic
 * @rx_desc: software rx descriptor to validate
 *
 * Debug-build sanity check: verifies that the descriptor recovered from a
 * hardware cookie is a live rx descriptor.  The magic is cleared on a
 * successful match, so a second check on the same descriptor (i.e. double
 * processing of the same cookie) will fail.
 *
 * Return: true if the magic matched, false otherwise
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) {
		return false;
	}
	rx_desc->magic = 0;
	return true;
}
#else
/* Non-debug builds: magic checking is compiled out and always succeeds */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 *			      back on same vap or a different vap.
53 * 54 * @soc: core DP main context 55 * @peer: dp peer handler 56 * @rx_tlv_hdr: start of the rx TLV header 57 * @nbuf: pkt buffer 58 * 59 * Return: bool (true if it is a looped back pkt else false) 60 * 61 */ 62 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, 63 struct dp_peer *peer, 64 uint8_t *rx_tlv_hdr, 65 qdf_nbuf_t nbuf) 66 { 67 struct dp_vdev *vdev = peer->vdev; 68 struct dp_ast_entry *ase; 69 uint16_t sa_idx; 70 uint8_t *data; 71 72 /* 73 * Multicast Echo Check is required only if vdev is STA and 74 * received pkt is a multicast/broadcast pkt. otherwise 75 * skip the MEC check. 76 */ 77 if (vdev->opmode != wlan_op_mode_sta) 78 return false; 79 80 if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) 81 return false; 82 83 data = qdf_nbuf_data(nbuf); 84 /* 85 * if the received pkts src mac addr matches with vdev 86 * mac address then drop the pkt as it is looped back 87 */ 88 if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN], 89 vdev->mac_addr.raw, 90 DP_MAC_ADDR_LEN))) 91 return true; 92 93 /* if the received pkts src mac addr matches with the 94 * wired PCs MAC addr which is behind the STA or with 95 * wireless STAs MAC addr which are behind the Repeater, 96 * then drop the pkt as it is looped back 97 */ 98 qdf_spin_lock_bh(&soc->ast_lock); 99 if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) { 100 sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr); 101 102 if ((sa_idx < 0) || (sa_idx > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) { 103 qdf_spin_unlock_bh(&soc->ast_lock); 104 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 105 "invalid sa_idx: %d", sa_idx); 106 qdf_assert_always(0); 107 } 108 109 ase = soc->ast_table[sa_idx]; 110 } else 111 ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN], 0); 112 113 if (ase) { 114 if ((ase->type == CDP_TXRX_AST_TYPE_MEC) || 115 (ase->peer != peer)) { 116 qdf_spin_unlock_bh(&soc->ast_lock); 117 QDF_TRACE(QDF_MODULE_ID_DP, 118 QDF_TRACE_LEVEL_INFO, 119 "received pkt with same src mac %pM", 120 &data[DP_MAC_ADDR_LEN]); 121 
122 return true; 123 } 124 } 125 qdf_spin_unlock_bh(&soc->ast_lock); 126 return false; 127 } 128 129 /** 130 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 131 * (WBM), following error handling 132 * 133 * @soc: core DP main context 134 * @ring_desc: opaque pointer to the REO error ring descriptor 135 * 136 * Return: QDF_STATUS 137 */ 138 QDF_STATUS 139 dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action) 140 { 141 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc); 142 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; 143 void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng; 144 void *hal_soc = soc->hal_soc; 145 QDF_STATUS status = QDF_STATUS_E_FAILURE; 146 void *src_srng_desc; 147 148 if (!wbm_rel_srng) { 149 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 150 "WBM RELEASE RING not initialized"); 151 return status; 152 } 153 154 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { 155 156 /* TODO */ 157 /* 158 * Need API to convert from hal_ring pointer to 159 * Ring Type / Ring Id combo 160 */ 161 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 162 FL("HAL RING Access For WBM Release SRNG Failed - %pK"), 163 wbm_rel_srng); 164 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 165 goto done; 166 } 167 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); 168 if (qdf_likely(src_srng_desc)) { 169 /* Return link descriptor through WBM ring (SW2WBM)*/ 170 hal_rx_msdu_link_desc_set(hal_soc, 171 src_srng_desc, buf_addr_info, bm_action); 172 status = QDF_STATUS_SUCCESS; 173 } else { 174 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng; 175 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 176 FL("WBM Release Ring (Id %d) Full"), srng->ring_id); 177 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 178 "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x", 179 *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp, 180 *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp); 181 } 182 
done: 183 hal_srng_access_end(hal_soc, wbm_rel_srng); 184 return status; 185 } 186 187 /** 188 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU 189 * 190 * @soc: core txrx main context 191 * @ring_desc: opaque pointer to the REO error ring descriptor 192 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 193 * @head: head of the local descriptor free-list 194 * @tail: tail of the local descriptor free-list 195 * @quota: No. of units (packets) that can be serviced in one shot. 196 * 197 * This function is used to drop all MSDU in an MPDU 198 * 199 * Return: uint32_t: No. of elements processed 200 */ 201 static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc, 202 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 203 union dp_rx_desc_list_elem_t **head, 204 union dp_rx_desc_list_elem_t **tail, 205 uint32_t quota) 206 { 207 uint32_t rx_bufs_used = 0; 208 void *link_desc_va; 209 struct hal_buf_info buf_info; 210 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 211 int i; 212 uint8_t *rx_tlv_hdr; 213 uint32_t tid; 214 215 hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); 216 217 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 218 219 /* No UNMAP required -- this is "malloc_consistent" memory */ 220 hal_rx_msdu_list_get(link_desc_va, &msdu_list, 221 &mpdu_desc_info->msdu_count); 222 223 for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) { 224 struct dp_rx_desc *rx_desc = 225 dp_rx_cookie_2_va_rxdma_buf(soc, 226 msdu_list.sw_cookie[i]); 227 228 qdf_assert(rx_desc); 229 230 if (!dp_rx_desc_check_magic(rx_desc)) { 231 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 232 FL("Invalid rx_desc cookie=%d"), 233 msdu_list.sw_cookie[i]); 234 return rx_bufs_used; 235 } 236 237 rx_bufs_used++; 238 tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start); 239 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 240 "Packet received with PN error for tid :%d", tid); 241 242 rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf); 243 if 
(hal_rx_encryption_info_valid(rx_tlv_hdr)) 244 hal_rx_print_pn(rx_tlv_hdr); 245 246 /* Just free the buffers */ 247 qdf_nbuf_free(rx_desc->nbuf); 248 249 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 250 } 251 252 /* Return link descriptor through WBM ring (SW2WBM)*/ 253 dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST); 254 255 return rx_bufs_used; 256 } 257 258 /** 259 * dp_rx_pn_error_handle() - Handles PN check errors 260 * 261 * @soc: core txrx main context 262 * @ring_desc: opaque pointer to the REO error ring descriptor 263 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 264 * @head: head of the local descriptor free-list 265 * @tail: tail of the local descriptor free-list 266 * @quota: No. of units (packets) that can be serviced in one shot. 267 * 268 * This function implements PN error handling 269 * If the peer is configured to ignore the PN check errors 270 * or if DP feels, that this frame is still OK, the frame can be 271 * re-injected back to REO to use some of the other features 272 * of REO e.g. duplicate detection/routing to other cores 273 * 274 * Return: uint32_t: No. 
of elements processed 275 */ 276 static uint32_t 277 dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc, 278 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 279 union dp_rx_desc_list_elem_t **head, 280 union dp_rx_desc_list_elem_t **tail, 281 uint32_t quota) 282 { 283 uint16_t peer_id; 284 uint32_t rx_bufs_used = 0; 285 struct dp_peer *peer; 286 bool peer_pn_policy = false; 287 288 peer_id = DP_PEER_METADATA_PEER_ID_GET( 289 mpdu_desc_info->peer_meta_data); 290 291 292 peer = dp_peer_find_by_id(soc, peer_id); 293 294 if (qdf_likely(peer)) { 295 /* 296 * TODO: Check for peer specific policies & set peer_pn_policy 297 */ 298 } 299 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 300 "Packet received with PN error"); 301 302 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 303 "discard rx due to PN error for peer %pK " 304 "(%02x:%02x:%02x:%02x:%02x:%02x)\n", 305 peer, 306 peer->mac_addr.raw[0], peer->mac_addr.raw[1], 307 peer->mac_addr.raw[2], peer->mac_addr.raw[3], 308 peer->mac_addr.raw[4], peer->mac_addr.raw[5]); 309 310 /* No peer PN policy -- definitely drop */ 311 if (!peer_pn_policy) 312 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, 313 mpdu_desc_info, 314 head, tail, quota); 315 316 return rx_bufs_used; 317 } 318 319 /** 320 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K 321 * 322 * @soc: core txrx main context 323 * @ring_desc: opaque pointer to the REO error ring descriptor 324 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 325 * @head: head of the local descriptor free-list 326 * @tail: tail of the local descriptor free-list 327 * @quota: No. of units (packets) that can be serviced in one shot. 
328 * 329 * This function implements the error handling when sequence number 330 * of the MPDU jumps suddenly by 2K.Today there are 2 cases that 331 * need to be handled: 332 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K 333 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN 334 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame 335 * For case B), the frame is normally dropped, no more action is taken 336 * 337 * Return: uint32_t: No. of elements processed 338 */ 339 static uint32_t 340 dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc, 341 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 342 union dp_rx_desc_list_elem_t **head, 343 union dp_rx_desc_list_elem_t **tail, 344 uint32_t quota) 345 { 346 return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info, 347 head, tail, quota); 348 } 349 350 static bool 351 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 352 uint8_t mac_id) 353 { 354 bool mpdu_done = false; 355 qdf_nbuf_t curr_nbuf, next_nbuf; 356 357 /* TODO: Currently only single radio is supported, hence 358 * pdev hard coded to '0' index 359 */ 360 struct dp_pdev *dp_pdev = soc->pdev_list[mac_id]; 361 362 if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) { 363 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 364 365 curr_nbuf = dp_pdev->invalid_peer_head_msdu; 366 while (curr_nbuf) { 367 next_nbuf = qdf_nbuf_next(curr_nbuf); 368 qdf_nbuf_free(curr_nbuf); 369 curr_nbuf = next_nbuf; 370 } 371 372 dp_pdev->invalid_peer_head_msdu = NULL; 373 dp_pdev->invalid_peer_tail_msdu = NULL; 374 375 hal_rx_mon_hw_desc_get_mpdu_status(rx_tlv_hdr, 376 &(dp_pdev->ppdu_info.rx_status)); 377 378 } 379 380 if (hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr)) { 381 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 382 mpdu_done = true; 383 } 384 385 DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu, 386 dp_pdev->invalid_peer_tail_msdu, 387 nbuf); 388 389 return mpdu_done; 390 } 391 392 /** 393 * dp_rx_null_q_desc_handle() - 
		Function to handle NULL Queue
 *		descriptor violation on either a
 *		REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: network buffer that carries the faulting frame
 * @rx_tlv_hdr: start of the rx TLV header of @nbuf
 * @pool_id: rx descriptor pool id, used as the mac/pdev index when
 *	     chaining MSDUs of an unknown peer
 *
 * This function handles NULL queue descriptor violations arising out
 * a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is setup. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * The buffer is always consumed: it is either delivered to the OS shim
 * or freed here.
 *
 * Return: void (the previous "No. of Rx buffers reaped" text was stale)
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr,
			uint8_t pool_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint8_t tid;

	/* Propagate the HW first/last-MSDU flags into the nbuf */
	qdf_nbuf_set_rx_chfrag_start(nbuf,
			hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
			hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		bool mpdu_done = false;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		FL("peer is NULL"));

		/* Chain the MSDU; ownership of nbuf passes to the wrapper */
		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Donot receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		qdf_nbuf_free(nbuf);
		return;
	}


	/* NAWDS peers do not receive multicast frames: account and drop */
	if (qdf_unlikely((peer->nawds_enabled == true) &&
			hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_DEBUG,
					"%s free buffer for multicast packet",
					 __func__);
		DP_STATS_INC_PKT(peer, rx.nawds_mcast_drop,
					1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_ERROR,
				FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);
		/* peer is already non-NULL here; the extra check is benign */
		if (peer &&
			peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"%s: p_id %d msdu_len %d hdr_off %d",
		__func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
					qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		/* Raw decap: deliver without ethernet conversion */
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_INFO,
					FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				FL("vdev %pK osif_rx %pK"), vdev,
				vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1,
				qdf_nbuf_len(nbuf),
				hal_rx_msdu_end_da_is_mcbc_get(
					rx_tlv_hdr));
			DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1,
				qdf_nbuf_len(nbuf));
		} else {
			/*
			 * NOTE(review): nbuf is not freed on this path —
			 * confirm whether a leak; sibling error paths free it.
			 */
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
	return;
}

/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 *
 * @soc: core DP main context
 * @nbuf: buffer carrying the errored frame (consumed by this function)
 * @rx_tlv_hdr: start of the rx TLV header of @nbuf
 *
 * Return:
uint32_t: No. of Rx buffers reaped 590 */ 591 static void 592 dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr) 593 { 594 uint32_t pkt_len, l2_hdr_offset; 595 uint16_t msdu_len; 596 struct dp_vdev *vdev; 597 uint16_t peer_id = 0xFFFF; 598 struct dp_peer *peer = NULL; 599 600 /* 601 * Check if DMA completed -- msdu_done is the last bit 602 * to be written 603 */ 604 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 605 606 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 607 FL("MSDU DONE failure")); 608 609 hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO); 610 qdf_assert(0); 611 } 612 613 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 614 peer = dp_peer_find_by_id(soc, peer_id); 615 616 if (!peer) { 617 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 618 FL("peer is NULL")); 619 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 620 qdf_nbuf_len(nbuf)); 621 /* Drop & free packet */ 622 qdf_nbuf_free(nbuf); 623 return; 624 } 625 626 vdev = peer->vdev; 627 if (!vdev) { 628 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 629 FL("INVALID vdev %pK OR osif_rx"), vdev); 630 /* Drop & free packet */ 631 qdf_nbuf_free(nbuf); 632 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 633 return; 634 } 635 636 /* Drop & free packet if mesh mode not enabled */ 637 if (!vdev->mesh_vdev) { 638 qdf_nbuf_free(nbuf); 639 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 640 return; 641 } 642 643 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); 644 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 645 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; 646 647 /* Set length in nbuf */ 648 qdf_nbuf_set_pktlen(nbuf, pkt_len); 649 650 qdf_nbuf_set_next(nbuf, NULL); 651 652 /* 653 * Advance the packet start pointer by total size of 654 * pre-header TLV's 655 */ 656 qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN)); 657 658 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 659 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 660 if 
(dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 661 == QDF_STATUS_SUCCESS) { 662 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED, 663 FL("mesh pkt filtered")); 664 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); 665 666 qdf_nbuf_free(nbuf); 667 return; 668 669 } 670 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); 671 672 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 673 dp_rx_deliver_raw(vdev, nbuf, peer); 674 } else { 675 DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1); 676 vdev->osif_rx(vdev->osif_vdev, nbuf); 677 } 678 679 return; 680 } 681 682 /** 683 * dp_rx_process_mic_error(): Function to pass mic error indication to umac 684 * @soc: DP SOC handle 685 * @rx_desc : pointer to the sw rx descriptor 686 * @head: pointer to head of rx descriptors to be added to free list 687 * @tail: pointer to tail of rx descriptors to be added to free list 688 * 689 * return: void 690 */ 691 void 692 dp_rx_process_mic_error(struct dp_soc *soc, 693 qdf_nbuf_t nbuf, 694 uint8_t *rx_tlv_hdr) 695 { 696 struct dp_vdev *vdev = NULL; 697 struct dp_pdev *pdev = NULL; 698 struct ol_if_ops *tops = NULL; 699 struct ieee80211_frame *wh; 700 uint8_t *rx_pkt_hdr; 701 struct dp_peer *peer; 702 uint16_t peer_id; 703 704 if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) 705 return; 706 707 rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf)); 708 wh = (struct ieee80211_frame *)rx_pkt_hdr; 709 710 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 711 peer = dp_peer_find_by_id(soc, peer_id); 712 if (!peer) { 713 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 714 "peer not found"); 715 goto fail; 716 } 717 718 vdev = peer->vdev; 719 if (!vdev) { 720 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 721 "VDEV not found"); 722 goto fail; 723 } 724 725 pdev = vdev->pdev; 726 if (!pdev) { 727 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 728 "PDEV not found"); 729 goto fail; 730 } 731 732 tops = pdev->soc->cdp_soc.ol_ops; 733 if (tops->rx_mic_error) 734 
tops->rx_mic_error(pdev->osif_pdev, vdev->vdev_id, wh); 735 736 fail: 737 qdf_nbuf_free(nbuf); 738 return; 739 } 740 741 /** 742 * dp_rx_err_process() - Processes error frames routed to REO error ring 743 * 744 * @soc: core txrx main context 745 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 746 * @quota: No. of units (packets) that can be serviced in one shot. 747 * 748 * This function implements error processing and top level demultiplexer 749 * for all the frames routed to REO error ring. 750 * 751 * Return: uint32_t: No. of elements processed 752 */ 753 uint32_t 754 dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota) 755 { 756 void *hal_soc; 757 void *ring_desc; 758 union dp_rx_desc_list_elem_t *head = NULL; 759 union dp_rx_desc_list_elem_t *tail = NULL; 760 uint32_t rx_bufs_used = 0; 761 uint8_t buf_type; 762 uint8_t error, rbm; 763 struct hal_rx_mpdu_desc_info mpdu_desc_info; 764 struct hal_buf_info hbi; 765 struct dp_pdev *dp_pdev; 766 struct dp_srng *dp_rxdma_srng; 767 struct rx_desc_pool *rx_desc_pool; 768 uint32_t cookie = 0; 769 void *link_desc_va; 770 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 771 uint16_t num_msdus; 772 773 /* Debug -- Remove later */ 774 qdf_assert(soc && hal_ring); 775 776 hal_soc = soc->hal_soc; 777 778 /* Debug -- Remove later */ 779 qdf_assert(hal_soc); 780 781 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { 782 783 /* TODO */ 784 /* 785 * Need API to convert from hal_ring pointer to 786 * Ring Type / Ring Id combo 787 */ 788 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 789 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 790 FL("HAL RING Access Failed -- %pK"), hal_ring); 791 goto done; 792 } 793 794 while (qdf_likely(quota-- && (ring_desc = 795 hal_srng_dst_get_next(hal_soc, hal_ring)))) { 796 797 DP_STATS_INC(soc, rx.err_ring_pkts, 1); 798 799 error = HAL_RX_ERROR_STATUS_GET(ring_desc); 800 801 qdf_assert(error == HAL_REO_ERROR_DETECTED); 802 
803 buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc); 804 /* 805 * For REO error ring, expect only MSDU LINK DESC 806 */ 807 qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE); 808 809 cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); 810 /* 811 * check for the magic number in the sw cookie 812 */ 813 qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) & 814 LINK_DESC_ID_START); 815 816 /* 817 * Check if the buffer is to be processed on this processor 818 */ 819 rbm = hal_rx_ret_buf_manager_get(ring_desc); 820 821 hal_rx_reo_buf_paddr_get(ring_desc, &hbi); 822 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); 823 hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus); 824 825 if (qdf_unlikely((msdu_list.rbm[0] != 826 HAL_RX_BUF_RBM_SW3_BM) && 827 (msdu_list.rbm[0] != 828 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) { 829 /* TODO */ 830 /* Call appropriate handler */ 831 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 832 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 833 FL("Invalid RBM %d"), rbm); 834 835 /* Return link descriptor through WBM ring (SW2WBM)*/ 836 dp_rx_link_desc_return(soc, ring_desc, 837 HAL_BM_ACTION_RELEASE_MSDU_LIST); 838 continue; 839 } 840 841 842 /* Get the MPDU DESC info */ 843 hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); 844 845 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { 846 /* TODO */ 847 rx_bufs_used += dp_rx_frag_handle(soc, 848 ring_desc, &mpdu_desc_info, 849 &head, &tail, quota); 850 DP_STATS_INC(soc, rx.rx_frags, 1); 851 continue; 852 } 853 854 if (hal_rx_reo_is_pn_error(ring_desc)) { 855 /* TOD0 */ 856 DP_STATS_INC(soc, 857 rx.err. 858 reo_error[HAL_REO_ERR_PN_CHECK_FAILED], 859 1); 860 rx_bufs_used += dp_rx_pn_error_handle(soc, 861 ring_desc, &mpdu_desc_info, 862 &head, &tail, quota); 863 continue; 864 } 865 866 if (hal_rx_reo_is_2k_jump(ring_desc)) { 867 /* TOD0 */ 868 DP_STATS_INC(soc, 869 rx.err. 
870 reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP], 871 1); 872 rx_bufs_used += dp_rx_2k_jump_handle(soc, 873 ring_desc, &mpdu_desc_info, 874 &head, &tail, quota); 875 continue; 876 } 877 } 878 879 done: 880 hal_srng_access_end(hal_soc, hal_ring); 881 882 883 /* Assume MAC id = 0, owner = 0 */ 884 if (rx_bufs_used) { 885 dp_pdev = soc->pdev_list[0]; 886 dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring; 887 rx_desc_pool = &soc->rx_desc_buf[0]; 888 889 dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool, 890 rx_bufs_used, &head, &tail, HAL_RX_BUF_RBM_SW3_BM); 891 } 892 893 return rx_bufs_used; /* Assume no scale factor for now */ 894 } 895 896 /** 897 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring 898 * 899 * @soc: core txrx main context 900 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 901 * @quota: No. of units (packets) that can be serviced in one shot. 902 * 903 * This function implements error processing and top level demultiplexer 904 * for all the frames routed to WBM2HOST sw release ring. 905 * 906 * Return: uint32_t: No. 
	of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	/* Pass 1: reap buffers off the ring and stash the WBM error info
	 * in each nbuf's TLV area for later (out-of-ring-lock) processing.
	 */
	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
				(HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie =	HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("Invalid rx_desc cookie=%d"),
					rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf,	QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
						&wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
						&tail[rx_desc->pool_id],
						rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	/* Replenish the rxdma rings with fresh buffers, per pool */
	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id],
					HAL_RX_BUF_RBM_SW3_BM);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	/* Pass 2: demultiplex each reaped nbuf on its saved error source */
	nbuf = nbuf_head;
	while (nbuf) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
				== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					rx.err.reo_error
					[wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_WARN,
						"Got pkt with REO ERROR: %d",
						wbm_err_info.reo_err_code);
					/* handler consumes the nbuf */
					dp_rx_null_q_desc_handle(soc,
								nbuf,
								rx_tlv_hdr,
								pool_id);
					nbuf = next;
					continue;
				/* TODO */
				/* Add per error code accounting */

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"REO error %d detected",
						wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				struct dp_peer *peer = NULL;
				uint16_t peer_id = 0xFFFF;

				DP_STATS_INC(soc,
					rx.err.rxdma_error
					[wbm_err_info.rxdma_err_code], 1);
				peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
				peer = dp_peer_find_by_id(soc, peer_id);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					/* handler consumes the nbuf */
					dp_rx_err_deliver(soc,
							nbuf,
							rx_tlv_hdr);
					nbuf = next;
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					/* handler consumes the nbuf */
					dp_rx_process_mic_error(soc,
								nbuf,
								rx_tlv_hdr);
					nbuf = next;
					if (peer)
						DP_STATS_INC(peer, rx.err.mic_err, 1);
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer, rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"RXDMA error %d",
						wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		/*
		 * NOTE(review): on the DECRYPT/default fall-through paths the
		 * nbuf is neither delivered nor freed here — looks like a
		 * leak; confirm against later handling.  Also note the TLV
		 * dump below runs after nbuf is advanced, using the previous
		 * buffer's rx_tlv_hdr.
		 */
		nbuf = next;
		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_DEBUG);
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of decs list to be freed

 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
	void *rxdma_dst_ring_desc,
	union dp_rx_desc_list_elem_t **head,
	union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = soc->pdev_list[mac_id];

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
		&p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	/* Walk the chain of MSDU link descriptors for this MPDU */
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belongs to NSS offloaded radio &&
			 * the rbm is not SW3_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
				wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
						QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"[%s][%d] msdu_nbuf=%pK \n",
						__func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
						tail, rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
			&p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @quota: No.
of units (packets) that can be serviced in one shot. 1250 1251 * Return: num of buffers processed 1252 */ 1253 uint32_t 1254 dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) 1255 { 1256 struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); 1257 int ring_idx = dp_get_ring_id_for_mac_id(soc, mac_id); 1258 uint8_t pdev_id; 1259 void *hal_soc; 1260 void *rxdma_dst_ring_desc; 1261 void *err_dst_srng; 1262 union dp_rx_desc_list_elem_t *head = NULL; 1263 union dp_rx_desc_list_elem_t *tail = NULL; 1264 struct dp_srng *dp_rxdma_srng; 1265 struct rx_desc_pool *rx_desc_pool; 1266 uint32_t work_done = 0; 1267 uint32_t rx_bufs_used = 0; 1268 1269 #ifdef DP_INTR_POLL_BASED 1270 if (!pdev) 1271 return 0; 1272 #endif 1273 pdev_id = pdev->pdev_id; 1274 err_dst_srng = pdev->rxdma_err_dst_ring[ring_idx].hal_srng; 1275 1276 if (!err_dst_srng) { 1277 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1278 "%s %d : HAL Monitor Destination Ring Init \ 1279 Failed -- %pK\n", 1280 __func__, __LINE__, err_dst_srng); 1281 return 0; 1282 } 1283 1284 hal_soc = soc->hal_soc; 1285 1286 qdf_assert(hal_soc); 1287 1288 if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) { 1289 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1290 "%s %d : HAL Monitor Destination Ring Init \ 1291 Failed -- %pK\n", 1292 __func__, __LINE__, err_dst_srng); 1293 return 0; 1294 } 1295 1296 while (qdf_likely(quota-- && (rxdma_dst_ring_desc = 1297 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { 1298 1299 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, 1300 rxdma_dst_ring_desc, 1301 &head, &tail); 1302 } 1303 1304 hal_srng_access_end(hal_soc, err_dst_srng); 1305 1306 if (rx_bufs_used) { 1307 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 1308 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1309 1310 dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, 1311 rx_desc_pool, rx_bufs_used, &head, &tail, 1312 HAL_RX_BUF_RBM_SW3_BM); 1313 work_done += rx_bufs_used; 1314 } 1315 1316 return 
work_done; 1317 } 1318