/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loopback
 *			      on the same vap or a different vap
 *
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt; otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * If the received pkt's src MAC addr matches the vdev
	 * MAC address, drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			  vdev->mac_addr.raw,
			  DP_MAC_ADDR_LEN)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's src MAC addr matches a wired PC's MAC
	 * addr behind the STA, or a wireless STA's MAC addr behind the
	 * repeater, drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		/* sa_idx is unsigned, so only the upper bound needs checking */
		if (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In this kind of scenario where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_hash_find(soc,
						    &data[DP_MAC_ADDR_LEN]);
		}
	} else {
		ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]);
	}

	if (ase) {
		ase->ast_idx = sa_idx;
		soc->ast_table[sa_idx] = ase;

		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[DP_MAC_ADDR_LEN]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
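
/*
 * Usage sketch (illustrative): callers on the error path invoke this
 * check before delivering a frame, as dp_rx_null_q_desc_handle() does
 * further below:
 *
 *	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
 *		qdf_nbuf_free(nbuf);	// looped back MC/BC pkt, drop
 *		return;
 *	}
 *
 * Any MC/BC frame whose SA matches the vdev MAC, or an AST entry owned
 * by a different peer, is treated as an echo of our own transmission.
 */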

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put in idle list or release to MSDU list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put in idle list or release to MSDU list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
				 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				 union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}
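
/*
 * A sketch of how the peer-specific policy mentioned in the TODO above
 * might be wired up; "ignore_pn_err" is a hypothetical per-peer field,
 * not an existing one:
 *
 *	peer_pn_policy = peer->ignore_pn_err;
 *
 * With such a flag set, the frame would be re-injected to REO instead of
 * falling through to dp_rx_msdus_drop() as it does today.
 */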

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev is looked up directly by mac_id
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		hal_rx_mon_hw_desc_get_mpdu_status(rx_tlv_hdr,
				&(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
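
/*
 * Lifecycle note: for an MPDU from an unknown peer, each MSDU is queued
 * on pdev->invalid_peer_{head,tail}_msdu by the function above. Once it
 * reports mpdu_done, the caller (dp_rx_null_q_desc_handle() below) hands
 * the accumulated chain to dp_rx_process_invalid_peer_wrapper() and
 * resets both list pointers.
 */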

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: void
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr,
			 uint8_t pool_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint8_t tid;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
			hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
			hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  FL("Len %d Extn list %pK "),
		  (uint32_t)qdf_nbuf_len(nbuf),
		  qdf_nbuf_get_ext_list(nbuf));
	/* Set length in nbuf */
	if (!qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL");

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode, if the received packet matches any of the vdev
	 * MAC addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  "%s free buffer for multicast packet",
			  __func__);
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				    hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);
		if (peer &&
		    peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: p_id %d msdu_len %d hdr_off %d",
		  __func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
		       qdf_nbuf_data(nbuf), 128, false);
#endif /* QCA_WIFI_NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  FL("vdev %pK osif_rx %pK"), vdev,
				  vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1,
					  qdf_nbuf_len(nbuf),
					  hal_rx_msdu_end_da_is_mcbc_get(
						  rx_tlv_hdr));
			DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
}
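
/*
 * Worked example: a QoS data frame arrives on, say, TID 6 before ADDBA
 * negotiation has created the REO queue for that TID, so the frame is
 * routed here with a NULL queue descriptor error. The handler above then
 * calls dp_rx_tid_setup_wifi3(peer, 6, 1, IEEE80211_SEQ_MAX) so that
 * subsequent frames on that TID take the regular REO path.
 */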

/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	struct ether_header *eh;
	bool isBroadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("peer is NULL"));
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			== QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
			  FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		isBroadcast = (IEEE80211_IS_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		if (isBroadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: DP SOC handle
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
void
dp_rx_process_mic_error(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	struct dp_peer *peer;
	uint16_t peer_id, rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;

	/* Only the first MSDU carries the 802.11 header; free the nbuf
	 * and bail out otherwise (the original bare return leaked it)
	 */
	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		goto fail;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found");
		goto fail;
	}

	tid = hal_rx_mpdu_start_tid_get(qdf_nbuf_data(nbuf));
	rx_seq = (((*(uint16_t *)wh->i_seq) &
		   IEEE80211_SEQ_SEQ_MASK) >>
		  IEEE80211_SEQ_SEQ_SHIFT);

	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

	/* Can get only last fragment */
	if (fragno) {
		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Frag pkt seq# %d frag# %d consumed status %d !\n",
			  __func__, rx_seq, fragno, status);
		return;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
}
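
/*
 * Note: only a non-fragment MPDU (fragno == 0) is reported to umac via
 * ol_ops->rx_mic_error; for TKIP that report typically feeds the MIC
 * countermeasures in the upper MAC. A trailing fragment is instead
 * consumed by the defrag engine through dp_rx_defrag_add_last_frag().
 */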

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
					LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), msdu_list.rbm[0]);

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);
			rx_bufs_used += dp_rx_pn_error_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);
			rx_bufs_used += dp_rx_2k_jump_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					rx_bufs_used, &head, &tail);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
				== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
						"Got pkt with REO ERROR: %d",
						wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc,
								 nbuf,
								 rx_tlv_hdr,
								 pool_id);
					nbuf = next;
					continue;
				/* TODO */
				/* Add per error code accounting */

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "REO error %d detected",
						  wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
				== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				struct dp_peer *peer = NULL;
				uint16_t peer_id = 0xFFFF;

				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
				peer = dp_peer_find_by_id(soc, peer_id);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_err_deliver(soc,
							  nbuf,
							  rx_tlv_hdr);
					nbuf = next;
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc,
								nbuf,
								rx_tlv_hdr);
					nbuf = next;
					if (peer)
						DP_STATS_INC(peer, rx.err.mic_err, 1);
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer, rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   void *rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = NULL;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW1_BM, then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). Let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
						pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
						QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK\n",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

#ifdef DP_INTR_POLL_BASED
	if (!pdev)
		return 0;
#endif
	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
			hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}
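
/*
 * Caller sketch (illustrative, not part of this file): the soc-level
 * service loop is expected to drain the per-mac error destination rings
 * within its budget, roughly:
 *
 *	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++)
 *		budget -= dp_rxdma_err_process(soc, mac_id, budget);
 *
 * The exact invocation and budget accounting live in the interrupt/poll
 * handler (dp_main.c) and may differ.
 */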