1 /* 2 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "dp_types.h" 21 #include "dp_rx.h" 22 #include "dp_peer.h" 23 #include "dp_internal.h" 24 #include "hal_api.h" 25 #include "qdf_trace.h" 26 #include "qdf_nbuf.h" 27 #ifdef CONFIG_MCL 28 #include <cds_ieee80211_common.h> 29 #else 30 #include <linux/ieee80211.h> 31 #endif 32 #include "dp_rx_defrag.h" 33 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 34 35 #ifdef RX_DESC_DEBUG_CHECK 36 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 37 { 38 if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) { 39 return false; 40 } 41 rx_desc->magic = 0; 42 return true; 43 } 44 #else 45 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 46 { 47 return true; 48 } 49 #endif 50 51 /** 52 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop 53 * back on same vap or a different vap. 54 * 55 * @soc: core DP main context 56 * @peer: dp peer handler 57 * @rx_tlv_hdr: start of the rx TLV header 58 * @nbuf: pkt buffer 59 * 60 * Return: bool (true if it is a looped back pkt else false) 61 * 62 */ 63 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, 64 struct dp_peer *peer, 65 uint8_t *rx_tlv_hdr, 66 qdf_nbuf_t nbuf) 67 { 68 struct dp_vdev *vdev = peer->vdev; 69 struct dp_ast_entry *ase; 70 uint16_t sa_idx = 0; 71 uint8_t *data; 72 73 /* 74 * Multicast Echo Check is required only if vdev is STA and 75 * received pkt is a multicast/broadcast pkt. otherwise 76 * skip the MEC check. 77 */ 78 if (vdev->opmode != wlan_op_mode_sta) 79 return false; 80 81 if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) 82 return false; 83 84 data = qdf_nbuf_data(nbuf); 85 /* 86 * if the received pkts src mac addr matches with vdev 87 * mac address then drop the pkt as it is looped back 88 */ 89 if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN], 90 vdev->mac_addr.raw, 91 DP_MAC_ADDR_LEN))) 92 return true; 93 94 /* 95 * In case of qwrap isolation mode, donot drop loopback packets. 96 * In isolation mode, all packets from the wired stations need to go 97 * to rootap and loop back to reach the wireless stations and 98 * vice-versa. 99 */ 100 if (qdf_unlikely(vdev->isolation_vdev)) 101 return false; 102 103 /* if the received pkts src mac addr matches with the 104 * wired PCs MAC addr which is behind the STA or with 105 * wireless STAs MAC addr which are behind the Repeater, 106 * then drop the pkt as it is looped back 107 */ 108 qdf_spin_lock_bh(&soc->ast_lock); 109 if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) { 110 sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr); 111 112 if ((sa_idx < 0) || 113 (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2))) { 114 qdf_spin_unlock_bh(&soc->ast_lock); 115 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 116 "invalid sa_idx: %d", sa_idx); 117 qdf_assert_always(0); 118 } 119 120 ase = soc->ast_table[sa_idx]; 121 if (!ase) { 122 /* We do not get a peer map event for STA and without 123 * this event we don't know what is STA's sa_idx. 124 * For this reason the AST is still not associated to 125 * any index postion in ast_table. 126 * In these kind of scenarios where sa is valid but 127 * ast is not in ast_table, we use the below API to get 128 * AST entry for STA's own mac_address. 129 */ 130 ase = dp_peer_ast_hash_find(soc, 131 &data[DP_MAC_ADDR_LEN]); 132 133 } 134 } else 135 ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]); 136 137 if (ase) { 138 ase->ast_idx = sa_idx; 139 soc->ast_table[sa_idx] = ase; 140 141 if (ase->pdev_id != vdev->pdev->pdev_id) { 142 qdf_spin_unlock_bh(&soc->ast_lock); 143 QDF_TRACE(QDF_MODULE_ID_DP, 144 QDF_TRACE_LEVEL_INFO, 145 "Detected DBDC Root AP %pM, %d %d", 146 &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id, 147 ase->pdev_id); 148 return false; 149 } 150 151 if ((ase->type == CDP_TXRX_AST_TYPE_MEC) || 152 (ase->peer != peer)) { 153 qdf_spin_unlock_bh(&soc->ast_lock); 154 QDF_TRACE(QDF_MODULE_ID_DP, 155 QDF_TRACE_LEVEL_INFO, 156 "received pkt with same src mac %pM", 157 &data[DP_MAC_ADDR_LEN]); 158 159 return true; 160 } 161 } 162 qdf_spin_unlock_bh(&soc->ast_lock); 163 return false; 164 } 165 166 /** 167 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to 168 * (WBM) by address 169 * 170 * @soc: core DP main context 171 * @link_desc_addr: link descriptor addr 172 * 173 * Return: QDF_STATUS 174 */ 175 QDF_STATUS 176 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr, 177 uint8_t bm_action) 178 { 179 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; 180 void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng; 181 void *hal_soc = soc->hal_soc; 182 QDF_STATUS status = QDF_STATUS_E_FAILURE; 183 void *src_srng_desc; 184 185 if (!wbm_rel_srng) { 186 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 187 "WBM RELEASE RING not initialized"); 188 return status; 189 } 190 191 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { 192 193 /* TODO */ 194 /* 195 * Need API to convert from hal_ring pointer to 196 * Ring Type / Ring Id combo 197 */ 198 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 199 FL("HAL RING Access For WBM Release SRNG Failed - %pK"), 200 wbm_rel_srng); 201 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 202 goto done; 203 } 204 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); 205 if (qdf_likely(src_srng_desc)) { 206 /* Return link descriptor through WBM ring (SW2WBM)*/ 207 hal_rx_msdu_link_desc_set(hal_soc, 208 src_srng_desc, link_desc_addr, bm_action); 209 status = QDF_STATUS_SUCCESS; 210 } else { 211 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng; 212 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 213 FL("WBM Release Ring (Id %d) Full"), srng->ring_id); 214 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 215 "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x", 216 *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp, 217 *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp); 218 } 219 done: 220 hal_srng_access_end(hal_soc, wbm_rel_srng); 221 return status; 222 223 } 224 225 /** 226 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 227 * (WBM), following error handling 228 * 229 * @soc: core DP main context 230 * @ring_desc: opaque pointer to the REO error ring descriptor 231 * 232 * Return: QDF_STATUS 233 */ 234 QDF_STATUS 235 dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action) 236 { 237 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc); 238 return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action); 239 } 240 241 /** 242 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU 243 * 244 * @soc: core txrx main context 245 * @ring_desc: opaque pointer to the REO error ring descriptor 246 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 247 * @head: head of the local descriptor free-list 248 * @tail: tail of the local descriptor free-list 249 * @quota: No. of units (packets) that can be serviced in one shot. 250 * 251 * This function is used to drop all MSDU in an MPDU 252 * 253 * Return: uint32_t: No. of elements processed 254 */ 255 static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc, 256 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 257 union dp_rx_desc_list_elem_t **head, 258 union dp_rx_desc_list_elem_t **tail, 259 uint32_t quota) 260 { 261 uint32_t rx_bufs_used = 0; 262 void *link_desc_va; 263 struct hal_buf_info buf_info; 264 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 265 int i; 266 uint8_t *rx_tlv_hdr; 267 uint32_t tid; 268 269 hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); 270 271 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 272 273 /* No UNMAP required -- this is "malloc_consistent" memory */ 274 hal_rx_msdu_list_get(link_desc_va, &msdu_list, 275 &mpdu_desc_info->msdu_count); 276 277 for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) { 278 struct dp_rx_desc *rx_desc = 279 dp_rx_cookie_2_va_rxdma_buf(soc, 280 msdu_list.sw_cookie[i]); 281 282 qdf_assert(rx_desc); 283 284 if (!dp_rx_desc_check_magic(rx_desc)) { 285 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 286 FL("Invalid rx_desc cookie=%d"), 287 msdu_list.sw_cookie[i]); 288 return rx_bufs_used; 289 } 290 291 rx_bufs_used++; 292 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 293 rx_desc->rx_buf_start); 294 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 295 "Packet received with PN error for tid :%d", tid); 296 297 rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf); 298 if (hal_rx_encryption_info_valid(rx_tlv_hdr)) 299 hal_rx_print_pn(rx_tlv_hdr); 300 301 /* Just free the buffers */ 302 qdf_nbuf_free(rx_desc->nbuf); 303 304 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 305 } 306 307 /* Return link descriptor through WBM ring (SW2WBM)*/ 308 dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST); 309 310 return rx_bufs_used; 311 } 312 313 /** 314 * dp_rx_pn_error_handle() - Handles PN check errors 315 * 316 * @soc: core txrx main context 317 * @ring_desc: opaque pointer to the REO error ring descriptor 318 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 319 * @head: head of the local descriptor free-list 320 * @tail: tail of the local descriptor free-list 321 * @quota: No. of units (packets) that can be serviced in one shot. 322 * 323 * This function implements PN error handling 324 * If the peer is configured to ignore the PN check errors 325 * or if DP feels, that this frame is still OK, the frame can be 326 * re-injected back to REO to use some of the other features 327 * of REO e.g. duplicate detection/routing to other cores 328 * 329 * Return: uint32_t: No. of elements processed 330 */ 331 static uint32_t 332 dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc, 333 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 334 union dp_rx_desc_list_elem_t **head, 335 union dp_rx_desc_list_elem_t **tail, 336 uint32_t quota) 337 { 338 uint16_t peer_id; 339 uint32_t rx_bufs_used = 0; 340 struct dp_peer *peer; 341 bool peer_pn_policy = false; 342 343 peer_id = DP_PEER_METADATA_PEER_ID_GET( 344 mpdu_desc_info->peer_meta_data); 345 346 347 peer = dp_peer_find_by_id(soc, peer_id); 348 349 if (qdf_likely(peer)) { 350 /* 351 * TODO: Check for peer specific policies & set peer_pn_policy 352 */ 353 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 354 "discard rx due to PN error for peer %pK " 355 "(%02x:%02x:%02x:%02x:%02x:%02x)", 356 peer, 357 peer->mac_addr.raw[0], peer->mac_addr.raw[1], 358 peer->mac_addr.raw[2], peer->mac_addr.raw[3], 359 peer->mac_addr.raw[4], peer->mac_addr.raw[5]); 360 361 } 362 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 363 "Packet received with PN error"); 364 365 /* No peer PN policy -- definitely drop */ 366 if (!peer_pn_policy) 367 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, 368 mpdu_desc_info, 369 head, tail, quota); 370 371 return rx_bufs_used; 372 } 373 374 /** 375 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K 376 * 377 * @soc: core txrx main context 378 * @ring_desc: opaque pointer to the REO error ring descriptor 379 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 380 * @head: head of the local descriptor free-list 381 * @tail: tail of the local descriptor free-list 382 * @quota: No. of units (packets) that can be serviced in one shot. 383 * 384 * This function implements the error handling when sequence number 385 * of the MPDU jumps suddenly by 2K.Today there are 2 cases that 386 * need to be handled: 387 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K 388 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN 389 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame 390 * For case B), the frame is normally dropped, no more action is taken 391 * 392 * Return: uint32_t: No. of elements processed 393 */ 394 static uint32_t 395 dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc, 396 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 397 union dp_rx_desc_list_elem_t **head, 398 union dp_rx_desc_list_elem_t **tail, 399 uint32_t quota) 400 { 401 return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info, 402 head, tail, quota); 403 } 404 405 /** 406 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu 407 * to pdev invalid peer list 408 * 409 * @soc: core DP main context 410 * @nbuf: Buffer pointer 411 * @rx_tlv_hdr: start of rx tlv header 412 * @mac_id: mac id 413 * 414 * Return: bool: true for last msdu of mpdu 415 */ 416 static bool 417 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 418 uint8_t mac_id) 419 { 420 bool mpdu_done = false; 421 qdf_nbuf_t curr_nbuf = NULL; 422 qdf_nbuf_t tmp_nbuf = NULL; 423 424 /* TODO: Currently only single radio is supported, hence 425 * pdev hard coded to '0' index 426 */ 427 struct dp_pdev *dp_pdev = soc->pdev_list[mac_id]; 428 429 if (!dp_pdev->first_nbuf) { 430 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 431 dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr); 432 dp_pdev->first_nbuf = true; 433 434 /* If the new nbuf received is the first msdu of the 435 * amsdu and there are msdus in the invalid peer msdu 436 * list, then let us free all the msdus of the invalid 437 * peer msdu list. 438 * This scenario can happen when we start receiving 439 * new a-msdu even before the previous a-msdu is completely 440 * received. 441 */ 442 curr_nbuf = dp_pdev->invalid_peer_head_msdu; 443 while (curr_nbuf) { 444 tmp_nbuf = curr_nbuf->next; 445 qdf_nbuf_free(curr_nbuf); 446 curr_nbuf = tmp_nbuf; 447 } 448 449 dp_pdev->invalid_peer_head_msdu = NULL; 450 dp_pdev->invalid_peer_tail_msdu = NULL; 451 hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr, 452 &(dp_pdev->ppdu_info.rx_status)); 453 454 } 455 456 if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) && 457 hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 458 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 459 qdf_assert_always(dp_pdev->first_nbuf == true); 460 dp_pdev->first_nbuf = false; 461 mpdu_done = true; 462 } 463 464 DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu, 465 dp_pdev->invalid_peer_tail_msdu, 466 nbuf); 467 468 return mpdu_done; 469 } 470 471 /** 472 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue 473 * descriptor violation on either a 474 * REO or WBM ring 475 * 476 * @soc: core DP main context 477 * @nbuf: buffer pointer 478 * @rx_tlv_hdr: start of rx tlv header 479 * @pool_id: mac id 480 * 481 * This function handles NULL queue descriptor violations arising out 482 * a missing REO queue for a given peer or a given TID. This typically 483 * may happen if a packet is received on a QOS enabled TID before the 484 * ADDBA negotiation for that TID, when the TID queue is setup. Or 485 * it may also happen for MC/BC frames if they are not routed to the 486 * non-QOS TID queue, in the absence of any other default TID queue. 487 * This error can show up both in a REO destination or WBM release ring. 488 * 489 */ 490 static void 491 dp_rx_null_q_desc_handle(struct dp_soc *soc, 492 qdf_nbuf_t nbuf, 493 uint8_t *rx_tlv_hdr, 494 uint8_t pool_id) 495 { 496 uint32_t pkt_len, l2_hdr_offset; 497 uint16_t msdu_len; 498 struct dp_vdev *vdev; 499 uint16_t peer_id = 0xFFFF; 500 struct dp_peer *peer = NULL; 501 uint8_t tid; 502 503 qdf_nbuf_set_rx_chfrag_start(nbuf, 504 hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)); 505 qdf_nbuf_set_rx_chfrag_end(nbuf, 506 hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr)); 507 508 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); 509 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 510 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; 511 512 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 513 FL("Len %d Extn list %pK "), 514 (uint32_t)qdf_nbuf_len(nbuf), 515 qdf_nbuf_get_ext_list(nbuf)); 516 /* Set length in nbuf */ 517 if (!qdf_nbuf_get_ext_list(nbuf)) 518 qdf_nbuf_set_pktlen(nbuf, pkt_len); 519 520 /* 521 * Check if DMA completed -- msdu_done is the last bit 522 * to be written 523 */ 524 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 525 526 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 527 FL("MSDU DONE failure")); 528 529 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 530 QDF_TRACE_LEVEL_INFO); 531 qdf_assert(0); 532 } 533 534 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 535 peer = dp_peer_find_by_id(soc, peer_id); 536 537 if (!peer) { 538 bool mpdu_done = false; 539 struct dp_pdev *pdev = soc->pdev_list[pool_id]; 540 541 QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL"); 542 543 mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id); 544 /* Trigger invalid peer handler wrapper */ 545 dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done); 546 547 if (mpdu_done) { 548 pdev->invalid_peer_head_msdu = NULL; 549 pdev->invalid_peer_tail_msdu = NULL; 550 } 551 return; 552 } 553 554 vdev = peer->vdev; 555 if (!vdev) { 556 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 557 FL("INVALID vdev %pK OR osif_rx"), vdev); 558 /* Drop & free packet */ 559 qdf_nbuf_free(nbuf); 560 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 561 return; 562 } 563 564 /* 565 * Advance the packet start pointer by total size of 566 * pre-header TLV's 567 */ 568 if (qdf_nbuf_get_ext_list(nbuf)) 569 qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); 570 else 571 qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN)); 572 573 if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) { 574 /* this is a looped back MCBC pkt, drop it */ 575 qdf_nbuf_free(nbuf); 576 return; 577 } 578 /* 579 * In qwrap mode if the received packet matches with any of the vdev 580 * mac addresses, drop it. Donot receive multicast packets originated 581 * from any proxysta. 582 */ 583 if (check_qwrap_multicast_loopback(vdev, nbuf)) { 584 qdf_nbuf_free(nbuf); 585 return; 586 } 587 588 589 if (qdf_unlikely((peer->nawds_enabled == true) && 590 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { 591 QDF_TRACE(QDF_MODULE_ID_DP, 592 QDF_TRACE_LEVEL_DEBUG, 593 "%s free buffer for multicast packet", 594 __func__); 595 DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); 596 qdf_nbuf_free(nbuf); 597 return; 598 } 599 600 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer, 601 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { 602 QDF_TRACE(QDF_MODULE_ID_DP, 603 QDF_TRACE_LEVEL_ERROR, 604 FL("mcast Policy Check Drop pkt")); 605 /* Drop & free packet */ 606 qdf_nbuf_free(nbuf); 607 return; 608 } 609 610 /* WDS Source Port Learning */ 611 if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet && 612 vdev->wds_enabled)) 613 dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf); 614 615 if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) { 616 /* TODO: Assuming that qos_control_valid also indicates 617 * unicast. Should we check this? 618 */ 619 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr); 620 if (peer && 621 peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) { 622 /* IEEE80211_SEQ_MAX indicates invalid start_seq */ 623 dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); 624 } 625 } 626 627 #ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */ 628 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 629 "%s: p_id %d msdu_len %d hdr_off %d", 630 __func__, peer_id, msdu_len, l2_hdr_offset); 631 632 print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4, 633 qdf_nbuf_data(nbuf), 128, false); 634 #endif /* NAPIER_EMULATION */ 635 636 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 637 qdf_nbuf_set_next(nbuf, NULL); 638 dp_rx_deliver_raw(vdev, nbuf, peer); 639 } else { 640 if (qdf_unlikely(peer->bss_peer)) { 641 QDF_TRACE(QDF_MODULE_ID_DP, 642 QDF_TRACE_LEVEL_INFO, 643 FL("received pkt with same src MAC")); 644 /* Drop & free packet */ 645 qdf_nbuf_free(nbuf); 646 return; 647 } 648 649 if (vdev->osif_rx) { 650 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 651 FL("vdev %pK osif_rx %pK"), vdev, 652 vdev->osif_rx); 653 qdf_nbuf_set_next(nbuf, NULL); 654 vdev->osif_rx(vdev->osif_vdev, nbuf); 655 DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1, 656 qdf_nbuf_len(nbuf), 657 hal_rx_msdu_end_da_is_mcbc_get( 658 rx_tlv_hdr)); 659 DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1, 660 qdf_nbuf_len(nbuf)); 661 } else { 662 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 663 FL("INVALID vdev %pK OR osif_rx"), vdev); 664 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 665 } 666 } 667 return; 668 } 669 670 /** 671 * dp_rx_err_deliver() - Function to deliver error frames to OS 672 * 673 * @soc: core DP main context 674 * @rx_desc : pointer to the sw rx descriptor 675 * @head: pointer to head of rx descriptors to be added to free list 676 * @tail: pointer to tail of rx descriptors to be added to free list 677 * quota: upper limit of descriptors that can be reaped 678 * 679 * Return: uint32_t: No. of Rx buffers reaped 680 */ 681 static void 682 dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr) 683 { 684 uint32_t pkt_len, l2_hdr_offset; 685 uint16_t msdu_len; 686 struct dp_vdev *vdev; 687 uint16_t peer_id = 0xFFFF; 688 struct dp_peer *peer = NULL; 689 struct ether_header *eh; 690 bool isBroadcast; 691 692 /* 693 * Check if DMA completed -- msdu_done is the last bit 694 * to be written 695 */ 696 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 697 698 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 699 FL("MSDU DONE failure")); 700 701 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 702 QDF_TRACE_LEVEL_INFO); 703 qdf_assert(0); 704 } 705 706 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 707 peer = dp_peer_find_by_id(soc, peer_id); 708 709 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); 710 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 711 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; 712 713 /* Set length in nbuf */ 714 qdf_nbuf_set_pktlen(nbuf, pkt_len); 715 716 qdf_nbuf_set_next(nbuf, NULL); 717 718 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 719 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 720 721 if (!peer) { 722 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 723 FL("peer is NULL")); 724 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 725 qdf_nbuf_len(nbuf)); 726 /* Trigger invalid peer handler wrapper */ 727 dp_rx_process_invalid_peer_wrapper(soc, nbuf, true); 728 return; 729 } 730 731 vdev = peer->vdev; 732 if (!vdev) { 733 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 734 FL("INVALID vdev %pK OR osif_rx"), vdev); 735 /* Drop & free packet */ 736 qdf_nbuf_free(nbuf); 737 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 738 return; 739 } 740 741 /* Drop & free packet if mesh mode not enabled */ 742 if (!vdev->mesh_vdev) { 743 qdf_nbuf_free(nbuf); 744 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 745 return; 746 } 747 748 /* 749 * Advance the packet start pointer by total size of 750 * pre-header TLV's 751 */ 752 qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN)); 753 754 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 755 == QDF_STATUS_SUCCESS) { 756 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED, 757 FL("mesh pkt filtered")); 758 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); 759 760 qdf_nbuf_free(nbuf); 761 return; 762 763 } 764 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); 765 766 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) && 767 (vdev->rx_decap_type == 768 htt_cmn_pkt_type_ethernet))) { 769 eh = (struct ether_header *)qdf_nbuf_data(nbuf); 770 isBroadcast = (IEEE80211_IS_BROADCAST 771 (eh->ether_dhost)) ? 1 : 0 ; 772 if (isBroadcast) { 773 DP_STATS_INC_PKT(peer, rx.bcast, 1, 774 qdf_nbuf_len(nbuf)); 775 } 776 } 777 778 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 779 dp_rx_deliver_raw(vdev, nbuf, peer); 780 } else { 781 DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1); 782 vdev->osif_rx(vdev->osif_vdev, nbuf); 783 } 784 785 return; 786 } 787 788 /** 789 * dp_rx_process_mic_error(): Function to pass mic error indication to umac 790 * @soc: DP SOC handle 791 * @rx_desc : pointer to the sw rx descriptor 792 * @head: pointer to head of rx descriptors to be added to free list 793 * @tail: pointer to tail of rx descriptors to be added to free list 794 * 795 * return: void 796 */ 797 void 798 dp_rx_process_mic_error(struct dp_soc *soc, 799 qdf_nbuf_t nbuf, 800 uint8_t *rx_tlv_hdr) 801 { 802 struct dp_vdev *vdev = NULL; 803 struct dp_pdev *pdev = NULL; 804 struct ol_if_ops *tops = NULL; 805 struct ieee80211_frame *wh; 806 uint8_t *rx_pkt_hdr; 807 struct dp_peer *peer; 808 uint16_t peer_id, rx_seq, fragno; 809 unsigned int tid; 810 QDF_STATUS status; 811 812 if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) 813 return; 814 815 rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf)); 816 wh = (struct ieee80211_frame *)rx_pkt_hdr; 817 818 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 819 peer = dp_peer_find_by_id(soc, peer_id); 820 if (!peer) { 821 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 822 "peer not found"); 823 goto fail; 824 } 825 826 vdev = peer->vdev; 827 if (!vdev) { 828 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 829 "VDEV not found"); 830 goto fail; 831 } 832 833 pdev = vdev->pdev; 834 if (!pdev) { 835 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 836 "PDEV not found"); 837 goto fail; 838 } 839 840 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf)); 841 rx_seq = (((*(uint16_t *)wh->i_seq) & 842 IEEE80211_SEQ_SEQ_MASK) >> 843 IEEE80211_SEQ_SEQ_SHIFT); 844 845 fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf)); 846 847 /* Can get only last fragment */ 848 if (fragno) { 849 status = dp_rx_defrag_add_last_frag(soc, peer, 850 tid, rx_seq, nbuf); 851 852 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 853 "%s: Frag pkt seq# %d frag# %d consumed status %d !", 854 __func__, rx_seq, fragno, status); 855 return; 856 } 857 858 tops = pdev->soc->cdp_soc.ol_ops; 859 if (tops->rx_mic_error) 860 tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh); 861 862 fail: 863 qdf_nbuf_free(nbuf); 864 return; 865 } 866 867 /** 868 * dp_rx_err_process() - Processes error frames routed to REO error ring 869 * 870 * @soc: core txrx main context 871 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 872 * @quota: No. of units (packets) that can be serviced in one shot. 873 * 874 * This function implements error processing and top level demultiplexer 875 * for all the frames routed to REO error ring. 876 * 877 * Return: uint32_t: No. of elements processed 878 */ 879 uint32_t 880 dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota) 881 { 882 void *hal_soc; 883 void *ring_desc; 884 union dp_rx_desc_list_elem_t *head = NULL; 885 union dp_rx_desc_list_elem_t *tail = NULL; 886 uint32_t rx_bufs_used = 0; 887 uint8_t buf_type; 888 uint8_t error, rbm; 889 struct hal_rx_mpdu_desc_info mpdu_desc_info; 890 struct hal_buf_info hbi; 891 struct dp_pdev *dp_pdev; 892 struct dp_srng *dp_rxdma_srng; 893 struct rx_desc_pool *rx_desc_pool; 894 uint32_t cookie = 0; 895 void *link_desc_va; 896 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 897 uint16_t num_msdus; 898 899 /* Debug -- Remove later */ 900 qdf_assert(soc && hal_ring); 901 902 hal_soc = soc->hal_soc; 903 904 /* Debug -- Remove later */ 905 qdf_assert(hal_soc); 906 907 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { 908 909 /* TODO */ 910 /* 911 * Need API to convert from hal_ring pointer to 912 * Ring Type / Ring Id combo 913 */ 914 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 915 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 916 FL("HAL RING Access Failed -- %pK"), hal_ring); 917 goto done; 918 } 919 920 while (qdf_likely(quota-- && (ring_desc = 921 hal_srng_dst_get_next(hal_soc, hal_ring)))) { 922 923 DP_STATS_INC(soc, rx.err_ring_pkts, 1); 924 925 error = HAL_RX_ERROR_STATUS_GET(ring_desc); 926 927 qdf_assert(error == HAL_REO_ERROR_DETECTED); 928 929 buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc); 930 /* 931 * For REO error ring, expect only MSDU LINK DESC 932 */ 933 qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE); 934 935 cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); 936 /* 937 * check for the magic number in the sw cookie 938 */ 939 qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) & 940 LINK_DESC_ID_START); 941 942 /* 943 * Check if the buffer is to be processed on this processor 944 */ 945 rbm = hal_rx_ret_buf_manager_get(ring_desc); 946 947 hal_rx_reo_buf_paddr_get(ring_desc, &hbi); 948 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); 949 hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus); 950 951 if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) && 952 (msdu_list.rbm[0] != 953 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) { 954 /* TODO */ 955 /* Call appropriate handler */ 956 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 957 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 958 FL("Invalid RBM %d"), msdu_list.rbm[0]); 959 960 /* Return link descriptor through WBM ring (SW2WBM)*/ 961 dp_rx_link_desc_return(soc, ring_desc, 962 HAL_BM_ACTION_RELEASE_MSDU_LIST); 963 continue; 964 } 965 966 /* Get the MPDU DESC info */ 967 hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); 968 969 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { 970 /* TODO */ 971 rx_bufs_used += dp_rx_frag_handle(soc, 972 ring_desc, &mpdu_desc_info, 973 &head, &tail, quota); 974 DP_STATS_INC(soc, rx.rx_frags, 1); 975 continue; 976 } 977 978 if (hal_rx_reo_is_pn_error(ring_desc)) { 979 /* TOD0 */ 980 DP_STATS_INC(soc, 981 rx.err. 982 reo_error[HAL_REO_ERR_PN_CHECK_FAILED], 983 1); 984 rx_bufs_used += dp_rx_pn_error_handle(soc, 985 ring_desc, &mpdu_desc_info, 986 &head, &tail, quota); 987 continue; 988 } 989 990 if (hal_rx_reo_is_2k_jump(ring_desc)) { 991 /* TOD0 */ 992 DP_STATS_INC(soc, 993 rx.err. 994 reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP], 995 1); 996 rx_bufs_used += dp_rx_2k_jump_handle(soc, 997 ring_desc, &mpdu_desc_info, 998 &head, &tail, quota); 999 continue; 1000 } 1001 } 1002 1003 done: 1004 hal_srng_access_end(hal_soc, hal_ring); 1005 1006 if (soc->rx.flags.defrag_timeout_check) 1007 dp_rx_defrag_waitlist_flush(soc); 1008 1009 /* Assume MAC id = 0, owner = 0 */ 1010 if (rx_bufs_used) { 1011 dp_pdev = soc->pdev_list[0]; 1012 dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring; 1013 rx_desc_pool = &soc->rx_desc_buf[0]; 1014 1015 dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool, 1016 rx_bufs_used, &head, &tail); 1017 } 1018 1019 return rx_bufs_used; /* Assume no scale factor for now */ 1020 } 1021 1022 /** 1023 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring 1024 * 1025 * @soc: core txrx main context 1026 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 1027 * @quota: No. of units (packets) that can be serviced in one shot. 1028 * 1029 * This function implements error processing and top level demultiplexer 1030 * for all the frames routed to WBM2HOST sw release ring. 1031 * 1032 * Return: uint32_t: No. of elements processed 1033 */ 1034 uint32_t 1035 dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota) 1036 { 1037 void *hal_soc; 1038 void *ring_desc; 1039 struct dp_rx_desc *rx_desc; 1040 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; 1041 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; 1042 uint32_t rx_bufs_used = 0; 1043 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 1044 uint8_t buf_type, rbm; 1045 uint32_t rx_buf_cookie; 1046 uint8_t mac_id; 1047 struct dp_pdev *dp_pdev; 1048 struct dp_srng *dp_rxdma_srng; 1049 struct rx_desc_pool *rx_desc_pool; 1050 uint8_t *rx_tlv_hdr; 1051 qdf_nbuf_t nbuf_head = NULL; 1052 qdf_nbuf_t nbuf_tail = NULL; 1053 qdf_nbuf_t nbuf, next; 1054 struct hal_wbm_err_desc_info wbm_err_info = { 0 }; 1055 uint8_t pool_id; 1056 1057 /* Debug -- Remove later */ 1058 qdf_assert(soc && hal_ring); 1059 1060 hal_soc = soc->hal_soc; 1061 1062 /* Debug -- Remove later */ 1063 qdf_assert(hal_soc); 1064 1065 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { 1066 1067 /* TODO */ 1068 /* 1069 * Need API to convert from hal_ring pointer to 1070 * Ring Type / Ring Id combo 1071 */ 1072 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1073 FL("HAL RING Access Failed -- %pK"), hal_ring); 1074 goto done; 1075 } 1076 1077 while (qdf_likely(quota-- && (ring_desc = 1078 hal_srng_dst_get_next(hal_soc, hal_ring)))) { 1079 1080 /* XXX */ 1081 buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc); 1082 1083 /* 1084 * For WBM ring, expect only MSDU buffers 1085 */ 1086 qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF); 1087 1088 qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc) 1089 == HAL_RX_WBM_ERR_SRC_RXDMA) || 1090 (HAL_RX_WBM_ERR_SRC_GET(ring_desc) 1091 == HAL_RX_WBM_ERR_SRC_REO)); 1092 1093 /* 1094 * Check if the buffer is to be processed on this processor 1095 */ 1096 rbm = hal_rx_ret_buf_manager_get(ring_desc); 1097 1098 if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) { 1099 /* TODO */ 1100 /* Call appropriate handler */ 1101 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 1102 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1103 FL("Invalid RBM %d"), rbm); 1104 continue; 1105 } 1106 1107 rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc); 1108 1109 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); 1110 qdf_assert(rx_desc); 1111 1112 if (!dp_rx_desc_check_magic(rx_desc)) { 1113 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1114 FL("Invalid rx_desc cookie=%d"), 1115 rx_buf_cookie); 1116 continue; 1117 } 1118 1119 nbuf = rx_desc->nbuf; 1120 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL); 1121 1122 /* 1123 * save the wbm desc info in nbuf TLV. We will need this 1124 * info when we do the actual nbuf processing 1125 */ 1126 hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info); 1127 wbm_err_info.pool_id = rx_desc->pool_id; 1128 hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf), 1129 &wbm_err_info); 1130 1131 rx_bufs_reaped[rx_desc->pool_id]++; 1132 1133 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf); 1134 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 1135 &tail[rx_desc->pool_id], 1136 rx_desc); 1137 } 1138 done: 1139 hal_srng_access_end(hal_soc, hal_ring); 1140 1141 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 1142 if (rx_bufs_reaped[mac_id]) { 1143 dp_pdev = soc->pdev_list[mac_id]; 1144 dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring; 1145 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1146 1147 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 1148 rx_desc_pool, rx_bufs_reaped[mac_id], 1149 &head[mac_id], &tail[mac_id]); 1150 rx_bufs_used += rx_bufs_reaped[mac_id]; 1151 } 1152 } 1153 1154 nbuf = nbuf_head; 1155 while (nbuf) { 1156 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1157 /* 1158 * retrieve the wbm desc info from nbuf TLV, so we can 1159 * handle error cases appropriately 1160 */ 1161 hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info); 1162 1163 /* Set queue_mapping in nbuf to 0 */ 1164 dp_set_rx_queue(nbuf, 0); 1165 1166 next = nbuf->next; 1167 if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { 1168 if (wbm_err_info.reo_psh_rsn 1169 == HAL_RX_WBM_REO_PSH_RSN_ERROR) { 1170 1171 DP_STATS_INC(soc, 1172 rx.err.reo_error 1173 [wbm_err_info.reo_err_code], 1); 1174 1175 switch (wbm_err_info.reo_err_code) { 1176 /* 1177 * Handling for packets which have NULL REO 1178 * queue descriptor 1179 */ 1180 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 1181 pool_id = wbm_err_info.pool_id; 1182 QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, 1183 "Got pkt with REO ERROR: %d", 1184 wbm_err_info.reo_err_code); 1185 dp_rx_null_q_desc_handle(soc, 1186 nbuf, 1187 rx_tlv_hdr, 1188 pool_id); 1189 nbuf = next; 1190 continue; 1191 /* TODO */ 1192 /* Add per error code accounting */ 1193 1194 default: 1195 QDF_TRACE(QDF_MODULE_ID_DP, 1196 QDF_TRACE_LEVEL_DEBUG, 1197 "REO error %d detected", 1198 wbm_err_info.reo_err_code); 1199 } 1200 } 1201 } else if (wbm_err_info.wbm_err_src == 1202 HAL_RX_WBM_ERR_SRC_RXDMA) { 1203 if (wbm_err_info.rxdma_psh_rsn 1204 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 1205 struct dp_peer *peer = NULL; 1206 uint16_t peer_id = 0xFFFF; 1207 1208 DP_STATS_INC(soc, 1209 rx.err.rxdma_error 1210 [wbm_err_info.rxdma_err_code], 1); 1211 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 1212 peer = dp_peer_find_by_id(soc, peer_id); 1213 1214 switch (wbm_err_info.rxdma_err_code) { 1215 case HAL_RXDMA_ERR_UNENCRYPTED: 1216 dp_rx_err_deliver(soc, 1217 nbuf, 1218 rx_tlv_hdr); 1219 nbuf = next; 1220 continue; 1221 1222 case HAL_RXDMA_ERR_TKIP_MIC: 1223 dp_rx_process_mic_error(soc, 1224 nbuf, 1225 rx_tlv_hdr); 1226 nbuf = next; 1227 if (peer) 1228 DP_STATS_INC(peer, rx.err.mic_err, 1); 1229 continue; 1230 1231 case HAL_RXDMA_ERR_DECRYPT: 1232 if (peer) 1233 DP_STATS_INC(peer, rx.err.decrypt_err, 1); 1234 QDF_TRACE(QDF_MODULE_ID_DP, 1235 QDF_TRACE_LEVEL_DEBUG, 1236 "Packet received with Decrypt error"); 1237 break; 1238 1239 default: 1240 QDF_TRACE(QDF_MODULE_ID_DP, 1241 QDF_TRACE_LEVEL_DEBUG, 1242 "RXDMA error %d", 1243 wbm_err_info. 1244 rxdma_err_code); 1245 } 1246 } 1247 } else { 1248 /* Should not come here */ 1249 qdf_assert(0); 1250 } 1251 1252 hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr, 1253 QDF_TRACE_LEVEL_DEBUG); 1254 qdf_nbuf_free(nbuf); 1255 nbuf = next; 1256 } 1257 return rx_bufs_used; /* Assume no scale factor for now */ 1258 } 1259 1260 /** 1261 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs 1262 * 1263 * @soc: core DP main context 1264 * @mac_id: mac id which is one of 3 mac_ids 1265 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 1266 * @head: head of descs list to be freed 1267 * @tail: tail of decs list to be freed 1268 1269 * Return: number of msdu in MPDU to be popped 1270 */ 1271 static inline uint32_t 1272 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 1273 void *rxdma_dst_ring_desc, 1274 union dp_rx_desc_list_elem_t **head, 1275 union dp_rx_desc_list_elem_t **tail) 1276 { 1277 void *rx_msdu_link_desc; 1278 qdf_nbuf_t msdu; 1279 qdf_nbuf_t last; 1280 struct hal_rx_msdu_list msdu_list; 1281 uint16_t num_msdus; 1282 struct hal_buf_info buf_info; 1283 void *p_buf_addr_info; 1284 void *p_last_buf_addr_info; 1285 uint32_t rx_bufs_used = 0; 1286 uint32_t msdu_cnt; 1287 uint32_t i; 1288 uint8_t push_reason; 1289 uint8_t rxdma_error_code = 0; 1290 uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST; 1291 struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); 1292 1293 msdu = 0; 1294 1295 last = NULL; 1296 1297 hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info, 1298 &p_last_buf_addr_info, &msdu_cnt); 1299 1300 push_reason = 1301 hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc); 1302 if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 1303 rxdma_error_code = 1304 hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc); 1305 } 1306 1307 do { 1308 rx_msdu_link_desc = 1309 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 1310 1311 qdf_assert(rx_msdu_link_desc); 1312 1313 hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus); 1314 1315 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 1316 /* if the msdus belongs to NSS offloaded radio && 1317 * the rbm is not SW1_BM then return the msdu_link 1318 * descriptor without freeing the msdus (nbufs). let 1319 * these buffers be given to NSS completion ring for 1320 * NSS to free them. 1321 * else iterate through the msdu link desc list and 1322 * free each msdu in the list. 1323 */ 1324 if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM && 1325 wlan_cfg_get_dp_pdev_nss_enabled( 1326 pdev->wlan_cfg_ctx)) 1327 bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST; 1328 else { 1329 for (i = 0; i < num_msdus; i++) { 1330 struct dp_rx_desc *rx_desc = 1331 dp_rx_cookie_2_va_rxdma_buf(soc, 1332 msdu_list.sw_cookie[i]); 1333 qdf_assert(rx_desc); 1334 msdu = rx_desc->nbuf; 1335 1336 qdf_nbuf_unmap_single(soc->osdev, msdu, 1337 QDF_DMA_FROM_DEVICE); 1338 1339 QDF_TRACE(QDF_MODULE_ID_DP, 1340 QDF_TRACE_LEVEL_DEBUG, 1341 "[%s][%d] msdu_nbuf=%pK ", 1342 __func__, __LINE__, msdu); 1343 1344 qdf_nbuf_free(msdu); 1345 rx_bufs_used++; 1346 dp_rx_add_to_free_desc_list(head, 1347 tail, rx_desc); 1348 } 1349 } 1350 } else { 1351 rxdma_error_code = HAL_RXDMA_ERR_WAR; 1352 } 1353 1354 hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info, 1355 &p_buf_addr_info); 1356 1357 dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action); 1358 p_last_buf_addr_info = p_buf_addr_info; 1359 1360 } while (buf_info.paddr); 1361 1362 DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1); 1363 1364 if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) { 1365 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1366 "Packet received with Decrypt error"); 1367 } 1368 1369 return rx_bufs_used; 1370 } 1371 1372 /** 1373 * dp_rxdma_err_process() - RxDMA error processing functionality 1374 * 1375 * @soc: core txrx main contex 1376 * @mac_id: mac id which is one of 3 mac_ids 1377 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced 1378 * @quota: No. of units (packets) that can be serviced in one shot. 1379 1380 * Return: num of buffers processed 1381 */ 1382 uint32_t 1383 dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) 1384 { 1385 struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); 1386 int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id); 1387 void *hal_soc; 1388 void *rxdma_dst_ring_desc; 1389 void *err_dst_srng; 1390 union dp_rx_desc_list_elem_t *head = NULL; 1391 union dp_rx_desc_list_elem_t *tail = NULL; 1392 struct dp_srng *dp_rxdma_srng; 1393 struct rx_desc_pool *rx_desc_pool; 1394 uint32_t work_done = 0; 1395 uint32_t rx_bufs_used = 0; 1396 1397 #ifdef DP_INTR_POLL_BASED 1398 if (!pdev) 1399 return 0; 1400 #endif 1401 err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng; 1402 1403 if (!err_dst_srng) { 1404 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1405 "%s %d : HAL Monitor Destination Ring Init \ 1406 Failed -- %pK", 1407 __func__, __LINE__, err_dst_srng); 1408 return 0; 1409 } 1410 1411 hal_soc = soc->hal_soc; 1412 1413 qdf_assert(hal_soc); 1414 1415 if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) { 1416 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1417 "%s %d : HAL Monitor Destination Ring Init \ 1418 Failed -- %pK", 1419 __func__, __LINE__, err_dst_srng); 1420 return 0; 1421 } 1422 1423 while (qdf_likely(quota-- && (rxdma_dst_ring_desc = 1424 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { 1425 1426 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, 1427 rxdma_dst_ring_desc, 1428 &head, &tail); 1429 } 1430 1431 hal_srng_access_end(hal_soc, err_dst_srng); 1432 1433 if (rx_bufs_used) { 1434 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 1435 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1436 1437 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 1438 rx_desc_pool, rx_bufs_used, &head, &tail); 1439 1440 work_done += rx_bufs_used; 1441 } 1442 1443 return work_done; 1444 } 1445