1 /* 2 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "dp_types.h" 20 #include "dp_rx.h" 21 #include "dp_peer.h" 22 #include "dp_internal.h" 23 #include "hal_api.h" 24 #include "qdf_trace.h" 25 #include "qdf_nbuf.h" 26 #ifdef CONFIG_MCL 27 #include <cds_ieee80211_common.h> 28 #else 29 #include <linux/ieee80211.h> 30 #endif 31 #include "dp_rx_defrag.h" 32 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 33 34 #ifdef RX_DESC_DEBUG_CHECK 35 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 36 { 37 if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) { 38 return false; 39 } 40 rx_desc->magic = 0; 41 return true; 42 } 43 #else 44 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 45 { 46 return true; 47 } 48 #endif 49 50 /** 51 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop 52 * back on same vap or a different vap. 53 * 54 * @soc: core DP main context 55 * @peer: dp peer handler 56 * @rx_tlv_hdr: start of the rx TLV header 57 * @nbuf: pkt buffer 58 * 59 * Return: bool (true if it is a looped back pkt else false) 60 * 61 */ 62 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, 63 struct dp_peer *peer, 64 uint8_t *rx_tlv_hdr, 65 qdf_nbuf_t nbuf) 66 { 67 struct dp_vdev *vdev = peer->vdev; 68 struct dp_ast_entry *ase; 69 uint16_t sa_idx = 0; 70 uint8_t *data; 71 72 /* 73 * Multicast Echo Check is required only if vdev is STA and 74 * received pkt is a multicast/broadcast pkt. otherwise 75 * skip the MEC check. 76 */ 77 if (vdev->opmode != wlan_op_mode_sta) 78 return false; 79 80 if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) 81 return false; 82 83 data = qdf_nbuf_data(nbuf); 84 /* 85 * if the received pkts src mac addr matches with vdev 86 * mac address then drop the pkt as it is looped back 87 */ 88 if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN], 89 vdev->mac_addr.raw, 90 DP_MAC_ADDR_LEN))) 91 return true; 92 93 /* 94 * In case of qwrap isolation mode, donot drop loopback packets. 95 * In isolation mode, all packets from the wired stations need to go 96 * to rootap and loop back to reach the wireless stations and 97 * vice-versa. 98 */ 99 if (qdf_unlikely(vdev->isolation_vdev)) 100 return false; 101 102 /* if the received pkts src mac addr matches with the 103 * wired PCs MAC addr which is behind the STA or with 104 * wireless STAs MAC addr which are behind the Repeater, 105 * then drop the pkt as it is looped back 106 */ 107 qdf_spin_lock_bh(&soc->ast_lock); 108 if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) { 109 sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr); 110 111 if ((sa_idx < 0) || 112 (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2))) { 113 qdf_spin_unlock_bh(&soc->ast_lock); 114 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 115 "invalid sa_idx: %d", sa_idx); 116 qdf_assert_always(0); 117 } 118 119 ase = soc->ast_table[sa_idx]; 120 if (!ase) { 121 /* We do not get a peer map event for STA and without 122 * this event we don't know what is STA's sa_idx. 123 * For this reason the AST is still not associated to 124 * any index postion in ast_table. 125 * In these kind of scenarios where sa is valid but 126 * ast is not in ast_table, we use the below API to get 127 * AST entry for STA's own mac_address. 128 */ 129 ase = dp_peer_ast_hash_find(soc, 130 &data[DP_MAC_ADDR_LEN]); 131 132 } 133 } else 134 ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]); 135 136 if (ase) { 137 ase->ast_idx = sa_idx; 138 soc->ast_table[sa_idx] = ase; 139 140 if (ase->pdev_id != vdev->pdev->pdev_id) { 141 qdf_spin_unlock_bh(&soc->ast_lock); 142 QDF_TRACE(QDF_MODULE_ID_DP, 143 QDF_TRACE_LEVEL_INFO, 144 "Detected DBDC Root AP %pM, %d %d", 145 &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id, 146 ase->pdev_id); 147 return false; 148 } 149 150 if ((ase->type == CDP_TXRX_AST_TYPE_MEC) || 151 (ase->peer != peer)) { 152 qdf_spin_unlock_bh(&soc->ast_lock); 153 QDF_TRACE(QDF_MODULE_ID_DP, 154 QDF_TRACE_LEVEL_INFO, 155 "received pkt with same src mac %pM", 156 &data[DP_MAC_ADDR_LEN]); 157 158 return true; 159 } 160 } 161 qdf_spin_unlock_bh(&soc->ast_lock); 162 return false; 163 } 164 165 /** 166 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to 167 * (WBM) by address 168 * 169 * @soc: core DP main context 170 * @link_desc_addr: link descriptor addr 171 * 172 * Return: QDF_STATUS 173 */ 174 QDF_STATUS 175 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr, 176 uint8_t bm_action) 177 { 178 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; 179 void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng; 180 void *hal_soc = soc->hal_soc; 181 QDF_STATUS status = QDF_STATUS_E_FAILURE; 182 void *src_srng_desc; 183 184 if (!wbm_rel_srng) { 185 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 186 "WBM RELEASE RING not initialized"); 187 return status; 188 } 189 190 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { 191 192 /* TODO */ 193 /* 194 * Need API to convert from hal_ring pointer to 195 * Ring Type / Ring Id combo 196 */ 197 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 198 FL("HAL RING Access For WBM Release SRNG Failed - %pK"), 199 wbm_rel_srng); 200 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 201 goto done; 202 } 203 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); 204 if (qdf_likely(src_srng_desc)) { 205 /* Return link descriptor through WBM ring (SW2WBM)*/ 206 hal_rx_msdu_link_desc_set(hal_soc, 207 src_srng_desc, link_desc_addr, bm_action); 208 status = QDF_STATUS_SUCCESS; 209 } else { 210 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng; 211 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 212 FL("WBM Release Ring (Id %d) Full"), srng->ring_id); 213 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 214 "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x", 215 *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp, 216 *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp); 217 } 218 done: 219 hal_srng_access_end(hal_soc, wbm_rel_srng); 220 return status; 221 222 } 223 224 /** 225 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 226 * (WBM), following error handling 227 * 228 * @soc: core DP main context 229 * @ring_desc: opaque pointer to the REO error ring descriptor 230 * 231 * Return: QDF_STATUS 232 */ 233 QDF_STATUS 234 dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action) 235 { 236 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc); 237 return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action); 238 } 239 240 /** 241 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU 242 * 243 * @soc: core txrx main context 244 * @ring_desc: opaque pointer to the REO error ring descriptor 245 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 246 * @head: head of the local descriptor free-list 247 * @tail: tail of the local descriptor free-list 248 * @quota: No. of units (packets) that can be serviced in one shot. 249 * 250 * This function is used to drop all MSDU in an MPDU 251 * 252 * Return: uint32_t: No. of elements processed 253 */ 254 static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc, 255 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 256 union dp_rx_desc_list_elem_t **head, 257 union dp_rx_desc_list_elem_t **tail, 258 uint32_t quota) 259 { 260 uint32_t rx_bufs_used = 0; 261 void *link_desc_va; 262 struct hal_buf_info buf_info; 263 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 264 int i; 265 uint8_t *rx_tlv_hdr; 266 uint32_t tid; 267 268 hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); 269 270 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 271 272 /* No UNMAP required -- this is "malloc_consistent" memory */ 273 hal_rx_msdu_list_get(link_desc_va, &msdu_list, 274 &mpdu_desc_info->msdu_count); 275 276 for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) { 277 struct dp_rx_desc *rx_desc = 278 dp_rx_cookie_2_va_rxdma_buf(soc, 279 msdu_list.sw_cookie[i]); 280 281 qdf_assert(rx_desc); 282 283 if (!dp_rx_desc_check_magic(rx_desc)) { 284 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 285 FL("Invalid rx_desc cookie=%d"), 286 msdu_list.sw_cookie[i]); 287 return rx_bufs_used; 288 } 289 290 rx_bufs_used++; 291 tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start); 292 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 293 "Packet received with PN error for tid :%d", tid); 294 295 rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf); 296 if (hal_rx_encryption_info_valid(rx_tlv_hdr)) 297 hal_rx_print_pn(rx_tlv_hdr); 298 299 /* Just free the buffers */ 300 qdf_nbuf_free(rx_desc->nbuf); 301 302 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 303 } 304 305 /* Return link descriptor through WBM ring (SW2WBM)*/ 306 dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST); 307 308 return rx_bufs_used; 309 } 310 311 /** 312 * dp_rx_pn_error_handle() - Handles PN check errors 313 * 314 * @soc: core txrx main context 315 * @ring_desc: opaque pointer to the REO error ring descriptor 316 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 317 * @head: head of the local descriptor free-list 318 * @tail: tail of the local descriptor free-list 319 * @quota: No. of units (packets) that can be serviced in one shot. 320 * 321 * This function implements PN error handling 322 * If the peer is configured to ignore the PN check errors 323 * or if DP feels, that this frame is still OK, the frame can be 324 * re-injected back to REO to use some of the other features 325 * of REO e.g. duplicate detection/routing to other cores 326 * 327 * Return: uint32_t: No. of elements processed 328 */ 329 static uint32_t 330 dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc, 331 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 332 union dp_rx_desc_list_elem_t **head, 333 union dp_rx_desc_list_elem_t **tail, 334 uint32_t quota) 335 { 336 uint16_t peer_id; 337 uint32_t rx_bufs_used = 0; 338 struct dp_peer *peer; 339 bool peer_pn_policy = false; 340 341 peer_id = DP_PEER_METADATA_PEER_ID_GET( 342 mpdu_desc_info->peer_meta_data); 343 344 345 peer = dp_peer_find_by_id(soc, peer_id); 346 347 if (qdf_likely(peer)) { 348 /* 349 * TODO: Check for peer specific policies & set peer_pn_policy 350 */ 351 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 352 "discard rx due to PN error for peer %pK " 353 "(%02x:%02x:%02x:%02x:%02x:%02x)\n", 354 peer, 355 peer->mac_addr.raw[0], peer->mac_addr.raw[1], 356 peer->mac_addr.raw[2], peer->mac_addr.raw[3], 357 peer->mac_addr.raw[4], peer->mac_addr.raw[5]); 358 359 } 360 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 361 "Packet received with PN error"); 362 363 /* No peer PN policy -- definitely drop */ 364 if (!peer_pn_policy) 365 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, 366 mpdu_desc_info, 367 head, tail, quota); 368 369 return rx_bufs_used; 370 } 371 372 /** 373 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K 374 * 375 * @soc: core txrx main context 376 * @ring_desc: opaque pointer to the REO error ring descriptor 377 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 378 * @head: head of the local descriptor free-list 379 * @tail: tail of the local descriptor free-list 380 * @quota: No. of units (packets) that can be serviced in one shot. 381 * 382 * This function implements the error handling when sequence number 383 * of the MPDU jumps suddenly by 2K.Today there are 2 cases that 384 * need to be handled: 385 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K 386 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN 387 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame 388 * For case B), the frame is normally dropped, no more action is taken 389 * 390 * Return: uint32_t: No. of elements processed 391 */ 392 static uint32_t 393 dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc, 394 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 395 union dp_rx_desc_list_elem_t **head, 396 union dp_rx_desc_list_elem_t **tail, 397 uint32_t quota) 398 { 399 return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info, 400 head, tail, quota); 401 } 402 403 /** 404 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu 405 * to pdev invalid peer list 406 * 407 * @soc: core DP main context 408 * @nbuf: Buffer pointer 409 * @rx_tlv_hdr: start of rx tlv header 410 * @mac_id: mac id 411 * 412 * Return: bool: true for last msdu of mpdu 413 */ 414 static bool 415 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 416 uint8_t mac_id) 417 { 418 bool mpdu_done = false; 419 qdf_nbuf_t curr_nbuf = NULL; 420 qdf_nbuf_t tmp_nbuf = NULL; 421 422 /* TODO: Currently only single radio is supported, hence 423 * pdev hard coded to '0' index 424 */ 425 struct dp_pdev *dp_pdev = soc->pdev_list[mac_id]; 426 427 if (!dp_pdev->first_nbuf) { 428 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 429 dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr); 430 dp_pdev->first_nbuf = true; 431 432 /* If the new nbuf received is the first msdu of the 433 * amsdu and there are msdus in the invalid peer msdu 434 * list, then let us free all the msdus of the invalid 435 * peer msdu list. 436 * This scenario can happen when we start receiving 437 * new a-msdu even before the previous a-msdu is completely 438 * received. 439 */ 440 curr_nbuf = dp_pdev->invalid_peer_head_msdu; 441 while (curr_nbuf) { 442 tmp_nbuf = curr_nbuf->next; 443 qdf_nbuf_free(curr_nbuf); 444 curr_nbuf = tmp_nbuf; 445 } 446 447 dp_pdev->invalid_peer_head_msdu = NULL; 448 dp_pdev->invalid_peer_tail_msdu = NULL; 449 450 hal_rx_mon_hw_desc_get_mpdu_status(rx_tlv_hdr, 451 &(dp_pdev->ppdu_info.rx_status)); 452 453 } 454 455 if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) && 456 hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 457 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 458 qdf_assert_always(dp_pdev->first_nbuf == true); 459 dp_pdev->first_nbuf = false; 460 mpdu_done = true; 461 } 462 463 DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu, 464 dp_pdev->invalid_peer_tail_msdu, 465 nbuf); 466 467 return mpdu_done; 468 } 469 470 /** 471 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue 472 * descriptor violation on either a 473 * REO or WBM ring 474 * 475 * @soc: core DP main context 476 * @nbuf: buffer pointer 477 * @rx_tlv_hdr: start of rx tlv header 478 * @pool_id: mac id 479 * 480 * This function handles NULL queue descriptor violations arising out 481 * a missing REO queue for a given peer or a given TID. This typically 482 * may happen if a packet is received on a QOS enabled TID before the 483 * ADDBA negotiation for that TID, when the TID queue is setup. Or 484 * it may also happen for MC/BC frames if they are not routed to the 485 * non-QOS TID queue, in the absence of any other default TID queue. 486 * This error can show up both in a REO destination or WBM release ring. 487 * 488 */ 489 static void 490 dp_rx_null_q_desc_handle(struct dp_soc *soc, 491 qdf_nbuf_t nbuf, 492 uint8_t *rx_tlv_hdr, 493 uint8_t pool_id) 494 { 495 uint32_t pkt_len, l2_hdr_offset; 496 uint16_t msdu_len; 497 struct dp_vdev *vdev; 498 uint16_t peer_id = 0xFFFF; 499 struct dp_peer *peer = NULL; 500 uint8_t tid; 501 502 qdf_nbuf_set_rx_chfrag_start(nbuf, 503 hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)); 504 qdf_nbuf_set_rx_chfrag_end(nbuf, 505 hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr)); 506 507 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); 508 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 509 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; 510 511 /* Set length in nbuf */ 512 qdf_nbuf_set_pktlen(nbuf, pkt_len); 513 514 /* 515 * Check if DMA completed -- msdu_done is the last bit 516 * to be written 517 */ 518 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 519 520 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 521 FL("MSDU DONE failure")); 522 523 hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO); 524 qdf_assert(0); 525 } 526 527 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 528 peer = dp_peer_find_by_id(soc, peer_id); 529 530 if (!peer) { 531 bool mpdu_done = false; 532 struct dp_pdev *pdev = soc->pdev_list[pool_id]; 533 534 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 535 FL("peer is NULL")); 536 537 mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id); 538 /* Trigger invalid peer handler wrapper */ 539 dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done); 540 541 if (mpdu_done) { 542 pdev->invalid_peer_head_msdu = NULL; 543 pdev->invalid_peer_tail_msdu = NULL; 544 } 545 return; 546 } 547 548 vdev = peer->vdev; 549 if (!vdev) { 550 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 551 FL("INVALID vdev %pK OR osif_rx"), vdev); 552 /* Drop & free packet */ 553 qdf_nbuf_free(nbuf); 554 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 555 return; 556 } 557 558 /* 559 * Advance the packet start pointer by total size of 560 * pre-header TLV's 561 */ 562 qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN)); 563 564 if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) { 565 /* this is a looped back MCBC pkt, drop it */ 566 qdf_nbuf_free(nbuf); 567 return; 568 } 569 /* 570 * In qwrap mode if the received packet matches with any of the vdev 571 * mac addresses, drop it. Donot receive multicast packets originated 572 * from any proxysta. 573 */ 574 if (check_qwrap_multicast_loopback(vdev, nbuf)) { 575 qdf_nbuf_free(nbuf); 576 return; 577 } 578 579 580 if (qdf_unlikely((peer->nawds_enabled == true) && 581 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { 582 QDF_TRACE(QDF_MODULE_ID_DP, 583 QDF_TRACE_LEVEL_DEBUG, 584 "%s free buffer for multicast packet", 585 __func__); 586 DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); 587 qdf_nbuf_free(nbuf); 588 return; 589 } 590 591 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer, 592 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { 593 QDF_TRACE(QDF_MODULE_ID_DP, 594 QDF_TRACE_LEVEL_ERROR, 595 FL("mcast Policy Check Drop pkt")); 596 /* Drop & free packet */ 597 qdf_nbuf_free(nbuf); 598 return; 599 } 600 601 /* WDS Source Port Learning */ 602 if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet && 603 vdev->wds_enabled)) 604 dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf); 605 606 if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) { 607 /* TODO: Assuming that qos_control_valid also indicates 608 * unicast. Should we check this? 609 */ 610 tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr); 611 if (peer && 612 peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) { 613 /* IEEE80211_SEQ_MAX indicates invalid start_seq */ 614 dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); 615 } 616 } 617 618 #ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */ 619 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 620 "%s: p_id %d msdu_len %d hdr_off %d", 621 __func__, peer_id, msdu_len, l2_hdr_offset); 622 623 print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4, 624 qdf_nbuf_data(nbuf), 128, false); 625 #endif /* NAPIER_EMULATION */ 626 627 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 628 qdf_nbuf_set_next(nbuf, NULL); 629 dp_rx_deliver_raw(vdev, nbuf, peer); 630 } else { 631 if (qdf_unlikely(peer->bss_peer)) { 632 QDF_TRACE(QDF_MODULE_ID_DP, 633 QDF_TRACE_LEVEL_INFO, 634 FL("received pkt with same src MAC")); 635 /* Drop & free packet */ 636 qdf_nbuf_free(nbuf); 637 return; 638 } 639 640 if (vdev->osif_rx) { 641 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 642 FL("vdev %pK osif_rx %pK"), vdev, 643 vdev->osif_rx); 644 qdf_nbuf_set_next(nbuf, NULL); 645 vdev->osif_rx(vdev->osif_vdev, nbuf); 646 DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1, 647 qdf_nbuf_len(nbuf), 648 hal_rx_msdu_end_da_is_mcbc_get( 649 rx_tlv_hdr)); 650 DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1, 651 qdf_nbuf_len(nbuf)); 652 } else { 653 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 654 FL("INVALID vdev %pK OR osif_rx"), vdev); 655 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 656 } 657 } 658 return; 659 } 660 661 /** 662 * dp_rx_err_deliver() - Function to deliver error frames to OS 663 * 664 * @soc: core DP main context 665 * @rx_desc : pointer to the sw rx descriptor 666 * @head: pointer to head of rx descriptors to be added to free list 667 * @tail: pointer to tail of rx descriptors to be added to free list 668 * quota: upper limit of descriptors that can be reaped 669 * 670 * Return: uint32_t: No. of Rx buffers reaped 671 */ 672 static void 673 dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr) 674 { 675 uint32_t pkt_len, l2_hdr_offset; 676 uint16_t msdu_len; 677 struct dp_vdev *vdev; 678 uint16_t peer_id = 0xFFFF; 679 struct dp_peer *peer = NULL; 680 struct ether_header *eh; 681 bool isBroadcast; 682 683 /* 684 * Check if DMA completed -- msdu_done is the last bit 685 * to be written 686 */ 687 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 688 689 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 690 FL("MSDU DONE failure")); 691 692 hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO); 693 qdf_assert(0); 694 } 695 696 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 697 peer = dp_peer_find_by_id(soc, peer_id); 698 699 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); 700 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 701 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; 702 703 /* Set length in nbuf */ 704 qdf_nbuf_set_pktlen(nbuf, pkt_len); 705 706 qdf_nbuf_set_next(nbuf, NULL); 707 708 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 709 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 710 711 if (!peer) { 712 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 713 FL("peer is NULL")); 714 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 715 qdf_nbuf_len(nbuf)); 716 /* Trigger invalid peer handler wrapper */ 717 dp_rx_process_invalid_peer_wrapper(soc, nbuf, true); 718 return; 719 } 720 721 vdev = peer->vdev; 722 if (!vdev) { 723 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 724 FL("INVALID vdev %pK OR osif_rx"), vdev); 725 /* Drop & free packet */ 726 qdf_nbuf_free(nbuf); 727 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 728 return; 729 } 730 731 /* Drop & free packet if mesh mode not enabled */ 732 if (!vdev->mesh_vdev) { 733 qdf_nbuf_free(nbuf); 734 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 735 return; 736 } 737 738 /* 739 * Advance the packet start pointer by total size of 740 * pre-header TLV's 741 */ 742 qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN)); 743 744 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 745 == QDF_STATUS_SUCCESS) { 746 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED, 747 FL("mesh pkt filtered")); 748 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); 749 750 qdf_nbuf_free(nbuf); 751 return; 752 753 } 754 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); 755 756 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) && 757 (vdev->rx_decap_type == 758 htt_cmn_pkt_type_ethernet))) { 759 eh = (struct ether_header *)qdf_nbuf_data(nbuf); 760 isBroadcast = (IEEE80211_IS_BROADCAST 761 (eh->ether_dhost)) ? 1 : 0 ; 762 if (isBroadcast) { 763 DP_STATS_INC_PKT(peer, rx.bcast, 1, 764 qdf_nbuf_len(nbuf)); 765 } 766 } 767 768 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 769 dp_rx_deliver_raw(vdev, nbuf, peer); 770 } else { 771 DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1); 772 vdev->osif_rx(vdev->osif_vdev, nbuf); 773 } 774 775 return; 776 } 777 778 /** 779 * dp_rx_process_mic_error(): Function to pass mic error indication to umac 780 * @soc: DP SOC handle 781 * @rx_desc : pointer to the sw rx descriptor 782 * @head: pointer to head of rx descriptors to be added to free list 783 * @tail: pointer to tail of rx descriptors to be added to free list 784 * 785 * return: void 786 */ 787 void 788 dp_rx_process_mic_error(struct dp_soc *soc, 789 qdf_nbuf_t nbuf, 790 uint8_t *rx_tlv_hdr) 791 { 792 struct dp_vdev *vdev = NULL; 793 struct dp_pdev *pdev = NULL; 794 struct ol_if_ops *tops = NULL; 795 struct ieee80211_frame *wh; 796 uint8_t *rx_pkt_hdr; 797 struct dp_peer *peer; 798 uint16_t peer_id; 799 800 if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) 801 return; 802 803 rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf)); 804 wh = (struct ieee80211_frame *)rx_pkt_hdr; 805 806 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 807 peer = dp_peer_find_by_id(soc, peer_id); 808 if (!peer) { 809 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 810 "peer not found"); 811 goto fail; 812 } 813 814 vdev = peer->vdev; 815 if (!vdev) { 816 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 817 "VDEV not found"); 818 goto fail; 819 } 820 821 pdev = vdev->pdev; 822 if (!pdev) { 823 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 824 "PDEV not found"); 825 goto fail; 826 } 827 828 tops = pdev->soc->cdp_soc.ol_ops; 829 if (tops->rx_mic_error) 830 tops->rx_mic_error(pdev->osif_pdev, vdev->vdev_id, wh); 831 832 fail: 833 qdf_nbuf_free(nbuf); 834 return; 835 } 836 837 /** 838 * dp_rx_err_process() - Processes error frames routed to REO error ring 839 * 840 * @soc: core txrx main context 841 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 842 * @quota: No. of units (packets) that can be serviced in one shot. 843 * 844 * This function implements error processing and top level demultiplexer 845 * for all the frames routed to REO error ring. 846 * 847 * Return: uint32_t: No. of elements processed 848 */ 849 uint32_t 850 dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota) 851 { 852 void *hal_soc; 853 void *ring_desc; 854 union dp_rx_desc_list_elem_t *head = NULL; 855 union dp_rx_desc_list_elem_t *tail = NULL; 856 uint32_t rx_bufs_used = 0; 857 uint8_t buf_type; 858 uint8_t error, rbm; 859 struct hal_rx_mpdu_desc_info mpdu_desc_info; 860 struct hal_buf_info hbi; 861 struct dp_pdev *dp_pdev; 862 struct dp_srng *dp_rxdma_srng; 863 struct rx_desc_pool *rx_desc_pool; 864 uint32_t cookie = 0; 865 void *link_desc_va; 866 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 867 uint16_t num_msdus; 868 869 /* Debug -- Remove later */ 870 qdf_assert(soc && hal_ring); 871 872 hal_soc = soc->hal_soc; 873 874 /* Debug -- Remove later */ 875 qdf_assert(hal_soc); 876 877 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { 878 879 /* TODO */ 880 /* 881 * Need API to convert from hal_ring pointer to 882 * Ring Type / Ring Id combo 883 */ 884 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 885 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 886 FL("HAL RING Access Failed -- %pK"), hal_ring); 887 goto done; 888 } 889 890 while (qdf_likely(quota-- && (ring_desc = 891 hal_srng_dst_get_next(hal_soc, hal_ring)))) { 892 893 DP_STATS_INC(soc, rx.err_ring_pkts, 1); 894 895 error = HAL_RX_ERROR_STATUS_GET(ring_desc); 896 897 qdf_assert(error == HAL_REO_ERROR_DETECTED); 898 899 buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc); 900 /* 901 * For REO error ring, expect only MSDU LINK DESC 902 */ 903 qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE); 904 905 cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); 906 /* 907 * check for the magic number in the sw cookie 908 */ 909 qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) & 910 LINK_DESC_ID_START); 911 912 /* 913 * Check if the buffer is to be processed on this processor 914 */ 915 rbm = hal_rx_ret_buf_manager_get(ring_desc); 916 917 hal_rx_reo_buf_paddr_get(ring_desc, &hbi); 918 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); 919 hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus); 920 921 if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) && 922 (msdu_list.rbm[0] != 923 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) { 924 /* TODO */ 925 /* Call appropriate handler */ 926 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 927 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 928 FL("Invalid RBM %d"), msdu_list.rbm[0]); 929 930 /* Return link descriptor through WBM ring (SW2WBM)*/ 931 dp_rx_link_desc_return(soc, ring_desc, 932 HAL_BM_ACTION_RELEASE_MSDU_LIST); 933 continue; 934 } 935 936 /* Get the MPDU DESC info */ 937 hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); 938 939 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { 940 /* TODO */ 941 rx_bufs_used += dp_rx_frag_handle(soc, 942 ring_desc, &mpdu_desc_info, 943 &head, &tail, quota); 944 DP_STATS_INC(soc, rx.rx_frags, 1); 945 continue; 946 } 947 948 if (hal_rx_reo_is_pn_error(ring_desc)) { 949 /* TOD0 */ 950 DP_STATS_INC(soc, 951 rx.err. 952 reo_error[HAL_REO_ERR_PN_CHECK_FAILED], 953 1); 954 rx_bufs_used += dp_rx_pn_error_handle(soc, 955 ring_desc, &mpdu_desc_info, 956 &head, &tail, quota); 957 continue; 958 } 959 960 if (hal_rx_reo_is_2k_jump(ring_desc)) { 961 /* TOD0 */ 962 DP_STATS_INC(soc, 963 rx.err. 964 reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP], 965 1); 966 rx_bufs_used += dp_rx_2k_jump_handle(soc, 967 ring_desc, &mpdu_desc_info, 968 &head, &tail, quota); 969 continue; 970 } 971 } 972 973 done: 974 hal_srng_access_end(hal_soc, hal_ring); 975 976 if (soc->rx.flags.defrag_timeout_check) 977 dp_rx_defrag_waitlist_flush(soc); 978 979 /* Assume MAC id = 0, owner = 0 */ 980 if (rx_bufs_used) { 981 dp_pdev = soc->pdev_list[0]; 982 dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring; 983 rx_desc_pool = &soc->rx_desc_buf[0]; 984 985 dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool, 986 rx_bufs_used, &head, &tail); 987 } 988 989 return rx_bufs_used; /* Assume no scale factor for now */ 990 } 991 992 /** 993 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring 994 * 995 * @soc: core txrx main context 996 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 997 * @quota: No. of units (packets) that can be serviced in one shot. 998 * 999 * This function implements error processing and top level demultiplexer 1000 * for all the frames routed to WBM2HOST sw release ring. 1001 * 1002 * Return: uint32_t: No. of elements processed 1003 */ 1004 uint32_t 1005 dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota) 1006 { 1007 void *hal_soc; 1008 void *ring_desc; 1009 struct dp_rx_desc *rx_desc; 1010 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; 1011 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; 1012 uint32_t rx_bufs_used = 0; 1013 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 1014 uint8_t buf_type, rbm; 1015 uint32_t rx_buf_cookie; 1016 uint8_t mac_id; 1017 struct dp_pdev *dp_pdev; 1018 struct dp_srng *dp_rxdma_srng; 1019 struct rx_desc_pool *rx_desc_pool; 1020 uint8_t *rx_tlv_hdr; 1021 qdf_nbuf_t nbuf_head = NULL; 1022 qdf_nbuf_t nbuf_tail = NULL; 1023 qdf_nbuf_t nbuf, next; 1024 struct hal_wbm_err_desc_info wbm_err_info = { 0 }; 1025 uint8_t pool_id; 1026 1027 /* Debug -- Remove later */ 1028 qdf_assert(soc && hal_ring); 1029 1030 hal_soc = soc->hal_soc; 1031 1032 /* Debug -- Remove later */ 1033 qdf_assert(hal_soc); 1034 1035 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { 1036 1037 /* TODO */ 1038 /* 1039 * Need API to convert from hal_ring pointer to 1040 * Ring Type / Ring Id combo 1041 */ 1042 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1043 FL("HAL RING Access Failed -- %pK"), hal_ring); 1044 goto done; 1045 } 1046 1047 while (qdf_likely(quota-- && (ring_desc = 1048 hal_srng_dst_get_next(hal_soc, hal_ring)))) { 1049 1050 /* XXX */ 1051 buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc); 1052 1053 /* 1054 * For WBM ring, expect only MSDU buffers 1055 */ 1056 qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF); 1057 1058 qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc) 1059 == HAL_RX_WBM_ERR_SRC_RXDMA) || 1060 (HAL_RX_WBM_ERR_SRC_GET(ring_desc) 1061 == HAL_RX_WBM_ERR_SRC_REO)); 1062 1063 /* 1064 * Check if the buffer is to be processed on this processor 1065 */ 1066 rbm = hal_rx_ret_buf_manager_get(ring_desc); 1067 1068 if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) { 1069 /* TODO */ 1070 /* Call appropriate handler */ 1071 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 1072 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1073 FL("Invalid RBM %d"), rbm); 1074 continue; 1075 } 1076 1077 rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc); 1078 1079 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); 1080 qdf_assert(rx_desc); 1081 1082 if (!dp_rx_desc_check_magic(rx_desc)) { 1083 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1084 FL("Invalid rx_desc cookie=%d"), 1085 rx_buf_cookie); 1086 continue; 1087 } 1088 1089 nbuf = rx_desc->nbuf; 1090 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL); 1091 1092 /* 1093 * save the wbm desc info in nbuf TLV. We will need this 1094 * info when we do the actual nbuf processing 1095 */ 1096 hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info); 1097 wbm_err_info.pool_id = rx_desc->pool_id; 1098 hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf), 1099 &wbm_err_info); 1100 1101 rx_bufs_reaped[rx_desc->pool_id]++; 1102 1103 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf); 1104 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 1105 &tail[rx_desc->pool_id], 1106 rx_desc); 1107 } 1108 done: 1109 hal_srng_access_end(hal_soc, hal_ring); 1110 1111 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 1112 if (rx_bufs_reaped[mac_id]) { 1113 dp_pdev = soc->pdev_list[mac_id]; 1114 dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring; 1115 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1116 1117 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 1118 rx_desc_pool, rx_bufs_reaped[mac_id], 1119 &head[mac_id], &tail[mac_id]); 1120 rx_bufs_used += rx_bufs_reaped[mac_id]; 1121 } 1122 } 1123 1124 nbuf = nbuf_head; 1125 while (nbuf) { 1126 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1127 /* 1128 * retrieve the wbm desc info from nbuf TLV, so we can 1129 * handle error cases appropriately 1130 */ 1131 hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info); 1132 1133 /* Set queue_mapping in nbuf to 0 */ 1134 dp_set_rx_queue(nbuf, 0); 1135 1136 next = nbuf->next; 1137 if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { 1138 if (wbm_err_info.reo_psh_rsn 1139 == HAL_RX_WBM_REO_PSH_RSN_ERROR) { 1140 1141 DP_STATS_INC(soc, 1142 rx.err.reo_error 1143 [wbm_err_info.reo_err_code], 1); 1144 1145 switch (wbm_err_info.reo_err_code) { 1146 /* 1147 * Handling for packets which have NULL REO 1148 * queue descriptor 1149 */ 1150 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 1151 pool_id = wbm_err_info.pool_id; 1152 QDF_TRACE(QDF_MODULE_ID_DP, 1153 QDF_TRACE_LEVEL_WARN, 1154 "Got pkt with REO ERROR: %d", 1155 wbm_err_info.reo_err_code); 1156 dp_rx_null_q_desc_handle(soc, 1157 nbuf, 1158 rx_tlv_hdr, 1159 pool_id); 1160 nbuf = next; 1161 continue; 1162 /* TODO */ 1163 /* Add per error code accounting */ 1164 1165 default: 1166 QDF_TRACE(QDF_MODULE_ID_DP, 1167 QDF_TRACE_LEVEL_DEBUG, 1168 "REO error %d detected", 1169 wbm_err_info.reo_err_code); 1170 } 1171 } 1172 } else if (wbm_err_info.wbm_err_src == 1173 HAL_RX_WBM_ERR_SRC_RXDMA) { 1174 if (wbm_err_info.rxdma_psh_rsn 1175 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 1176 struct dp_peer *peer = NULL; 1177 uint16_t peer_id = 0xFFFF; 1178 1179 DP_STATS_INC(soc, 1180 rx.err.rxdma_error 1181 [wbm_err_info.rxdma_err_code], 1); 1182 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 1183 peer = dp_peer_find_by_id(soc, peer_id); 1184 1185 switch (wbm_err_info.rxdma_err_code) { 1186 case HAL_RXDMA_ERR_UNENCRYPTED: 1187 dp_rx_err_deliver(soc, 1188 nbuf, 1189 rx_tlv_hdr); 1190 nbuf = next; 1191 continue; 1192 1193 case HAL_RXDMA_ERR_TKIP_MIC: 1194 dp_rx_process_mic_error(soc, 1195 nbuf, 1196 rx_tlv_hdr); 1197 nbuf = next; 1198 if (peer) 1199 DP_STATS_INC(peer, rx.err.mic_err, 1); 1200 continue; 1201 1202 case HAL_RXDMA_ERR_DECRYPT: 1203 if (peer) 1204 DP_STATS_INC(peer, rx.err.decrypt_err, 1); 1205 QDF_TRACE(QDF_MODULE_ID_DP, 1206 QDF_TRACE_LEVEL_DEBUG, 1207 "Packet received with Decrypt error"); 1208 break; 1209 1210 default: 1211 QDF_TRACE(QDF_MODULE_ID_DP, 1212 QDF_TRACE_LEVEL_DEBUG, 1213 "RXDMA error %d", 1214 wbm_err_info. 1215 rxdma_err_code); 1216 } 1217 } 1218 } else { 1219 /* Should not come here */ 1220 qdf_assert(0); 1221 } 1222 1223 hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_DEBUG); 1224 qdf_nbuf_free(nbuf); 1225 nbuf = next; 1226 } 1227 return rx_bufs_used; /* Assume no scale factor for now */ 1228 } 1229 1230 /** 1231 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs 1232 * 1233 * @soc: core DP main context 1234 * @mac_id: mac id which is one of 3 mac_ids 1235 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 1236 * @head: head of descs list to be freed 1237 * @tail: tail of decs list to be freed 1238 1239 * Return: number of msdu in MPDU to be popped 1240 */ 1241 static inline uint32_t 1242 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 1243 void *rxdma_dst_ring_desc, 1244 union dp_rx_desc_list_elem_t **head, 1245 union dp_rx_desc_list_elem_t **tail) 1246 { 1247 void *rx_msdu_link_desc; 1248 qdf_nbuf_t msdu; 1249 qdf_nbuf_t last; 1250 struct hal_rx_msdu_list msdu_list; 1251 uint16_t num_msdus; 1252 struct hal_buf_info buf_info; 1253 void *p_buf_addr_info; 1254 void *p_last_buf_addr_info; 1255 uint32_t rx_bufs_used = 0; 1256 uint32_t msdu_cnt; 1257 uint32_t i; 1258 uint8_t push_reason; 1259 uint8_t rxdma_error_code = 0; 1260 uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST; 1261 struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); 1262 1263 msdu = 0; 1264 1265 last = NULL; 1266 1267 hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info, 1268 &p_last_buf_addr_info, &msdu_cnt); 1269 1270 push_reason = 1271 hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc); 1272 if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 1273 rxdma_error_code = 1274 hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc); 1275 } 1276 1277 do { 1278 rx_msdu_link_desc = 1279 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 1280 1281 qdf_assert(rx_msdu_link_desc); 1282 1283 hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus); 1284 1285 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 1286 /* if the msdus belongs to NSS offloaded radio && 1287 * the rbm is not SW1_BM then return the msdu_link 1288 * descriptor without freeing the msdus (nbufs). let 1289 * these buffers be given to NSS completion ring for 1290 * NSS to free them. 1291 * else iterate through the msdu link desc list and 1292 * free each msdu in the list. 1293 */ 1294 if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM && 1295 wlan_cfg_get_dp_pdev_nss_enabled( 1296 pdev->wlan_cfg_ctx)) 1297 bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST; 1298 else { 1299 for (i = 0; i < num_msdus; i++) { 1300 struct dp_rx_desc *rx_desc = 1301 dp_rx_cookie_2_va_rxdma_buf(soc, 1302 msdu_list.sw_cookie[i]); 1303 qdf_assert(rx_desc); 1304 msdu = rx_desc->nbuf; 1305 1306 qdf_nbuf_unmap_single(soc->osdev, msdu, 1307 QDF_DMA_FROM_DEVICE); 1308 1309 QDF_TRACE(QDF_MODULE_ID_DP, 1310 QDF_TRACE_LEVEL_DEBUG, 1311 "[%s][%d] msdu_nbuf=%pK \n", 1312 __func__, __LINE__, msdu); 1313 1314 qdf_nbuf_free(msdu); 1315 rx_bufs_used++; 1316 dp_rx_add_to_free_desc_list(head, 1317 tail, rx_desc); 1318 } 1319 } 1320 } else { 1321 rxdma_error_code = HAL_RXDMA_ERR_WAR; 1322 } 1323 1324 hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info, 1325 &p_buf_addr_info); 1326 1327 dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action); 1328 p_last_buf_addr_info = p_buf_addr_info; 1329 1330 } while (buf_info.paddr); 1331 1332 DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1); 1333 1334 if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) { 1335 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1336 "Packet received with Decrypt error"); 1337 } 1338 1339 return rx_bufs_used; 1340 } 1341 1342 /** 1343 * dp_rxdma_err_process() - RxDMA error processing functionality 1344 * 1345 * @soc: core txrx main contex 1346 * @mac_id: mac id which is one of 3 mac_ids 1347 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced 1348 * @quota: No. of units (packets) that can be serviced in one shot. 1349 1350 * Return: num of buffers processed 1351 */ 1352 uint32_t 1353 dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) 1354 { 1355 struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); 1356 int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id); 1357 void *hal_soc; 1358 void *rxdma_dst_ring_desc; 1359 void *err_dst_srng; 1360 union dp_rx_desc_list_elem_t *head = NULL; 1361 union dp_rx_desc_list_elem_t *tail = NULL; 1362 struct dp_srng *dp_rxdma_srng; 1363 struct rx_desc_pool *rx_desc_pool; 1364 uint32_t work_done = 0; 1365 uint32_t rx_bufs_used = 0; 1366 1367 #ifdef DP_INTR_POLL_BASED 1368 if (!pdev) 1369 return 0; 1370 #endif 1371 err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng; 1372 1373 if (!err_dst_srng) { 1374 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1375 "%s %d : HAL Monitor Destination Ring Init \ 1376 Failed -- %pK\n", 1377 __func__, __LINE__, err_dst_srng); 1378 return 0; 1379 } 1380 1381 hal_soc = soc->hal_soc; 1382 1383 qdf_assert(hal_soc); 1384 1385 if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) { 1386 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1387 "%s %d : HAL Monitor Destination Ring Init \ 1388 Failed -- %pK\n", 1389 __func__, __LINE__, err_dst_srng); 1390 return 0; 1391 } 1392 1393 while (qdf_likely(quota-- && (rxdma_dst_ring_desc = 1394 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { 1395 1396 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, 1397 rxdma_dst_ring_desc, 1398 &head, &tail); 1399 } 1400 1401 hal_srng_access_end(hal_soc, err_dst_srng); 1402 1403 if (rx_bufs_used) { 1404 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 1405 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1406 1407 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 1408 rx_desc_pool, rx_bufs_used, &head, &tail); 1409 1410 work_done += rx_bufs_used; 1411 } 1412 1413 return work_done; 1414 } 1415