/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

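/*
 * Illustrative sketch (not part of the datapath): the magic-value check
 * above is a generic guard against stale or double-freed descriptors.
 * A descriptor is stamped with a known pattern at allocation time and
 * the pattern is cleared on first use, so a second use trips the check.
 * Everything below is a self-contained example under a hypothetical
 * DP_RX_ERR_EXAMPLES guard; all example_* names are illustrative only.
 */
#ifdef DP_RX_ERR_EXAMPLES
#define EXAMPLE_DESC_MAGIC 0xdeadbeef

struct example_desc {
	uint32_t magic;
	void *payload;
};

static void example_desc_init(struct example_desc *d, void *payload)
{
	d->magic = EXAMPLE_DESC_MAGIC;	/* stamp at allocation */
	d->payload = payload;
}

static bool example_desc_claim(struct example_desc *d)
{
	if (d->magic != EXAMPLE_DESC_MAGIC)
		return false;		/* stale or reused descriptor */
	d->magic = 0;			/* clear so a second claim fails */
	return true;
}
#endif /* DP_RX_ERR_EXAMPLES */
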
/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 *			      back on same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 *
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					struct dp_peer *peer,
					uint8_t *rx_tlv_hdr,
					qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * if the received pkts src mac addr matches with vdev
	 * mac address then drop the pkt as it is looped back
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			vdev->mac_addr.raw,
			DP_MAC_ADDR_LEN)))
		return true;

	/* if the received pkts src mac addr matches with the
	 * wired PCs MAC addr which is behind the STA or with
	 * wireless STAs MAC addr which are behind the Repeater,
	 * then drop the pkt as it is looped back
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		/* sa_idx is unsigned, so only the upper bound is checked */
		if (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					"invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kind of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_hash_find(soc,
						&data[DP_MAC_ADDR_LEN]);
		}
	} else
		ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]);

	if (ase) {
		ase->ast_idx = sa_idx;
		soc->ast_table[sa_idx] = ase;

		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_INFO,
				"Detected DBDC Root AP %pM, %d %d",
				&data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
				ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
				(ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_INFO,
				"received pkt with same src mac %pM",
				&data[DP_MAC_ADDR_LEN]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

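/*
 * Illustrative sketch (hypothetical helper, not used above): the first
 * MEC test in dp_rx_mcast_echo_check() is a plain byte comparison. In an
 * ethernet header the source address starts right after the 6-byte
 * destination address, which is why the frame data is indexed at
 * DP_MAC_ADDR_LEN. A standalone equivalent of that test:
 */
#ifdef DP_RX_ERR_EXAMPLES
static bool example_is_own_echo(const uint8_t *eth_hdr,
				const uint8_t *own_mac)
{
	/* eth_hdr layout: dst[6] | src[6] | ethertype[2]; compare src */
	return qdf_mem_cmp(&eth_hdr[DP_MAC_ADDR_LEN], own_mac,
				DP_MAC_ADDR_LEN) == 0;
}
#endif /* DP_RX_ERR_EXAMPLES */
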
/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *				      HW (WBM), by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to take on the returned descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
					uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
				src_srng_desc, link_desc_addr, bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			*srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			*srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to take on the returned descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(link_desc_va, &msdu_list,
		&mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
			msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("Invalid rx_desc cookie=%d"),
					msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling
 * If the peer is configured to ignore the PN check errors
 * or if DP feels that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */

		/* Only dereference the peer for the MAC when it exists */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"discard rx due to PN error for peer %pK "
			"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
			peer,
			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}

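/*
 * Illustrative sketch (hypothetical helper, not used by the handler
 * above): distinguishing case A from case B above amounts to modular
 * arithmetic on the 12-bit 802.11 sequence space. A candidate sequence
 * number csn is "within the BA window" of the window start ssn when the
 * forward distance from ssn to csn, taken modulo 4096, is smaller than
 * the negotiated window size.
 */
#ifdef DP_RX_ERR_EXAMPLES
#define EXAMPLE_SEQ_SPACE 4096	/* 12-bit 802.11 sequence numbers */

static bool example_seq_in_ba_window(uint16_t ssn, uint16_t csn,
					uint16_t ba_win_size)
{
	/* forward distance from ssn to csn in modulo-4096 space */
	uint16_t dist = (csn - ssn) & (EXAMPLE_SEQ_SPACE - 1);

	return dist < ba_win_size;
}
#endif /* DP_RX_ERR_EXAMPLES */
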
/**
 * dp_rx_chain_msdus() - Function to chain all msdus of an mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
								uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * the pdev is looked up directly by mac_id
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		hal_rx_mon_hw_desc_get_mpdu_status(rx_tlv_hdr,
				&(dp_pdev->ppdu_info.rx_status));

	}

	if (hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
				dp_pdev->invalid_peer_tail_msdu,
				nbuf);

	return mpdu_done;
}

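/*
 * Illustrative sketch: DP_RX_LIST_APPEND above builds the invalid-peer
 * MSDU chain as a singly linked list tracked by head and tail pointers,
 * which makes each append O(1). A minimal standalone equivalent of that
 * pattern (hypothetical types, for illustration only):
 */
#ifdef DP_RX_ERR_EXAMPLES
struct example_node {
	struct example_node *next;
};

static void example_list_append(struct example_node **head,
				struct example_node **tail,
				struct example_node *node)
{
	node->next = NULL;
	if (!*head)
		*head = node;		/* first element starts the chain */
	else
		(*tail)->next = node;	/* O(1) append at the tail */
	*tail = node;
}
#endif /* DP_RX_ERR_EXAMPLES */
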
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: void
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr,
			uint8_t pool_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint8_t tid;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
			hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
			hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("peer is NULL"));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				"%s free buffer for multicast packet",
				__func__);
		DP_STATS_INC_PKT(peer, rx.nawds_mcast_drop,
				1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_ERROR,
				FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);
		if (peer &&
			peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"%s: p_id %d msdu_len %d hdr_off %d",
		__func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
			qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_INFO,
					FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				FL("vdev %pK osif_rx %pK"), vdev,
				vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1,
				qdf_nbuf_len(nbuf),
				hal_rx_msdu_end_da_is_mcbc_get(
					rx_tlv_hdr));
			DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1,
				qdf_nbuf_len(nbuf));
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
	return;
}

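/*
 * Illustrative sketch (hypothetical helper): the length bookkeeping in
 * the handler above works in two steps. The total bytes occupied in the
 * mapped buffer are the TLV preamble, the L3 header padding and the
 * MSDU payload; after qdf_nbuf_pull_head(nbuf, pad + RX_PKT_TLVS_LEN)
 * the data pointer sits on the ethernet header and qdf_nbuf_len() is
 * back to just the MSDU length.
 */
#ifdef DP_RX_ERR_EXAMPLES
static inline uint32_t example_rx_pkt_len(uint16_t msdu_len,
					uint32_t l2_hdr_pad)
{
	/* bytes occupied in the mapped buffer: TLVs, padding, payload */
	return msdu_len + l2_hdr_pad + RX_PKT_TLVS_LEN;
}
#endif /* DP_RX_ERR_EXAMPLES */
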
/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	struct ether_header *eh;
	bool isBroadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("peer is NULL"));
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				qdf_nbuf_len(nbuf));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			== QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
				(vdev->rx_decap_type ==
				htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		isBroadcast = (IEEE80211_IS_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		if (isBroadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: DP SOC handle
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
void
dp_rx_process_mic_error(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	struct dp_peer *peer;
	uint16_t peer_id;

	/* free the nbuf even when bailing out early, else it leaks */
	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		goto fail;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"PDEV not found");
		goto fail;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->osif_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
	return;
}

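/*
 * Illustrative sketch (hypothetical types): the MIC failure above is
 * handed to umac through the ol_if_ops callback table rather than by a
 * direct call, so the datapath stays decoupled from the OS layer. A
 * minimal standalone version of that indirection pattern:
 */
#ifdef DP_RX_ERR_EXAMPLES
struct example_ops {
	void (*on_mic_error)(void *ctx, uint8_t vdev_id, void *frame_hdr);
};

static void example_report_mic_error(struct example_ops *ops, void *ctx,
					uint8_t vdev_id, void *frame_hdr)
{
	/* NULL-check optional callbacks before invoking them */
	if (ops && ops->on_mic_error)
		ops->on_mic_error(ctx, vdev_id, frame_hdr);
}
#endif /* DP_RX_ERR_EXAMPLES */
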
/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
							LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] !=
					HAL_RX_BUF_RBM_SW3_BM) &&
				(msdu_list.rbm[0] !=
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Invalid RBM %d"), rbm);

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.
				reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);
			rx_bufs_used += dp_rx_pn_error_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.
				reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);
			rx_bufs_used += dp_rx_2k_jump_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
			rx_bufs_used, &head, &tail, HAL_RX_BUF_RBM_SW3_BM);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
				(HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("Invalid rx_desc cookie=%d"),
					rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
							&wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
						&tail[rx_desc->pool_id],
						rx_desc);
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id],
					HAL_RX_BUF_RBM_SW3_BM);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
				== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					rx.err.reo_error
					[wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_WARN,
						"Got pkt with REO ERROR: %d",
						wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc,
								nbuf,
								rx_tlv_hdr,
								pool_id);
					nbuf = next;
					continue;
				/* TODO */
				/* Add per error code accounting */

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"REO error %d detected",
						wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				struct dp_peer *peer = NULL;
				uint16_t peer_id = 0xFFFF;

				DP_STATS_INC(soc,
					rx.err.rxdma_error
					[wbm_err_info.rxdma_err_code], 1);
				peer_id = hal_rx_mpdu_start_sw_peer_id_get(
								rx_tlv_hdr);
				peer = dp_peer_find_by_id(soc, peer_id);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_err_deliver(soc,
							nbuf,
							rx_tlv_hdr);
					nbuf = next;
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc,
								nbuf,
								rx_tlv_hdr);
					nbuf = next;
					if (peer)
						DP_STATS_INC(peer,
							rx.err.mic_err, 1);
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer,
							rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
					"Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"RXDMA error %d",
						wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		/*
		 * Dump the TLVs before freeing the nbuf: rx_tlv_hdr points
		 * into the nbuf data and must not be used after the free.
		 */
		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_DEBUG);

		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

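/*
 * Illustrative sketch: dp_rx_wbm_err_process() above runs in two phases.
 * Phase 1 reaps ring entries with SRNG access held and stashes the
 * per-buffer error info into the buffer's own TLV headroom; phase 2
 * walks the accumulated nbuf list after ring access has ended and reads
 * the info back. A minimal standalone version of that stash/restore
 * pattern (hypothetical types, for illustration only):
 */
#ifdef DP_RX_ERR_EXAMPLES
struct example_err_info {
	uint8_t err_src;
	uint8_t err_code;
};

/* phase 1: copy the ring-descriptor info into the buffer headroom */
static void example_stash_err_info(uint8_t *buf_headroom,
				struct example_err_info *info)
{
	qdf_mem_copy(buf_headroom, info, sizeof(*info));
}

/* phase 2: recover it later, once ring access has ended */
static void example_restore_err_info(uint8_t *buf_headroom,
				struct example_err_info *info)
{
	qdf_mem_copy(info, buf_headroom, sizeof(*info));
}
#endif /* DP_RX_ERR_EXAMPLES */
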
/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
	void *rxdma_dst_ring_desc,
	union dp_rx_desc_list_elem_t **head,
	union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = soc->pdev_list[mac_id];

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
		&p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belongs to NSS offloaded radio &&
			 * the rbm is not SW3_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
				wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
						QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"[%s][%d] msdu_nbuf=%pK",
						__func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
						tail, rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
			&p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int ring_idx = dp_get_ring_id_for_mac_id(soc, mac_id);
	uint8_t pdev_id;
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

#ifdef DP_INTR_POLL_BASED
	if (!pdev)
		return 0;
#endif
	pdev_id = pdev->pdev_id;
	err_dst_srng = pdev->rxdma_err_dst_ring[ring_idx].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL RxDMA Err Destination Ring Init Failed -- %pK",
			__func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL RxDMA Err Destination Ring access Failed -- %pK",
			__func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						rxdma_dst_ring_desc,
						&head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng,
			rx_desc_pool, rx_bufs_used, &head, &tail,
			HAL_RX_BUF_RBM_SW3_BM);
		work_done += rx_bufs_used;
	}

	return work_done;
}
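
/*
 * Illustrative usage sketch (hypothetical, not part of the driver): the
 * three entry points in this file -- dp_rx_err_process(),
 * dp_rx_wbm_err_process() and dp_rxdma_err_process() -- are meant to be
 * driven from interrupt/NAPI context with a per-pass quota. A minimal
 * polling loop, assuming the caller owns valid soc and ring handles
 * (example names only; real quota accounting may split the budget):
 */
#ifdef DP_RX_ERR_EXAMPLES
static uint32_t example_rx_err_poll(struct dp_soc *soc,
				void *reo_err_ring,
				void *wbm_err_ring,
				uint32_t mac_id,
				uint32_t quota)
{
	uint32_t work_done = 0;

	/* REO-routed error frames first, then WBM releases, then RXDMA */
	work_done += dp_rx_err_process(soc, reo_err_ring, quota);
	work_done += dp_rx_wbm_err_process(soc, wbm_err_ring, quota);
	work_done += dp_rxdma_err_process(soc, mac_id, quota);

	return work_done;
}
#endif /* DP_RX_ERR_EXAMPLES */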