1 /* 2 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "dp_types.h" 20 #include "dp_rx.h" 21 #include "dp_peer.h" 22 #include "dp_internal.h" 23 #include "hal_api.h" 24 #include "qdf_trace.h" 25 #include "qdf_nbuf.h" 26 #ifdef CONFIG_MCL 27 #include <cds_ieee80211_common.h> 28 #else 29 #include <linux/ieee80211.h> 30 #endif 31 #include "dp_rx_defrag.h" 32 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 33 34 #ifdef RX_DESC_DEBUG_CHECK 35 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 36 { 37 if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) { 38 return false; 39 } 40 rx_desc->magic = 0; 41 return true; 42 } 43 #else 44 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 45 { 46 return true; 47 } 48 #endif 49 50 /** 51 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop 52 * back on same vap or a different vap. 53 * 54 * @soc: core DP main context 55 * @peer: dp peer handler 56 * @rx_tlv_hdr: start of the rx TLV header 57 * @nbuf: pkt buffer 58 * 59 * Return: bool (true if it is a looped back pkt else false) 60 * 61 */ 62 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, 63 struct dp_peer *peer, 64 uint8_t *rx_tlv_hdr, 65 qdf_nbuf_t nbuf) 66 { 67 struct dp_vdev *vdev = peer->vdev; 68 struct dp_ast_entry *ase; 69 uint16_t sa_idx = 0; 70 uint8_t *data; 71 72 /* 73 * Multicast Echo Check is required only if vdev is STA and 74 * received pkt is a multicast/broadcast pkt. otherwise 75 * skip the MEC check. 76 */ 77 if (vdev->opmode != wlan_op_mode_sta) 78 return false; 79 80 if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) 81 return false; 82 83 data = qdf_nbuf_data(nbuf); 84 /* 85 * if the received pkts src mac addr matches with vdev 86 * mac address then drop the pkt as it is looped back 87 */ 88 if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN], 89 vdev->mac_addr.raw, 90 DP_MAC_ADDR_LEN))) 91 return true; 92 93 /* if the received pkts src mac addr matches with the 94 * wired PCs MAC addr which is behind the STA or with 95 * wireless STAs MAC addr which are behind the Repeater, 96 * then drop the pkt as it is looped back 97 */ 98 qdf_spin_lock_bh(&soc->ast_lock); 99 if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) { 100 sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr); 101 102 if ((sa_idx < 0) || 103 (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2))) { 104 qdf_spin_unlock_bh(&soc->ast_lock); 105 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 106 "invalid sa_idx: %d", sa_idx); 107 qdf_assert_always(0); 108 } 109 110 ase = soc->ast_table[sa_idx]; 111 if (!ase) { 112 /* We do not get a peer map event for STA and without 113 * this event we don't know what is STA's sa_idx. 114 * For this reason the AST is still not associated to 115 * any index postion in ast_table. 116 * In these kind of scenarios where sa is valid but 117 * ast is not in ast_table, we use the below API to get 118 * AST entry for STA's own mac_address. 119 */ 120 ase = dp_peer_ast_hash_find(soc, 121 &data[DP_MAC_ADDR_LEN]); 122 123 } 124 } else 125 ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]); 126 127 if (ase) { 128 ase->ast_idx = sa_idx; 129 soc->ast_table[sa_idx] = ase; 130 131 if (ase->pdev_id != vdev->pdev->pdev_id) { 132 qdf_spin_unlock_bh(&soc->ast_lock); 133 QDF_TRACE(QDF_MODULE_ID_DP, 134 QDF_TRACE_LEVEL_INFO, 135 "Detected DBDC Root AP %pM, %d %d", 136 &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id, 137 ase->pdev_id); 138 return false; 139 } 140 141 if ((ase->type == CDP_TXRX_AST_TYPE_MEC) || 142 (ase->peer != peer)) { 143 qdf_spin_unlock_bh(&soc->ast_lock); 144 QDF_TRACE(QDF_MODULE_ID_DP, 145 QDF_TRACE_LEVEL_INFO, 146 "received pkt with same src mac %pM", 147 &data[DP_MAC_ADDR_LEN]); 148 149 return true; 150 } 151 } 152 qdf_spin_unlock_bh(&soc->ast_lock); 153 return false; 154 } 155 156 /** 157 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to 158 * (WBM) by address 159 * 160 * @soc: core DP main context 161 * @link_desc_addr: link descriptor addr 162 * 163 * Return: QDF_STATUS 164 */ 165 QDF_STATUS 166 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr, 167 uint8_t bm_action) 168 { 169 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; 170 void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng; 171 void *hal_soc = soc->hal_soc; 172 QDF_STATUS status = QDF_STATUS_E_FAILURE; 173 void *src_srng_desc; 174 175 if (!wbm_rel_srng) { 176 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 177 "WBM RELEASE RING not initialized"); 178 return status; 179 } 180 181 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { 182 183 /* TODO */ 184 /* 185 * Need API to convert from hal_ring pointer to 186 * Ring Type / Ring Id combo 187 */ 188 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 189 FL("HAL RING Access For WBM Release SRNG Failed - %pK"), 190 wbm_rel_srng); 191 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 192 goto done; 193 } 194 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); 195 if (qdf_likely(src_srng_desc)) { 196 /* Return link descriptor through WBM ring (SW2WBM)*/ 197 hal_rx_msdu_link_desc_set(hal_soc, 198 src_srng_desc, link_desc_addr, bm_action); 199 status = QDF_STATUS_SUCCESS; 200 } else { 201 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng; 202 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 203 FL("WBM Release Ring (Id %d) Full"), srng->ring_id); 204 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 205 "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x", 206 *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp, 207 *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp); 208 } 209 done: 210 hal_srng_access_end(hal_soc, wbm_rel_srng); 211 return status; 212 213 } 214 215 /** 216 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 217 * (WBM), following error handling 218 * 219 * @soc: core DP main context 220 * @ring_desc: opaque pointer to the REO error ring descriptor 221 * 222 * Return: QDF_STATUS 223 */ 224 QDF_STATUS 225 dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action) 226 { 227 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc); 228 return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action); 229 } 230 231 /** 232 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU 233 * 234 * @soc: core txrx main context 235 * @ring_desc: opaque pointer to the REO error ring descriptor 236 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 237 * @head: head of the local descriptor free-list 238 * @tail: tail of the local descriptor free-list 239 * @quota: No. of units (packets) that can be serviced in one shot. 240 * 241 * This function is used to drop all MSDU in an MPDU 242 * 243 * Return: uint32_t: No. of elements processed 244 */ 245 static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc, 246 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 247 union dp_rx_desc_list_elem_t **head, 248 union dp_rx_desc_list_elem_t **tail, 249 uint32_t quota) 250 { 251 uint32_t rx_bufs_used = 0; 252 void *link_desc_va; 253 struct hal_buf_info buf_info; 254 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 255 int i; 256 uint8_t *rx_tlv_hdr; 257 uint32_t tid; 258 259 hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); 260 261 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 262 263 /* No UNMAP required -- this is "malloc_consistent" memory */ 264 hal_rx_msdu_list_get(link_desc_va, &msdu_list, 265 &mpdu_desc_info->msdu_count); 266 267 for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) { 268 struct dp_rx_desc *rx_desc = 269 dp_rx_cookie_2_va_rxdma_buf(soc, 270 msdu_list.sw_cookie[i]); 271 272 qdf_assert(rx_desc); 273 274 if (!dp_rx_desc_check_magic(rx_desc)) { 275 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 276 FL("Invalid rx_desc cookie=%d"), 277 msdu_list.sw_cookie[i]); 278 return rx_bufs_used; 279 } 280 281 rx_bufs_used++; 282 tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start); 283 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 284 "Packet received with PN error for tid :%d", tid); 285 286 rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf); 287 if (hal_rx_encryption_info_valid(rx_tlv_hdr)) 288 hal_rx_print_pn(rx_tlv_hdr); 289 290 /* Just free the buffers */ 291 qdf_nbuf_free(rx_desc->nbuf); 292 293 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 294 } 295 296 /* Return link descriptor through WBM ring (SW2WBM)*/ 297 dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST); 298 299 return rx_bufs_used; 300 } 301 302 /** 303 * dp_rx_pn_error_handle() - Handles PN check errors 304 * 305 * @soc: core txrx main context 306 * @ring_desc: opaque pointer to the REO error ring descriptor 307 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 308 * @head: head of the local descriptor free-list 309 * @tail: tail of the local descriptor free-list 310 * @quota: No. of units (packets) that can be serviced in one shot. 311 * 312 * This function implements PN error handling 313 * If the peer is configured to ignore the PN check errors 314 * or if DP feels, that this frame is still OK, the frame can be 315 * re-injected back to REO to use some of the other features 316 * of REO e.g. duplicate detection/routing to other cores 317 * 318 * Return: uint32_t: No. of elements processed 319 */ 320 static uint32_t 321 dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc, 322 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 323 union dp_rx_desc_list_elem_t **head, 324 union dp_rx_desc_list_elem_t **tail, 325 uint32_t quota) 326 { 327 uint16_t peer_id; 328 uint32_t rx_bufs_used = 0; 329 struct dp_peer *peer; 330 bool peer_pn_policy = false; 331 332 peer_id = DP_PEER_METADATA_PEER_ID_GET( 333 mpdu_desc_info->peer_meta_data); 334 335 336 peer = dp_peer_find_by_id(soc, peer_id); 337 338 if (qdf_likely(peer)) { 339 /* 340 * TODO: Check for peer specific policies & set peer_pn_policy 341 */ 342 } 343 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 344 "Packet received with PN error"); 345 346 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 347 "discard rx due to PN error for peer %pK " 348 "(%02x:%02x:%02x:%02x:%02x:%02x)\n", 349 peer, 350 peer->mac_addr.raw[0], peer->mac_addr.raw[1], 351 peer->mac_addr.raw[2], peer->mac_addr.raw[3], 352 peer->mac_addr.raw[4], peer->mac_addr.raw[5]); 353 354 /* No peer PN policy -- definitely drop */ 355 if (!peer_pn_policy) 356 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, 357 mpdu_desc_info, 358 head, tail, quota); 359 360 return rx_bufs_used; 361 } 362 363 /** 364 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K 365 * 366 * @soc: core txrx main context 367 * @ring_desc: opaque pointer to the REO error ring descriptor 368 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 369 * @head: head of the local descriptor free-list 370 * @tail: tail of the local descriptor free-list 371 * @quota: No. of units (packets) that can be serviced in one shot. 372 * 373 * This function implements the error handling when sequence number 374 * of the MPDU jumps suddenly by 2K.Today there are 2 cases that 375 * need to be handled: 376 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K 377 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN 378 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame 379 * For case B), the frame is normally dropped, no more action is taken 380 * 381 * Return: uint32_t: No. of elements processed 382 */ 383 static uint32_t 384 dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc, 385 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 386 union dp_rx_desc_list_elem_t **head, 387 union dp_rx_desc_list_elem_t **tail, 388 uint32_t quota) 389 { 390 return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info, 391 head, tail, quota); 392 } 393 394 static bool 395 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 396 uint8_t mac_id) 397 { 398 bool mpdu_done = false; 399 400 /* TODO: Currently only single radio is supported, hence 401 * pdev hard coded to '0' index 402 */ 403 struct dp_pdev *dp_pdev = soc->pdev_list[mac_id]; 404 405 if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) { 406 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 407 408 dp_pdev->invalid_peer_head_msdu = NULL; 409 dp_pdev->invalid_peer_tail_msdu = NULL; 410 411 hal_rx_mon_hw_desc_get_mpdu_status(rx_tlv_hdr, 412 &(dp_pdev->ppdu_info.rx_status)); 413 414 } 415 416 if (hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr)) { 417 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 418 mpdu_done = true; 419 } 420 421 DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu, 422 dp_pdev->invalid_peer_tail_msdu, 423 nbuf); 424 425 return mpdu_done; 426 } 427 428 /** 429 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue 430 * descriptor violation on either a 431 * REO or WBM ring 432 * 433 * @soc: core DP main context 434 * @rx_desc : pointer to the sw rx descriptor 435 * @head: pointer to head of rx descriptors to be added to free list 436 * @tail: pointer to tail of rx descriptors to be added to free list 437 * quota: upper limit of descriptors that can be reaped 438 * 439 * This function handles NULL queue descriptor violations arising out 440 * a missing REO queue for a given peer or a given TID. This typically 441 * may happen if a packet is received on a QOS enabled TID before the 442 * ADDBA negotiation for that TID, when the TID queue is setup. Or 443 * it may also happen for MC/BC frames if they are not routed to the 444 * non-QOS TID queue, in the absence of any other default TID queue. 445 * This error can show up both in a REO destination or WBM release ring. 446 * 447 * Return: uint32_t: No. of Rx buffers reaped 448 */ 449 static void 450 dp_rx_null_q_desc_handle(struct dp_soc *soc, 451 qdf_nbuf_t nbuf, 452 uint8_t *rx_tlv_hdr, 453 uint8_t pool_id) 454 { 455 uint32_t pkt_len, l2_hdr_offset; 456 uint16_t msdu_len; 457 struct dp_vdev *vdev; 458 uint16_t peer_id = 0xFFFF; 459 struct dp_peer *peer = NULL; 460 uint8_t tid; 461 462 qdf_nbuf_set_rx_chfrag_start(nbuf, 463 hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)); 464 qdf_nbuf_set_rx_chfrag_end(nbuf, 465 hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr)); 466 467 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); 468 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 469 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; 470 471 /* Set length in nbuf */ 472 qdf_nbuf_set_pktlen(nbuf, pkt_len); 473 474 /* 475 * Check if DMA completed -- msdu_done is the last bit 476 * to be written 477 */ 478 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 479 480 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 481 FL("MSDU DONE failure")); 482 483 hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO); 484 qdf_assert(0); 485 } 486 487 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 488 peer = dp_peer_find_by_id(soc, peer_id); 489 490 if (!peer) { 491 bool mpdu_done = false; 492 493 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 494 FL("peer is NULL")); 495 496 mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id); 497 /* Trigger invalid peer handler wrapper */ 498 dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done); 499 500 return; 501 } 502 503 vdev = peer->vdev; 504 if (!vdev) { 505 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 506 FL("INVALID vdev %pK OR osif_rx"), vdev); 507 /* Drop & free packet */ 508 qdf_nbuf_free(nbuf); 509 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 510 return; 511 } 512 513 /* 514 * Advance the packet start pointer by total size of 515 * pre-header TLV's 516 */ 517 qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN)); 518 519 if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) { 520 /* this is a looped back MCBC pkt, drop it */ 521 qdf_nbuf_free(nbuf); 522 return; 523 } 524 /* 525 * In qwrap mode if the received packet matches with any of the vdev 526 * mac addresses, drop it. Donot receive multicast packets originated 527 * from any proxysta. 528 */ 529 if (check_qwrap_multicast_loopback(vdev, nbuf)) { 530 qdf_nbuf_free(nbuf); 531 return; 532 } 533 534 535 if (qdf_unlikely((peer->nawds_enabled == true) && 536 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { 537 QDF_TRACE(QDF_MODULE_ID_DP, 538 QDF_TRACE_LEVEL_DEBUG, 539 "%s free buffer for multicast packet", 540 __func__); 541 DP_STATS_INC_PKT(peer, rx.nawds_mcast_drop, 542 1, qdf_nbuf_len(nbuf)); 543 qdf_nbuf_free(nbuf); 544 return; 545 } 546 547 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer, 548 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { 549 QDF_TRACE(QDF_MODULE_ID_DP, 550 QDF_TRACE_LEVEL_ERROR, 551 FL("mcast Policy Check Drop pkt")); 552 /* Drop & free packet */ 553 qdf_nbuf_free(nbuf); 554 return; 555 } 556 557 /* WDS Source Port Learning */ 558 if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet && 559 vdev->wds_enabled)) 560 dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf); 561 562 if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) { 563 /* TODO: Assuming that qos_control_valid also indicates 564 * unicast. Should we check this? 565 */ 566 tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr); 567 if (peer && 568 peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) { 569 /* IEEE80211_SEQ_MAX indicates invalid start_seq */ 570 dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); 571 } 572 } 573 574 #ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */ 575 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 576 "%s: p_id %d msdu_len %d hdr_off %d", 577 __func__, peer_id, msdu_len, l2_hdr_offset); 578 579 print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4, 580 qdf_nbuf_data(nbuf), 128, false); 581 #endif /* NAPIER_EMULATION */ 582 583 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 584 qdf_nbuf_set_next(nbuf, NULL); 585 dp_rx_deliver_raw(vdev, nbuf, peer); 586 } else { 587 if (qdf_unlikely(peer->bss_peer)) { 588 QDF_TRACE(QDF_MODULE_ID_DP, 589 QDF_TRACE_LEVEL_INFO, 590 FL("received pkt with same src MAC")); 591 /* Drop & free packet */ 592 qdf_nbuf_free(nbuf); 593 return; 594 } 595 596 if (vdev->osif_rx) { 597 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 598 FL("vdev %pK osif_rx %pK"), vdev, 599 vdev->osif_rx); 600 qdf_nbuf_set_next(nbuf, NULL); 601 vdev->osif_rx(vdev->osif_vdev, nbuf); 602 DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1, 603 qdf_nbuf_len(nbuf), 604 hal_rx_msdu_end_da_is_mcbc_get( 605 rx_tlv_hdr)); 606 DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1, 607 qdf_nbuf_len(nbuf)); 608 } else { 609 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 610 FL("INVALID vdev %pK OR osif_rx"), vdev); 611 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 612 } 613 } 614 return; 615 } 616 617 /** 618 * dp_rx_err_deliver() - Function to deliver error frames to OS 619 * 620 * @soc: core DP main context 621 * @rx_desc : pointer to the sw rx descriptor 622 * @head: pointer to head of rx descriptors to be added to free list 623 * @tail: pointer to tail of rx descriptors to be added to free list 624 * quota: upper limit of descriptors that can be reaped 625 * 626 * Return: uint32_t: No. of Rx buffers reaped 627 */ 628 static void 629 dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr) 630 { 631 uint32_t pkt_len, l2_hdr_offset; 632 uint16_t msdu_len; 633 struct dp_vdev *vdev; 634 uint16_t peer_id = 0xFFFF; 635 struct dp_peer *peer = NULL; 636 struct ether_header *eh; 637 bool isBroadcast; 638 639 /* 640 * Check if DMA completed -- msdu_done is the last bit 641 * to be written 642 */ 643 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 644 645 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 646 FL("MSDU DONE failure")); 647 648 hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO); 649 qdf_assert(0); 650 } 651 652 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 653 peer = dp_peer_find_by_id(soc, peer_id); 654 655 if (!peer) { 656 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 657 FL("peer is NULL")); 658 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 659 qdf_nbuf_len(nbuf)); 660 /* Drop & free packet */ 661 qdf_nbuf_free(nbuf); 662 return; 663 } 664 665 vdev = peer->vdev; 666 if (!vdev) { 667 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 668 FL("INVALID vdev %pK OR osif_rx"), vdev); 669 /* Drop & free packet */ 670 qdf_nbuf_free(nbuf); 671 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 672 return; 673 } 674 675 /* Drop & free packet if mesh mode not enabled */ 676 if (!vdev->mesh_vdev) { 677 qdf_nbuf_free(nbuf); 678 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 679 return; 680 } 681 682 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); 683 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 684 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; 685 686 /* Set length in nbuf */ 687 qdf_nbuf_set_pktlen(nbuf, pkt_len); 688 689 qdf_nbuf_set_next(nbuf, NULL); 690 691 /* 692 * Advance the packet start pointer by total size of 693 * pre-header TLV's 694 */ 695 qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN)); 696 697 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 698 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 699 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 700 == QDF_STATUS_SUCCESS) { 701 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED, 702 FL("mesh pkt filtered")); 703 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); 704 705 qdf_nbuf_free(nbuf); 706 return; 707 708 } 709 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); 710 711 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) && 712 (vdev->rx_decap_type == 713 htt_cmn_pkt_type_ethernet))) { 714 eh = (struct ether_header *)qdf_nbuf_data(nbuf); 715 isBroadcast = (IEEE80211_IS_BROADCAST 716 (eh->ether_dhost)) ? 1 : 0 ; 717 if (isBroadcast) { 718 DP_STATS_INC_PKT(peer, rx.bcast, 1, 719 qdf_nbuf_len(nbuf)); 720 } 721 } 722 723 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 724 dp_rx_deliver_raw(vdev, nbuf, peer); 725 } else { 726 DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1); 727 vdev->osif_rx(vdev->osif_vdev, nbuf); 728 } 729 730 return; 731 } 732 733 /** 734 * dp_rx_process_mic_error(): Function to pass mic error indication to umac 735 * @soc: DP SOC handle 736 * @rx_desc : pointer to the sw rx descriptor 737 * @head: pointer to head of rx descriptors to be added to free list 738 * @tail: pointer to tail of rx descriptors to be added to free list 739 * 740 * return: void 741 */ 742 void 743 dp_rx_process_mic_error(struct dp_soc *soc, 744 qdf_nbuf_t nbuf, 745 uint8_t *rx_tlv_hdr) 746 { 747 struct dp_vdev *vdev = NULL; 748 struct dp_pdev *pdev = NULL; 749 struct ol_if_ops *tops = NULL; 750 struct ieee80211_frame *wh; 751 uint8_t *rx_pkt_hdr; 752 struct dp_peer *peer; 753 uint16_t peer_id; 754 755 if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) 756 return; 757 758 rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf)); 759 wh = (struct ieee80211_frame *)rx_pkt_hdr; 760 761 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 762 peer = dp_peer_find_by_id(soc, peer_id); 763 if (!peer) { 764 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 765 "peer not found"); 766 goto fail; 767 } 768 769 vdev = peer->vdev; 770 if (!vdev) { 771 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 772 "VDEV not found"); 773 goto fail; 774 } 775 776 pdev = vdev->pdev; 777 if (!pdev) { 778 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 779 "PDEV not found"); 780 goto fail; 781 } 782 783 tops = pdev->soc->cdp_soc.ol_ops; 784 if (tops->rx_mic_error) 785 tops->rx_mic_error(pdev->osif_pdev, vdev->vdev_id, wh); 786 787 fail: 788 qdf_nbuf_free(nbuf); 789 return; 790 } 791 792 /** 793 * dp_rx_err_process() - Processes error frames routed to REO error ring 794 * 795 * @soc: core txrx main context 796 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 797 * @quota: No. of units (packets) that can be serviced in one shot. 798 * 799 * This function implements error processing and top level demultiplexer 800 * for all the frames routed to REO error ring. 801 * 802 * Return: uint32_t: No. of elements processed 803 */ 804 uint32_t 805 dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota) 806 { 807 void *hal_soc; 808 void *ring_desc; 809 union dp_rx_desc_list_elem_t *head = NULL; 810 union dp_rx_desc_list_elem_t *tail = NULL; 811 uint32_t rx_bufs_used = 0; 812 uint8_t buf_type; 813 uint8_t error, rbm; 814 struct hal_rx_mpdu_desc_info mpdu_desc_info; 815 struct hal_buf_info hbi; 816 struct dp_pdev *dp_pdev; 817 struct dp_srng *dp_rxdma_srng; 818 struct rx_desc_pool *rx_desc_pool; 819 uint32_t cookie = 0; 820 void *link_desc_va; 821 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 822 uint16_t num_msdus; 823 824 /* Debug -- Remove later */ 825 qdf_assert(soc && hal_ring); 826 827 hal_soc = soc->hal_soc; 828 829 /* Debug -- Remove later */ 830 qdf_assert(hal_soc); 831 832 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { 833 834 /* TODO */ 835 /* 836 * Need API to convert from hal_ring pointer to 837 * Ring Type / Ring Id combo 838 */ 839 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 840 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 841 FL("HAL RING Access Failed -- %pK"), hal_ring); 842 goto done; 843 } 844 845 while (qdf_likely(quota-- && (ring_desc = 846 hal_srng_dst_get_next(hal_soc, hal_ring)))) { 847 848 DP_STATS_INC(soc, rx.err_ring_pkts, 1); 849 850 error = HAL_RX_ERROR_STATUS_GET(ring_desc); 851 852 qdf_assert(error == HAL_REO_ERROR_DETECTED); 853 854 buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc); 855 /* 856 * For REO error ring, expect only MSDU LINK DESC 857 */ 858 qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE); 859 860 cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); 861 /* 862 * check for the magic number in the sw cookie 863 */ 864 qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) & 865 LINK_DESC_ID_START); 866 867 /* 868 * Check if the buffer is to be processed on this processor 869 */ 870 rbm = hal_rx_ret_buf_manager_get(ring_desc); 871 872 hal_rx_reo_buf_paddr_get(ring_desc, &hbi); 873 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); 874 hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus); 875 876 if (qdf_unlikely((msdu_list.rbm[0] != 877 HAL_RX_BUF_RBM_SW3_BM) && 878 (msdu_list.rbm[0] != 879 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) { 880 /* TODO */ 881 /* Call appropriate handler */ 882 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 883 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 884 FL("Invalid RBM %d"), rbm); 885 886 /* Return link descriptor through WBM ring (SW2WBM)*/ 887 dp_rx_link_desc_return(soc, ring_desc, 888 HAL_BM_ACTION_RELEASE_MSDU_LIST); 889 continue; 890 } 891 892 893 /* Get the MPDU DESC info */ 894 hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); 895 896 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { 897 /* TODO */ 898 rx_bufs_used += dp_rx_frag_handle(soc, 899 ring_desc, &mpdu_desc_info, 900 &head, &tail, quota); 901 DP_STATS_INC(soc, rx.rx_frags, 1); 902 continue; 903 } 904 905 if (hal_rx_reo_is_pn_error(ring_desc)) { 906 /* TOD0 */ 907 DP_STATS_INC(soc, 908 rx.err. 909 reo_error[HAL_REO_ERR_PN_CHECK_FAILED], 910 1); 911 rx_bufs_used += dp_rx_pn_error_handle(soc, 912 ring_desc, &mpdu_desc_info, 913 &head, &tail, quota); 914 continue; 915 } 916 917 if (hal_rx_reo_is_2k_jump(ring_desc)) { 918 /* TOD0 */ 919 DP_STATS_INC(soc, 920 rx.err. 921 reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP], 922 1); 923 rx_bufs_used += dp_rx_2k_jump_handle(soc, 924 ring_desc, &mpdu_desc_info, 925 &head, &tail, quota); 926 continue; 927 } 928 } 929 930 done: 931 hal_srng_access_end(hal_soc, hal_ring); 932 933 if (soc->rx.flags.defrag_timeout_check) 934 dp_rx_defrag_waitlist_flush(soc); 935 936 /* Assume MAC id = 0, owner = 0 */ 937 if (rx_bufs_used) { 938 dp_pdev = soc->pdev_list[0]; 939 dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring; 940 rx_desc_pool = &soc->rx_desc_buf[0]; 941 942 dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool, 943 rx_bufs_used, &head, &tail, HAL_RX_BUF_RBM_SW3_BM); 944 } 945 946 return rx_bufs_used; /* Assume no scale factor for now */ 947 } 948 949 /** 950 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring 951 * 952 * @soc: core txrx main context 953 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 954 * @quota: No. of units (packets) that can be serviced in one shot. 955 * 956 * This function implements error processing and top level demultiplexer 957 * for all the frames routed to WBM2HOST sw release ring. 958 * 959 * Return: uint32_t: No. of elements processed 960 */ 961 uint32_t 962 dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota) 963 { 964 void *hal_soc; 965 void *ring_desc; 966 struct dp_rx_desc *rx_desc; 967 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; 968 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; 969 uint32_t rx_bufs_used = 0; 970 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 971 uint8_t buf_type, rbm; 972 uint32_t rx_buf_cookie; 973 uint8_t mac_id; 974 struct dp_pdev *dp_pdev; 975 struct dp_srng *dp_rxdma_srng; 976 struct rx_desc_pool *rx_desc_pool; 977 uint8_t *rx_tlv_hdr; 978 qdf_nbuf_t nbuf_head = NULL; 979 qdf_nbuf_t nbuf_tail = NULL; 980 qdf_nbuf_t nbuf, next; 981 struct hal_wbm_err_desc_info wbm_err_info = { 0 }; 982 uint8_t pool_id; 983 984 /* Debug -- Remove later */ 985 qdf_assert(soc && hal_ring); 986 987 hal_soc = soc->hal_soc; 988 989 /* Debug -- Remove later */ 990 qdf_assert(hal_soc); 991 992 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { 993 994 /* TODO */ 995 /* 996 * Need API to convert from hal_ring pointer to 997 * Ring Type / Ring Id combo 998 */ 999 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1000 FL("HAL RING Access Failed -- %pK"), hal_ring); 1001 goto done; 1002 } 1003 1004 while (qdf_likely(quota-- && (ring_desc = 1005 hal_srng_dst_get_next(hal_soc, hal_ring)))) { 1006 1007 /* XXX */ 1008 buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc); 1009 1010 /* 1011 * For WBM ring, expect only MSDU buffers 1012 */ 1013 qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF); 1014 1015 qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc) 1016 == HAL_RX_WBM_ERR_SRC_RXDMA) || 1017 (HAL_RX_WBM_ERR_SRC_GET(ring_desc) 1018 == HAL_RX_WBM_ERR_SRC_REO)); 1019 1020 /* 1021 * Check if the buffer is to be processed on this processor 1022 */ 1023 rbm = hal_rx_ret_buf_manager_get(ring_desc); 1024 1025 if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) { 1026 /* TODO */ 1027 /* Call appropriate handler */ 1028 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 1029 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1030 FL("Invalid RBM %d"), rbm); 1031 continue; 1032 } 1033 1034 rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc); 1035 1036 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); 1037 qdf_assert(rx_desc); 1038 1039 if (!dp_rx_desc_check_magic(rx_desc)) { 1040 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1041 FL("Invalid rx_desc cookie=%d"), 1042 rx_buf_cookie); 1043 continue; 1044 } 1045 1046 nbuf = rx_desc->nbuf; 1047 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL); 1048 1049 /* 1050 * save the wbm desc info in nbuf TLV. We will need this 1051 * info when we do the actual nbuf processing 1052 */ 1053 hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info); 1054 wbm_err_info.pool_id = rx_desc->pool_id; 1055 hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf), 1056 &wbm_err_info); 1057 1058 rx_bufs_reaped[rx_desc->pool_id]++; 1059 1060 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf); 1061 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 1062 &tail[rx_desc->pool_id], 1063 rx_desc); 1064 } 1065 done: 1066 hal_srng_access_end(hal_soc, hal_ring); 1067 1068 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 1069 if (rx_bufs_reaped[mac_id]) { 1070 dp_pdev = soc->pdev_list[mac_id]; 1071 dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring; 1072 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1073 1074 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 1075 rx_desc_pool, rx_bufs_reaped[mac_id], 1076 &head[mac_id], &tail[mac_id], 1077 HAL_RX_BUF_RBM_SW3_BM); 1078 rx_bufs_used += rx_bufs_reaped[mac_id]; 1079 } 1080 } 1081 1082 nbuf = nbuf_head; 1083 while (nbuf) { 1084 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1085 /* 1086 * retrieve the wbm desc info from nbuf TLV, so we can 1087 * handle error cases appropriately 1088 */ 1089 hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info); 1090 1091 next = nbuf->next; 1092 if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { 1093 if (wbm_err_info.reo_psh_rsn 1094 == HAL_RX_WBM_REO_PSH_RSN_ERROR) { 1095 1096 DP_STATS_INC(soc, 1097 rx.err.reo_error 1098 [wbm_err_info.reo_err_code], 1); 1099 1100 switch (wbm_err_info.reo_err_code) { 1101 /* 1102 * Handling for packets which have NULL REO 1103 * queue descriptor 1104 */ 1105 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 1106 pool_id = wbm_err_info.pool_id; 1107 QDF_TRACE(QDF_MODULE_ID_DP, 1108 QDF_TRACE_LEVEL_WARN, 1109 "Got pkt with REO ERROR: %d", 1110 wbm_err_info.reo_err_code); 1111 dp_rx_null_q_desc_handle(soc, 1112 nbuf, 1113 rx_tlv_hdr, 1114 pool_id); 1115 nbuf = next; 1116 continue; 1117 /* TODO */ 1118 /* Add per error code accounting */ 1119 1120 default: 1121 QDF_TRACE(QDF_MODULE_ID_DP, 1122 QDF_TRACE_LEVEL_DEBUG, 1123 "REO error %d detected", 1124 wbm_err_info.reo_err_code); 1125 } 1126 } 1127 } else if (wbm_err_info.wbm_err_src == 1128 HAL_RX_WBM_ERR_SRC_RXDMA) { 1129 if (wbm_err_info.rxdma_psh_rsn 1130 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 1131 struct dp_peer *peer = NULL; 1132 uint16_t peer_id = 0xFFFF; 1133 1134 DP_STATS_INC(soc, 1135 rx.err.rxdma_error 1136 [wbm_err_info.rxdma_err_code], 1); 1137 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 1138 peer = dp_peer_find_by_id(soc, peer_id); 1139 1140 switch (wbm_err_info.rxdma_err_code) { 1141 case HAL_RXDMA_ERR_UNENCRYPTED: 1142 dp_rx_err_deliver(soc, 1143 nbuf, 1144 rx_tlv_hdr); 1145 nbuf = next; 1146 continue; 1147 1148 case HAL_RXDMA_ERR_TKIP_MIC: 1149 dp_rx_process_mic_error(soc, 1150 nbuf, 1151 rx_tlv_hdr); 1152 nbuf = next; 1153 if (peer) 1154 DP_STATS_INC(peer, rx.err.mic_err, 1); 1155 continue; 1156 1157 case HAL_RXDMA_ERR_DECRYPT: 1158 if (peer) 1159 DP_STATS_INC(peer, rx.err.decrypt_err, 1); 1160 QDF_TRACE(QDF_MODULE_ID_DP, 1161 QDF_TRACE_LEVEL_DEBUG, 1162 "Packet received with Decrypt error"); 1163 break; 1164 1165 default: 1166 QDF_TRACE(QDF_MODULE_ID_DP, 1167 QDF_TRACE_LEVEL_DEBUG, 1168 "RXDMA error %d", 1169 wbm_err_info. 1170 rxdma_err_code); 1171 } 1172 } 1173 } else { 1174 /* Should not come here */ 1175 qdf_assert(0); 1176 } 1177 1178 qdf_nbuf_free(nbuf); 1179 nbuf = next; 1180 hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_DEBUG); 1181 } 1182 return rx_bufs_used; /* Assume no scale factor for now */ 1183 } 1184 1185 /** 1186 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs 1187 * 1188 * @soc: core DP main context 1189 * @mac_id: mac id which is one of 3 mac_ids 1190 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 1191 * @head: head of descs list to be freed 1192 * @tail: tail of decs list to be freed 1193 1194 * Return: number of msdu in MPDU to be popped 1195 */ 1196 static inline uint32_t 1197 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 1198 void *rxdma_dst_ring_desc, 1199 union dp_rx_desc_list_elem_t **head, 1200 union dp_rx_desc_list_elem_t **tail) 1201 { 1202 void *rx_msdu_link_desc; 1203 qdf_nbuf_t msdu; 1204 qdf_nbuf_t last; 1205 struct hal_rx_msdu_list msdu_list; 1206 uint16_t num_msdus; 1207 struct hal_buf_info buf_info; 1208 void *p_buf_addr_info; 1209 void *p_last_buf_addr_info; 1210 uint32_t rx_bufs_used = 0; 1211 uint32_t msdu_cnt; 1212 uint32_t i; 1213 uint8_t push_reason; 1214 uint8_t rxdma_error_code = 0; 1215 uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST; 1216 struct dp_pdev *pdev = soc->pdev_list[mac_id]; 1217 1218 msdu = 0; 1219 1220 last = NULL; 1221 1222 hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info, 1223 &p_last_buf_addr_info, &msdu_cnt); 1224 1225 push_reason = 1226 hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc); 1227 if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 1228 rxdma_error_code = 1229 hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc); 1230 } 1231 1232 do { 1233 rx_msdu_link_desc = 1234 dp_rx_cookie_2_link_desc_va(soc, &buf_info); 1235 1236 qdf_assert(rx_msdu_link_desc); 1237 1238 hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus); 1239 1240 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { 1241 /* if the msdus belongs to NSS offloaded radio && 1242 * the rbm is not SW3_BM then return the msdu_link 1243 * descriptor without freeing the msdus (nbufs). let 1244 * these buffers be given to NSS completion ring for 1245 * NSS to free them. 1246 * else iterate through the msdu link desc list and 1247 * free each msdu in the list. 1248 */ 1249 if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM && 1250 wlan_cfg_get_dp_pdev_nss_enabled( 1251 pdev->wlan_cfg_ctx)) 1252 bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST; 1253 else { 1254 for (i = 0; i < num_msdus; i++) { 1255 struct dp_rx_desc *rx_desc = 1256 dp_rx_cookie_2_va_rxdma_buf(soc, 1257 msdu_list.sw_cookie[i]); 1258 qdf_assert(rx_desc); 1259 msdu = rx_desc->nbuf; 1260 1261 qdf_nbuf_unmap_single(soc->osdev, msdu, 1262 QDF_DMA_FROM_DEVICE); 1263 1264 QDF_TRACE(QDF_MODULE_ID_DP, 1265 QDF_TRACE_LEVEL_DEBUG, 1266 "[%s][%d] msdu_nbuf=%pK \n", 1267 __func__, __LINE__, msdu); 1268 1269 qdf_nbuf_free(msdu); 1270 rx_bufs_used++; 1271 dp_rx_add_to_free_desc_list(head, 1272 tail, rx_desc); 1273 } 1274 } 1275 } else { 1276 rxdma_error_code = HAL_RXDMA_ERR_WAR; 1277 } 1278 1279 hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info, 1280 &p_buf_addr_info); 1281 1282 dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action); 1283 p_last_buf_addr_info = p_buf_addr_info; 1284 1285 } while (buf_info.paddr); 1286 1287 DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1); 1288 1289 if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) { 1290 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1291 "Packet received with Decrypt error"); 1292 } 1293 1294 return rx_bufs_used; 1295 } 1296 1297 /** 1298 * dp_rxdma_err_process() - RxDMA error processing functionality 1299 * 1300 * @soc: core txrx main contex 1301 * @mac_id: mac id which is one of 3 mac_ids 1302 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced 1303 * @quota: No. of units (packets) that can be serviced in one shot. 1304 1305 * Return: num of buffers processed 1306 */ 1307 uint32_t 1308 dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) 1309 { 1310 struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); 1311 int ring_idx = dp_get_ring_id_for_mac_id(soc, mac_id); 1312 uint8_t pdev_id; 1313 void *hal_soc; 1314 void *rxdma_dst_ring_desc; 1315 void *err_dst_srng; 1316 union dp_rx_desc_list_elem_t *head = NULL; 1317 union dp_rx_desc_list_elem_t *tail = NULL; 1318 struct dp_srng *dp_rxdma_srng; 1319 struct rx_desc_pool *rx_desc_pool; 1320 uint32_t work_done = 0; 1321 uint32_t rx_bufs_used = 0; 1322 1323 #ifdef DP_INTR_POLL_BASED 1324 if (!pdev) 1325 return 0; 1326 #endif 1327 pdev_id = pdev->pdev_id; 1328 err_dst_srng = pdev->rxdma_err_dst_ring[ring_idx].hal_srng; 1329 1330 if (!err_dst_srng) { 1331 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1332 "%s %d : HAL Monitor Destination Ring Init \ 1333 Failed -- %pK\n", 1334 __func__, __LINE__, err_dst_srng); 1335 return 0; 1336 } 1337 1338 hal_soc = soc->hal_soc; 1339 1340 qdf_assert(hal_soc); 1341 1342 if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) { 1343 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1344 "%s %d : HAL Monitor Destination Ring Init \ 1345 Failed -- %pK\n", 1346 __func__, __LINE__, err_dst_srng); 1347 return 0; 1348 } 1349 1350 while (qdf_likely(quota-- && (rxdma_dst_ring_desc = 1351 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { 1352 1353 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, 1354 rxdma_dst_ring_desc, 1355 &head, &tail); 1356 } 1357 1358 hal_srng_access_end(hal_soc, err_dst_srng); 1359 1360 if (rx_bufs_used) { 1361 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 1362 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1363 1364 dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, 1365 rx_desc_pool, rx_bufs_used, &head, &tail, 1366 HAL_RX_BUF_RBM_SW3_BM); 1367 work_done += rx_bufs_used; 1368 } 1369 1370 return work_done; 1371 } 1372