/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loopback
 *			      on the same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					struct dp_peer *peer,
					uint8_t *rx_tlv_hdr,
					qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * If the received packet's source MAC address matches the vdev
	 * MAC address, drop the packet as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			vdev->mac_addr.raw,
			DP_MAC_ADDR_LEN)))
		return true;

	/*
	 * In qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received packet's source MAC address matches the MAC
	 * address of a wired PC behind this STA, or of a wireless STA
	 * behind the repeater, drop the packet as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		/* sa_idx is unsigned, so only the upper bound needs a check */
		if (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					"invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In such scenarios, where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_hash_find(soc,
						&data[DP_MAC_ADDR_LEN]);
		}
	} else
		ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]);

	if (ase) {
		ase->ast_idx = sa_idx;
		soc->ast_table[sa_idx] = ase;

		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_INFO,
				"Detected DBDC Root AP %pM, %d %d",
				&data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
				ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
				(ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_INFO,
				"received pkt with same src mac %pM",
				&data[DP_MAC_ADDR_LEN]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
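
/*
 * Usage sketch (illustrative only, mirroring the call site in
 * dp_rx_null_q_desc_handle() later in this file): callers run the echo
 * check after stripping the RX TLVs and drop the frame on a positive
 * result:
 *
 *	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
 *		qdf_nbuf_free(nbuf);	// looped back MCBC frame
 *		return;
 *	}
 */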

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action (HAL_BM_ACTION_PUT_IN_IDLE_LIST or
 *	       HAL_BM_ACTION_RELEASE_MSDU_LIST)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
					uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
				src_srng_desc, link_desc_addr, bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			*srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			*srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}
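
/*
 * Note (illustrative): the SW2WBM release above is the standard way the
 * host hands a consumed MSDU link descriptor back to the buffer manager.
 * A caller that only holds the REO ring descriptor can use the thin
 * wrapper that follows instead, e.g.:
 *
 *	dp_rx_link_desc_return(soc, ring_desc,
 *			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 */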

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action (HAL_BM_ACTION_*)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(link_desc_va, &msdu_list,
		&mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
			msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("Invalid rx_desc cookie=%d"),
					msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with PN error for tid: %d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}
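
/*
 * Pattern note (illustrative): the @head/@tail pair implements a local
 * free-list that the error handlers fill and the caller later flushes to
 * the RXDMA refill ring in one shot. A minimal sketch of the caller side,
 * matching what dp_rx_err_process() below does after its reap loop:
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *	uint32_t used;
 *
 *	used = dp_rx_msdus_drop(soc, ring_desc, &mpdu_desc_info,
 *				&head, &tail, quota);
 *	if (used)
 *		dp_rx_buffers_replenish(soc, 0, rxdma_srng, rx_desc_pool,
 *					used, &head, &tail);
 */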

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP feels that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Packet received with PN error");

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */

		/* Dereference peer only after the NULL check above */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"discard rx due to PN error for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
			peer,
			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
	}

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU suddenly jumps by 2K. Today there are 2 cases that
 * need to be handled:
 *	A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 *	B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A), the protocol stack is invoked to generate a DELBA/DEAUTH frame.
 * For case B), the frame is normally dropped and no further action is taken.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
								uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: multi-radio support is still limited; the pdev is simply
	 * picked by the mac_id index here.
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		hal_rx_mon_hw_desc_get_mpdu_status(rx_tlv_hdr,
			&(dp_pdev->ppdu_info.rx_status));
	}

	if (hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
				dp_pdev->invalid_peer_tail_msdu,
				nbuf);

	return mpdu_done;
}
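
/*
 * Usage sketch (illustrative, taken from the no-peer branch of
 * dp_rx_null_q_desc_handle() below): MSDUs are accumulated on the pdev
 * invalid-peer list until the last MSDU of the MPDU arrives, and only
 * then is the invalid-peer handler triggered for the whole MPDU:
 *
 *	mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
 *	dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);
 */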

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, which is when the TID queue is set up.
 * It may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination ring and in a WBM
 * release ring.
 *
 * Return: void
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr,
			uint8_t pool_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint8_t tid;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
			hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
			hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("peer is NULL"));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode, if the received packet matches any of the vdev
	 * MAC addresses, drop it. Do not accept multicast packets
	 * originating from any proxy STA.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely(peer->nawds_enabled &&
			hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_DEBUG,
			"%s free buffer for multicast packet",
			__func__);
		DP_STATS_INC_PKT(peer, rx.nawds_mcast_drop,
			1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_ERROR,
				FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);
		if (peer &&
			peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"%s: p_id %d msdu_len %d hdr_off %d",
		__func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
					qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_INFO,
					FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				FL("vdev %pK osif_rx %pK"), vdev,
				vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1,
				qdf_nbuf_len(nbuf),
				hal_rx_msdu_end_da_is_mcbc_get(
					rx_tlv_hdr));
			DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1,
				qdf_nbuf_len(nbuf));
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
	return;
}
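
/*
 * Note (illustrative): dp_rx_null_q_desc_handle() is invoked from
 * dp_rx_wbm_err_process() below when a REO-sourced WBM entry carries the
 * HAL_REO_ERR_QUEUE_DESC_ADDR_0 error code, i.e. the MPDU hit a NULL REO
 * queue descriptor and was pushed to the WBM release ring instead.
 */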

/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	struct ether_header *eh;
	bool isBroadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("peer is NULL"));
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/* Drop & free packet if mesh mode is not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			== QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
				(vdev->rx_decap_type ==
				htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		isBroadcast = IEEE80211_IS_BROADCAST(eh->ether_dhost);
		if (isBroadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: DP SOC handle
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
void
dp_rx_process_mic_error(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	struct dp_peer *peer;
	uint16_t peer_id;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"PDEV not found");
		goto fail;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->osif_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
	return;
}
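
/*
 * Consumer sketch (illustrative; the handler name below is hypothetical,
 * and its arguments simply mirror the tops->rx_mic_error() invocation
 * above): umac registers the callback in soc->cdp_soc.ol_ops and would
 * typically react by raising a MIC-failure event toward the supplicant
 * so TKIP countermeasures can be started.
 *
 *	static void my_rx_mic_error(void *osif_pdev, uint8_t vdev_id,
 *				    struct ieee80211_frame *wh)
 *	{
 *		// look up the vdev from vdev_id and report the MIC
 *		// failure, using the 802.11 header for addressing
 *	}
 */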

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
							LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] !=
						HAL_RX_BUF_RBM_SW3_BM) &&
				(msdu_list.rbm[0] !=
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Invalid RBM %d"), rbm);

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.
				reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);
			rx_bufs_used += dp_rx_pn_error_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.
				reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);
			rx_bufs_used += dp_rx_2k_jump_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					rx_bufs_used, &head, &tail);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
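
/*
 * Context note (illustrative): dp_rx_err_process() is the REO exception
 * ring counterpart of dp_rx_wbm_err_process() below; both are expected to
 * be driven from the DP interrupt/poll service path with a per-call
 * budget, roughly as sketched here (the reo_exception_ring field name is
 * an assumption of this sketch, not established by this file):
 *
 *	work_done = dp_rx_err_process(soc,
 *				      soc->reo_exception_ring.hal_srng,
 *				      budget);
 */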

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
				(HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("Invalid rx_desc cookie=%d"),
					rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
						&wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					&tail[rx_desc->pool_id],
					rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}
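
	/*
	 * Second pass (note added for clarity): the reap loop above already
	 * unmapped each nbuf and stashed the WBM error info inside the RX
	 * TLV area via hal_rx_wbm_err_info_set_in_tlv(). Now walk the
	 * collected nbuf list outside the ring lock and demultiplex on the
	 * saved error source and error code.
	 */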
	nbuf = nbuf_head;
	while (nbuf) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
				== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					rx.err.reo_error
					[wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_WARN,
						"Got pkt with REO ERROR: %d",
						wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc,
								nbuf,
								rx_tlv_hdr,
								pool_id);
					nbuf = next;
					continue;
				/* TODO */
				/* Add per error code accounting */

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"REO error %d detected",
						wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
				== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				struct dp_peer *peer = NULL;
				uint16_t peer_id = 0xFFFF;

				DP_STATS_INC(soc,
					rx.err.rxdma_error
					[wbm_err_info.rxdma_err_code], 1);
				peer_id = hal_rx_mpdu_start_sw_peer_id_get(
								rx_tlv_hdr);
				peer = dp_peer_find_by_id(soc, peer_id);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_err_deliver(soc,
							nbuf,
							rx_tlv_hdr);
					nbuf = next;
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc,
								nbuf,
								rx_tlv_hdr);
					nbuf = next;
					if (peer)
						DP_STATS_INC(peer,
							rx.err.mic_err, 1);
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer,
							rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
					"Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"RXDMA error %d",
						wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
	void *rxdma_dst_ring_desc,
	union dp_rx_desc_list_elem_t **head,
	union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = NULL;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
		&p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(
						rxdma_dst_ring_desc);
	}
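
	/*
	 * Walk note (added for clarity): each MSDU link descriptor holds a
	 * batch of MSDU buffer pointers plus the address of the next link
	 * descriptor in the chain. The loop below keeps following
	 * buf_info.paddr, returning each visited link descriptor to WBM,
	 * until the chain terminates with a zero address.
	 */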

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list,
					&num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the MSDUs belong to an NSS offloaded radio and
			 * the rbm is not SW3_BM (the host buffer manager),
			 * return the msdu_link descriptor without freeing
			 * the msdus (nbufs); let these buffers be given to
			 * the NSS completion ring for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
				wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(
						soc,
						msdu_list.sw_cookie[i]);
					qdf_assert(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
						QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"[%s][%d] msdu_nbuf=%pK",
						__func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
						tail, rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
			&p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

#ifdef DP_INTR_POLL_BASED
	if (!pdev)
		return 0;
#endif
	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			__func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL Monitor Destination Ring access Failed -- %pK",
			__func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						rxdma_dst_ring_desc,
						&head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
			rx_desc_pool, rx_bufs_used, &head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}