1 /* 2 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "dp_types.h" 21 #include "dp_rx.h" 22 #include "dp_peer.h" 23 #include "dp_internal.h" 24 #include "hal_api.h" 25 #include "qdf_trace.h" 26 #include "qdf_nbuf.h" 27 #ifdef CONFIG_MCL 28 #include <cds_ieee80211_common.h> 29 #else 30 #include <linux/ieee80211.h> 31 #endif 32 #include "dp_rx_defrag.h" 33 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 34 35 #ifdef RX_DESC_DEBUG_CHECK 36 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 37 { 38 if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) { 39 return false; 40 } 41 rx_desc->magic = 0; 42 return true; 43 } 44 #else 45 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 46 { 47 return true; 48 } 49 #endif 50 51 /** 52 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop 53 * back on same vap or a different vap. 
54 * 55 * @soc: core DP main context 56 * @peer: dp peer handler 57 * @rx_tlv_hdr: start of the rx TLV header 58 * @nbuf: pkt buffer 59 * 60 * Return: bool (true if it is a looped back pkt else false) 61 * 62 */ 63 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, 64 struct dp_peer *peer, 65 uint8_t *rx_tlv_hdr, 66 qdf_nbuf_t nbuf) 67 { 68 struct dp_vdev *vdev = peer->vdev; 69 struct dp_ast_entry *ase; 70 uint16_t sa_idx = 0; 71 uint8_t *data; 72 73 /* 74 * Multicast Echo Check is required only if vdev is STA and 75 * received pkt is a multicast/broadcast pkt. otherwise 76 * skip the MEC check. 77 */ 78 if (vdev->opmode != wlan_op_mode_sta) 79 return false; 80 81 if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) 82 return false; 83 84 data = qdf_nbuf_data(nbuf); 85 /* 86 * if the received pkts src mac addr matches with vdev 87 * mac address then drop the pkt as it is looped back 88 */ 89 if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN], 90 vdev->mac_addr.raw, 91 DP_MAC_ADDR_LEN))) 92 return true; 93 94 /* 95 * In case of qwrap isolation mode, donot drop loopback packets. 96 * In isolation mode, all packets from the wired stations need to go 97 * to rootap and loop back to reach the wireless stations and 98 * vice-versa. 
99 */ 100 if (qdf_unlikely(vdev->isolation_vdev)) 101 return false; 102 103 /* if the received pkts src mac addr matches with the 104 * wired PCs MAC addr which is behind the STA or with 105 * wireless STAs MAC addr which are behind the Repeater, 106 * then drop the pkt as it is looped back 107 */ 108 qdf_spin_lock_bh(&soc->ast_lock); 109 if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) { 110 sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr); 111 112 if ((sa_idx < 0) || 113 (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2))) { 114 qdf_spin_unlock_bh(&soc->ast_lock); 115 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 116 "invalid sa_idx: %d", sa_idx); 117 qdf_assert_always(0); 118 } 119 120 ase = soc->ast_table[sa_idx]; 121 if (!ase) { 122 /* We do not get a peer map event for STA and without 123 * this event we don't know what is STA's sa_idx. 124 * For this reason the AST is still not associated to 125 * any index postion in ast_table. 126 * In these kind of scenarios where sa is valid but 127 * ast is not in ast_table, we use the below API to get 128 * AST entry for STA's own mac_address. 
129 */ 130 ase = dp_peer_ast_list_find(soc, peer, 131 &data[DP_MAC_ADDR_LEN]); 132 if (ase) { 133 ase->ast_idx = sa_idx; 134 soc->ast_table[sa_idx] = ase; 135 ase->is_mapped = TRUE; 136 } 137 } 138 } else 139 ase = dp_peer_ast_hash_find_by_pdevid(soc, 140 &data[DP_MAC_ADDR_LEN], 141 vdev->pdev->pdev_id); 142 143 if (ase) { 144 145 if (ase->pdev_id != vdev->pdev->pdev_id) { 146 qdf_spin_unlock_bh(&soc->ast_lock); 147 QDF_TRACE(QDF_MODULE_ID_DP, 148 QDF_TRACE_LEVEL_INFO, 149 "Detected DBDC Root AP %pM, %d %d", 150 &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id, 151 ase->pdev_id); 152 return false; 153 } 154 155 if ((ase->type == CDP_TXRX_AST_TYPE_MEC) || 156 (ase->peer != peer)) { 157 qdf_spin_unlock_bh(&soc->ast_lock); 158 QDF_TRACE(QDF_MODULE_ID_DP, 159 QDF_TRACE_LEVEL_INFO, 160 "received pkt with same src mac %pM", 161 &data[DP_MAC_ADDR_LEN]); 162 163 return true; 164 } 165 } 166 qdf_spin_unlock_bh(&soc->ast_lock); 167 return false; 168 } 169 170 /** 171 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to 172 * (WBM) by address 173 * 174 * @soc: core DP main context 175 * @link_desc_addr: link descriptor addr 176 * 177 * Return: QDF_STATUS 178 */ 179 QDF_STATUS 180 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr, 181 uint8_t bm_action) 182 { 183 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; 184 void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng; 185 void *hal_soc = soc->hal_soc; 186 QDF_STATUS status = QDF_STATUS_E_FAILURE; 187 void *src_srng_desc; 188 189 if (!wbm_rel_srng) { 190 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 191 "WBM RELEASE RING not initialized"); 192 return status; 193 } 194 195 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { 196 197 /* TODO */ 198 /* 199 * Need API to convert from hal_ring pointer to 200 * Ring Type / Ring Id combo 201 */ 202 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 203 FL("HAL RING Access For WBM Release SRNG Failed - %pK"), 204 wbm_rel_srng); 
205 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 206 goto done; 207 } 208 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); 209 if (qdf_likely(src_srng_desc)) { 210 /* Return link descriptor through WBM ring (SW2WBM)*/ 211 hal_rx_msdu_link_desc_set(hal_soc, 212 src_srng_desc, link_desc_addr, bm_action); 213 status = QDF_STATUS_SUCCESS; 214 } else { 215 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng; 216 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 217 FL("WBM Release Ring (Id %d) Full"), srng->ring_id); 218 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 219 "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x", 220 *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp, 221 *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp); 222 } 223 done: 224 hal_srng_access_end(hal_soc, wbm_rel_srng); 225 return status; 226 227 } 228 229 /** 230 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 231 * (WBM), following error handling 232 * 233 * @soc: core DP main context 234 * @ring_desc: opaque pointer to the REO error ring descriptor 235 * 236 * Return: QDF_STATUS 237 */ 238 QDF_STATUS 239 dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action) 240 { 241 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc); 242 return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action); 243 } 244 245 /** 246 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU 247 * 248 * @soc: core txrx main context 249 * @ring_desc: opaque pointer to the REO error ring descriptor 250 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 251 * @head: head of the local descriptor free-list 252 * @tail: tail of the local descriptor free-list 253 * @quota: No. of units (packets) that can be serviced in one shot. 254 * 255 * This function is used to drop all MSDU in an MPDU 256 * 257 * Return: uint32_t: No. 
of elements processed 258 */ 259 static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc, 260 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 261 uint8_t *mac_id, 262 uint32_t quota) 263 { 264 uint32_t rx_bufs_used = 0; 265 void *link_desc_va; 266 struct hal_buf_info buf_info; 267 struct dp_pdev *pdev; 268 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ 269 int i; 270 uint8_t *rx_tlv_hdr; 271 uint32_t tid; 272 273 hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); 274 275 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 276 277 /* No UNMAP required -- this is "malloc_consistent" memory */ 278 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 279 &mpdu_desc_info->msdu_count); 280 281 for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) { 282 struct dp_rx_desc *rx_desc = 283 dp_rx_cookie_2_va_rxdma_buf(soc, 284 msdu_list.sw_cookie[i]); 285 286 qdf_assert_always(rx_desc); 287 288 /* all buffers from a MSDU link link belong to same pdev */ 289 *mac_id = rx_desc->pool_id; 290 pdev = soc->pdev_list[rx_desc->pool_id]; 291 292 if (!dp_rx_desc_check_magic(rx_desc)) { 293 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 294 FL("Invalid rx_desc cookie=%d"), 295 msdu_list.sw_cookie[i]); 296 return rx_bufs_used; 297 } 298 299 rx_bufs_used++; 300 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 301 rx_desc->rx_buf_start); 302 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 303 "Packet received with PN error for tid :%d", tid); 304 305 rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf); 306 if (hal_rx_encryption_info_valid(rx_tlv_hdr)) 307 hal_rx_print_pn(rx_tlv_hdr); 308 309 /* Just free the buffers */ 310 qdf_nbuf_free(rx_desc->nbuf); 311 312 dp_rx_add_to_free_desc_list(&pdev->free_list_head, 313 &pdev->free_list_tail, rx_desc); 314 } 315 316 /* Return link descriptor through WBM ring (SW2WBM)*/ 317 dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST); 318 319 return rx_bufs_used; 320 } 321 322 /** 323 * 
dp_rx_pn_error_handle() - Handles PN check errors 324 * 325 * @soc: core txrx main context 326 * @ring_desc: opaque pointer to the REO error ring descriptor 327 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 328 * @head: head of the local descriptor free-list 329 * @tail: tail of the local descriptor free-list 330 * @quota: No. of units (packets) that can be serviced in one shot. 331 * 332 * This function implements PN error handling 333 * If the peer is configured to ignore the PN check errors 334 * or if DP feels, that this frame is still OK, the frame can be 335 * re-injected back to REO to use some of the other features 336 * of REO e.g. duplicate detection/routing to other cores 337 * 338 * Return: uint32_t: No. of elements processed 339 */ 340 static uint32_t 341 dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc, 342 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 343 uint8_t *mac_id, 344 uint32_t quota) 345 { 346 uint16_t peer_id; 347 uint32_t rx_bufs_used = 0; 348 struct dp_peer *peer; 349 bool peer_pn_policy = false; 350 351 peer_id = DP_PEER_METADATA_PEER_ID_GET( 352 mpdu_desc_info->peer_meta_data); 353 354 355 peer = dp_peer_find_by_id(soc, peer_id); 356 357 if (qdf_likely(peer)) { 358 /* 359 * TODO: Check for peer specific policies & set peer_pn_policy 360 */ 361 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 362 "discard rx due to PN error for peer %pK " 363 "(%02x:%02x:%02x:%02x:%02x:%02x)", 364 peer, 365 peer->mac_addr.raw[0], peer->mac_addr.raw[1], 366 peer->mac_addr.raw[2], peer->mac_addr.raw[3], 367 peer->mac_addr.raw[4], peer->mac_addr.raw[5]); 368 369 dp_peer_unref_del_find_by_id(peer); 370 } 371 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 372 "Packet received with PN error"); 373 374 /* No peer PN policy -- definitely drop */ 375 if (!peer_pn_policy) 376 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, 377 mpdu_desc_info, 378 mac_id, quota); 379 380 return rx_bufs_used; 381 } 382 383 /** 384 * 
dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K 385 * 386 * @soc: core txrx main context 387 * @ring_desc: opaque pointer to the REO error ring descriptor 388 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 389 * @head: head of the local descriptor free-list 390 * @tail: tail of the local descriptor free-list 391 * @quota: No. of units (packets) that can be serviced in one shot. 392 * 393 * This function implements the error handling when sequence number 394 * of the MPDU jumps suddenly by 2K.Today there are 2 cases that 395 * need to be handled: 396 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K 397 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN 398 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame 399 * For case B), the frame is normally dropped, no more action is taken 400 * 401 * Return: uint32_t: No. of elements processed 402 */ 403 static uint32_t 404 dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc, 405 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 406 uint8_t *mac_id, uint32_t quota) 407 { 408 return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info, 409 mac_id, quota); 410 } 411 412 #ifdef CONFIG_MCL 413 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \ 414 do { \ 415 qdf_assert_always(!(head)); \ 416 qdf_assert_always(!(tail)); \ 417 } while (0) 418 #else 419 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */ 420 #endif 421 422 /** 423 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu 424 * to pdev invalid peer list 425 * 426 * @soc: core DP main context 427 * @nbuf: Buffer pointer 428 * @rx_tlv_hdr: start of rx tlv header 429 * @mac_id: mac id 430 * 431 * Return: bool: true for last msdu of mpdu 432 */ 433 static bool 434 dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 435 uint8_t mac_id) 436 { 437 bool mpdu_done = false; 438 qdf_nbuf_t curr_nbuf = NULL; 439 qdf_nbuf_t tmp_nbuf = NULL; 440 441 /* 
TODO: Currently only single radio is supported, hence 442 * pdev hard coded to '0' index 443 */ 444 struct dp_pdev *dp_pdev = soc->pdev_list[mac_id]; 445 446 if (!dp_pdev->first_nbuf) { 447 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 448 dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr); 449 dp_pdev->first_nbuf = true; 450 451 /* If the new nbuf received is the first msdu of the 452 * amsdu and there are msdus in the invalid peer msdu 453 * list, then let us free all the msdus of the invalid 454 * peer msdu list. 455 * This scenario can happen when we start receiving 456 * new a-msdu even before the previous a-msdu is completely 457 * received. 458 */ 459 curr_nbuf = dp_pdev->invalid_peer_head_msdu; 460 while (curr_nbuf) { 461 tmp_nbuf = curr_nbuf->next; 462 qdf_nbuf_free(curr_nbuf); 463 curr_nbuf = tmp_nbuf; 464 } 465 466 dp_pdev->invalid_peer_head_msdu = NULL; 467 dp_pdev->invalid_peer_tail_msdu = NULL; 468 hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr, 469 &(dp_pdev->ppdu_info.rx_status)); 470 471 } 472 473 if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) && 474 hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 475 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 476 qdf_assert_always(dp_pdev->first_nbuf == true); 477 dp_pdev->first_nbuf = false; 478 mpdu_done = true; 479 } 480 481 /* 482 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu 483 * should be NULL here, add the checking for debugging purpose 484 * in case some corner case. 
485 */ 486 DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu, 487 dp_pdev->invalid_peer_tail_msdu); 488 DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu, 489 dp_pdev->invalid_peer_tail_msdu, 490 nbuf); 491 492 return mpdu_done; 493 } 494 495 /** 496 * dp_2k_jump_handle() - Function to handle 2k jump exception 497 * on WBM ring 498 * 499 * @soc: core DP main context 500 * @nbuf: buffer pointer 501 * @rx_tlv_hdr: start of rx tlv header 502 * @peer_id: peer id of first msdu 503 * @tid: Tid for which exception occurred 504 * 505 * This function handles 2k jump violations arising out 506 * of receiving aggregates in non BA case. This typically 507 * may happen if aggregates are received on a QOS enabled TID 508 * while Rx window size is still initialized to value of 2. Or 509 * it may also happen if negotiated window size is 1 but peer 510 * sends aggregates. 511 * 512 */ 513 514 void 515 dp_2k_jump_handle(struct dp_soc *soc, 516 qdf_nbuf_t nbuf, 517 uint8_t *rx_tlv_hdr, 518 uint16_t peer_id, 519 uint8_t tid) 520 { 521 uint32_t ppdu_id; 522 struct dp_peer *peer = NULL; 523 struct dp_rx_tid *rx_tid = NULL; 524 525 peer = dp_peer_find_by_id(soc, peer_id); 526 if (!peer || peer->delete_in_progress) { 527 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 528 "peer not found"); 529 goto free_nbuf; 530 } 531 rx_tid = &peer->rx_tid[tid]; 532 if (qdf_unlikely(rx_tid == NULL)) { 533 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 534 "rx_tid is NULL!!"); 535 goto free_nbuf; 536 } 537 qdf_spin_lock_bh(&rx_tid->tid_lock); 538 ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr); 539 if (rx_tid->ppdu_id_2k != ppdu_id) { 540 rx_tid->ppdu_id_2k = ppdu_id; 541 qdf_spin_unlock_bh(&rx_tid->tid_lock); 542 goto free_nbuf; 543 } 544 if (!rx_tid->delba_tx_status) { 545 rx_tid->delba_tx_retry++; 546 rx_tid->delba_tx_status = 1; 547 rx_tid->delba_rcode = 548 IEEE80211_REASON_QOS_SETUP_REQUIRED; 549 qdf_spin_unlock_bh(&rx_tid->tid_lock); 550 
soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev, 551 peer->ctrl_peer, 552 peer->mac_addr.raw, 553 tid, 554 peer->vdev->ctrl_vdev, 555 rx_tid->delba_rcode); 556 } else { 557 qdf_spin_unlock_bh(&rx_tid->tid_lock); 558 } 559 560 free_nbuf: 561 if (peer) 562 dp_peer_unref_del_find_by_id(peer); 563 qdf_nbuf_free(nbuf); 564 return; 565 } 566 567 /** 568 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue 569 * descriptor violation on either a 570 * REO or WBM ring 571 * 572 * @soc: core DP main context 573 * @nbuf: buffer pointer 574 * @rx_tlv_hdr: start of rx tlv header 575 * @pool_id: mac id 576 * @peer: peer handle 577 * 578 * This function handles NULL queue descriptor violations arising out 579 * a missing REO queue for a given peer or a given TID. This typically 580 * may happen if a packet is received on a QOS enabled TID before the 581 * ADDBA negotiation for that TID, when the TID queue is setup. Or 582 * it may also happen for MC/BC frames if they are not routed to the 583 * non-QOS TID queue, in the absence of any other default TID queue. 584 * This error can show up both in a REO destination or WBM release ring. 
585 * 586 */ 587 static void 588 dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, 589 uint8_t *rx_tlv_hdr, uint8_t pool_id, 590 struct dp_peer *peer) 591 { 592 uint32_t pkt_len, l2_hdr_offset; 593 uint16_t msdu_len; 594 struct dp_vdev *vdev; 595 uint8_t tid; 596 struct ether_header *eh; 597 598 qdf_nbuf_set_rx_chfrag_start(nbuf, 599 hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)); 600 qdf_nbuf_set_rx_chfrag_end(nbuf, 601 hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr)); 602 603 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); 604 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 605 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; 606 607 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, 608 "Len %d Extn list %pK ", 609 (uint32_t)qdf_nbuf_len(nbuf), 610 qdf_nbuf_get_ext_list(nbuf)); 611 /* Set length in nbuf */ 612 if (!qdf_nbuf_get_ext_list(nbuf)) 613 qdf_nbuf_set_pktlen(nbuf, pkt_len); 614 615 /* 616 * Check if DMA completed -- msdu_done is the last bit 617 * to be written 618 */ 619 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 620 621 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 622 FL("MSDU DONE failure")); 623 624 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 625 QDF_TRACE_LEVEL_INFO); 626 qdf_assert(0); 627 } 628 629 if (!peer) { 630 bool mpdu_done = false; 631 struct dp_pdev *pdev = soc->pdev_list[pool_id]; 632 633 QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL"); 634 635 DP_STATS_INC_PKT(soc, 636 rx.err.rx_invalid_peer, 637 1, 638 qdf_nbuf_len(nbuf)); 639 640 mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id); 641 /* Trigger invalid peer handler wrapper */ 642 dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done); 643 644 if (mpdu_done) { 645 pdev->invalid_peer_head_msdu = NULL; 646 pdev->invalid_peer_tail_msdu = NULL; 647 } 648 return; 649 } 650 651 vdev = peer->vdev; 652 if (!vdev) { 653 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 654 FL("INVALID vdev %pK OR osif_rx"), vdev); 655 /* Drop & free packet */ 656 
qdf_nbuf_free(nbuf); 657 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 658 return; 659 } 660 661 /* 662 * Advance the packet start pointer by total size of 663 * pre-header TLV's 664 */ 665 if (qdf_nbuf_get_ext_list(nbuf)) 666 qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); 667 else 668 qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN)); 669 670 if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) { 671 /* this is a looped back MCBC pkt, drop it */ 672 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); 673 qdf_nbuf_free(nbuf); 674 return; 675 } 676 /* 677 * In qwrap mode if the received packet matches with any of the vdev 678 * mac addresses, drop it. Donot receive multicast packets originated 679 * from any proxysta. 680 */ 681 if (check_qwrap_multicast_loopback(vdev, nbuf)) { 682 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); 683 qdf_nbuf_free(nbuf); 684 return; 685 } 686 687 688 if (qdf_unlikely((peer->nawds_enabled == true) && 689 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { 690 QDF_TRACE(QDF_MODULE_ID_DP, 691 QDF_TRACE_LEVEL_DEBUG, 692 "%s free buffer for multicast packet", 693 __func__); 694 DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); 695 qdf_nbuf_free(nbuf); 696 return; 697 } 698 699 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer, 700 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { 701 QDF_TRACE(QDF_MODULE_ID_DP, 702 QDF_TRACE_LEVEL_ERROR, 703 FL("mcast Policy Check Drop pkt")); 704 /* Drop & free packet */ 705 qdf_nbuf_free(nbuf); 706 return; 707 } 708 709 /* WDS Source Port Learning */ 710 if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet && 711 vdev->wds_enabled)) 712 dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf); 713 714 if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) { 715 /* TODO: Assuming that qos_control_valid also indicates 716 * unicast. Should we check this? 
717 */ 718 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr); 719 if (peer && !peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) { 720 /* IEEE80211_SEQ_MAX indicates invalid start_seq */ 721 dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); 722 } 723 } 724 725 #ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */ 726 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 727 "%s: mac_add:%pM msdu_len %d hdr_off %d", 728 __func__, peer->mac_addr.raw, msdu_len, 729 l2_hdr_offset); 730 731 print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4, 732 qdf_nbuf_data(nbuf), 128, false); 733 #endif /* NAPIER_EMULATION */ 734 735 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 736 qdf_nbuf_set_next(nbuf, NULL); 737 dp_rx_deliver_raw(vdev, nbuf, peer); 738 } else { 739 if (qdf_unlikely(peer->bss_peer)) { 740 QDF_TRACE(QDF_MODULE_ID_DP, 741 QDF_TRACE_LEVEL_INFO, 742 FL("received pkt with same src MAC")); 743 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, 744 qdf_nbuf_len(nbuf)); 745 746 /* Drop & free packet */ 747 qdf_nbuf_free(nbuf); 748 return; 749 } 750 751 if (vdev->osif_rx) { 752 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 753 FL("vdev %pK osif_rx %pK"), vdev, 754 vdev->osif_rx); 755 qdf_nbuf_set_next(nbuf, NULL); 756 DP_STATS_INC_PKT(peer, rx.to_stack, 1, 757 qdf_nbuf_len(nbuf)); 758 vdev->osif_rx(vdev->osif_vdev, nbuf); 759 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get( 760 rx_tlv_hdr) && 761 (vdev->rx_decap_type == 762 htt_cmn_pkt_type_ethernet))) { 763 eh = (struct ether_header *)qdf_nbuf_data(nbuf); 764 765 DP_STATS_INC_PKT(peer, rx.multicast, 1, 766 qdf_nbuf_len(nbuf)); 767 if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) { 768 DP_STATS_INC_PKT(peer, rx.bcast, 1, 769 qdf_nbuf_len(nbuf)); 770 } 771 } 772 } else { 773 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 774 FL("INVALID vdev %pK OR osif_rx"), vdev); 775 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 776 } 777 } 778 return; 779 } 780 781 /** 782 * 
dp_rx_process_err_unencrypted() - Function to deliver rxdma unencrypted_err 783 * frames to OS 784 * @soc: core DP main context 785 * @nbuf: buffer pointer 786 * @rx_tlv_hdr: start of rx tlv header 787 * @peer: peer reference 788 * 789 * Return: None 790 */ 791 static void 792 dp_rx_process_err_unencrypted(struct dp_soc *soc, qdf_nbuf_t nbuf, 793 uint8_t *rx_tlv_hdr, struct dp_peer *peer) 794 { 795 uint32_t pkt_len, l2_hdr_offset; 796 uint16_t msdu_len; 797 struct dp_vdev *vdev; 798 struct ether_header *eh; 799 bool is_broadcast; 800 801 /* 802 * Check if DMA completed -- msdu_done is the last bit 803 * to be written 804 */ 805 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { 806 807 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 808 FL("MSDU DONE failure")); 809 810 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, 811 QDF_TRACE_LEVEL_INFO); 812 qdf_assert(0); 813 } 814 815 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); 816 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 817 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; 818 819 /* Set length in nbuf */ 820 qdf_nbuf_set_pktlen(nbuf, pkt_len); 821 822 qdf_nbuf_set_next(nbuf, NULL); 823 824 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 825 qdf_nbuf_set_rx_chfrag_end(nbuf, 1); 826 827 if (!peer) { 828 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL"); 829 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 830 qdf_nbuf_len(nbuf)); 831 /* Trigger invalid peer handler wrapper */ 832 dp_rx_process_invalid_peer_wrapper(soc, nbuf, true); 833 return; 834 } 835 836 vdev = peer->vdev; 837 if (!vdev) { 838 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 839 FL("INVALID vdev %pK OR osif_rx"), vdev); 840 /* Drop & free packet */ 841 qdf_nbuf_free(nbuf); 842 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 843 return; 844 } 845 846 /* 847 * Advance the packet start pointer by total size of 848 * pre-header TLV's 849 */ 850 qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN); 851 852 if 
(vdev->rx_decap_type == htt_cmn_pkt_type_raw) 853 goto process_mesh; 854 855 /* 856 * WAPI cert AP sends rekey frames as unencrypted. 857 * Thus RXDMA will report unencrypted frame error. 858 * To pass WAPI cert case, SW needs to pass unencrypted 859 * rekey frame to stack. 860 */ 861 if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { 862 qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id); 863 goto process_rx; 864 } 865 /* 866 * In dynamic WEP case rekey frames are not encrypted 867 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and 868 * key install is already done 869 */ 870 if ((vdev->sec_type == cdp_sec_type_wep104) && 871 (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))) 872 goto process_rx; 873 874 process_mesh: 875 876 /* Drop & free packet if mesh mode not enabled */ 877 if (!vdev->mesh_vdev) { 878 qdf_nbuf_free(nbuf); 879 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 880 return; 881 } 882 883 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) 884 == QDF_STATUS_SUCCESS) { 885 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED, 886 FL("mesh pkt filtered")); 887 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); 888 889 qdf_nbuf_free(nbuf); 890 return; 891 } 892 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); 893 894 process_rx: 895 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) && 896 (vdev->rx_decap_type == 897 htt_cmn_pkt_type_ethernet))) { 898 eh = (struct ether_header *)qdf_nbuf_data(nbuf); 899 is_broadcast = (IEEE80211_IS_BROADCAST 900 (eh->ether_dhost)) ? 
1 : 0 ; 901 DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf)); 902 if (is_broadcast) { 903 DP_STATS_INC_PKT(peer, rx.bcast, 1, 904 qdf_nbuf_len(nbuf)); 905 } 906 } 907 908 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { 909 dp_rx_deliver_raw(vdev, nbuf, peer); 910 } else { 911 DP_STATS_INC(peer, rx.to_stack.num, 1); 912 vdev->osif_rx(vdev->osif_vdev, nbuf); 913 } 914 915 return; 916 } 917 918 /** 919 * dp_rx_process_mic_error(): Function to pass mic error indication to umac 920 * @soc: core DP main context 921 * @nbuf: buffer pointer 922 * @rx_tlv_hdr: start of rx tlv header 923 * @peer: peer handle 924 * 925 * return: void 926 */ 927 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, 928 uint8_t *rx_tlv_hdr, struct dp_peer *peer) 929 { 930 struct dp_vdev *vdev = NULL; 931 struct dp_pdev *pdev = NULL; 932 struct ol_if_ops *tops = NULL; 933 struct ieee80211_frame *wh; 934 uint8_t *rx_pkt_hdr; 935 uint16_t rx_seq, fragno; 936 unsigned int tid; 937 QDF_STATUS status; 938 939 if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) 940 return; 941 942 rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf)); 943 wh = (struct ieee80211_frame *)rx_pkt_hdr; 944 945 if (!peer) { 946 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 947 "peer not found"); 948 goto fail; 949 } 950 951 vdev = peer->vdev; 952 if (!vdev) { 953 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 954 "VDEV not found"); 955 goto fail; 956 } 957 958 pdev = vdev->pdev; 959 if (!pdev) { 960 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 961 "PDEV not found"); 962 goto fail; 963 } 964 965 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf)); 966 rx_seq = (((*(uint16_t *)wh->i_seq) & 967 IEEE80211_SEQ_SEQ_MASK) >> 968 IEEE80211_SEQ_SEQ_SHIFT); 969 970 fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf)); 971 972 /* Can get only last fragment */ 973 if (fragno) { 974 status = dp_rx_defrag_add_last_frag(soc, peer, 975 tid, rx_seq, nbuf); 976 977 
		/* NOTE(review): tail of the MIC-error indication routine whose
		 * head lies above this chunk; a consumed fragment is only
		 * logged, the (already consumed) nbuf must not be freed here.
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Frag pkt seq# %d frag# %d consumed status %d !",
			__func__, rx_seq, fragno, status);
		return;
	}

	/* Indicate the MIC failure up to the control path (if registered)
	 * so it can run countermeasures / notify supplicant.
	 */
	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	/* All error exits converge here: drop the frame */
	qdf_nbuf_free(nbuf);
	return;
}

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Reaps the REO error destination ring, demultiplexes each entry by error
 * class (invalid RBM, fragment, PN check failure, 2K jump) to a dedicated
 * handler, then replenishes every reaped RX buffer back to the per-pdev
 * refill ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	/* buffers reaped per pdev/mac; replenished in bulk after the loop */
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	/* quota is decremented for every ring entry inspected, even ones
	 * that are dropped or delegated to a sub-handler below.
	 */
	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
							LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		/* Resolve the link descriptor to its virtual address and
		 * pull out the MSDU list it carries.
		 */
		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				(msdu_list.rbm[0] !=
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Invalid RBM %d"), msdu_list.rbm[0]);

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			/* Fragmented MPDU: hand to the defrag path; the
			 * handler reports how many buffers it consumed and
			 * which mac they belong to (via &mac_id).
			 */
			count = dp_rx_frag_handle(soc,
					ring_desc, &mpdu_desc_info,
					&mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.
				reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);
			count = dp_rx_pn_error_handle(soc,
					ring_desc,
					&mpdu_desc_info, &mac_id,
					quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.
				reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);

			count = dp_rx_2k_jump_handle(soc,
					ring_desc, &mpdu_desc_info,
					&mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	/* Replenish all buffers reaped above, bucketed per mac/pdev */
	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped[mac_id],
					&dp_pdev->free_list_head,
					&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No.
of elements processed 1173 */ 1174 uint32_t 1175 dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota) 1176 { 1177 void *hal_soc; 1178 void *ring_desc; 1179 struct dp_rx_desc *rx_desc; 1180 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; 1181 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; 1182 uint32_t rx_bufs_used = 0; 1183 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 1184 uint8_t buf_type, rbm; 1185 uint32_t rx_buf_cookie; 1186 uint8_t mac_id; 1187 struct dp_pdev *dp_pdev; 1188 struct dp_srng *dp_rxdma_srng; 1189 struct rx_desc_pool *rx_desc_pool; 1190 uint8_t *rx_tlv_hdr; 1191 qdf_nbuf_t nbuf_head = NULL; 1192 qdf_nbuf_t nbuf_tail = NULL; 1193 qdf_nbuf_t nbuf, next; 1194 struct hal_wbm_err_desc_info wbm_err_info = { 0 }; 1195 uint8_t pool_id; 1196 uint8_t tid = 0; 1197 1198 /* Debug -- Remove later */ 1199 qdf_assert(soc && hal_ring); 1200 1201 hal_soc = soc->hal_soc; 1202 1203 /* Debug -- Remove later */ 1204 qdf_assert(hal_soc); 1205 1206 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { 1207 1208 /* TODO */ 1209 /* 1210 * Need API to convert from hal_ring pointer to 1211 * Ring Type / Ring Id combo 1212 */ 1213 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1214 FL("HAL RING Access Failed -- %pK"), hal_ring); 1215 goto done; 1216 } 1217 1218 while (qdf_likely(quota-- && (ring_desc = 1219 hal_srng_dst_get_next(hal_soc, hal_ring)))) { 1220 1221 /* XXX */ 1222 buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc); 1223 1224 /* 1225 * For WBM ring, expect only MSDU buffers 1226 */ 1227 qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF); 1228 1229 qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc) 1230 == HAL_RX_WBM_ERR_SRC_RXDMA) || 1231 (HAL_RX_WBM_ERR_SRC_GET(ring_desc) 1232 == HAL_RX_WBM_ERR_SRC_REO)); 1233 1234 /* 1235 * Check if the buffer is to be processed on this processor 1236 */ 1237 rbm = hal_rx_ret_buf_manager_get(ring_desc); 1238 1239 if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) { 1240 /* 
TODO */ 1241 /* Call appropriate handler */ 1242 DP_STATS_INC(soc, rx.err.invalid_rbm, 1); 1243 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1244 FL("Invalid RBM %d"), rbm); 1245 continue; 1246 } 1247 1248 rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc); 1249 1250 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); 1251 qdf_assert_always(rx_desc); 1252 1253 if (!dp_rx_desc_check_magic(rx_desc)) { 1254 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1255 FL("Invalid rx_desc cookie=%d"), 1256 rx_buf_cookie); 1257 continue; 1258 } 1259 1260 /* 1261 * this is a unlikely scenario where the host is reaping 1262 * a descriptor which it already reaped just a while ago 1263 * but is yet to replenish it back to HW. 1264 * In this case host will dump the last 128 descriptors 1265 * including the software descriptor rx_desc and assert. 1266 */ 1267 if (qdf_unlikely(!rx_desc->in_use)) { 1268 DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1); 1269 dp_rx_dump_info_and_assert(soc, hal_ring, 1270 ring_desc, rx_desc); 1271 } 1272 1273 nbuf = rx_desc->nbuf; 1274 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL); 1275 1276 /* 1277 * save the wbm desc info in nbuf TLV. 
We will need this 1278 * info when we do the actual nbuf processing 1279 */ 1280 hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc); 1281 wbm_err_info.pool_id = rx_desc->pool_id; 1282 hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf), 1283 &wbm_err_info); 1284 1285 rx_bufs_reaped[rx_desc->pool_id]++; 1286 1287 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf); 1288 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 1289 &tail[rx_desc->pool_id], 1290 rx_desc); 1291 } 1292 done: 1293 hal_srng_access_end(hal_soc, hal_ring); 1294 1295 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 1296 if (rx_bufs_reaped[mac_id]) { 1297 dp_pdev = soc->pdev_list[mac_id]; 1298 dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring; 1299 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1300 1301 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 1302 rx_desc_pool, rx_bufs_reaped[mac_id], 1303 &head[mac_id], &tail[mac_id]); 1304 rx_bufs_used += rx_bufs_reaped[mac_id]; 1305 } 1306 } 1307 1308 nbuf = nbuf_head; 1309 while (nbuf) { 1310 struct dp_peer *peer; 1311 uint16_t peer_id; 1312 1313 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1314 1315 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 1316 peer = dp_peer_find_by_id(soc, peer_id); 1317 1318 /* 1319 * retrieve the wbm desc info from nbuf TLV, so we can 1320 * handle error cases appropriately 1321 */ 1322 hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info); 1323 1324 /* Set queue_mapping in nbuf to 0 */ 1325 dp_set_rx_queue(nbuf, 0); 1326 1327 next = nbuf->next; 1328 if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { 1329 if (wbm_err_info.reo_psh_rsn 1330 == HAL_RX_WBM_REO_PSH_RSN_ERROR) { 1331 1332 DP_STATS_INC(soc, 1333 rx.err.reo_error 1334 [wbm_err_info.reo_err_code], 1); 1335 1336 switch (wbm_err_info.reo_err_code) { 1337 /* 1338 * Handling for packets which have NULL REO 1339 * queue descriptor 1340 */ 1341 case HAL_REO_ERR_QUEUE_DESC_ADDR_0: 1342 pool_id = wbm_err_info.pool_id; 1343 
QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, 1344 "Got pkt with REO ERROR: %d", 1345 wbm_err_info.reo_err_code); 1346 dp_rx_null_q_desc_handle(soc, nbuf, 1347 rx_tlv_hdr, 1348 pool_id, peer); 1349 nbuf = next; 1350 if (peer) 1351 dp_peer_unref_del_find_by_id( 1352 peer); 1353 continue; 1354 /* TODO */ 1355 /* Add per error code accounting */ 1356 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: 1357 pool_id = wbm_err_info.pool_id; 1358 QDF_TRACE(QDF_MODULE_ID_DP, 1359 QDF_TRACE_LEVEL_ERROR, 1360 "Got pkt with REO ERROR: %d", 1361 wbm_err_info.reo_err_code); 1362 if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) { 1363 peer_id = 1364 hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr); 1365 tid = 1366 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); 1367 } 1368 dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr, 1369 peer_id, tid); 1370 nbuf = next; 1371 if (peer) 1372 dp_peer_unref_del_find_by_id( 1373 peer); 1374 continue; 1375 default: 1376 QDF_TRACE(QDF_MODULE_ID_DP, 1377 QDF_TRACE_LEVEL_ERROR, 1378 "REO error %d detected", 1379 wbm_err_info.reo_err_code); 1380 } 1381 } 1382 } else if (wbm_err_info.wbm_err_src == 1383 HAL_RX_WBM_ERR_SRC_RXDMA) { 1384 if (wbm_err_info.rxdma_psh_rsn 1385 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 1386 DP_STATS_INC(soc, 1387 rx.err.rxdma_error 1388 [wbm_err_info.rxdma_err_code], 1); 1389 1390 switch (wbm_err_info.rxdma_err_code) { 1391 case HAL_RXDMA_ERR_UNENCRYPTED: 1392 dp_rx_process_err_unencrypted( 1393 soc, nbuf, 1394 rx_tlv_hdr, peer); 1395 nbuf = next; 1396 if (peer) 1397 dp_peer_unref_del_find_by_id( 1398 peer); 1399 continue; 1400 1401 case HAL_RXDMA_ERR_TKIP_MIC: 1402 dp_rx_process_mic_error(soc, nbuf, 1403 rx_tlv_hdr, 1404 peer); 1405 nbuf = next; 1406 if (peer) { 1407 DP_STATS_INC(peer, rx.err.mic_err, 1); 1408 dp_peer_unref_del_find_by_id( 1409 peer); 1410 } 1411 continue; 1412 1413 case HAL_RXDMA_ERR_DECRYPT: 1414 if (peer) 1415 DP_STATS_INC(peer, rx.err.decrypt_err, 1); 1416 QDF_TRACE(QDF_MODULE_ID_DP, 1417 QDF_TRACE_LEVEL_DEBUG, 1418 "Packet received with 
Decrypt error"); 1419 break; 1420 1421 default: 1422 QDF_TRACE(QDF_MODULE_ID_DP, 1423 QDF_TRACE_LEVEL_DEBUG, 1424 "RXDMA error %d", 1425 wbm_err_info. 1426 rxdma_err_code); 1427 } 1428 } 1429 } else { 1430 /* Should not come here */ 1431 qdf_assert(0); 1432 } 1433 1434 if (peer) 1435 dp_peer_unref_del_find_by_id(peer); 1436 1437 hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr, 1438 QDF_TRACE_LEVEL_DEBUG); 1439 qdf_nbuf_free(nbuf); 1440 nbuf = next; 1441 } 1442 return rx_bufs_used; /* Assume no scale factor for now */ 1443 } 1444 1445 /** 1446 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs 1447 * 1448 * @soc: core DP main context 1449 * @mac_id: mac id which is one of 3 mac_ids 1450 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 1451 * @head: head of descs list to be freed 1452 * @tail: tail of decs list to be freed 1453 1454 * Return: number of msdu in MPDU to be popped 1455 */ 1456 static inline uint32_t 1457 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, 1458 void *rxdma_dst_ring_desc, 1459 union dp_rx_desc_list_elem_t **head, 1460 union dp_rx_desc_list_elem_t **tail) 1461 { 1462 void *rx_msdu_link_desc; 1463 qdf_nbuf_t msdu; 1464 qdf_nbuf_t last; 1465 struct hal_rx_msdu_list msdu_list; 1466 uint16_t num_msdus; 1467 struct hal_buf_info buf_info; 1468 void *p_buf_addr_info; 1469 void *p_last_buf_addr_info; 1470 uint32_t rx_bufs_used = 0; 1471 uint32_t msdu_cnt; 1472 uint32_t i; 1473 uint8_t push_reason; 1474 uint8_t rxdma_error_code = 0; 1475 uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST; 1476 struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); 1477 1478 msdu = 0; 1479 1480 last = NULL; 1481 1482 hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info, 1483 &p_last_buf_addr_info, &msdu_cnt); 1484 1485 push_reason = 1486 hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc); 1487 if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { 1488 rxdma_error_code = 1489 
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	/* Walk the chain of MSDU link descriptors; buf_info.paddr == 0
	 * terminates the chain.
	 */
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belongs to NSS offloaded radio &&
			 * the rbm is not SW1_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
				wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;

					/* unmap before freeing back to the
					 * allocator
					 */
					qdf_nbuf_unmap_single(soc->osdev, msdu,
						QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"[%s][%d] msdu_nbuf=%pK ",
						__func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
						tail, rx_desc);
				}
			}
		} else {
			/* special cookie: flag as RXDMA WAR, nothing to free */
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
			&p_buf_addr_info);

		/* Return the previous link descriptor; bm_action decided above */
		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Drains the per-mac RXDMA error destination ring, popping and freeing the
 * MSDUs of each erroneous MPDU, then replenishes the freed buffers to the
 * refill ring.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL Monitor Destination Ring Init \
			Failed -- %pK",
			__func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	/* No srng_access_end needed here: access never started on failure */
	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL Monitor Destination Ring Init \
			Failed -- %pK",
			__func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

			rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						rxdma_dst_ring_desc,
						&head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
			rx_desc_pool, rx_bufs_used, &head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}