/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 *			      back on same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 *
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
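	/*
	 * Note: for an Ethernet-decapped frame the header is DA (6 bytes)
	 * followed by SA (6 bytes), so &data[DP_MAC_ADDR_LEN] below points
	 * at the source MAC address of the received packet.
	 */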
	/*
	 * if the received pkt's src mac addr matches the vdev
	 * mac address then drop the pkt as it is looped back
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			  vdev->mac_addr.raw,
			  DP_MAC_ADDR_LEN)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/* if the received pkt's src mac addr matches the
	 * wired PC's MAC addr which is behind the STA or the
	 * wireless STA's MAC addr which is behind the Repeater,
	 * then drop the pkt as it is looped back
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In this kind of scenario, where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * the AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[DP_MAC_ADDR_LEN]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[DP_MAC_ADDR_LEN],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[DP_MAC_ADDR_LEN]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
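/*
 * Both helpers below hand an MPDU link descriptor back over the SW2WBM
 * release ring. The bm_action argument selects the treatment on return:
 * HAL_BM_ACTION_PUT_IN_IDLE_LIST recycles the link descriptor itself,
 * while HAL_BM_ACTION_RELEASE_MSDU_LIST also releases the MSDU buffers
 * it points to (as used for the NSS-offload and invalid-RBM cases
 * elsewhere in this file).
 */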
/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to take on release
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to take on release
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: set to the mac/pool id the reaped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
				 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				 uint8_t *mac_id,
				 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);
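	/*
	 * Every MSDU of the MPDU shares this one link descriptor; walk the
	 * list, free each buffer and recycle its software descriptor. quota
	 * is decremented per MSDU so the caller's budget is honoured.
	 */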
	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = soc->pdev_list[rx_desc->pool_id];

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: set to the mac/pool id the reaped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");
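	/*
	 * peer_pn_policy is never set today (see the TODO above), so every
	 * PN-failed MPDU currently takes the drop path below.
	 */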
	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: set to the mac/pool id the reaped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate a DELBA/DEAUTH frame
 * For case B) the frame is normally dropped; no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     uint8_t *mac_id, uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				mac_id, quota);
}

#ifdef CONFIG_MCL
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
	do {                                \
		qdf_assert_always(!(head)); \
		qdf_assert_always(!(tail)); \
	} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: pdev is looked up directly by mac_id here; revisit once
	 * multiple radios are fully supported
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}
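	/*
	 * The MPDU is complete once an MSDU arrives with the 'msdu done'
	 * attention bit set for the same PPDU that opened this chain.
	 */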
	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; keep the check for debugging purposes,
	 * to catch corner cases.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in the non-BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to a value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 * Return: None
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(rx_tid == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);
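	/*
	 * The first 2k-jump frame of each PPDU only records the PPDU id and
	 * is dropped; the DELBA path below runs when further frames of the
	 * same PPDU keep hitting the exception.
	 */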
	if (rx_tid->ppdu_id_2k != ppdu_id) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid,
						peer->vdev->ctrl_vdev,
						rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
}

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, i.e. before the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: None
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	struct ether_header *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
			   "Len %d Extn list %pK ",
			   (uint32_t)qdf_nbuf_len(nbuf),
			   qdf_nbuf_get_ext_list(nbuf));
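	/*
	 * For a scatter/gather nbuf (non-empty extension list) the frag
	 * lengths are expected to have been set when the chain was built,
	 * so only standalone buffers are resized here.
	 */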
	/* Set length in nbuf */
	if (!qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL");

		DP_STATS_INC_PKT(soc,
				 rx.err.rx_invalid_peer,
				 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode, if the received packet matches any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  "%s free buffer for multicast packet",
			  __func__);
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				    hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);
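	/*
	 * A NULL-queue error on a QoS TID usually means the REO queue for
	 * that TID was never set up; create it now so subsequent frames on
	 * this TID can be routed normally.
	 */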
	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (peer && !peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: mac_addr:%pM msdu_len %d hdr_off %d",
		  __func__, peer->mac_addr.raw, msdu_len,
		  l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
		       qdf_nbuf_data(nbuf), 128, false);
#endif /* QCA_WIFI_NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  FL("received pkt with same src MAC"));
			DP_STATS_INC_PKT(peer, rx.mec_drop, 1,
					 qdf_nbuf_len(nbuf));

			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  FL("vdev %pK osif_rx %pK"), vdev,
				  vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
							rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (struct ether_header *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
}

/**
 * dp_rx_process_err_unencrypted() - Function to deliver rxdma unencrypted_err
 *				     frames to OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 *
 * Return: None
 */
static void
dp_rx_process_err_unencrypted(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct ether_header *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);

		if (qdf_likely(vdev->osif_rx)) {
			DP_STATS_INC(peer, rx.to_stack.num, 1);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
		} else {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}

		return;
	}

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			== QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
			  FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		is_broadcast = (IEEE80211_IS_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	uint16_t rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found");
		goto fail;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
	rx_seq = (((*(uint16_t *)wh->i_seq) &
		   IEEE80211_SEQ_SEQ_MASK) >>
		  IEEE80211_SEQ_SEQ_SHIFT);

	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

	/* Only the last fragment of a fragmented MPDU can land here */
	if (fragno) {
		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Frag pkt seq# %d frag# %d consumed status %d !",
			  __func__, rx_seq, fragno, status);
		return;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
}
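/*
 * Top-level REO error ring handling below. Today three MPDU-level
 * conditions are demultiplexed from this ring: fragmented MPDUs (handed
 * to the re-assembly path), PN check failures, and 2k sequence-number
 * jumps; ring entries matching none of these fall through the loop
 * without per-code handling.
 */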
/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), msdu_list.rbm[0]);

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc,
						     &mpdu_desc_info,
						     &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * An unlikely scenario: the host reaps a descriptor it
		 * already reaped just a while ago but has not yet
		 * replenished back to HW.
		 * In this case the host will dump the last 128 descriptors,
		 * including the software descriptor rx_desc, and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring,
						   ring_desc, rx_desc);
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}
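	/*
	 * Second pass: the ring has been released and the buffers
	 * replenished; now walk the reaped nbuf list and dispatch each
	 * frame to its handler based on the WBM error info that was cached
	 * in the TLV area above.
	 */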
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error[wbm_err_info.reo_err_code],
					     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
							   "Got pkt with REO ERROR: %d",
							   wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "REO error %d detected",
						  wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error[wbm_err_info.rxdma_err_code],
					     1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_process_err_unencrypted(
							soc, nbuf,
							rx_tlv_hdr, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer, rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer, rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   void *rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}
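	/*
	 * MSDU link descriptors are chained: keep walking until the next
	 * buf_info carries a zero physical address, returning each link
	 * descriptor over SW2WBM as we go.
	 */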
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW1_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx)) {
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			} else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}
/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
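	/*
	 * Note: mac_id is SoC-scoped. dp_get_mac_id_for_mac() converts it
	 * to the pdev-relative index used below to pick the rxdma error
	 * destination ring, since a pdev can host more than one mac.
	 */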
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
				hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}