/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped back
 *			      on the same vap or a different vap
 *
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			  vdev->mac_addr.raw,
			  DP_MAC_ADDR_LEN)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's src mac addr matches the wired PC's MAC
	 * addr behind the STA, or a wireless STA's MAC addr behind the
	 * repeater, then drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA, and
			 * without this event we don't know what the STA's
			 * sa_idx is. For this reason the AST is still not
			 * associated to any index position in ast_table.
			 * In such scenarios, where sa is valid but the ast
			 * is not in ast_table, use the API below to get the
			 * AST entry for the STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[DP_MAC_ADDR_LEN]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[DP_MAC_ADDR_LEN],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[DP_MAC_ADDR_LEN]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
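
/*
 * Illustrative sketch (not part of the driver): the MEC check above
 * compares the source MAC of the received frame against the vdev MAC.
 * Once the RX TLVs have been accounted for, qdf_nbuf_data() points at
 * the 802.3 header, so the source address starts DP_MAC_ADDR_LEN (6)
 * bytes in, right after the destination address:
 *
 *	uint8_t *eth_hdr = qdf_nbuf_data(nbuf);
 *	uint8_t *da = &eth_hdr[0];			// destination MAC
 *	uint8_t *sa = &eth_hdr[DP_MAC_ADDR_LEN];	// source MAC
 *
 *	if (!qdf_mem_cmp(sa, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
 *		;	// frame originated from this vdev: echo, drop it
 */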

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to take on the returned descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr,
			  srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to take on the returned descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
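
/*
 * Illustrative sketch (not part of the driver): once all MSDUs of an
 * errored MPDU have been dealt with, the MSDU link descriptor is handed
 * back to the buffer manager, and bm_action selects what WBM does with
 * it. Both constants are used elsewhere in this file:
 *
 *	// link descriptor goes straight back to the idle list
 *	dp_rx_link_desc_return(soc, ring_desc,
 *			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 *
 *	// MSDU buffers are released to their owner first
 *	dp_rx_link_desc_return(soc, ring_desc,
 *			       HAL_BM_ACTION_RELEASE_MSDU_LIST);
 */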

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id in which the dropped buffers are to be replenished
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
				 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				 uint8_t *mac_id,
				 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from a MSDU link belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = soc->pdev_list[rx_desc->pool_id];

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}
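
/*
 * Illustrative sketch (not part of the driver): each MSDU slot in the
 * link descriptor carries a software cookie that maps back to the host
 * rx descriptor (and hence the nbuf) that was originally posted to
 * RXDMA, which is how the drop loop above finds its buffers:
 *
 *	struct dp_rx_desc *rx_desc;
 *
 *	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, msdu_list.sw_cookie[i]);
 *	// rx_desc->nbuf is the buffer to free or deliver;
 *	// rx_desc->pool_id identifies the pdev pool it came from
 */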

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id in which the dropped buffers are to be replenished
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK "
			  "(%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id in which the dropped buffers are to be replenished
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     uint8_t *mac_id, uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				mac_id, quota);
}

#ifdef CONFIG_MCL
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
	do {                                \
		qdf_assert_always(!(head)); \
		qdf_assert_always(!(tail)); \
	} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then free all the msdus of the invalid peer
		 * msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
					&(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; the check is added for debugging
	 * in case of some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
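
/*
 * Illustrative sketch (not part of the driver): DP_RX_LIST_APPEND
 * (defined in dp_rx.h) chains nbufs into a singly linked list through
 * their next pointers, tracking both ends so appends stay O(1).
 * Conceptually it behaves like:
 *
 *	if (!head)
 *		head = nbuf;			// first element
 *	else
 *		qdf_nbuf_set_next(tail, nbuf);	// link after current tail
 *	tail = nbuf;
 *	qdf_nbuf_set_next(tail, NULL);		// keep the list terminated
 */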

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while Rx window size is still initialized to value of 2. Or
 * it may also happen if negotiated window size is 1 but peer
 * sends aggregates.
 *
 * Return: None
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(rx_tid == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);
	if (rx_tid->ppdu_id_2k != ppdu_id) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid,
						peer->vdev->ctrl_vdev,
						rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
}

#ifdef QCA_WIFI_QCA6390
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * driver may not be able to find the peer. Try finding the peer
 * by addr_2 of the received MPDU. If the peer is found that way,
 * then most likely sw_peer_id & ast_idx are corrupted.
 *
 * Return: true if the peer is found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	uint8_t local_id;
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = soc->pdev_list[pool_id];
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/*
	 * WAR: In certain types of packets if peer_id is not correct,
	 * the driver may not be able to find the peer. Try finding the
	 * peer by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2, &local_id);
	if (peer) {
		dp_err("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}
	return false;
}
#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}
#endif

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: None
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	if (!qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_set_pktlen(nbuf, pkt_len);

	QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
			   "Len %d Extn list %pK ",
			   (uint32_t)qdf_nbuf_len(nbuf),
			   qdf_nbuf_get_ext_list(nbuf));
	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL");

		DP_STATS_INC_PKT(soc,
				 rx.err.rx_invalid_peer,
				 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  "%s free buffer for multicast packet",
			  __func__);
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (peer && !peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: mac_addr:%pM msdu_len %d hdr_off %d",
		  __func__, peer->mac_addr.raw, msdu_len,
		  l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
		       qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  FL("received pkt with same src MAC"));
			DP_STATS_INC_PKT(peer, rx.mec_drop, 1,
					 qdf_nbuf_len(nbuf));

			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  FL("vdev %pK osif_rx %pK"), vdev,
				  vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
							rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
}
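
/*
 * Illustrative sketch (not part of the driver): before delivery, the
 * handler above sizes the frame from the RX TLVs and strips the
 * pre-header area. The buffer layout assumed here is:
 *
 *	[ RX_PKT_TLVS_LEN TLVs ][ l2_hdr_offset pad ][ 802.3 hdr + payload ]
 *
 *	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
 *	qdf_nbuf_set_pktlen(nbuf, pkt_len);
 *	// leave only the 802.3 header + payload for the stack
 *	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);
 */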

/**
 * dp_rx_process_err_unencrypted() - Function to deliver rxdma unencrypted_err
 *				     frames to OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 *
 * Return: None
 */
static void
dp_rx_process_err_unencrypted(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);
		goto process_rx;
	}
	/*
	 * In dynamic WEP case rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled
	 * and key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
	    == QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
			  FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	uint16_t rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found");
		goto fail;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
	rx_seq = (((*(uint16_t *)wh->i_seq) &
		   IEEE80211_SEQ_SEQ_MASK) >>
		  IEEE80211_SEQ_SEQ_SHIFT);

	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

	/* Can get only last fragment */
	if (fragno) {
		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Frag pkt seq# %d frag# %d consumed status %d !",
			  __func__, rx_seq, fragno, status);
		return;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
}
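
/*
 * Illustrative sketch (not part of the driver): the 802.11 sequence
 * control field packs the fragment number in its low 4 bits and the
 * sequence number in the upper 12; the rx_seq extraction above relies
 * on that layout:
 *
 *	uint16_t seq_ctrl = *(uint16_t *)wh->i_seq;
 *	uint16_t rx_seq = (seq_ctrl & IEEE80211_SEQ_SEQ_MASK) >>
 *			  IEEE80211_SEQ_SEQ_SHIFT;	// bits 4..15
 *	uint16_t fragno = seq_ctrl & 0x000f;		// bits 0..3
 */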

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), msdu_list.rbm[0]);

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc,
						     &mpdu_desc_info,
						     &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
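
/*
 * Illustrative sketch (not part of the driver): the REO error ring is
 * typically drained from the soc service loop with a per-pass budget.
 * The names budget and reo_exception_ring are assumptions here:
 *
 *	uint32_t budget = 32;
 *	uint32_t done;
 *
 *	done = dp_rx_err_process(soc, soc->reo_exception_ring.hal_srng,
 *				 budget);
 *	// 'done' buffers were reaped and replenished back to RXDMA
 */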

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring,
						   ring_desc, rx_desc);
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id],
						&tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
						"Got pkt with REO ERROR: %d",
						wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id,
								 peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_ERROR,
						"Got pkt with REO ERROR: %d",
						wbm_err_info.reo_err_code);
					if (hal_rx_msdu_end_first_msdu_get(
								rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(
								rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(
							hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "REO error %d detected",
						  wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_process_err_unencrypted(
							soc, nbuf,
							rx_tlv_hdr, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer,
							rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer,
							rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   void *rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(
						rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio
			 * and the rbm is not SW1_BM, then return the
			 * msdu_link descriptor without freeing the msdus
			 * (nbufs): let these buffers be given to the NSS
			 * completion ring for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}
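
/*
 * Illustrative sketch (not part of the driver): dp_rxdma_err_process()
 * is driven per mac from the service loop; the iteration over all macs
 * and the quota of 32 below are assumptions for illustration:
 *
 *	uint32_t mac_id;
 *
 *	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++)
 *		(void)dp_rxdma_err_process(soc, mac_id, 32);
 */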