/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
 *			      back on the same vap or a different vap
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * The Multicast Echo Check is required only if the vdev is STA
	 * and the received pkt is a multicast/broadcast pkt. Otherwise,
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * If the received pkt's source MAC address matches the vdev
	 * MAC address, drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			  vdev->mac_addr.raw,
			  DP_MAC_ADDR_LEN)))
		return true;

	/*
	 * In qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to the root AP and loop back to reach the wireless stations, and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's source MAC address matches that of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		/* sa_idx is unsigned, so only the upper bound is checked */
		if (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In such scenarios, where sa is valid but the AST
			 * is not in ast_table, use the API below to get the
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[DP_MAC_ADDR_LEN]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_soc(soc, &data[DP_MAC_ADDR_LEN]);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[DP_MAC_ADDR_LEN]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
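
/*
 * Editor's note -- illustrative sketch, not part of the data path: the
 * &data[DP_MAC_ADDR_LEN] dereference above assumes an Ethernet-decapped
 * frame, where the source address immediately follows the destination
 * address. In terms of struct ether_header (used further below in this
 * file), the equivalent access would be:
 *
 *	struct ether_header *eh = (struct ether_header *)data;
 *	uint8_t *dst_mac = eh->ether_dhost;	(offset 0)
 *	uint8_t *src_mac = eh->ether_shost;	(offset DP_MAC_ADDR_LEN)
 *
 * so the first MEC comparison is simply "src MAC == our own MAC".
 */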

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *				      HW (WBM), by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program in the descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program in the descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
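
/*
 * Editor's note -- illustrative sketch, using only HAL calls already
 * present above: every producer of a source SRNG in this file follows
 * the same access pattern, which the two functions above instantiate
 * for the WBM release ring:
 *
 *	if (hal_srng_access_start(hal_soc, srng))
 *		goto fail;			(ring busy / invalid)
 *	desc = hal_srng_src_get_next(hal_soc, srng);
 *	if (desc)
 *		... fill desc, e.g. hal_rx_msdu_link_desc_set() ...
 *	hal_srng_access_end(hal_soc, srng);	(commits the head pointer)
 *
 * A NULL desc means the ring is full; the entry is dropped and
 * reported, as in the else-branch above.
 */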

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
				 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				 union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP determines that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A), the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}
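
/*
 * Editor's note -- illustrative sketch, using only helpers already
 * referenced in this file: dp_peer_find_by_id() returns the peer with
 * a reference held, so every caller in the error path must pair it
 * with dp_peer_unref_del_find_by_id(), as dp_rx_pn_error_handle()
 * does above:
 *
 *	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
 *
 *	if (peer) {
 *		... use peer fields (mac_addr, vdev, rx_tid, ...) ...
 *		dp_peer_unref_del_find_by_id(peer);	(release the ref)
 *	}
 *
 * Forgetting the release leaks the peer and stalls peer deletion.
 */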

/**
 * dp_rx_chain_msdus() - Function to chain all MSDUs of an MPDU
 *			 to the pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while Rx window size is still initialized to value of 2. Or
 * it may also happen if negotiated window size is 1 but peer
 * sends aggregates.
 *
 * Return: None
 */
static void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(rx_tid == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);
	if (rx_tid->ppdu_id_2k != ppdu_id) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid,
						peer->vdev->ctrl_vdev,
						rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	/* Release the peer reference taken by dp_peer_find_by_id() */
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
}

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, which is when the TID queue is set up.
 * Or it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: None
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	struct ether_header *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  FL("Len %d Extn list %pK "),
		  (uint32_t)qdf_nbuf_len(nbuf),
		  qdf_nbuf_get_ext_list(nbuf));
	/* Set length in nbuf */
	if (!qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL");

		DP_STATS_INC_PKT(soc,
				 rx.err.rx_invalid_peer,
				 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode, if the received packet matches any of the vdev
	 * MAC addresses, drop it. Do not receive multicast packets
	 * originated from any proxy STA.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  "%s free buffer for multicast packet",
			  __func__);
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				    hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (peer && !peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: mac_add:%pM msdu_len %d hdr_off %d",
		  __func__, peer->mac_addr.raw, msdu_len,
		  l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
		       qdf_nbuf_data(nbuf), 128, false);
#endif /* QCA_WIFI_NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  FL("received pkt with same src MAC"));
			DP_STATS_INC_PKT(peer, rx.mec_drop, 1,
					 qdf_nbuf_len(nbuf));

			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  FL("vdev %pK osif_rx %pK"), vdev,
				  vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
							rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (struct ether_header *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
}

/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 *
 * Return: None
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct ether_header *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
	    == QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
			  FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		is_broadcast = IEEE80211_IS_BROADCAST(eh->ether_dhost) ?
				1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	uint16_t rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found");
		goto fail;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
	rx_seq = (((*(uint16_t *)wh->i_seq) &
		   IEEE80211_SEQ_SEQ_MASK) >>
		  IEEE80211_SEQ_SEQ_SHIFT);

	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

	/* Can get only last fragment */
	if (fragno) {
		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Frag pkt seq# %d frag# %d consumed status %d !",
			  __func__, rx_seq, fragno, status);
		return;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
}
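
/*
 * Editor's note -- illustrative sketch of the 802.11 sequence-control
 * decoding done above, assuming the standard field layout (fragment
 * number in bits 0-3, sequence number in bits 4-15):
 *
 *	uint16_t seq_ctrl = *(uint16_t *)wh->i_seq;
 *	uint16_t seq = (seq_ctrl & IEEE80211_SEQ_SEQ_MASK) >>
 *			IEEE80211_SEQ_SEQ_SHIFT;	(0..4095)
 *	uint16_t frag = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
 *
 * A non-zero frag number means the MIC failure arrived on a fragment,
 * so the nbuf is handed to the defrag path instead of being reported.
 */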

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), msdu_list.rbm[0]);

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
							  ring_desc,
							  &mpdu_desc_info,
							  &head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			rx_bufs_used += dp_rx_pn_error_handle(soc,
							      ring_desc,
							      &mpdu_desc_info,
							      &head, &tail,
							      quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			rx_bufs_used += dp_rx_2k_jump_handle(soc,
							     ring_desc,
							     &mpdu_desc_info,
							     &head, &tail,
							     quota);
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					rx_bufs_used, &head, &tail);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
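
/*
 * Editor's note -- illustrative sketch, using only HAL calls already
 * present in this file: all three error-processing loops here (the REO
 * error ring above, the WBM release ring and the RXDMA destination
 * ring below) reap a destination SRNG with the same quota-bounded
 * pattern:
 *
 *	if (hal_srng_access_start(hal_soc, hal_ring))
 *		goto done;			(ring busy / invalid)
 *	while (quota-- &&
 *	       (desc = hal_srng_dst_get_next(hal_soc, hal_ring))) {
 *		... demultiplex on the error/buffer type ...
 *	}
 *	done:
 *	hal_srng_access_end(hal_soc, hal_ring);	(commits the tail pointer)
 *
 * followed by dp_rx_buffers_replenish() for however many RX buffers
 * the handlers consumed.
 */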

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id],
						&tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
							   "Got pkt with REO ERROR: %d",
							   wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id,
								 peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
						peer_id =
							hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
						tid =
							hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "REO error %d detected",
						  wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_err_deliver(soc, nbuf,
							  rx_tlv_hdr, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer,
							     rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}
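
/*
 * Editor's note -- illustrative sketch of the stash/restore protocol
 * used by dp_rx_wbm_err_process() above. The ring descriptor is gone
 * by the time the nbuf list is walked, so the error info is parked in
 * the nbuf's own TLV area between the two passes:
 *
 *	reap pass (ring descriptor still valid):
 *		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
 *		wbm_err_info.pool_id = rx_desc->pool_id;
 *		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
 *					       &wbm_err_info);
 *
 *	process pass (ring descriptor already recycled):
 *		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);
 *		... demultiplex on wbm_err_src / reo_err_code ...
 */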

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   void *rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = 0;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the MSDUs belong to an NSS offloaded radio
			 * and the rbm is not HAL_RX_BUF_RBM_SW3_BM, return
			 * the msdu_link descriptor without freeing the
			 * msdus (nbufs). Let these buffers be given to the
			 * NSS completion ring for NSS to free them.
			 * Otherwise, iterate through the msdu link desc
			 * list and free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx)) {
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			} else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
			hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}
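
/*
 * Editor's note -- illustrative summary of the descriptor recycling
 * contract shared by dp_rx_err_process(), dp_rx_wbm_err_process() and
 * dp_rxdma_err_process() above, using only helpers from this file:
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *	uint32_t used = 0;
 *
 *	while (reaping error ring entries) {
 *		qdf_nbuf_free(rx_desc->nbuf);	(or hand the nbuf off)
 *		dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *		used++;
 *	}
 *	if (used)
 *		dp_rx_buffers_replenish(soc, mac_id, rxdma_srng,
 *					rx_desc_pool, used, &head, &tail);
 *
 * i.e. every reaped descriptor is either refilled with a fresh buffer
 * or returned to the free list -- nothing is leaked back to HW.
 */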