/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
 *			      back on the same vap or a different vap
 *
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt; otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * if the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			  vdev->mac_addr.raw,
			  DP_MAC_ADDR_LEN)))
		return true;
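
	/*
	 * Note on layout: the SA is read at &data[DP_MAC_ADDR_LEN] here
	 * and in the AST lookups below on the assumption that the frame
	 * is in ethernet (802.3) decap format, i.e. bytes 0..5 carry the
	 * DA and the next DP_MAC_ADDR_LEN bytes carry the SA.
	 */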

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * if the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or the MAC addr of a wireless STA
	 * behind the Repeater, then drop the pkt as it is looped back
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In this kind of scenario, where sa is valid but
			 * ast is not in ast_table, we use the below API to
			 * get the AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_hash_find(soc,
						    &data[DP_MAC_ADDR_LEN]);
		}
	} else {
		ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]);
	}

	if (ase) {
		ase->ast_idx = sa_idx;
		soc->ast_table[sa_idx] = ase;

		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[DP_MAC_ADDR_LEN]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
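
/*
 * Usage sketch (assumed call pattern, mirroring dp_rx_link_desc_return()
 * below): callers extract the buffer address info from an error ring
 * entry and hand the link descriptor back to the buffer manager, e.g.
 *
 *	buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
 *	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
 *				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 */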
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr,
			  srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
				 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				 union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}
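
/*
 * A note on the drop path above: each MSDU's sw_cookie from the link
 * descriptor is converted back to its host rx descriptor, the nbuf is
 * freed, and the descriptor is queued on the caller's local free list
 * so the caller can replenish RXDMA buffers in one batch.
 */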

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}
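
/*
 * Concrete example for the 2K-jump cases documented below (assuming the
 * usual 12-bit sequence space, where 2K is half the space): with
 * LSN = 100, an MPDU arriving with SN = 2148 (100 + 2048) trips this
 * error; whether it is case A or case B depends on where the SSN-based
 * BA window sits.
 */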

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate a DELBA/DEAUTH frame
 * For case B) the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
				&(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
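
/*
 * The invalid peer msdu list built by dp_rx_chain_msdus() is consumed
 * via dp_rx_process_invalid_peer_wrapper() once mpdu_done is reported;
 * see dp_rx_null_q_desc_handle() below.
 */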

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in the non-BA case. This typically
 * may happen if aggregates are received on a QoS enabled TID
 * while the Rx window size is still initialized to a value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 */
static void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(rx_tid == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);
	if (rx_tid->ppdu_id_2k != ppdu_id) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid,
						peer->vdev->ctrl_vdev,
						rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	qdf_nbuf_free(nbuf);
}
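
/*
 * Recovery sketch for the handler above: the first frame of a new PPDU
 * that still shows the 2k error triggers a single DELBA (reason
 * IEEE80211_REASON_QOS_SETUP_REQUIRED) towards the peer through the
 * ol_ops->send_delba callback; further DELBAs are throttled by
 * delba_tx_status, which is presumably cleared once the DELBA tx
 * completion is reported back to DP.
 */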

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 *
 * This function handles NULL queue descriptor violations arising out of
 * a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QoS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QoS TID queue, in the absence of any other default TID queue.
 * This error can show up in either a REO destination ring or a WBM
 * release ring.
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr,
			 uint8_t pool_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint8_t tid;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
			hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
			hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  FL("Len %d Extn list %pK "),
		  (uint32_t)qdf_nbuf_len(nbuf),
		  qdf_nbuf_get_ext_list(nbuf));
	/* Set length in nbuf */
	if (!qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL");

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));
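
	/*
	 * From here the frame runs a chain of drop filters before
	 * delivery: multicast echo (loopback), qwrap multicast loopback,
	 * NAWDS multicast and the WDS rx policy check.
	 */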

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		qdf_nbuf_free(nbuf);
		return;
	}

	/*
	 * In qwrap mode, if the received packet matches any of the vdev
	 * MAC addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  "%s free buffer for multicast packet",
			  __func__);
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				    hal_rx_msdu_end_da_is_mcbc_get(
							rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (peer &&
		    peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: p_id %d msdu_len %d hdr_off %d",
		  __func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
		       qdf_nbuf_data(nbuf), 128, false);
#endif /* QCA_WIFI_NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  FL("vdev %pK osif_rx %pK"), vdev,
				  vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1,
					  qdf_nbuf_len(nbuf),
					  hal_rx_msdu_end_da_is_mcbc_get(
							rx_tlv_hdr));
			DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
}
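
/*
 * dp_rx_err_deliver() below is the delivery path for RXDMA
 * "unencrypted" errors; note that it only hands frames up the stack for
 * mesh vdevs and frees everything else.
 */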

/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	struct ether_header *eh;
	bool isBroadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			== QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
			  FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		isBroadcast = (IEEE80211_IS_BROADCAST
			       (eh->ether_dhost)) ? 1 : 0;
		if (isBroadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}
}

/**
 * dp_rx_process_mic_error() - Function to pass mic error indication to umac
 *
 * @soc: DP SOC handle
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
void
dp_rx_process_mic_error(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	struct dp_peer *peer;
	uint16_t peer_id, rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found");
		goto fail;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
	rx_seq = (((*(uint16_t *)wh->i_seq) &
			IEEE80211_SEQ_SEQ_MASK) >>
			IEEE80211_SEQ_SEQ_SHIFT);

	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

	/* Can get only last fragment */
	if (fragno) {
		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Frag pkt seq# %d frag# %d consumed status %d !",
			  __func__, rx_seq, fragno, status);
		return;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
}
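
/*
 * MIC failures on fragments are special-cased above: any non-zero
 * fragment number is handed to dp_rx_defrag_add_last_frag() so that the
 * partially reassembled MPDU can be completed and disposed of by the
 * defrag path instead of being reported to umac right away.
 */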

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to the REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {
		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
					LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), msdu_list.rbm[0]);

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);
			rx_bufs_used += dp_rx_pn_error_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}
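
		/*
		 * Note: 2k-jump errors seen on the REO error ring are
		 * simply dropped below; the DELBA based recovery runs
		 * on the WBM error path via dp_2k_jump_handle().
		 */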

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error
				[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);
			rx_bufs_used += dp_rx_2k_jump_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					rx_bufs_used, &head, &tail);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to the WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint16_t peer_id = 0xFFFF;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {
		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}
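
		/*
		 * First pass: only reap. The nbuf is unmapped, the WBM
		 * error info is stashed in the nbuf TLV area and the rx
		 * descriptor is queued for replenish; the per-error
		 * handling happens in a second pass over nbuf_head below,
		 * after the ring access is closed.
		 */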

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
				== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					rx.err.reo_error
					[wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
						"Got pkt with REO ERROR: %d",
						wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc,
								 nbuf,
								 rx_tlv_hdr,
								 pool_id);
					nbuf = next;
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					if (hal_rx_msdu_end_first_msdu_get(
								rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(
								rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(
							hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					continue;
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "REO error %d detected",
						  wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
				== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				struct dp_peer *peer = NULL;
				uint16_t peer_id = 0xFFFF;

				DP_STATS_INC(soc,
					rx.err.rxdma_error
					[wbm_err_info.rxdma_err_code], 1);
				peer_id = hal_rx_mpdu_start_sw_peer_id_get(
								rx_tlv_hdr);
				peer = dp_peer_find_by_id(soc, peer_id);
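
				/*
				 * peer may legitimately be NULL here
				 * (e.g. the frame raced with peer
				 * deletion), so the per-peer stats
				 * updates below are guarded.
				 */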
				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_err_deliver(soc,
							  nbuf,
							  rx_tlv_hdr);
					nbuf = next;
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc,
								nbuf,
								rx_tlv_hdr);
					nbuf = next;
					if (peer)
						DP_STATS_INC(peer,
							rx.err.mic_err, 1);
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer,
							rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   void *rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = 0;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(
						rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);
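
		/*
		 * HAL_RX_COOKIE_SPECIAL in the first sw_cookie appears to
		 * mark a link descriptor carrying no host-owned buffers;
		 * it is accounted as HAL_RXDMA_ERR_WAR below and only the
		 * link descriptor itself is returned.
		 */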
		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW1_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx)) {
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			} else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
					qdf_assert(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
						QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
			hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}