/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loopback
 *			      on the same vap or a different vap
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			  vdev->mac_addr.raw,
			  DP_MAC_ADDR_LEN)))
		return true;

	/*
	 * In qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to the root AP and loop back to reach the wireless stations, and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, then drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA, and without
			 * this event we don't know what the STA's sa_idx is.
			 * For this reason the AST is still not associated with
			 * any index position in ast_table.
			 * In such scenarios, where sa is valid but the AST is
			 * not in ast_table, use the API below to get the AST
			 * entry for the STA's own mac_address.
			 */
			ase = dp_peer_ast_hash_find(soc,
						    &data[DP_MAC_ADDR_LEN]);
		}
	} else {
		ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]);
	}

	if (ase) {
		ase->ast_idx = sa_idx;
		soc->ast_table[sa_idx] = ase;

		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[DP_MAC_ADDR_LEN]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
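
/*
 * Summary of the echo-check order implemented above:
 *  1) only STA vdevs receiving a multicast/broadcast DA are candidates;
 *  2) SA equal to the vdev's own MAC        -> looped back, drop;
 *  3) qwrap isolation mode                  -> never treated as an echo;
 *  4) SA resolving to a MEC-type AST entry, or to an AST entry owned by
 *     a different peer                      -> looped back, drop.
 * Any other frame is treated as genuine multicast and delivered.
 */
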
/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to be programmed in the descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}
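
/*
 * Typical usage (illustrative sketch, not a required call sequence): once
 * every MSDU referenced by a link descriptor has been consumed, its
 * buf_addr_info is handed back so WBM can recycle the descriptor, e.g.:
 *
 *	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
 *				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 *
 * dp_rx_link_desc_return() below is a thin wrapper that first extracts
 * buf_addr_info from a REO error ring descriptor.
 */
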
/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to be programmed in the descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
				 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				 union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}
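
/*
 * Note: dropped buffers are not returned to the pool one at a time. Their
 * descriptors are queued on the caller-supplied (head, tail) free-list and
 * the count is reported through the return value, so the error-ring
 * processing loop can replenish the RXDMA refill ring in a single batch.
 */
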
/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP determines that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 *	A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 *	B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate a DELBA/DEAUTH frame.
 * For case B), the frame is normally dropped; no further action is taken.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}
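
/*
 * Note: on the REO error ring this handler currently just drops the MPDU
 * via dp_rx_msdus_drop(). The DELBA-based recovery for 2K jumps is driven
 * from dp_2k_jump_handle() further below, which services the equivalent
 * error when it is reported through the WBM release ring.
 */
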
/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
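
/*
 * MPDU completion above is inferred rather than signalled explicitly: the
 * PPDU id latched from the first buffer is compared against each later
 * buffer's PHY PPDU id, and the chain is declared complete (mpdu_done)
 * once the ids match and the msdu_done attention bit is set.
 */
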
/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in the non-BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to a value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 * Return: None
 */
static void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(rx_tid == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);
	if (rx_tid->ppdu_id_2k != ppdu_id) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid,
						peer->vdev->ctrl_vdev,
						rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	qdf_nbuf_free(nbuf);
	return;
}
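
/*
 * Two guards above keep DELBA traffic bounded: a 2K jump is acted upon
 * only on the second violation seen for the same PPDU id (the first
 * occurrence merely latches ppdu_id_2k), and delba_tx_status suppresses
 * further DELBA requests while one is already outstanding.
 */
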
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID (the point at which the TID queue is
 * set up) has completed. Or it may also happen for MC/BC frames if they
 * are not routed to the non-QOS TID queue, in the absence of any other
 * default TID queue.
 * This error can show up in either a REO destination ring or the WBM
 * release ring.
 *
 * Return: None
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	struct ether_header *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  FL("Len %d Extn list %pK "),
		  (uint32_t)qdf_nbuf_len(nbuf),
		  qdf_nbuf_get_ext_list(nbuf));
	/* Set length in nbuf */
	if (!qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL");

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode, if the received packet matches any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets
	 * originated from any proxy STA.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  "%s free buffer for multicast packet",
			  __func__);
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				    hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (peer &&
		    peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: mac_addr:%pM msdu_len %d hdr_off %d",
		  __func__, peer->mac_addr.raw, msdu_len,
		  l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
		       qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  FL("vdev %pK osif_rx %pK"), vdev,
				  vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));
			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
						rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (struct ether_header *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
	return;
}
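
/*
 * Note the recovery step above: when a QoS frame arrives before ADDBA has
 * created the TID's REO queue, dp_rx_tid_setup_wifi3(peer, tid, 1,
 * IEEE80211_SEQ_MAX) installs a minimal queue with a BA window of 1 and an
 * unspecified start sequence, so later frames on that TID no longer take
 * the NULL-queue exception path.
 */
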
/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 *
 * Return: None
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct ether_header *eh;
	bool isBroadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			== QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
			  FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		isBroadcast = (IEEE80211_IS_BROADCAST
			       (eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (isBroadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}
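
/*
 * Delivery policy above is deliberately narrow: unless the vdev is a mesh
 * vdev, error frames reaching this handler are dropped outright; mesh
 * frames are additionally run through dp_rx_filter_mesh_packets() before
 * stats are updated and the frame is handed to the OS.
 */
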
/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	uint16_t rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found");
		goto fail;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
	rx_seq = (((*(uint16_t *)wh->i_seq) &
		   IEEE80211_SEQ_SEQ_MASK) >>
		  IEEE80211_SEQ_SEQ_SHIFT);

	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

	/* Can get only last fragment */
	if (fragno) {
		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Frag pkt seq# %d frag# %d consumed status %d !",
			  __func__, rx_seq, fragno, status);
		return;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
	return;
}
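
/*
 * Fragmented MPDUs take a different exit above: a non-zero fragment number
 * means this nbuf is the last fragment of a sequence already held by the
 * defrag engine, so it is passed to dp_rx_defrag_add_last_frag() instead
 * of being freed here. Only unfragmented frames raise the MIC error
 * callback (ol_ops->rx_mic_error) towards umac.
 */
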
/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to the REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
			hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), msdu_list.rbm[0]);

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
							  ring_desc,
							  &mpdu_desc_info,
							  &head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			rx_bufs_used += dp_rx_pn_error_handle(soc,
							      ring_desc,
							      &mpdu_desc_info,
							      &head, &tail,
							      quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			rx_bufs_used += dp_rx_2k_jump_handle(soc,
							     ring_desc,
							     &mpdu_desc_info,
							     &head, &tail,
							     quota);
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					rx_bufs_used, &head, &tail);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
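
/*
 * Contract with the caller: the return value is the number of Rx buffers
 * consumed from the pool (not ring entries), which the caller can fold
 * into its budget accounting. All freed descriptors are batched on the
 * local (head, tail) list and replenished above in a single
 * dp_rx_buffers_replenish() call against pdev/pool 0.
 */
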
/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to the WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
			hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error[wbm_err_info.reo_err_code],
					     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
							   "Got pkt with REO ERROR: %d",
							   wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "REO error %d detected",
						  wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error[wbm_err_info.rxdma_err_code],
					     1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_err_deliver(soc, nbuf,
							  rx_tlv_hdr, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer, rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer, rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}
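
/*
 * Design note: the handler above is deliberately two-pass. Pass 1 runs
 * inside the SRNG access session and only reaps: it unmaps each nbuf,
 * stashes the WBM error info in the nbuf's own TLV area, and queues
 * descriptors for replenish. Pass 2 walks the reaped nbuf list outside the
 * ring access and demultiplexes on the saved (wbm_err_src, reo/rxdma error
 * code), keeping the time spent between hal_srng_access_start()/end() to a
 * minimum.
 */
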
/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   void *rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio and
			 * the rbm does not indicate host SW buffers, then
			 * return the msdu_link descriptor without freeing
			 * the msdus (nbufs). Let these buffers be given to
			 * the NSS completion ring for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}
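
/*
 * The do/while loop above walks the MSDU link descriptor chain one hop
 * behind: each iteration reads the next hop's buf_info before returning
 * the previous descriptor to WBM, and terminates when buf_info.paddr reads
 * back as zero. A leading sw_cookie of HAL_RX_COOKIE_SPECIAL is treated as
 * a workaround case: no buffers are freed and the event is accounted as
 * HAL_RXDMA_ERR_WAR.
 */
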
/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
			hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}