/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h> /* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
    if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
        return false;

    rx_desc->magic = 0;
    return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
    return true;
}
#endif

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loopback
 *                            on the same vap or a different vap
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
                                          struct dp_peer *peer,
                                          uint8_t *rx_tlv_hdr,
                                          qdf_nbuf_t nbuf)
{
    struct dp_vdev *vdev = peer->vdev;
    struct dp_ast_entry *ase;
    uint16_t sa_idx = 0;
    uint8_t *data;

    /*
     * Multicast Echo Check is required only if vdev is STA and
     * received pkt is a multicast/broadcast pkt. Otherwise
     * skip the MEC check.
     */
    if (vdev->opmode != wlan_op_mode_sta)
        return false;

    if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
        return false;

    data = qdf_nbuf_data(nbuf);
    /*
     * If the received pkt's src mac addr matches the vdev
     * mac address, then drop the pkt as it is looped back.
     */
    if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
                      vdev->mac_addr.raw,
                      DP_MAC_ADDR_LEN)))
        return true;

    /*
     * In case of qwrap isolation mode, do not drop loopback packets.
     * In isolation mode, all packets from the wired stations need to go
     * to the root AP and loop back to reach the wireless stations, and
     * vice-versa.
     */
    if (qdf_unlikely(vdev->isolation_vdev))
        return false;

    /*
     * If the received pkt's src mac addr matches the MAC addr of a
     * wired PC behind the STA, or of a wireless STA behind the
     * repeater, then drop the pkt as it is looped back.
     */
    qdf_spin_lock_bh(&soc->ast_lock);
    if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
        sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

        if ((sa_idx < 0) ||
            (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                      "invalid sa_idx: %d", sa_idx);
            qdf_assert_always(0);
        }

        ase = soc->ast_table[sa_idx];
        if (!ase) {
            /* We do not get a peer map event for STA, and without
             * this event we don't know what the STA's sa_idx is.
             * For this reason the AST is still not associated to
             * any index position in ast_table.
             * In these kinds of scenarios where sa is valid but
             * ast is not in ast_table, we use the below API to get
             * the AST entry for the STA's own mac_address.
             */
            ase = dp_peer_ast_hash_find(soc,
                                        &data[DP_MAC_ADDR_LEN]);
        }
    } else {
        ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]);
    }

    if (ase) {
        ase->ast_idx = sa_idx;
        soc->ast_table[sa_idx] = ase;

        if (ase->pdev_id != vdev->pdev->pdev_id) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_DP,
                      QDF_TRACE_LEVEL_INFO,
                      "Detected DBDC Root AP %pM, %d %d",
                      &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
                      ase->pdev_id);
            return false;
        }

        if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
            (ase->peer != peer)) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_DP,
                      QDF_TRACE_LEVEL_INFO,
                      "received pkt with same src mac %pM",
                      &data[DP_MAC_ADDR_LEN]);

            return true;
        }
    }
    qdf_spin_unlock_bh(&soc->ast_lock);
    return false;
}
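
/*
 * Usage sketch (compiled out): this is how the NULL-queue handler later
 * in this file consumes dp_rx_mcast_echo_check() -- a true return means
 * the frame is a looped-back MCBC packet and must be dropped.
 */
#if 0
    if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
        /* this is a looped back MCBC pkt, drop it */
        qdf_nbuf_free(nbuf);
        return;
    }
#endif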

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *                                    HW (WBM) by address
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
                               uint8_t bm_action)
{
    struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
    void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
    void *hal_soc = soc->hal_soc;
    QDF_STATUS status = QDF_STATUS_E_FAILURE;
    void *src_srng_desc;

    if (!wbm_rel_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "WBM RELEASE RING not initialized");
        return status;
    }

    if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
                  wbm_rel_srng);
        DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
        goto done;
    }
    src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
    if (qdf_likely(src_srng_desc)) {
        /* Return link descriptor through WBM ring (SW2WBM) */
        hal_rx_msdu_link_desc_set(hal_soc,
                                  src_srng_desc, link_desc_addr,
                                  bm_action);
        status = QDF_STATUS_SUCCESS;
    } else {
        struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
                  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
                  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
    }
done:
    hal_srng_access_end(hal_soc, wbm_rel_srng);
    return status;
}
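
/*
 * Caller sketch (compiled out): releasing a link descriptor back to the
 * WBM idle list and checking the status. The two bm_action values used
 * in this file are HAL_BM_ACTION_PUT_IN_IDLE_LIST and
 * HAL_BM_ACTION_RELEASE_MSDU_LIST; the status check below is only an
 * illustrative assumption, callers in this file currently ignore it.
 */
#if 0
    if (dp_rx_link_desc_return_by_addr(soc, link_desc_addr,
                                       HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
        QDF_STATUS_SUCCESS)
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "link desc return failed");
#endif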

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *                            (WBM), following error handling
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
    void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

    return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
                                 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                                 union dp_rx_desc_list_elem_t **head,
                                 union dp_rx_desc_list_elem_t **tail,
                                 uint32_t quota)
{
    uint32_t rx_bufs_used = 0;
    void *link_desc_va;
    struct hal_buf_info buf_info;
    struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
    int i;
    uint8_t *rx_tlv_hdr;
    uint32_t tid;

    hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

    link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

    /* No UNMAP required -- this is "malloc_consistent" memory */
    hal_rx_msdu_list_get(link_desc_va, &msdu_list,
                         &mpdu_desc_info->msdu_count);

    for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
        struct dp_rx_desc *rx_desc =
            dp_rx_cookie_2_va_rxdma_buf(soc,
                                        msdu_list.sw_cookie[i]);

        qdf_assert(rx_desc);

        if (!dp_rx_desc_check_magic(rx_desc)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid rx_desc cookie=%d"),
                      msdu_list.sw_cookie[i]);
            return rx_bufs_used;
        }

        rx_bufs_used++;
        tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
                                        rx_desc->rx_buf_start);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Packet received with PN error for tid :%d", tid);

        rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
        if (hal_rx_encryption_info_valid(rx_tlv_hdr))
            hal_rx_print_pn(rx_tlv_hdr);

        /* Just free the buffers */
        qdf_nbuf_free(rx_desc->nbuf);

        dp_rx_add_to_free_desc_list(head, tail, rx_desc);
    }

    /* Return link descriptor through WBM ring (SW2WBM) */
    dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

    return rx_bufs_used;
}
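
/*
 * Sketch (compiled out): the head/tail free-list that dp_rx_msdus_drop()
 * accumulates is later handed back to the RXDMA refill ring, as done at
 * the bottom of dp_rx_err_process() for MAC id 0:
 */
#if 0
    rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, &mpdu_desc_info,
                                    &head, &tail, quota);
    if (rx_bufs_used)
        dp_rx_buffers_replenish(soc, 0,
                                &soc->pdev_list[0]->rx_refill_buf_ring,
                                &soc->rx_desc_buf[0],
                                rx_bufs_used, &head, &tail);
#endif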

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore PN check errors, or if DP
 * decides that the frame is still OK, the frame can be re-injected
 * back to REO to use some of the other features of REO, e.g.
 * duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
                      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                      union dp_rx_desc_list_elem_t **head,
                      union dp_rx_desc_list_elem_t **tail,
                      uint32_t quota)
{
    uint16_t peer_id;
    uint32_t rx_bufs_used = 0;
    struct dp_peer *peer;
    bool peer_pn_policy = false;

    peer_id = DP_PEER_METADATA_PEER_ID_GET(
                    mpdu_desc_info->peer_meta_data);

    peer = dp_peer_find_by_id(soc, peer_id);

    if (qdf_likely(peer)) {
        /*
         * TODO: Check for peer specific policies & set peer_pn_policy
         */
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "discard rx due to PN error for peer %pK "
                  "(%02x:%02x:%02x:%02x:%02x:%02x)",
                  peer,
                  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
                  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
                  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
    }
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "Packet received with PN error");

    /* No peer PN policy -- definitely drop */
    if (!peer_pn_policy)
        rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
                                        mpdu_desc_info,
                                        head, tail, quota);

    return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A), the protocol stack is invoked to generate a DELBA/DEAUTH frame.
 * For case B), the frame is normally dropped; no further action is taken.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
                     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                     union dp_rx_desc_list_elem_t **head,
                     union dp_rx_desc_list_elem_t **tail,
                     uint32_t quota)
{
    return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
                            head, tail, quota);
}

/**
 * dp_rx_chain_msdus() - Function to chain all MSDUs of an MPDU
 *                       to the pdev invalid peer list
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
                  uint8_t mac_id)
{
    bool mpdu_done = false;
    qdf_nbuf_t curr_nbuf = NULL;
    qdf_nbuf_t tmp_nbuf = NULL;

    /* TODO: Currently only single radio is supported, hence
     * pdev hard coded to '0' index
     */
    struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

    if (!dp_pdev->first_nbuf) {
        qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
        dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
        dp_pdev->first_nbuf = true;

        /* If the new nbuf received is the first msdu of the
         * amsdu and there are msdus in the invalid peer msdu
         * list, then let us free all the msdus of the invalid
         * peer msdu list.
         * This scenario can happen when we start receiving
         * a new a-msdu even before the previous a-msdu is completely
         * received.
         */
        curr_nbuf = dp_pdev->invalid_peer_head_msdu;
        while (curr_nbuf) {
            tmp_nbuf = curr_nbuf->next;
            qdf_nbuf_free(curr_nbuf);
            curr_nbuf = tmp_nbuf;
        }

        dp_pdev->invalid_peer_head_msdu = NULL;
        dp_pdev->invalid_peer_tail_msdu = NULL;
        hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
                                           &(dp_pdev->ppdu_info.rx_status));
    }

    if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
        hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
        qdf_assert_always(dp_pdev->first_nbuf == true);
        dp_pdev->first_nbuf = false;
        mpdu_done = true;
    }

    DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
                      dp_pdev->invalid_peer_tail_msdu,
                      nbuf);

    return mpdu_done;
}
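
/*
 * Usage sketch (compiled out): dp_rx_chain_msdus() is paired with the
 * invalid peer wrapper, as done in dp_rx_null_q_desc_handle() below --
 * the chained list is flushed once the last MSDU of the MPDU arrives.
 */
#if 0
    mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
    dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);
    if (mpdu_done) {
        pdev->invalid_peer_head_msdu = NULL;
        pdev->invalid_peer_tail_msdu = NULL;
    }
#endif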

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *                       on WBM ring
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in the non-BA case. This typically
 * may happen if aggregates are received on a QoS-enabled TID
 * while the Rx window size is still initialized to a value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 */
static void
dp_2k_jump_handle(struct dp_soc *soc,
                  qdf_nbuf_t nbuf,
                  uint8_t *rx_tlv_hdr,
                  uint16_t peer_id,
                  uint8_t tid)
{
    uint32_t ppdu_id;
    struct dp_peer *peer = NULL;
    struct dp_rx_tid *rx_tid = NULL;

    peer = dp_peer_find_by_id(soc, peer_id);
    if (!peer || peer->delete_in_progress) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "peer not found");
        goto free_nbuf;
    }
    rx_tid = &peer->rx_tid[tid];
    if (qdf_unlikely(rx_tid == NULL)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "rx_tid is NULL!!");
        goto free_nbuf;
    }
    qdf_spin_lock_bh(&rx_tid->tid_lock);
    ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);
    if (rx_tid->ppdu_id_2k != ppdu_id) {
        rx_tid->ppdu_id_2k = ppdu_id;
        qdf_spin_unlock_bh(&rx_tid->tid_lock);
        goto free_nbuf;
    }
    if (!rx_tid->delba_tx_status) {
        rx_tid->delba_tx_retry++;
        rx_tid->delba_tx_status = 1;
        qdf_spin_unlock_bh(&rx_tid->tid_lock);
        soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
                                        peer->ctrl_peer,
                                        peer->mac_addr.raw,
                                        tid,
                                        peer->vdev->ctrl_vdev);
    } else {
        qdf_spin_unlock_bh(&rx_tid->tid_lock);
    }

free_nbuf:
    qdf_nbuf_free(nbuf);
}
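
/*
 * Dispatch sketch (compiled out): dp_rx_wbm_err_process() below invokes
 * this handler for HAL_REO_ERR_REGULAR_FRAME_2K_JUMP, deriving peer_id
 * and tid from the first MSDU's TLV header:
 */
#if 0
    if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
        peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
        tid = hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
    }
    dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr, peer_id, tid);
#endif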

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *                              descriptor violation on either a
 *                              REO or WBM ring
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 *
 * This function handles NULL queue descriptor violations arising out of
 * a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QoS-enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QoS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination ring and in a WBM
 * release ring.
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc,
                         qdf_nbuf_t nbuf,
                         uint8_t *rx_tlv_hdr,
                         uint8_t pool_id)
{
    uint32_t pkt_len, l2_hdr_offset;
    uint16_t msdu_len;
    struct dp_vdev *vdev;
    uint16_t peer_id = 0xFFFF;
    struct dp_peer *peer = NULL;
    uint8_t tid;

    qdf_nbuf_set_rx_chfrag_start(nbuf,
                                 hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
    qdf_nbuf_set_rx_chfrag_end(nbuf,
                               hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

    l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
    msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
    pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              FL("Len %d Extn list %pK "),
              (uint32_t)qdf_nbuf_len(nbuf),
              qdf_nbuf_get_ext_list(nbuf));
    /* Set length in nbuf */
    if (!qdf_nbuf_get_ext_list(nbuf))
        qdf_nbuf_set_pktlen(nbuf, pkt_len);

    /*
     * Check if DMA completed -- msdu_done is the last bit
     * to be written
     */
    if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("MSDU DONE failure"));

        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_INFO);
        qdf_assert(0);
    }

    peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
    peer = dp_peer_find_by_id(soc, peer_id);

    if (!peer) {
        bool mpdu_done = false;
        struct dp_pdev *pdev = soc->pdev_list[pool_id];

        QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL");

        mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
        /* Trigger invalid peer handler wrapper */
        dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

        if (mpdu_done) {
            pdev->invalid_peer_head_msdu = NULL;
            pdev->invalid_peer_tail_msdu = NULL;
        }
        return;
    }

    vdev = peer->vdev;
    if (!vdev) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("INVALID vdev %pK OR osif_rx"), vdev);
        /* Drop & free packet */
        qdf_nbuf_free(nbuf);
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        return;
    }

    /*
     * Advance the packet start pointer by total size of
     * pre-header TLVs
     */
    if (qdf_nbuf_get_ext_list(nbuf))
        qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
    else
        qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

    if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
        /* this is a looped back MCBC pkt, drop it */
        qdf_nbuf_free(nbuf);
        return;
    }
    /*
     * In qwrap mode, if the received packet matches any of the vdev
     * mac addresses, drop it. Do not receive multicast packets
     * originating from any proxy STA.
     */
    if (check_qwrap_multicast_loopback(vdev, nbuf)) {
        qdf_nbuf_free(nbuf);
        return;
    }

    if (qdf_unlikely((peer->nawds_enabled == true) &&
                     hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
        QDF_TRACE(QDF_MODULE_ID_DP,
                  QDF_TRACE_LEVEL_DEBUG,
                  "%s free buffer for multicast packet",
                  __func__);
        DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
        qdf_nbuf_free(nbuf);
        return;
    }

    if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
                                hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
        QDF_TRACE(QDF_MODULE_ID_DP,
                  QDF_TRACE_LEVEL_ERROR,
                  FL("mcast Policy Check Drop pkt"));
        /* Drop & free packet */
        qdf_nbuf_free(nbuf);
        return;
    }

    /* WDS Source Port Learning */
    if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
                   vdev->wds_enabled))
        dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

    if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
        /* TODO: Assuming that qos_control_valid also indicates
         * unicast. Should we check this?
         */
        tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
        if (peer &&
            peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
            /* IEEE80211_SEQ_MAX indicates invalid start_seq */
            dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
        }
    }

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "%s: p_id %d msdu_len %d hdr_off %d",
              __func__, peer_id, msdu_len, l2_hdr_offset);

    print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
                   qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

    if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
        qdf_nbuf_set_next(nbuf, NULL);
        dp_rx_deliver_raw(vdev, nbuf, peer);
    } else {
        if (qdf_unlikely(peer->bss_peer)) {
            QDF_TRACE(QDF_MODULE_ID_DP,
                      QDF_TRACE_LEVEL_INFO,
                      FL("received pkt with same src MAC"));
            /* Drop & free packet */
            qdf_nbuf_free(nbuf);
            return;
        }

        if (vdev->osif_rx) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                      FL("vdev %pK osif_rx %pK"), vdev,
                      vdev->osif_rx);
            qdf_nbuf_set_next(nbuf, NULL);
            vdev->osif_rx(vdev->osif_vdev, nbuf);
            DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1,
                              qdf_nbuf_len(nbuf),
                              hal_rx_msdu_end_da_is_mcbc_get(
                                  rx_tlv_hdr));
            DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1,
                             qdf_nbuf_len(nbuf));
        } else {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("INVALID vdev %pK OR osif_rx"), vdev);
            DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        }
    }
}
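
/*
 * Layout note with a worked example (values hypothetical): the nbuf as
 * reaped starts with RX_PKT_TLVS_LEN bytes of HW TLVs, then
 * l2_hdr_offset bytes of L3 padding, then the MSDU itself. So for
 * msdu_len = 1500 and l2_hdr_offset = 2, dp_rx_null_q_desc_handle()
 * sets pkt_len = 1500 + 2 + RX_PKT_TLVS_LEN and later pulls
 * (l2_hdr_offset + RX_PKT_TLVS_LEN) bytes so that qdf_nbuf_data()
 * points at the start of the ethernet header.
 */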

/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
    uint32_t pkt_len, l2_hdr_offset;
    uint16_t msdu_len;
    struct dp_vdev *vdev;
    uint16_t peer_id = 0xFFFF;
    struct dp_peer *peer = NULL;
    struct ether_header *eh;
    bool isBroadcast;

    /*
     * Check if DMA completed -- msdu_done is the last bit
     * to be written
     */
    if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("MSDU DONE failure"));

        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_INFO);
        qdf_assert(0);
    }

    peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
    peer = dp_peer_find_by_id(soc, peer_id);

    l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
    msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
    pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

    /* Set length in nbuf */
    qdf_nbuf_set_pktlen(nbuf, pkt_len);

    qdf_nbuf_set_next(nbuf, NULL);

    qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
    qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

    if (!peer) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("peer is NULL"));
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
                         qdf_nbuf_len(nbuf));
        /* Trigger invalid peer handler wrapper */
        dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
        return;
    }

    vdev = peer->vdev;
    if (!vdev) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("INVALID vdev %pK OR osif_rx"), vdev);
        /* Drop & free packet */
        qdf_nbuf_free(nbuf);
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        return;
    }

    /* Drop & free packet if mesh mode not enabled */
    if (!vdev->mesh_vdev) {
        qdf_nbuf_free(nbuf);
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        return;
    }

    /*
     * Advance the packet start pointer by total size of
     * pre-header TLVs
     */
    qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

    if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
            == QDF_STATUS_SUCCESS) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
                  FL("mesh pkt filtered"));
        DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

        qdf_nbuf_free(nbuf);
        return;
    }
    dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

    if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
                     (vdev->rx_decap_type ==
                      htt_cmn_pkt_type_ethernet))) {
        eh = (struct ether_header *)qdf_nbuf_data(nbuf);
        isBroadcast = (IEEE80211_IS_BROADCAST
                       (eh->ether_dhost)) ? 1 : 0;
        if (isBroadcast) {
            DP_STATS_INC_PKT(peer, rx.bcast, 1,
                             qdf_nbuf_len(nbuf));
        }
    }

    if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
        dp_rx_deliver_raw(vdev, nbuf, peer);
    } else {
        DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
        vdev->osif_rx(vdev->osif_vdev, nbuf);
    }
}

/**
 * dp_rx_process_mic_error() - Function to pass mic error indication to umac
 * @soc: DP SOC handle
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
void
dp_rx_process_mic_error(struct dp_soc *soc,
                        qdf_nbuf_t nbuf,
                        uint8_t *rx_tlv_hdr)
{
    struct dp_vdev *vdev = NULL;
    struct dp_pdev *pdev = NULL;
    struct ol_if_ops *tops = NULL;
    struct ieee80211_frame *wh;
    uint8_t *rx_pkt_hdr;
    struct dp_peer *peer;
    uint16_t peer_id, rx_seq, fragno;
    unsigned int tid;
    QDF_STATUS status;

    if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
        return;

    rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
    wh = (struct ieee80211_frame *)rx_pkt_hdr;

    peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
    peer = dp_peer_find_by_id(soc, peer_id);
    if (!peer) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "peer not found");
        goto fail;
    }

    vdev = peer->vdev;
    if (!vdev) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "VDEV not found");
        goto fail;
    }

    pdev = vdev->pdev;
    if (!pdev) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "PDEV not found");
        goto fail;
    }

    tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
    rx_seq = (((*(uint16_t *)wh->i_seq) &
               IEEE80211_SEQ_SEQ_MASK) >>
              IEEE80211_SEQ_SEQ_SHIFT);

    fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

    /* Can get only last fragment */
    if (fragno) {
        status = dp_rx_defrag_add_last_frag(soc, peer,
                                            tid, rx_seq, nbuf);

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "%s: Frag pkt seq# %d frag# %d consumed status %d !",
                  __func__, rx_seq, fragno, status);
        return;
    }

    tops = pdev->soc->cdp_soc.ol_ops;
    if (tops->rx_mic_error)
        tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
    qdf_nbuf_free(nbuf);
}
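
/*
 * Dispatch sketch (compiled out): the WBM error path below routes
 * HAL_RXDMA_ERR_TKIP_MIC frames to this handler and bumps the per-peer
 * stat, exactly as in dp_rx_wbm_err_process():
 */
#if 0
    case HAL_RXDMA_ERR_TKIP_MIC:
        dp_rx_process_mic_error(soc, nbuf, rx_tlv_hdr);
        nbuf = next;
        if (peer)
            DP_STATS_INC(peer, rx.err.mic_err, 1);
        continue;
#endif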

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to the REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
    void *hal_soc;
    void *ring_desc;
    union dp_rx_desc_list_elem_t *head = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;
    uint32_t rx_bufs_used = 0;
    uint8_t buf_type;
    uint8_t error, rbm;
    struct hal_rx_mpdu_desc_info mpdu_desc_info;
    struct hal_buf_info hbi;
    struct dp_pdev *dp_pdev;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    uint32_t cookie = 0;
    void *link_desc_va;
    struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
    uint16_t num_msdus;

    /* Debug -- Remove later */
    qdf_assert(soc && hal_ring);

    hal_soc = soc->hal_soc;

    /* Debug -- Remove later */
    qdf_assert(hal_soc);

    if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access Failed -- %pK"), hal_ring);
        goto done;
    }

    while (qdf_likely(quota-- && (ring_desc =
                hal_srng_dst_get_next(hal_soc, hal_ring)))) {

        DP_STATS_INC(soc, rx.err_ring_pkts, 1);

        error = HAL_RX_ERROR_STATUS_GET(ring_desc);

        qdf_assert(error == HAL_REO_ERROR_DETECTED);

        buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
        /*
         * For REO error ring, expect only MSDU LINK DESC
         */
        qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

        cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
        /*
         * check for the magic number in the sw cookie
         */
        qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
                          LINK_DESC_ID_START);

        /*
         * Check if the buffer is to be processed on this processor
         */
        rbm = hal_rx_ret_buf_manager_get(ring_desc);

        hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
        link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
        hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus);

        if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
                         (msdu_list.rbm[0] !=
                          HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
            /* TODO */
            /* Call appropriate handler */
            DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid RBM %d"), msdu_list.rbm[0]);

            /* Return link descriptor through WBM ring (SW2WBM) */
            dp_rx_link_desc_return(soc, ring_desc,
                                   HAL_BM_ACTION_RELEASE_MSDU_LIST);
            continue;
        }

        /* Get the MPDU DESC info */
        hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

        if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
            /* TODO */
            rx_bufs_used += dp_rx_frag_handle(soc,
                                              ring_desc, &mpdu_desc_info,
                                              &head, &tail, quota);
            DP_STATS_INC(soc, rx.rx_frags, 1);
            continue;
        }

        if (hal_rx_reo_is_pn_error(ring_desc)) {
            /* TODO */
            DP_STATS_INC(soc,
                         rx.err.
                         reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
                         1);
            rx_bufs_used += dp_rx_pn_error_handle(soc,
                                                  ring_desc,
                                                  &mpdu_desc_info,
                                                  &head, &tail, quota);
            continue;
        }

        if (hal_rx_reo_is_2k_jump(ring_desc)) {
            /* TODO */
            DP_STATS_INC(soc,
                         rx.err.
                         reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
                         1);
            rx_bufs_used += dp_rx_2k_jump_handle(soc,
                                                 ring_desc,
                                                 &mpdu_desc_info,
                                                 &head, &tail, quota);
            continue;
        }
    }

done:
    hal_srng_access_end(hal_soc, hal_ring);

    if (soc->rx.flags.defrag_timeout_check)
        dp_rx_defrag_waitlist_flush(soc);

    /* Assume MAC id = 0, owner = 0 */
    if (rx_bufs_used) {
        dp_pdev = soc->pdev_list[0];
        dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
        rx_desc_pool = &soc->rx_desc_buf[0];

        dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
                                rx_bufs_used, &head, &tail);
    }

    return rx_bufs_used; /* Assume no scale factor for now */
}
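
/*
 * Caller sketch (compiled out, hypothetical): dp_rx_err_process() is
 * designed to be driven from the interrupt/NAPI service loop with a
 * per-visit quota; the return value feeds the caller's budget
 * accounting. The variable names and the exact wiring to the soc's REO
 * exception ring here are illustrative assumptions.
 */
#if 0
    uint32_t work_done;

    work_done = dp_rx_err_process(soc, soc->reo_exception_ring.hal_srng,
                                  budget);
    budget -= work_done;
#endif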

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to the WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
    void *hal_soc;
    void *ring_desc;
    struct dp_rx_desc *rx_desc;
    union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
    union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
    uint32_t rx_bufs_used = 0;
    uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
    uint8_t buf_type, rbm;
    uint32_t rx_buf_cookie;
    uint8_t mac_id;
    struct dp_pdev *dp_pdev;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    uint8_t *rx_tlv_hdr;
    qdf_nbuf_t nbuf_head = NULL;
    qdf_nbuf_t nbuf_tail = NULL;
    qdf_nbuf_t nbuf, next;
    struct hal_wbm_err_desc_info wbm_err_info = { 0 };
    uint8_t pool_id;
    uint16_t peer_id = 0xFFFF;
    uint8_t tid = 0;

    /* Debug -- Remove later */
    qdf_assert(soc && hal_ring);

    hal_soc = soc->hal_soc;

    /* Debug -- Remove later */
    qdf_assert(hal_soc);

    if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access Failed -- %pK"), hal_ring);
        goto done;
    }

    while (qdf_likely(quota-- && (ring_desc =
                hal_srng_dst_get_next(hal_soc, hal_ring)))) {

        /* XXX */
        buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

        /*
         * For WBM ring, expect only MSDU buffers
         */
        qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

        qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
                    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
                   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
                    == HAL_RX_WBM_ERR_SRC_REO));

        /*
         * Check if the buffer is to be processed on this processor
         */
        rbm = hal_rx_ret_buf_manager_get(ring_desc);

        if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
            /* TODO */
            /* Call appropriate handler */
            DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid RBM %d"), rbm);
            continue;
        }

        rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

        rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
        qdf_assert(rx_desc);

        if (!dp_rx_desc_check_magic(rx_desc)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid rx_desc cookie=%d"),
                      rx_buf_cookie);
            continue;
        }

        nbuf = rx_desc->nbuf;
        qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

        /*
         * save the wbm desc info in nbuf TLV. We will need this
         * info when we do the actual nbuf processing
         */
        hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info);
        wbm_err_info.pool_id = rx_desc->pool_id;
        hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
                                       &wbm_err_info);

        rx_bufs_reaped[rx_desc->pool_id]++;

        DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
        dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
                                    &tail[rx_desc->pool_id],
                                    rx_desc);
    }
done:
    hal_srng_access_end(hal_soc, hal_ring);

    for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
        if (rx_bufs_reaped[mac_id]) {
            dp_pdev = soc->pdev_list[mac_id];
            dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
            rx_desc_pool = &soc->rx_desc_buf[mac_id];

            dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                    rx_desc_pool,
                                    rx_bufs_reaped[mac_id],
                                    &head[mac_id], &tail[mac_id]);
            rx_bufs_used += rx_bufs_reaped[mac_id];
        }
    }

    nbuf = nbuf_head;
    while (nbuf) {
        rx_tlv_hdr = qdf_nbuf_data(nbuf);
        /*
         * retrieve the wbm desc info from nbuf TLV, so we can
         * handle error cases appropriately
         */
        hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

        /* Set queue_mapping in nbuf to 0 */
        dp_set_rx_queue(nbuf, 0);

        next = nbuf->next;
        if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
            if (wbm_err_info.reo_psh_rsn
                == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

                DP_STATS_INC(soc,
                             rx.err.reo_error
                             [wbm_err_info.reo_err_code], 1);

                switch (wbm_err_info.reo_err_code) {
                /*
                 * Handling for packets which have NULL REO
                 * queue descriptor
                 */
                case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
                    pool_id = wbm_err_info.pool_id;
                    QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
                                       "Got pkt with REO ERROR: %d",
                                       wbm_err_info.reo_err_code);
                    dp_rx_null_q_desc_handle(soc,
                                             nbuf,
                                             rx_tlv_hdr,
                                             pool_id);
                    nbuf = next;
                    continue;
                /* TODO */
                /* Add per error code accounting */
                case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
                    pool_id = wbm_err_info.pool_id;
                    QDF_TRACE(QDF_MODULE_ID_DP,
                              QDF_TRACE_LEVEL_ERROR,
                              "Got pkt with REO ERROR: %d",
                              wbm_err_info.reo_err_code);
                    if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
                        peer_id =
                        hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
                        tid =
                        hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
                    }
                    dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
                                      peer_id, tid);
                    nbuf = next;
                    continue;
                default:
                    QDF_TRACE(QDF_MODULE_ID_DP,
                              QDF_TRACE_LEVEL_ERROR,
                              "REO error %d detected",
                              wbm_err_info.reo_err_code);
                }
            }
        } else if (wbm_err_info.wbm_err_src ==
                   HAL_RX_WBM_ERR_SRC_RXDMA) {
            if (wbm_err_info.rxdma_psh_rsn
                == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
                struct dp_peer *peer = NULL;
                uint16_t peer_id = 0xFFFF;

                DP_STATS_INC(soc,
                             rx.err.rxdma_error
                             [wbm_err_info.rxdma_err_code], 1);
                peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
                peer = dp_peer_find_by_id(soc, peer_id);

                switch (wbm_err_info.rxdma_err_code) {
                case HAL_RXDMA_ERR_UNENCRYPTED:
                    dp_rx_err_deliver(soc, nbuf, rx_tlv_hdr);
                    nbuf = next;
                    continue;

                case HAL_RXDMA_ERR_TKIP_MIC:
                    dp_rx_process_mic_error(soc, nbuf,
                                            rx_tlv_hdr);
                    nbuf = next;
                    if (peer)
                        DP_STATS_INC(peer, rx.err.mic_err, 1);
                    continue;

                case HAL_RXDMA_ERR_DECRYPT:
                    if (peer)
                        DP_STATS_INC(peer, rx.err.decrypt_err, 1);
                    QDF_TRACE(QDF_MODULE_ID_DP,
                              QDF_TRACE_LEVEL_DEBUG,
                              "Packet received with Decrypt error");
                    break;

                default:
                    QDF_TRACE(QDF_MODULE_ID_DP,
                              QDF_TRACE_LEVEL_DEBUG,
                              "RXDMA error %d",
                              wbm_err_info.rxdma_err_code);
                }
            }
        } else {
            /* Should not come here */
            qdf_assert(0);
        }

        hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_DEBUG);
        qdf_nbuf_free(nbuf);
        nbuf = next;
    }
    return rx_bufs_used; /* Assume no scale factor for now */
}
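
/*
 * Design note: dp_rx_wbm_err_process() above runs in two phases. Under
 * the SRNG lock it only reaps descriptors, saves the WBM error info
 * into each nbuf's TLV area (hal_rx_wbm_err_info_set_in_tlv) and
 * replenishes buffers; the per-error-code handling then walks the
 * accumulated nbuf list after hal_srng_access_end(), keeping the ring
 * critical section short.
 */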

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
                   void *rxdma_dst_ring_desc,
                   union dp_rx_desc_list_elem_t **head,
                   union dp_rx_desc_list_elem_t **tail)
{
    void *rx_msdu_link_desc;
    qdf_nbuf_t msdu;
    qdf_nbuf_t last;
    struct hal_rx_msdu_list msdu_list;
    uint16_t num_msdus;
    struct hal_buf_info buf_info;
    void *p_buf_addr_info;
    void *p_last_buf_addr_info;
    uint32_t rx_bufs_used = 0;
    uint32_t msdu_cnt;
    uint32_t i;
    uint8_t push_reason;
    uint8_t rxdma_error_code = 0;
    uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
    struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

    msdu = 0;

    last = NULL;

    hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
                                 &p_last_buf_addr_info, &msdu_cnt);

    push_reason =
        hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
    if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
        rxdma_error_code =
            hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
    }

    do {
        rx_msdu_link_desc =
            dp_rx_cookie_2_link_desc_va(soc, &buf_info);

        qdf_assert(rx_msdu_link_desc);

        hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus);

        if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
            /* If the msdus belong to an NSS offloaded radio and
             * the rbm is not SW1_BM, then return the msdu_link
             * descriptor without freeing the msdus (nbufs). Let
             * these buffers be given to the NSS completion ring
             * for NSS to free them.
             * Else iterate through the msdu link desc list and
             * free each msdu in the list.
             */
            if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
                wlan_cfg_get_dp_pdev_nss_enabled(
                                pdev->wlan_cfg_ctx))
                bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
            else {
                for (i = 0; i < num_msdus; i++) {
                    struct dp_rx_desc *rx_desc =
                        dp_rx_cookie_2_va_rxdma_buf(soc,
                            msdu_list.sw_cookie[i]);
                    qdf_assert(rx_desc);
                    msdu = rx_desc->nbuf;

                    qdf_nbuf_unmap_single(soc->osdev, msdu,
                                          QDF_DMA_FROM_DEVICE);

                    QDF_TRACE(QDF_MODULE_ID_DP,
                              QDF_TRACE_LEVEL_DEBUG,
                              "[%s][%d] msdu_nbuf=%pK ",
                              __func__, __LINE__, msdu);

                    qdf_nbuf_free(msdu);
                    rx_bufs_used++;
                    dp_rx_add_to_free_desc_list(head,
                                                tail, rx_desc);
                }
            }
        } else {
            rxdma_error_code = HAL_RXDMA_ERR_WAR;
        }

        hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
                                      &p_buf_addr_info);

        dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
        p_last_buf_addr_info = p_buf_addr_info;

    } while (buf_info.paddr);

    DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

    if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Packet received with Decrypt error");
    }

    return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
    struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
    int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
    void *hal_soc;
    void *rxdma_dst_ring_desc;
    void *err_dst_srng;
    union dp_rx_desc_list_elem_t *head = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    uint32_t work_done = 0;
    uint32_t rx_bufs_used = 0;

#ifdef DP_INTR_POLL_BASED
    if (!pdev)
        return 0;
#endif
    err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

    if (!err_dst_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
                  __func__, __LINE__, err_dst_srng);
        return 0;
    }

    hal_soc = soc->hal_soc;

    qdf_assert(hal_soc);

    if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s %d : HAL Monitor Destination Ring access Failed -- %pK",
                  __func__, __LINE__, err_dst_srng);
        return 0;
    }

    while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
            hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

        rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
                                           rxdma_dst_ring_desc,
                                           &head, &tail);
    }

    hal_srng_access_end(hal_soc, err_dst_srng);

    if (rx_bufs_used) {
        dp_rxdma_srng = &pdev->rx_refill_buf_ring;
        rx_desc_pool = &soc->rx_desc_buf[mac_id];

        dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                rx_desc_pool, rx_bufs_used,
                                &head, &tail);

        work_done += rx_bufs_used;
    }

    return work_done;
}
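
/*
 * Caller sketch (compiled out, hypothetical): dp_rxdma_err_process() is
 * invoked per MAC from the service loop; a caller iterating all MACs
 * could look like the following. The reuse of MAX_PDEV_CNT as the loop
 * bound and the budget accounting are illustrative assumptions only.
 */
#if 0
    uint32_t mac_id;

    for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++)
        work_done += dp_rxdma_err_process(soc, mac_id, budget);
#endif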