/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) {
		return false;
	}
	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 *			      back on same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 *
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt; otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

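	/*
	 * Note: after HW decap the frame starts with an 802.3 header,
	 * i.e. a 6-byte DA followed by a 6-byte SA, so the source MAC
	 * of the received frame lives at data[DP_MAC_ADDR_LEN]
	 * (assuming DP_MAC_ADDR_LEN is the 6-byte MAC address size).
	 */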
	data = qdf_nbuf_data(nbuf);
	/*
	 * if the received pkt's src mac addr matches with the vdev
	 * mac address then drop the pkt as it is looped back
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			vdev->mac_addr.raw,
			DP_MAC_ADDR_LEN)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/* if the received pkt's src mac addr matches with the
	 * wired PC's MAC addr which is behind the STA or with
	 * wireless STAs' MAC addr which are behind the Repeater,
	 * then drop the pkt as it is looped back
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		if (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kinds of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_hash_find(soc,
						    &data[DP_MAC_ADDR_LEN]);
		}
	} else
		ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]);

	if (ase) {
		ase->ast_idx = sa_idx;
		soc->ast_table[sa_idx] = ase;

		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[DP_MAC_ADDR_LEN]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
				src_srng_desc, link_desc_addr, bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
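/*
 * Note on bm_action, as used in this file: HAL_BM_ACTION_PUT_IN_IDLE_LIST
 * returns only the MSDU link descriptor to the WBM idle list, while
 * HAL_BM_ACTION_RELEASE_MSDU_LIST also hands the MSDU buffers referenced
 * by it back to their return buffer manager (e.g. for NSS-offloaded
 * radios, where the host must not free the nbufs). A typical caller:
 *
 *	dp_rx_link_desc_return(soc, ring_desc,
 *			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 */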
/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP determines that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK "
			  "(%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 *	A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 *	B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}
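/*
 * Note: on the REO error ring both cases above currently end up dropping
 * the MSDUs via dp_rx_msdus_drop(); the DELBA/re-setup path for 2K jumps
 * reported through the WBM release ring is handled by dp_2k_jump_handle()
 * further below.
 */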
/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
				&(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in non BA case. This typically
 * may happen if aggregates are received on a QoS enabled TID
 * while the Rx window size is still initialized to the value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 * Return: void
 */
static void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(rx_tid == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);
	if (rx_tid->ppdu_id_2k != ppdu_id) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
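	/*
	 * A repeated 2k-jump within the same PPDU suggests a stale BA
	 * session: send a DELBA with reason QOS_SETUP_REQUIRED so the
	 * peer tears down and re-negotiates the session. delba_tx_status
	 * keeps a single DELBA outstanding; delba_tx_retry counts the
	 * attempts.
	 */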
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid,
						peer->vdev->ctrl_vdev,
						rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	qdf_nbuf_free(nbuf);
	return;
}

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QoS enabled TID before the
 * ADDBA negotiation for that TID, which is when the TID queue is set up.
 * Or it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: void
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr,
			 uint8_t pool_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint8_t tid;
	struct ether_header *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
			hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
			hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  FL("Len %d Extn list %pK "),
		  (uint32_t)qdf_nbuf_len(nbuf),
		  qdf_nbuf_get_ext_list(nbuf));
	/* Set length in nbuf */
	if (!qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

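	/*
	 * Without a peer the destination vdev is unknown; chain the MSDUs
	 * of this MPDU on the pdev invalid-peer list and, once the whole
	 * MPDU has been received, hand it to the invalid peer handler.
	 */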
	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL");

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets
	 * originating from any proxy STA.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  "%s free buffer for multicast packet",
			  __func__);
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

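	/*
	 * A NULL-queue error on a QoS TID usually means the REO queue for
	 * that TID was never set up (no ADDBA yet); set it up here so that
	 * subsequent frames on this TID do not hit the same error.
	 */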
	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (peer &&
		    peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: p_id %d msdu_len %d hdr_off %d",
		  __func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
		       qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  FL("vdev %pK osif_rx %pK"), vdev,
				  vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));
			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
						rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (struct ether_header *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
	return;
}

/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	struct ether_header *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			== QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
			  FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		is_broadcast = IEEE80211_IS_BROADCAST(eh->ether_dhost);
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: DP SOC handle
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
void
dp_rx_process_mic_error(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	struct dp_peer *peer;
	uint16_t peer_id, rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found");
		goto fail;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
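	/*
	 * The 12-bit sequence number sits in bits 4-15 of the 16-bit
	 * sequence-control field; mask and shift it out of i_seq.
	 */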
	rx_seq = (((*(uint16_t *)wh->i_seq) &
			IEEE80211_SEQ_SEQ_MASK) >>
			IEEE80211_SEQ_SEQ_SHIFT);

	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

	/* Can get only last fragment */
	if (fragno) {
		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Frag pkt seq# %d frag# %d consumed status %d !",
			  __func__, rx_seq, fragno, status);
		return;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
	return;
}

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
					LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

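		/*
		 * Buffers whose return buffer manager is neither the SW
		 * RBM nor the WBM idle-descriptor list are not owned by
		 * the host; return the link descriptor with
		 * RELEASE_MSDU_LIST so WBM releases the MSDU buffers too.
		 */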
		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), msdu_list.rbm[0]);

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);
			rx_bufs_used += dp_rx_pn_error_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);
			rx_bufs_used += dp_rx_2k_jump_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					rx_bufs_used, &head, &tail);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint16_t peer_id = 0xFFFF;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

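	/*
	 * The RxDMA refill rings are replenished above, before the reaped
	 * nbufs are walked, so the hardware is topped up while the (slower)
	 * per-packet error handling below runs.
	 */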
	nbuf = nbuf_head;
	while (nbuf) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
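		/*
		 * Demultiplex on the error source recorded by WBM: errors
		 * pushed by REO vs errors pushed by RXDMA.
		 */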
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
				== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					rx.err.reo_error
					[wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
						"Got pkt with REO ERROR: %d",
						wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc,
								 nbuf,
								 rx_tlv_hdr,
								 pool_id);
					nbuf = next;
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					continue;
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "REO error %d detected",
						  wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
				== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				struct dp_peer *peer = NULL;
				uint16_t peer_id = 0xFFFF;

				DP_STATS_INC(soc,
					rx.err.rxdma_error
					[wbm_err_info.rxdma_err_code], 1);
				peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
				peer = dp_peer_find_by_id(soc, peer_id);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_err_deliver(soc,
							  nbuf,
							  rx_tlv_hdr);
					nbuf = next;
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc,
								nbuf,
								rx_tlv_hdr);
					nbuf = next;
					if (peer)
						DP_STATS_INC(peer, rx.err.mic_err, 1);
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer, rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   void *rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

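	/*
	 * Walk the chain of MSDU link descriptors for this MPDU: free (or
	 * hand back) the MSDUs referenced by each link descriptor, then
	 * return the link descriptor itself to WBM.
	 */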
1422 */ 1423 if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM && 1424 wlan_cfg_get_dp_pdev_nss_enabled( 1425 pdev->wlan_cfg_ctx)) 1426 bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST; 1427 else { 1428 for (i = 0; i < num_msdus; i++) { 1429 struct dp_rx_desc *rx_desc = 1430 dp_rx_cookie_2_va_rxdma_buf(soc, 1431 msdu_list.sw_cookie[i]); 1432 qdf_assert(rx_desc); 1433 msdu = rx_desc->nbuf; 1434 1435 qdf_nbuf_unmap_single(soc->osdev, msdu, 1436 QDF_DMA_FROM_DEVICE); 1437 1438 QDF_TRACE(QDF_MODULE_ID_DP, 1439 QDF_TRACE_LEVEL_DEBUG, 1440 "[%s][%d] msdu_nbuf=%pK ", 1441 __func__, __LINE__, msdu); 1442 1443 qdf_nbuf_free(msdu); 1444 rx_bufs_used++; 1445 dp_rx_add_to_free_desc_list(head, 1446 tail, rx_desc); 1447 } 1448 } 1449 } else { 1450 rxdma_error_code = HAL_RXDMA_ERR_WAR; 1451 } 1452 1453 hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info, 1454 &p_buf_addr_info); 1455 1456 dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action); 1457 p_last_buf_addr_info = p_buf_addr_info; 1458 1459 } while (buf_info.paddr); 1460 1461 DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1); 1462 1463 if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) { 1464 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1465 "Packet received with Decrypt error"); 1466 } 1467 1468 return rx_bufs_used; 1469 } 1470 1471 /** 1472 * dp_rxdma_err_process() - RxDMA error processing functionality 1473 * 1474 * @soc: core txrx main contex 1475 * @mac_id: mac id which is one of 3 mac_ids 1476 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced 1477 * @quota: No. of units (packets) that can be serviced in one shot. 1478 1479 * Return: num of buffers processed 1480 */ 1481 uint32_t 1482 dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) 1483 { 1484 struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); 1485 int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id); 1486 void *hal_soc; 1487 void *rxdma_dst_ring_desc; 1488 void *err_dst_srng; 1489 union dp_rx_desc_list_elem_t *head = NULL; 1490 union dp_rx_desc_list_elem_t *tail = NULL; 1491 struct dp_srng *dp_rxdma_srng; 1492 struct rx_desc_pool *rx_desc_pool; 1493 uint32_t work_done = 0; 1494 uint32_t rx_bufs_used = 0; 1495 1496 if (!pdev) 1497 return 0; 1498 1499 err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng; 1500 1501 if (!err_dst_srng) { 1502 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1503 "%s %d : HAL Monitor Destination Ring Init \ 1504 Failed -- %pK", 1505 __func__, __LINE__, err_dst_srng); 1506 return 0; 1507 } 1508 1509 hal_soc = soc->hal_soc; 1510 1511 qdf_assert(hal_soc); 1512 1513 if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) { 1514 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1515 "%s %d : HAL Monitor Destination Ring Init \ 1516 Failed -- %pK", 1517 __func__, __LINE__, err_dst_srng); 1518 return 0; 1519 } 1520 1521 while (qdf_likely(quota-- && (rxdma_dst_ring_desc = 1522 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { 1523 1524 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, 1525 rxdma_dst_ring_desc, 1526 &head, &tail); 1527 } 1528 1529 hal_srng_access_end(hal_soc, err_dst_srng); 1530 1531 if (rx_bufs_used) { 1532 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 1533 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1534 1535 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 1536 rx_desc_pool, rx_bufs_used, &head, &tail); 1537 1538 work_done += rx_bufs_used; 1539 } 1540 1541 return work_done; 1542 } 1543