/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/* Max number of buffers in the invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 *			      back on the same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations, and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, then drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kinds of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * the AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[QDF_MAC_ADDR_SIZE]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[QDF_MAC_ADDR_SIZE],
				  vdev->pdev->pdev_id, ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[QDF_MAC_ADDR_SIZE]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to take on the link descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);

		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
			   srng->ring_id,
			   soc->stats.rx.err.hal_ring_access_full_fail);
		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			   *srng->u.src_ring.hp_addr,
			   srng->u.src_ring.reap_hp,
			   *srng->u.src_ring.tp_addr,
			   srng->u.src_ring.cached_tp);
		QDF_BUG(0);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}
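
/*
 * Illustrative sketch (not part of the driver): callers pick the WBM
 * buffer-manager action based on whether the MSDUs referenced by the
 * link descriptor have already been consumed by the host. The two
 * actions used in this file are:
 *
 *   dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
 *                                  HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 *       - MSDUs were already reaped/freed by SW; only the link
 *         descriptor goes back to the WBM idle list.
 *
 *   dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
 *                                  HAL_BM_ACTION_RELEASE_MSDU_LIST);
 *       - MSDUs are still owned elsewhere (e.g. invalid RBM); WBM
 *         releases the whole MSDU list along with the link descriptor.
 */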

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to take on the link descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id of the pdev owning the buffers (filled in by callee)
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;
	struct rx_desc_pool *rx_desc_pool;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link belong to the same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "pdev is null for pool_id = %d",
				  rx_desc->pool_id);
			return rx_bufs_used;
		}

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		rx_desc->unmapped = 1;

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id of the pdev owning the buffers (filled in by callee)
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK %pM",
			  peer, peer->mac_addr.raw);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_oor_handle() - Handles an msdu with the OOR error
 *
 * @soc: core txrx main context
 * @nbuf: pointer to msdu skb
 * @peer_id: dp peer ID
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This function processes the msdu delivered from the REO2TCL
 * ring with error type OOR
 *
 * Return: None
 */
static void
dp_rx_oor_handle(struct dp_soc *soc,
		 qdf_nbuf_t nbuf,
		 uint16_t peer_id,
		 uint8_t *rx_tlv_hdr)
{
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
	struct dp_peer *peer = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		dp_info_rl("peer not found");
		goto free_nbuf;
	}

	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
		dp_peer_unref_del_find_by_id(peer);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);

	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
	qdf_nbuf_free(nbuf);
}
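
/*
 * Note on frame_mask (illustrative): dp_rx_deliver_special_frame() is
 * expected to deliver the nbuf to the stack only when it is one of the
 * frame types selected in the mask, i.e. for OOR errors above:
 *
 *   FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
 *   FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP
 *
 * Everything else is dropped and accounted under
 * rx.err.reo_err_oor_drop.
 */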

/**
 * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: pointer to mpdu level description info
 * @link_desc_va: pointer to msdu_link_desc virtual address
 * @err_code: reo error code fetched from ring entry
 *
 * Function to handle msdus fetched from msdu link desc; currently
 * it only supports the 2K jump and OOR errors.
 *
 * Return: msdu count processed.
 */
static uint32_t
dp_rx_reo_err_entry_process(struct dp_soc *soc,
			    void *ring_desc,
			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			    void *link_desc_va,
			    enum hal_reo_error_code err_code)
{
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *pdev;
	int i;
	uint8_t *rx_tlv_hdr_first;
	uint8_t *rx_tlv_hdr_last;
	uint32_t tid = DP_MAX_TIDS;
	uint16_t peer_id;
	struct dp_rx_desc *rx_desc;
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	qdf_nbuf_t head_nbuf = NULL;
	qdf_nbuf_t tail_nbuf = NULL;
	uint16_t msdu_processed = 0;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
					mpdu_desc_info->peer_meta_data);

more_msdu_link_desc:
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &num_msdus);
	for (i = 0; i < num_msdus; i++) {
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(
					soc,
					msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link belong to the same pdev */
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

		nbuf = rx_desc->nbuf;
		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		rx_desc->unmapped = 1;

		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
		rx_bufs_used++;
		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);

		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);

		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION))
			continue;

		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);

		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
			nbuf = dp_rx_sg_create(head_nbuf);
			qdf_nbuf_set_is_frag(nbuf, 1);
			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
		}

		switch (err_code) {
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
			/*
			 * The mpdu start description TLV is valid only in
			 * the first msdu of an mpdu; reuse its tid for the
			 * msdus that follow.
			 */
			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							   rx_tlv_hdr_last))
				tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
								rx_tlv_hdr_first);

			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
					  peer_id, tid);
			break;

		case HAL_REO_ERR_REGULAR_FRAME_OOR:
			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
			break;
		default:
			dp_err_rl("Unsupported error code %d", err_code);
			qdf_nbuf_free(nbuf);
		}

		msdu_processed++;
		head_nbuf = NULL;
		tail_nbuf = NULL;
	}

	if (msdu_processed < mpdu_desc_info->msdu_count) {
		hal_rx_get_next_msdu_link_desc_buf_addr_info(
						link_desc_va,
						&next_link_desc_addr_info);

		if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
			dp_rx_link_desc_return_by_addr(
					soc,
					buf_addr_info,
					HAL_BM_ACTION_PUT_IN_IDLE_LIST);

			hal_rx_buffer_addr_info_get_paddr(
						&next_link_desc_addr_info,
						&buf_info);
			link_desc_va =
				dp_rx_cookie_2_link_desc_va(soc, &buf_info);
			cur_link_desc_addr_info = next_link_desc_addr_info;
			buf_addr_info = &cur_link_desc_addr_info;

			goto more_msdu_link_desc;
		}
	}

	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	QDF_BUG(msdu_processed == mpdu_desc_info->msdu_count);

	return rx_bufs_used;
}
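
/*
 * Sketch of the link-descriptor walk above (illustrative only): an MPDU
 * whose MSDU count exceeds the capacity of one MSDU link descriptor is
 * spread across a chain of link descriptors. Each pass reaps up to
 * num_msdus buffers, returns the current link descriptor to the WBM
 * idle list, and follows next_link_desc_addr_info:
 *
 *   more_msdu_link_desc:
 *       hal_rx_msdu_list_get(...);          // reap this descriptor
 *       ...
 *       if (msdu_processed < msdu_count && next addr info is valid)
 *           goto more_msdu_link_desc;       // follow the chain
 *
 * QDF_BUG() fires if the chain terminates before every MSDU of the
 * MPDU has been seen.
 */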

#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to the pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!dp_pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for mac_id = %d", mac_id);
		return mpdu_done;
	}
	/* If the invalid peer SG list has reached its max size, free the
	 * buffers in the list and treat the current buffer as the start
	 * of a new list.
	 *
	 * The current logic to detect the last buffer from attn_tlv is not
	 * reliable in OFDMA UL scenarios, hence add a max-buffers check to
	 * avoid list pile-up.
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
		(dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
						      rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &dp_pdev->ppdu_info.rx_status);
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; keep the check for debugging purposes
	 * in case of some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}

static
void dp_rx_wbm_err_handle_bar(struct dp_soc *soc,
			      struct dp_peer *peer,
			      qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid, ssn.
	 * 2a. Do a tid update.
	 */

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + SIZE_OF_DATA_RX_TLV);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	dp_rx_tid_update_wifi3(peer, tid,
			       peer->rx_tid[tid].ba_win_size,
			       start_seq_num);
}
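
/*
 * Worked example for the SSN extraction above (illustrative): i_seq in
 * a BAR frame carries the 12-bit starting sequence number in the upper
 * bits, IEEE80211_SEQ_SEQ_SHIFT (4) bits above the fragment number.
 * For a little-endian i_seq of 0x0ABC:
 *
 *   start_seq_num = le16toh(0x0ABC) >> 4 = 0x0AB (171)
 *
 * which is then used to move the reorder window via
 * dp_rx_tid_update_wifi3().
 */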

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in a non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to a value of 2,
 * or if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 * Return: None
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		dp_info_rl("peer not found");
		goto free_nbuf;
	}

	if (tid >= DP_MAX_TIDS) {
		dp_info_rl("invalid tid");
		goto nbuf_deliver;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);

	/* only if BA session is active, allow sending a Delba */
	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto nbuf_deliver;
	}

	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba) {
			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1);
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode);
		}
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

nbuf_deliver:
	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
		dp_peer_unref_del_find_by_id(peer);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);

	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
	qdf_nbuf_free(nbuf);
}
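
/*
 * Sketch of the DELBA bookkeeping above (illustrative): the first 2K
 * jump on an active BA session bumps delba_tx_retry, marks
 * delba_tx_status, and sends a DELBA with reason
 * IEEE80211_REASON_QOS_SETUP_REQUIRED. While that DELBA is outstanding,
 * further jumps on the same TID skip the send; in either case the
 * frame is still considered for special-frame delivery (ARP only here)
 * before being dropped.
 */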

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * the driver may not be able to find the peer. Try finding the peer
 * by addr_2 of the received MPDU. If the peer is found, then most
 * likely sw_peer_id & ast_idx are corrupted.
 *
 * Return: True if the peer was found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for pool_id = %d", pool_id);
		return false;
	}
	/*
	 * WAR: In certain types of packets if peer_id is not correct then
	 * the driver may not be able to find the peer. Try finding the
	 * peer by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}
	return false;
}

/**
 * dp_rx_check_pkt_len() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_DATA_BUFFER_SIZE, else return false
 */
static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}

#endif
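
/*
 * Usage sketch (illustrative): callers derive pkt_len from the TLVs and
 * let dp_rx_check_pkt_len() reject oversized frames before touching the
 * nbuf, e.g.:
 *
 *   msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
 *   pkt_len  = msdu_len + l3_hdr_pad + RX_PKT_TLVS_LEN;
 *   if (dp_rx_check_pkt_len(soc, pkt_len)) {
 *       qdf_nbuf_free(nbuf);   // invalid length, drop
 *       return;
 *   }
 *
 * On QCA6390/QCA6490/QCA6750 builds this guards against lengths above
 * RX_DATA_BUFFER_SIZE; other targets compile the check down to
 * "always valid".
 */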

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, i.e. before the TID queue is set up.
 * It may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS if the nbuf was handled successfully, or
 *	   a QDF error status if the nbuf could not be handled and was
 *	   dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;
	struct hal_rx_msdu_metadata msdu_metadata;
	uint16_t sa_idx = 0;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (!pdev) {
			dp_err_rl("pdev is null for pool_id = %d", pool_id);
			return QDF_STATUS_E_FAILURE;
		}

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		/* QCN9000 has the support enabled */
		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
			mpdu_done = true;
			nbuf->next = NULL;
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
							   nbuf, mpdu_done,
							   pool_id);
		} else {
			mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr,
						      pool_id);
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
					pdev->invalid_peer_head_msdu,
					mpdu_done, pool_id);
		}

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}

		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  RX_PKT_TLVS_LEN));

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
			goto drop_nbuf;
		}
	}

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches with any of the
	 * vdev mac addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf,
					msdu_metadata);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		qdf_nbuf_set_next(nbuf, NULL);
		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 qdf_nbuf_len(nbuf));

		/*
		 * Update the protocol tag in SKB based on
		 * CCE metadata
		 */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID,
					  true, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf,
				      rx_tlv_hdr, true);

		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
				 soc->hal_soc, rx_tlv_hdr) &&
				 (vdev->rx_decap_type ==
				  htt_cmn_pkt_type_ethernet))) {
			eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
			DP_STATS_INC_PKT(peer, rx.multicast, 1,
					 qdf_nbuf_len(nbuf));

			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
				DP_STATS_INC_PKT(peer, rx.bcast, 1,
						 qdf_nbuf_len(nbuf));
		}

		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}
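
/*
 * Layout note for the pull above (illustrative): a reaped nbuf starts
 * with the RX TLVs, followed by the optional L3 header padding, and
 * then the payload:
 *
 *   [ RX_PKT_TLVS_LEN | l3_hdr_pad | payload ... ]
 *
 * so qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN + l3_hdr_pad) leaves
 * nbuf->data pointing at the payload before it is handed to the stack.
 */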

/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *			       or wifi parse error frames to the OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code, uint8_t mac_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}
	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	dp_rx_skip_tlvs(nbuf, l2_hdr_offset);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
			    htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In the dynamic WEP case rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled
	 * and key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
	}

	return;
}
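
/*
 * Worked example for the WIFI_PARSE branch above (illustrative): after
 * dp_rx_skip_tlvs() the nbuf data points at the ethernet header, so the
 * ethertype field sits at offset 2 * QDF_MAC_ADDR_SIZE. An 802.1Q tag
 * there (QDF_ETH_TYPE_8021Q) followed DP_SKIP_VLAN bytes later by
 * QDF_LLC_STP identifies a VLAN-tagged STP frame, which is counted in
 * vlan_tag_stp_cnt and routed down the mesh path instead of process_rx.
 */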

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!peer) {
		dp_info_rl("peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_info_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_info_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
				   "status %d !", rx_seq, fragno, status);
			return;
		}
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	qdf_nbuf_free(nbuf);
	return;
}
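
/*
 * Note (illustrative): for non-fragmented frames the MIC failure is
 * reported up through ol_ops->rx_mic_error() with a populated
 * cdp_rx_mic_err_info -- DA/TA from the MPDU header, vdev_id, and a
 * zeroed TSC -- which typically lets the umac start TKIP
 * countermeasures. The nbuf itself is always freed here.
 */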

uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc,
						      hal_ring_hdl)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST) &&
				 (msdu_list.rbm[0] != DP_DEFRAG_RBM))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for fragmented
			 * case. We drop the msdus and release the link desc
			 * back if there are more than one msdu in link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					HAL_REO_ERR_REGULAR_FRAME_2K_JUMP);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_oor_error(ring_desc)) {
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_REGULAR_FRAME_OOR],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					HAL_REO_ERR_REGULAR_FRAME_OOR);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef DROP_RXDMA_DECRYPT_ERR
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return false;
}
#else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return true;
}
#endif
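
/*
 * Compile-time policy sketch (illustrative): with DROP_RXDMA_DECRYPT_ERR
 * defined, dp_handle_rxdma_decrypt_err() returns false and decrypt-error
 * frames for unknown peers are dropped in dp_rx_wbm_err_process();
 * otherwise they are funneled through dp_rx_process_rxdma_err() like the
 * other rxdma errors.
 */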

static inline bool
dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
{
	/*
	 * Currently only the Null Queue and Unencrypted error handlers
	 * have support for SG. The other error handlers do not deal with
	 * SG buffers.
	 */
	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
		return true;

	return false;
}

uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
	uint8_t msdu_continuation = 0;
	bool process_sg_buf = false;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc)) {
			/* Check hw hp in case of SG support */
			if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
				/*
				 * Update the cached hp from the hw hp.
				 * This is required for partially created
				 * SG packets while quota is still left.
				 */
				hal_srng_sync_cachedhp(hal_soc, hal_ring_hdl);
				ring_desc = hal_srng_dst_get_next(hal_soc,
								  hal_ring_hdl);
				if (!ring_desc) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  FL("No Rx Hw Desc for intermediate sg -- %pK"),
						  hal_ring_hdl);
					break;
				}
			} else {
				/* Come out of the loop in Non SG support cases */
				break;
			}
		}

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);

		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
				 dp_rx_is_sg_formation_required(&wbm_err_info))) {
			/* SG is detected from continuation bit */
			msdu_continuation =
				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
								     ring_desc);
			if (msdu_continuation &&
			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
				/* Update length from first buffer in SG */
				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
					hal_rx_msdu_start_msdu_len_get(
						qdf_nbuf_data(rx_desc->nbuf));
				soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true;
			}

			if (msdu_continuation) {
				/* MSDU continued packets */
				qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
			} else {
				/* This is the terminal packet in SG */
				qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
				qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
				process_sg_buf = true;
			}
		}

		nbuf = rx_desc->nbuf;
		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		rx_desc->unmapped = 1;

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
					  nbuf);
			if (process_sg_buf) {
				DP_RX_MERGE_TWO_LIST(nbuf_head, nbuf_tail,
					soc->wbm_sg_param.wbm_sg_nbuf_head,
					soc->wbm_sg_param.wbm_sg_nbuf_tail);
				dp_rx_wbm_sg_list_reset(soc);
				process_sg_buf = false;
			}
		} else {
			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
		}

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);

		/*
		 * if the continuation bit is set then we have MSDUs spread
		 * across multiple buffers; let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!msdu_continuation))
			quota -= 1;
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
							   rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		if (!peer)
			dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u",
				   peer_id, wbm_err_info.wbm_err_src,
				   wbm_err_info.reo_psh_rsn);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		/*
		 * Form the SG for msdu continued buffers
		 * (QCN9000 has this support).
		 */
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			nbuf = dp_rx_sg_create(nbuf);
			next = nbuf->next;
			/*
			 * SG error handling is not done correctly,
			 * drop SG frames for now.
			 */
			qdf_nbuf_free(nbuf);
			dp_info_rl("scattered msdu dropped");
			nbuf = next;
			continue;
		}

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(
							soc->hal_soc,
							rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(
								soc->hal_soc,
								rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(
								hal_soc,
								rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					hal_rx_msdu_start_msdu_len_get(
								rx_tlv_hdr);
					nbuf->next = NULL;
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					if (peer)
						dp_rx_wbm_err_handle_bar(soc,
									 peer,
									 nbuf);
					break;

				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err_info.reo_err_code);
					break;
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:

				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.
								rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:

					if (peer) {
						DP_STATS_INC(peer, rx.err.
							     decrypt_err, 1);
						break;
					}

					if (!dp_handle_rxdma_decrypt_err())
						break;

					pool_id = wbm_err_info.pool_id;
					err_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, NULL,
								err_code,
								pool_id);
					nbuf = next;
					continue;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}
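
/*
 * Usage sketch (illustrative): like dp_rx_err_process(), this handler is
 * expected to be driven from the per-context interrupt service budget
 * loop, e.g.:
 *
 *   budget -= dp_rx_wbm_err_process(int_ctx, soc,
 *                                   soc->rx_rel_ring.hal_srng, budget);
 *
 * the return value being the number of rx buffers reaped and replenished
 * (no scale factor applied).
 */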
			if (wbm_err_info.rxdma_psh_rsn ==
			    HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					/* fall through */
				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer, rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.decrypt_err, 1);
						break;
					}

					if (!dp_handle_rxdma_decrypt_err())
						break;

					pool_id = wbm_err_info.pool_id;
					err_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, NULL,
								err_code,
								pool_id);
					nbuf = next;
					continue;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc is found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for mac_id = %d", mac_id);
		return rx_bufs_used;
	}

	msdu = NULL;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}
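	/*
	 * Walk the MPDU's chain of MSDU link descriptors: each pass
	 * frees (or hands off) the MSDUs of one link descriptor,
	 * advances buf_info to the next link and returns the current
	 * one; the walk ends when no next link remains
	 * (buf_info.paddr == 0).
	 */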
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert_always(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/*
			 * If the MSDUs belong to an NSS-offloaded radio and
			 * the RBM is not SW3_BM, return the msdu_link
			 * descriptor without freeing the MSDUs (nbufs); let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Otherwise, iterate through the msdu link desc list
			 * and free each MSDU in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * This is an unlikely scenario where
					 * the host is reaping a descriptor
					 * that it already reaped just a while
					 * ago but has yet to replenish back
					 * to HW. In this case the host dumps
					 * the last 128 descriptors, including
					 * the software descriptor rx_desc,
					 * and asserts.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release; buf_info is overwritten by
		 * hal_rx_mon_next_link_desc_get() below before the current
		 * link descriptor is returned.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}
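/**
 * dp_rxdma_err_process() - RXDMA error destination ring processing
 *
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot
 *
 * This function reaps the RXDMA error destination ring, pops the
 * MSDUs of the faulting MPDUs via dp_rx_err_mpdu_pop() and
 * replenishes the reaped buffers back to the RX refill ring.
 *
 * Return: uint32_t: No. of units processed
 */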
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RXDMA ERR DST Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RXDMA ERR DST Ring Access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
		else
			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}
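/**
 * dp_wbm_int_err_mpdu_pop() - pop and free the MSDUs of an MPDU that
 * hit a WBM internal error
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus popped from the MPDU
 */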
static inline uint32_t
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0, msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];

	msdu = NULL;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc,
				     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC],
				     1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc =
					dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				msdu = rx_desc->nbuf;

				qdf_nbuf_unmap_single(soc->osdev, msdu,
						      QDF_DMA_FROM_DEVICE);

				qdf_nbuf_free(msdu);
				rx_bufs_used++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release; buf_info is overwritten by
		 * hal_rx_mon_next_link_desc_get() below before the current
		 * link descriptor is returned.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);

	return rx_bufs_used;
}

/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * wbm_internal_error is seen in the following scenarios:
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 *
 * Return: None
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_rx_desc *rx_desc = NULL;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_buf_cookie;
	uint32_t rx_bufs_reaped = 0;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER],
			     1);
		return;
	}

	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF],
			     1);
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		if (rx_desc && rx_desc->nbuf) {
			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
			qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			rx_desc->unmapped = 1;

			qdf_nbuf_free(rx_desc->nbuf);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);

			rx_bufs_reaped++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
							 hal_desc,
							 &head, &tail);
	}
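	/*
	 * Replenish any reaped buffers back to the refill ring of the
	 * pool the faulting buffer came from.
	 */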
	if (rx_bufs_reaped) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED],
			     1);
		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped,
					&head, &tail);
	}
}