/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_li_rx.h"
#include "hal_api.h"
#include "hal_li_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#include "dp_li.h"

static inline
bool is_sa_da_idx_valid(uint32_t max_ast,
			qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info)
{
	if ((qdf_nbuf_is_sa_valid(nbuf) && (msdu_info.sa_idx > max_ast)) ||
	    (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) &&
	     (msdu_info.da_idx > max_ast)))
		return false;

	return true;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#if defined(FEATURE_MCL_REPEATER) && defined(FEATURE_MEC)
/**
 * dp_rx_mec_check_wrapper() - wrapper to dp_rx_mcast_echo_check
 * @soc: core DP main context
 * @txrx_peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf);
}
#else
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
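/**
 * dp_rx_intrabss_ucast_check_li() - Check whether a unicast frame is
 *				     eligible for intra-BSS forwarding
 * @soc: core txrx main context
 * @nbuf: frame that is potentially eligible for intra-BSS forwarding
 * @ta_txrx_peer: transmitter (source) peer entry
 * @msdu_metadata: MSDU metadata carrying the DA AST index
 * @p_tx_vdev_id: filled with the vdev id to transmit on when eligible
 *
 * Return: true if the frame should be intra-BSS forwarded, else false
 */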
static bool
dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_txrx_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      uint8_t *p_tx_vdev_id)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	ast_entry = soc->ast_table[msdu_metadata->da_idx];
	if (!ast_entry)
		return false;

	if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
		ast_entry->is_active = TRUE;
		return false;
	}

	da_peer_id = ast_entry->peer_id;
	/* TA peer cannot be same as peer(DA) on which AST is present;
	 * this indicates a change in topology and that AST entries
	 * are yet to be updated.
	 */
	if (da_peer_id == ta_txrx_peer->peer_id ||
	    da_peer_id == HTT_INVALID_PEER)
		return false;

	da_peer = dp_txrx_peer_get_ref_by_id(soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	*p_tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation
	 * list, don't forward; instead push to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_txrx_peer) ||
	    dp_get_peer_isolation(da_peer) ||
	    da_peer->vdev->vdev_id != ta_txrx_peer->vdev->vdev_id) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	if (da_peer->bss_peer) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return true;
}

/**
 * dp_rx_intrabss_fwd_li() - Implements the Intra-BSS forwarding logic
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: MSDU metadata of the frame
 * @tid_stats: per-TID rx stats of the REO ring
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd_li(struct dp_soc *soc,
		      struct dp_txrx_peer *ta_txrx_peer,
		      uint8_t *rx_tlv_hdr,
		      qdf_nbuf_t nbuf,
		      struct hal_rx_msdu_metadata msdu_metadata,
		      struct cdp_tid_rx_stats *tid_stats)
{
	uint8_t tx_vdev_id;

	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack.
	 * Note: how do we handle multicast pkts? Do we forward
	 * all multicast pkts as is, or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_txrx_peer->bss_peer)
		return dp_rx_intrabss_mcbc_fwd(soc, ta_txrx_peer, rx_tlv_hdr,
					       nbuf, tid_stats, 0);

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_txrx_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_txrx_peer,
					  &msdu_metadata, &tx_vdev_id))
		return dp_rx_intrabss_ucast_fwd(soc, ta_txrx_peer, tx_vdev_id,
						rx_tlv_hdr, nbuf, tid_stats,
						0);

	return false;
}
#endif

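/**
 * dp_rx_process_li() - REO destination ring processing loop for
 *			Lithium targets
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: HAL handle of the REO destination ring to be reaped
 * @reo_ring_num: REO destination ring number on which packets arrived
 * @quota: maximum number of ring entries that may be processed
 *
 * Return: number of rx buffers reaped (used) from the ring
 */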
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_ring_desc_t last_prefetched_hw_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t num_pending = 0;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	int max_reap_limit;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;
	uint32_t max_ast;
	uint64_t current_time = 0;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
	dp_runtime_pm_mark_last_busy(soc);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	max_ast = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	if (!num_pending)
		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);

	if (num_pending > quota)
		num_pending = quota;

	last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
						       num_pending);

	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
	max_ast = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(num_pending)) {
		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc))
			break;

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				rx_desc->unmapped = 1;
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
						&head[rx_desc->pool_id],
						&tail[rx_desc->pool_id],
						rx_desc);
			}
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */

		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_li(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_li(ring_desc, &msdu_desc_info);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (RX_DATA_BUFFER_SIZE -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								ring_desc);
					/* As we are going to break out of the
					 * loop because of unavailability of
					 * descs to form complete SG, we need to
					 * reset the TP in the REO destination
					 * ring.
					 */
					hal_srng_dst_dec_tp(hal_soc,
							    hal_ring_hdl);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    !(msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION))
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->pool_id]++;
		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_li(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			DP_PEER_METADATA_VDEV_ID_GET_LI(peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_LI(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));

		/* set reo dest indication */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * move unmap after scattered msdu waiting break logic
		 * in case double skb unmap happened.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		rx_desc->unmapped = 1;
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);

		quota -= 1;
		num_pending -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		dp_rx_prefetch_hw_sw_nbuf_desc(soc, hal_soc, num_pending,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
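	/*
	 * Reap loop complete (or broken out of): release ring access,
	 * update per-core stats and replenish the reaped buffers back
	 * to the RXDMA refill ring of each pdev before processing the
	 * reaped frame list below.
	 */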
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish_simple(soc, mac_id, dp_rxdma_srng,
					       rx_desc_pool,
					       rx_bufs_reaped[mac_id],
					       &head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data(nbuf, next);

		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
			tid = qdf_nbuf_get_tid_val(nbuf);

		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
			DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		if (qdf_likely(old_tid != tid)) {
			tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
			if (qdf_unlikely(!hal_rx_attn_msdu_done_get_li(
							rx_tlv_hdr))) {
				dp_err_rl("MSDU DONE failure");
				DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
				hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
						     QDF_TRACE_LEVEL_INFO);
				tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
				qdf_assert(0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			} else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get_li(
							rx_tlv_hdr))) {
				DP_STATS_INC(soc, rx.err.msdu_len_err, 1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len,
							      0);
			} else {
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);

				if (!dp_rx_is_sg_supported()) {
					dp_rx_nbuf_free(nbuf);
					dp_info_rl("sg msdu len %d, dropped",
						   msdu_len);
					nbuf = next;
					continue;
				}
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		/*
		 * process frame for multipass phrase processing
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(txrx_peer, nbuf,
						    tid) == false) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.multipass_rx_pkt_drop,
							  1, 0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop,
						  1, 0);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
								rx_tlv_hdr) ==
				  false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.nawds_mcast_drop,
						  1, 0);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1, 0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats, 0);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				dp_rx_info("%pK: mesh pkt filtered", soc);
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
					      txrx_peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* Due to HW issue, sometimes we see that the sa_idx
			 * and da_idx are invalid with sa_valid and da_valid
			 * bits set
			 *
			 * in this case we also see that value of
			 * sa_sw_peer_id is set as 0
			 *
			 * Drop the packet if sa_idx and da_idx OOB or
			 * sa_sw_peerid is 0
			 */
			if (!is_sa_da_idx_valid(max_ast, nbuf,
						msdu_metadata)) {
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				continue;
			}
			if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
								 txrx_peer,
								 rx_tlv_hdr,
								 nbuf))) {
				/* this is a looped back MCBC pkt, drop it */
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.mec_drop, 1,
					      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
							      0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			/* WDS Source Port Learning */
			if (qdf_likely(vdev->wds_enabled))
				dp_rx_wds_srcport_learn(soc,
							rx_tlv_hdr,
							txrx_peer,
							nbuf,
							msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_li(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata,
							  tid_stats)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1,
					   QDF_NBUF_CB_RX_PKT_LEN(nbuf));
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
					      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
						      0);

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

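/**
 * dp_rx_desc_pool_init_li() - Initialize an RX software descriptor pool
 *			       for Lithium targets
 * @soc: core txrx main context
 * @rx_desc_pool: pointer to the RX descriptor pool to be initialized
 * @pool_id: pool id / mac id of the descriptor pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */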
QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	return dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}

void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

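/**
 * dp_wbm_get_rx_desc_from_hal_desc_li() - Get the software RX descriptor
 *					   referenced by a WBM release ring
 *					   descriptor
 * @soc: core txrx main context
 * @ring_desc: WBM release ring (hal) descriptor
 * @r_rx_desc: filled with the software RX descriptor on success
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL if the RBM
 *	   or the software cookie carried by the descriptor is invalid
 */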
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc)
{
	struct hal_buf_info buf_info = {0};
	hal_soc_handle_t hal_soc = soc->hal_soc;

	/* only cookie and rbm will be valid in buf_info */
	hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
				  &buf_info);

	if (qdf_unlikely(buf_info.rbm !=
			 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) {
		/* TODO */
		/* Call appropriate handler */
		DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
		dp_rx_err("%pK: Invalid RBM %d", soc, buf_info.rbm);
		return QDF_STATUS_E_INVAL;
	}

	if (!dp_rx_is_sw_cookie_valid(soc, buf_info.sw_cookie)) {
		dp_rx_err("invalid sw_cookie 0x%x", buf_info.sw_cookie);
		return QDF_STATUS_E_INVAL;
	}

	*r_rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);

	return QDF_STATUS_SUCCESS;
}

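/**
 * dp_rx_chain_msdus_li() - Chain MSDUs received for an unknown (invalid)
 *			    peer onto the pdev invalid peer list until the
 *			    complete MPDU has been reaped
 * @soc: core txrx main context
 * @nbuf: current MSDU buffer
 * @rx_tlv_hdr: start of the rx TLV header of @nbuf
 * @mac_id: lmac id used to look up the pdev
 *
 * Return: true once the last MSDU of the MPDU has been chained
 *	   (mpdu_done), else false
 */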
bool dp_rx_chain_msdus_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!dp_pdev) {
		dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return mpdu_done;
	}
	/* if the invalid peer SG list has reached its max length, free the
	 * buffers in the list and treat the current buffer as the start of
	 * a new list
	 *
	 * current logic to detect the last buffer from attn_tlv is not
	 * reliable in OFDMA UL scenario, hence add a max buffers check to
	 * avoid list pile up
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
	     (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
						      rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			dp_rx_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(soc->hal_soc,
							    rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; add the check for debugging purposes
	 * to catch corner cases.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}

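/**
 * dp_rx_wbm_err_reap_desc_li() - Reap error frames released by WBM on the
 *				  error release ring, unmap their buffers
 *				  and replenish the descriptors
 * @int_ctx: per interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: HAL handle of the WBM error release ring
 * @quota: maximum number of ring entries that may be processed
 * @rx_bufs_used: incremented by the number of buffers reaped
 *
 * Return: head of the nbuf list reaped from the ring (NULL if none)
 */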
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota,
			   uint32_t *rx_bufs_used)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t
		*head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
	union dp_rx_desc_list_elem_t
		*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
	uint8_t buf_type;
	uint8_t mac_id;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t msdu_continuation = 0;
	bool process_sg_buf = false;
	uint32_t wbm_err_src;
	QDF_STATUS status;
	struct dp_soc *replenish_soc;
	uint8_t chip_id;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };

	qdf_assert(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
			      soc, hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
		if (qdf_unlikely(!ring_desc))
			break;

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));

		if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
								   ring_desc,
								   &rx_desc)) {
			dp_rx_err_err("get rx desc from hal_desc failed");
			continue;
		}

		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			dp_rx_err_err("%pK: Invalid rx_desc %pK",
				      soc, rx_desc);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			continue;
		}

		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		nbuf = rx_desc->nbuf;

		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
							  ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
				   nbuf);
			rx_desc->in_err_state = 1;
			rx_desc->unmapped = 1;
			rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;

			dp_rx_add_to_free_desc_list(
				&head[rx_desc->chip_id][rx_desc->pool_id],
				&tail[rx_desc->chip_id][rx_desc->pool_id],
				rx_desc);
			continue;
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);

		if (qdf_likely(mpdu_desc_info.mpdu_flags &
			       HAL_MPDU_F_QOS_CONTROL_VALID))
			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);

		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		if (qdf_unlikely(
			soc->wbm_release_desc_rx_sg_support &&
			dp_rx_is_sg_formation_required(&wbm_err_info))) {
			/* SG is detected from continuation bit */
			msdu_continuation =
				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
								     ring_desc);
			if (msdu_continuation &&
			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
				/* Update length from first buffer in SG */
				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
					hal_rx_msdu_start_msdu_len_get(
							soc->hal_soc,
							qdf_nbuf_data(nbuf));
				soc->wbm_sg_param.wbm_is_first_msdu_in_sg =
					true;
			}

			if (msdu_continuation) {
				/* MSDU continued packets */
				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
			} else {
				/* This is the terminal packet in SG */
				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
				process_sg_buf = true;
			}
		}

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		wbm_err_info.pool_id = rx_desc->pool_id;
		dp_rx_set_err_info(soc, nbuf, wbm_err_info);

		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;

		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
					  nbuf);
			if (process_sg_buf) {
				if (!dp_rx_buffer_pool_refill(
					soc,
					soc->wbm_sg_param.wbm_sg_nbuf_head,
					rx_desc->pool_id))
					DP_RX_MERGE_TWO_LIST(
						nbuf_head, nbuf_tail,
						soc->wbm_sg_param.wbm_sg_nbuf_head,
						soc->wbm_sg_param.wbm_sg_nbuf_tail);
				dp_rx_wbm_sg_list_last_msdu_war(soc);
				dp_rx_wbm_sg_list_reset(soc);
				process_sg_buf = false;
			}
		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
						     rx_desc->pool_id)) {
			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
		}

		dp_rx_add_to_free_desc_list
			(&head[rx_desc->chip_id][rx_desc->pool_id],
			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!msdu_continuation))
			quota -= 1;
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
			/*
			 * continue with next mac_id if no pkts were reaped
			 * from that pool
			 */
			if (!rx_bufs_reaped[chip_id][mac_id])
				continue;

			replenish_soc =
			soc->arch_ops.dp_rx_replenish_soc_get(soc, chip_id);

			dp_rxdma_srng =
				&replenish_soc->rx_refill_buf_ring[mac_id];

			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish_simple(
						replenish_soc, mac_id,
						dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[chip_id][mac_id],
						&head[chip_id][mac_id],
						&tail[chip_id][mac_id]);
			*rx_bufs_used += rx_bufs_reaped[chip_id][mac_id];
		}
	}
	return nbuf_head;
}

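/**
 * dp_rx_null_q_desc_handle_li() - Handle frames routed to the error path
 *				   with a NULL REO queue descriptor
 * @soc: core txrx main context
 * @nbuf: frame to be processed
 * @rx_tlv_hdr: start of the rx TLV header of @nbuf
 * @pool_id: rx descriptor pool id / mac id
 * @txrx_peer: peer the frame was received from (may be NULL)
 * @is_reo_exception: true if the frame came via the REO exception path
 * @link_id: link id on which the frame was received
 *
 * Return: QDF_STATUS_SUCCESS if the frame was delivered, otherwise
 *	   QDF_STATUS_E_FAILURE (the frame is freed or queued internally)
 */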
QDF_STATUS
dp_rx_null_q_desc_handle_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			    uint8_t *rx_tlv_hdr, uint8_t pool_id,
			    struct dp_txrx_peer *txrx_peer,
			    bool is_reo_exception,
			    uint8_t link_id)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;
	struct hal_rx_msdu_metadata msdu_metadata;
	uint16_t sa_idx = 0;
	bool is_eapol = 0;
	bool enh_flag;

	qdf_nbuf_set_rx_chfrag_start(
				nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!txrx_peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!txrx_peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (!pdev) {
			dp_err_rl("pdev is null for pool_id = %d", pool_id);
			return QDF_STATUS_E_FAILURE;
		}

		dp_err_rl("txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		/* QCN9000 has the support enabled */
		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
			mpdu_done = true;
			nbuf->next = NULL;
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
							   nbuf,
							   mpdu_done,
							   pool_id);
		} else {
			mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
								    rx_tlv_hdr,
								    pool_id);
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(
					soc,
					pdev->invalid_peer_head_msdu,
					mpdu_done, pool_id);
		}

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}

		return QDF_STATUS_E_FAILURE;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  soc->rx_pkt_tlv_size));

	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1,
					  0);
		goto drop_nbuf;
	}

	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
			goto drop_nbuf;
		}
	}

	if ((!soc->mec_fw_offload) &&
	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf), 0);
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf), 0);
		goto drop_nbuf;
	}

	if (qdf_unlikely(txrx_peer->nawds_enabled &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1,
					  0);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1,
					  0);
		goto drop_nbuf;
	}
	/* WDS Source Port Learning */
	if (!soc->ast_offload_support &&
	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
					msdu_metadata);

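	/*
	 * For unicast frames that reached this error path, set up the
	 * REO TID queue on the fly if it has not been created yet; the
	 * start sequence number is unknown here, hence IEEE80211_SEQ_MAX.
	 */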
	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		struct dp_peer *peer;
		struct dp_rx_tid *rx_tid;

		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
					     DP_MOD_ID_RX_ERR);
		if (peer) {
			rx_tid = &peer->rx_tid[tid];
			qdf_spin_lock_bh(&rx_tid->tid_lock);
			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
				dp_rx_tid_setup_wifi3(peer, tid, 1,
						      IEEE80211_SEQ_MAX);
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		}
	}

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	if (!txrx_peer->authorize) {
		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);

		if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
			if (!dp_rx_err_match_dhost(eh, vdev))
				goto drop_nbuf;
		} else {
			goto drop_nbuf;
		}
	}

	/*
	 * Drop packets in this path if cce_match is found. Packets will come
	 * in following path depending on whether tidQ is setup.
	 * 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
	 *    cce_match = 1
	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
	 *    dropped.
	 * 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
	 *    cce_match = 1
	 *    These packets need to be dropped and should not get delivered
	 *    to stack.
	 */
	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr)))
		goto drop_nbuf;

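	/*
	 * Deliver the frame: raw decap frames are handed to
	 * dp_rx_deliver_raw(), everything else is tagged, counted and
	 * delivered to the OS interface stack as an exception frame.
	 */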
	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, 0);
	} else {
		enh_flag = vdev->pdev->enhanced_stats_en;
		qdf_nbuf_set_next(nbuf, NULL);
		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					  enh_flag);
		/*
		 * Update the protocol tag in SKB based on
		 * CCE metadata
		 */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID,
					  true, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf,
				      rx_tlv_hdr, true);

		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					 soc->hal_soc, rx_tlv_hdr) &&
				 (vdev->rx_decap_type ==
				  htt_cmn_pkt_type_ethernet))) {
			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					    enh_flag, 0);

			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
						    qdf_nbuf_len(nbuf),
						    enh_flag, 0);
		} else {
			DP_PEER_UC_INCC_PKT(txrx_peer, 1,
					    qdf_nbuf_len(nbuf),
					    enh_flag,
					    0);
		}

		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    is_eapol);
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	dp_rx_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}