/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_be_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_be_rx.h"
#include "hal_api.h"
#include "hal_be_api.h"
#include "qdf_nbuf.h"
#include "hal_be_rx_tlv.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

#ifndef AST_OFFLOAD_ENABLE
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *txrx_peer,
		qdf_nbuf_t nbuf,
		struct hal_rx_msdu_metadata msdu_metadata)
{
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc,
					rx_tlv_hdr,
					txrx_peer,
					nbuf,
					msdu_metadata);
}
#else
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_peer_learn_be() - function to send event to control
 * path on receiving 1st 4-address frame from backhaul.
 * @soc: DP soc
 * @ta_txrx_peer: WDS repeater txrx peer
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: RX packet buffer
 *
 * Return: void
 */
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr,
					    qdf_nbuf_t nbuf)
{
	uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
	struct dp_peer *ta_base_peer;

	/* instead of checking addr4 is valid or not in per packet path
	 * check for init bit, which will be set on reception of
	 * first addr4 valid packet.
	 */
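	/* Once the init bit is set, this helper reduces to the two cheap
	 * checks below and returns early on every subsequent packet.
	 */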
	if (!ta_txrx_peer->vdev->wds_ext_enabled ||
	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
				&ta_txrx_peer->wds_ext.init))
		return;

	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)) {
		qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
					    &ta_txrx_peer->wds_ext.init);

		ta_base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
						     DP_MOD_ID_RX);

		if (!ta_base_peer)
			return;

		qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);

		soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
						soc->ctrl_psoc,
						ta_txrx_peer->peer_id,
						ta_txrx_peer->vdev->vdev_id,
						wds_ext_src_mac);
	}
}
#else
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr,
					    qdf_nbuf_t nbuf)
{
}
#endif

static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *ta_txrx_peer,
		qdf_nbuf_t nbuf,
		struct hal_rx_msdu_metadata msdu_metadata)
{
	dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
}
#endif

#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
	uint8_t lmac_id;

	lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
	qdf_nbuf_set_lmac_id(nbuf, lmac_id);
}
#else
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
#endif
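/*
 * dp_rx_process_be() below runs in two passes: descriptors are first
 * reaped from the REO destination ring into a local nbuf list, and that
 * list is then walked per vdev, with accumulated batches handed to the
 * stack whenever the vdev/peer changes or the list is exhausted.
 */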
/**
 * dp_rx_process_be() - Brain of the Rx processing functionality
 *			Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_ring_desc_t last_prefetched_hw_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	uint32_t num_pending = 0;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	bool enh_flag;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t num_entries_avail = 0;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	int max_reap_limit, ring_near_full;
	struct dp_soc *replenish_soc;
	uint8_t chip_id;
	uint64_t current_time = 0;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
	dp_runtime_pm_mark_last_busy(soc);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	ring_near_full = 0;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

	ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
							    &max_reap_limit);

	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
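	/*
	 * Note: under near-full conditions _dp_srng_test_and_update_nf_params()
	 * is expected to raise max_reap_limit so that one pass can drain more
	 * of the ring; ring_near_full also forces one extra pass through the
	 * more_data re-entry at the bottom of this function.
	 */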
	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);

	if (!num_pending)
		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	if (num_pending > quota)
		num_pending = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
								    hal_ring_hdl,
								    num_pending);
	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(num_pending)) {
		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc))
			break;

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = (struct dp_rx_desc *)
				hal_rx_get_reo_desc_va(ring_desc);
		dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);

		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				rx_desc->unmapped = 1;
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
					&head[rx_desc->chip_id][rx_desc->pool_id],
					&tail[rx_desc->chip_id][rx_desc->pool_id],
					rx_desc);
			}
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
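		/* in_use is cleared when a descriptor is moved to the free
		 * list, so reaping one with !in_use means HW returned a
		 * cookie that was already reaped: treat it as a duplicate.
		 */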
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);

		/* Set the end bit to identify the last buffer in MPDU */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* In dp_rx_sg_create() until the last buffer,
			 * end bit should not be set. As continuation bit set,
			 * this is not a last buffer.
			 */
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 0);

			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* Get number of entries available in HW ring */
				num_entries_avail =
					hal_srng_dst_num_valid(hal_soc,
							       hal_ring_hdl, 1);

				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (RX_DATA_BUFFER_SIZE -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								ring_desc);
					/* As we are going to break out of the
					 * loop because of unavailability of
					 * descs to form complete SG, we need to
					 * reset the TP in the REO destination
					 * ring.
					 */
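					/* Stepping the TP back one entry makes
					 * HW hand this descriptor out again on
					 * the next pass, by which time enough
					 * entries may exist for the whole SG.
					 */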
					hal_srng_dst_dec_tp(hal_soc,
							    hal_ring_hdl);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    !(msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION))
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;

		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
		dp_rx_set_msdu_lmac_id(rx_desc->nbuf, peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
			qdf_nbuf_set_intra_bss(rx_desc->nbuf, 1);

		if (qdf_likely(mpdu_desc_info.mpdu_flags &
			       HAL_MPDU_F_QOS_CONTROL_VALID))
			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);

		/* set sw exception */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				hal_rx_sw_exception_get_be(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * move unmap after scattered msdu waiting break logic
		 * in case double skb unmap happened.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		rx_desc->unmapped = 1;
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);

		quota -= 1;
		num_pending -= 1;

		dp_rx_add_to_free_desc_list
			(&head[rx_desc->chip_id][rx_desc->pool_id],
			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc,
						       num_pending,
						       hal_ring_hdl,
						       &last_prefetched_hw_desc,
						       &last_prefetched_sw_desc);

		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
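		/* Breaking mid-MSDU would strand continuation buffers in the
		 * local list without their last fragment, so the reap limit
		 * is only honoured on an MSDU boundary.
		 */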
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
	qdf_dsb();

	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
			/*
			 * continue with next mac_id if no pkts were reaped
			 * from that pool
			 */
			if (!rx_bufs_reaped[chip_id][mac_id])
				continue;

			replenish_soc = dp_rx_replensih_soc_get(soc, chip_id);

			dp_rxdma_srng =
				&replenish_soc->rx_refill_buf_ring[mac_id];

			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
						       dp_rxdma_srng,
						       rx_desc_pool,
						       rx_bufs_reaped[chip_id][mac_id],
						       &head[chip_id][mac_id],
						       &tail[chip_id][mac_id]);
		}
	}

	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data_be(nbuf, next);
		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		tid = qdf_nbuf_get_tid_val(nbuf);
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
			DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
								 peer_id,
								 &txrx_ref_handle,
								 pkt_capture_offload,
								 &vdev,
								 &rx_pdev, &dsf,
								 &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
			enh_flag = rx_pdev->enhanced_stats_en;
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
								 peer_id,
								 &txrx_ref_handle,
								 pkt_capture_offload,
								 &vdev,
								 &rx_pdev, &dsf,
								 &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
			enh_flag = rx_pdev->enhanced_stats_en;
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);
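		/* tid_stats is re-resolved only when the TID changes, which
		 * saves the two-dimensional stats lookup per packet in the
		 * common single-TID case.
		 */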
		if (qdf_likely(old_tid != tid)) {
			tid_stats =
				&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
				 !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) {
			dp_err("MSDU DONE failure");
			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
			dp_rx_nbuf_free(nbuf);
			qdf_assert(0);
			nbuf = next;
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
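		/* For the third (common) case the length math below works
		 * out as, e.g. for a 1500-byte MSDU: pkt_len = 1500 +
		 * l3_hdr_pad + rx_pkt_tlv_size, after which dp_rx_skip_tlvs()
		 * advances the data pointer past the TLVs and pad so the
		 * nbuf carries exactly the 1500-byte frame.
		 */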
		hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr,
							   &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len);
			} else {
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);

				if (!dp_rx_is_sg_supported()) {
					dp_rx_nbuf_free(nbuf);
					dp_info_rl("sg msdu len %d, dropped",
						   msdu_len);
					nbuf = next;
					continue;
				}
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop, 1);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
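		/* EAPOL (and WAPI) frames must pass before ->authorize is
		 * set: the key handshake that authorizes the peer is itself
		 * carried in these frames.
		 */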
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		if (qdf_unlikely(!rx_pdev->rx_fast_flag)) {
			/*
			 * process frame for multipass phase processing
			 */
			if (qdf_unlikely(vdev->multipass_en)) {
				if (dp_rx_multipass_process(txrx_peer, nbuf,
							    tid) == false) {
					DP_PEER_PER_PKT_STATS_INC
						(txrx_peer,
						 rx.multipass_rx_pkt_drop, 1);
					dp_rx_nbuf_free(nbuf);
					nbuf = next;
					continue;
				}
			}
			if (qdf_unlikely(txrx_peer &&
					 (txrx_peer->nawds_enabled) &&
					 (qdf_nbuf_is_da_mcbc(nbuf)) &&
					 (hal_rx_get_mpdu_mac_ad4_valid_be
						(rx_tlv_hdr) == false))) {
				tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.nawds_mcast_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}

			/* Update the protocol tag in SKB based on CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  reo_ring_num, false, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);

			if (qdf_likely(vdev->rx_decap_type ==
				       htt_cmn_pkt_type_ethernet) &&
			    qdf_likely(!vdev->mesh_vdev)) {
				dp_rx_wds_learn(soc, vdev,
						rx_tlv_hdr,
						txrx_peer,
						nbuf,
						msdu_metadata);
			}

			if (qdf_unlikely(vdev->mesh_vdev)) {
				if (dp_rx_filter_mesh_packets(vdev, nbuf,
							      rx_tlv_hdr)
						== QDF_STATUS_SUCCESS) {
					dp_rx_info("%pK: mesh pkt filtered",
						   soc);
					tid_stats->fail_cnt[MESH_FILTER_DROP]++;
					DP_STATS_INC(vdev->pdev,
						     dropped.mesh_filter, 1);

					dp_rx_nbuf_free(nbuf);
					nbuf = next;
					continue;
				}
				dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
						      txrx_peer);
			}
		}

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats);

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);

		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
					  QDF_NBUF_CB_RX_PKT_LEN(nbuf),
					  enh_flag);
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	/*
	 * If we are processing in near-full condition, there are 3 scenarios
	 * 1) Ring entries have reached critical state
	 * 2) Ring entries are still near high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
	if (ring_near_full && quota)
		goto more_data;

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_pool_init_be_cc() - initialize RX desc pool for cookie conversion
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   uint32_t pool_id)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_spt_page_desc *page_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;

	if (!rx_desc_pool->pool_size) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	page_desc = &cc_ctx->page_desc_base[0];
	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each RX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 &rx_desc_elem->rx_desc);
		rx_desc_elem->rx_desc.cookie =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		rx_desc_elem->rx_desc.chip_id = dp_mlo_get_chip_id(soc);
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;

		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   uint32_t pool_id)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;
	int i = 0;

	if (!rx_desc_pool->pool_size) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];
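	/* Cookie layout note: dp_cc_desc_id_generate() packs the secondary
	 * page table index and the entry index within that page, so the
	 * cookie can later be converted straight back to the descriptor VA
	 * (see dp_cc_desc_find()) without any search.
	 */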
	page_desc = &cc_ctx->page_desc_base[0];
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];

		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each RX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 &rx_desc_pool->array[i].rx_desc);
		rx_desc_pool->array[i].rx_desc.cookie =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
		rx_desc_pool->array[i].rx_desc.chip_id =
			dp_mlo_get_chip_id(soc);

		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}
	return QDF_STATUS_SUCCESS;
}
#endif

static void
dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t pool_id)
{
	struct dp_spt_page_desc *page_desc;
	struct dp_soc_be *be_soc;
	int i = 0;
	struct dp_hw_cookie_conversion_t *cc_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	for (i = 0; i < cc_ctx->total_page_num; i++) {
		page_desc = &cc_ctx->page_desc_base[i];
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
	}
}

QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* Only regular RX buffer desc pool use HW cookie conversion */
	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE) {
		dp_info("rx_desc_buf pool init");
		status = dp_rx_desc_pool_init_be_cc(soc,
						    rx_desc_pool,
						    pool_id);
	} else {
		dp_info("non_rx_desc_buf_pool init");
		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
						      pool_id);
	}

	return status;
}

void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE)
		dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
}

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
		/* HW cookie conversion done */
		*r_rx_desc = (struct dp_rx_desc *)
				hal_rx_wbm_get_desc_va(ring_desc);
	} else {
		/* SW do cookie conversion */
		uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc, cookie);
	}

	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	*r_rx_desc = (struct dp_rx_desc *)
			hal_rx_wbm_get_desc_va(ring_desc);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	/* SW do cookie conversion */
	uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

	*r_rx_desc = (struct dp_rx_desc *)
			dp_cc_desc_find(soc, cookie);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */

struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie)
{
	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
}

#if defined(WLAN_FEATURE_11BE_MLO)
#if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
#define DP_RANDOM_MAC_ID_BIT_MASK	0xC0
#define DP_RANDOM_MAC_OFFSET	1
#define DP_MAC_LOCAL_ADMBIT_MASK	0x2
#define DP_MAC_LOCAL_ADMBIT_OFFSET	0
static inline void dp_rx_dummy_src_mac(struct dp_vdev *vdev,
				       qdf_nbuf_t nbuf)
{
	uint8_t random_mac[QDF_MAC_ADDR_SIZE] = {0};
	qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	qdf_mem_copy(random_mac, &vdev->mld_mac_addr.raw[0], QDF_MAC_ADDR_SIZE);
	random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] =
		random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] |
		DP_MAC_LOCAL_ADMBIT_MASK;
	random_mac[DP_RANDOM_MAC_OFFSET] =
		random_mac[DP_RANDOM_MAC_OFFSET] ^ DP_RANDOM_MAC_ID_BIT_MASK;

	qdf_mem_copy(&eh->ether_shost[0], random_mac, QDF_MAC_ADDR_SIZE);
}

#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
{
	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
}
#else
static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
{
	return false;
}
#endif

bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf)
{
	struct dp_vdev *mcast_primary_vdev = NULL;
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &peer->vdev->pdev->stats.
					tid_stats.tid_rx_wbm_stats[0][tid];
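	/* Non-IGMP frames return false here so the caller delivers them
	 * normally; IGMP is either sent on the link it arrived on (ME,
	 * NAWDS, backhaul WDS and WDS-ext cases below) or re-routed to the
	 * MLO mcast primary vdev with a dummy source MAC derived from the
	 * MLD address.
	 */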
	if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
	      qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
		return false;

	if (qdf_unlikely(vdev->multipass_en)) {
		if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
			DP_PEER_PER_PKT_STATS_INC(peer,
						  rx.multipass_rx_pkt_drop, 1);
			return false;
		}
	}

	if (!peer->bss_peer) {
		if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf, tid_stats))
			dp_rx_err("forwarding failed");
	}

	/*
	 * In the case of ME6, Backhaul WDS, NAWDS
	 * send the igmp pkt on the same link where it received,
	 * as these features will use peer based tcl metadata
	 */

	qdf_nbuf_set_next(nbuf, NULL);

	if (vdev->mcast_enhancement_en || be_vdev->mcast_primary ||
	    peer->nawds_enabled)
		goto send_pkt;

	if (qdf_unlikely(dp_rx_mlo_igmp_wds_ext_handler(peer)))
		goto send_pkt;

	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc, be_vdev,
							   DP_MOD_ID_RX);
	if (!mcast_primary_vdev) {
		dp_rx_debug("Non mlo vdev");
		goto send_pkt;
	}

	if (qdf_unlikely(vdev->wrap_vdev)) {
		/* In the case of qwrap repeater send the original
		 * packet on the interface where it received,
		 * packet with dummy src on the mcast primary interface.
		 */
		qdf_nbuf_t nbuf_copy;

		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (qdf_likely(nbuf_copy))
			dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_copy,
					       NULL);
	}

	dp_rx_dummy_src_mac(vdev, nbuf);
	dp_rx_deliver_to_stack(mcast_primary_vdev->pdev->soc,
			       mcast_primary_vdev,
			       peer,
			       nbuf,
			       NULL);
	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
			     mcast_primary_vdev,
			     DP_MOD_ID_RX);
	return true;
send_pkt:
	dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
			       &be_vdev->vdev,
			       peer,
			       nbuf,
			       NULL);
	return true;
}
#else
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	struct dp_soc *soc = int_ctx->soc;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, rx_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&rx_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
 * @ta_peer: transmitter peer handle
 * @da_peer: destination peer handle
 *
 * Return: true - MLO forwarding case, false: not
 */
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
			     struct dp_txrx_peer *da_peer)
{
	/* one of TA/DA peer should belong to MLO connection peer,
	 * only MLD peer type is as expected
	 */
	if (!IS_MLO_DP_MLD_TXRX_PEER(ta_peer) &&
	    !IS_MLO_DP_MLD_TXRX_PEER(da_peer))
		return false;
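	/* Partner vdevs of one MLD share the MLD MAC address, so the
	 * address compare below is a sufficient partner-vdev test.
	 */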
	/* TA peer and DA peer's vdev should be partner MLO vdevs */
	if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
				      &da_peer->vdev->mld_mac_addr))
		return false;

	return true;
}
#else
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
			     struct dp_txrx_peer *da_peer)
{
	return false;
}
#endif

#ifdef INTRA_BSS_FWD_OFFLOAD
/**
 * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
 *				     for unicast frame
 * @nbuf: RX packet buffer
 * @ta_peer: transmitter DP peer handle
 * @msdu_metadata: MSDU meta data info
 * @params: Intra-BSS params carrying dest soc and vdev id for Intra-BSS TX
 *
 * Return: true - intrabss allowed
 *	   false - not allowed
 */
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_intra_bss(nbuf))
		return false;

	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
						params->dest_soc,
						msdu_metadata->da_idx);
	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;
	params->tx_vdev_id = da_peer->vdev->vdev_id;
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	return true;
}
#else
#ifdef WLAN_MLO_MULTI_CHIP
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	bool ret = false;
	uint8_t dest_chip_id;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev_be *be_vdev =
		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
	struct dp_soc_be *be_soc =
		dp_get_be_soc_from_dp_soc(params->dest_soc);

	if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
		return false;

	dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
	qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);

	/* use dest chip id when TA is MLD peer and DA is legacy */
	if (be_soc->mlo_enabled &&
	    ta_peer->mld_peer &&
	    !(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
		/* validate chip_id, get a ref, and re-assign soc */
		params->dest_soc =
			dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
						      dest_chip_id);
		if (!params->dest_soc)
			return false;

		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
						     da_peer_id,
						     &txrx_ref_handle,
						     DP_MOD_ID_RX);
		if (!da_peer)
			return false;
	} else {
		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
						     da_peer_id,
						     &txrx_ref_handle,
						     DP_MOD_ID_RX);
		if (!da_peer)
			return false;

		params->dest_soc = da_peer->vdev->pdev->soc;
		if (!params->dest_soc)
			goto rel_da_peer;
	}

	params->tx_vdev_id = da_peer->vdev->vdev_id;

	/* If the source or destination peer is in the isolation
	 * list then don't forward; instead push to bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer)) {
		ret = false;
		goto rel_da_peer;
	}

	if (da_peer->bss_peer || (da_peer == ta_peer)) {
		ret = false;
		goto rel_da_peer;
	}

	/* Same vdev, support Intra-BSS */
	if (da_peer->vdev == ta_peer->vdev) {
		ret = true;
		goto rel_da_peer;
	}

	/* MLO specific Intra-BSS check */
	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
		/* use dest chip id for legacy dest peer */
		if (!(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
			if (!(be_vdev->partner_vdev_list[dest_chip_id][0] ==
			      params->tx_vdev_id) &&
			    !(be_vdev->partner_vdev_list[dest_chip_id][1] ==
			      params->tx_vdev_id)) {
				/*dp_soc_unref_delete(soc);*/
				goto rel_da_peer;
			}
		}
		ret = true;
	}

rel_da_peer:
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return ret;
}
#else
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	bool ret = false;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
						params->dest_soc,
						msdu_metadata->da_idx);

	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	params->tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation
	 * list then don't forward; instead push to bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer))
		goto rel_da_peer;

	if (da_peer->bss_peer || da_peer == ta_peer)
		goto rel_da_peer;

	/* Same vdev, support Intra-BSS */
	if (da_peer->vdev == ta_peer->vdev) {
		ret = true;
		goto rel_da_peer;
	}

	/* MLO specific Intra-BSS check */
	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
		ret = true;
		goto rel_da_peer;
	}

rel_da_peer:
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return ret;
}
#endif /* WLAN_MLO_MULTI_CHIP */
#endif /* INTRA_BSS_FWD_OFFLOAD */

#if defined(QCA_MONITOR_2_0_SUPPORT) || defined(CONFIG_WORD_BASED_TLV)
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter)
{
	struct htt_rx_ring_tlv_filter *tlv_filter =
				(struct htt_rx_ring_tlv_filter *)rx_filter;

	if (!msg_word || !tlv_filter)
		return;

	/* if word mask is zero, FW will set the default values */
	if (!(tlv_filter->rx_mpdu_start_wmask > 0 &&
	      tlv_filter->rx_msdu_end_wmask > 0)) {
		msg_word += 4;
		*msg_word = 0;
		goto config_mon;
	}

	HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACTION_ENABLE_SET(*msg_word, 1);

	/* word 14 */
	msg_word += 3;
	*msg_word = 0;

	HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_WORD_MASK_SET(
				*msg_word,
				tlv_filter->rx_mpdu_start_wmask);

	/* word 15 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_WORD_MASK_SET(
				*msg_word,
				tlv_filter->rx_msdu_end_wmask);
config_mon:
	msg_word--;
	dp_mon_rx_wmask_subscribe(soc, msg_word, tlv_filter);
}
#else
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter)
{
}
#endif

/**
 * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc,
			       struct dp_txrx_peer *ta_txrx_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats)
{
	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);

		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
		tx_exc_metadata.is_intrabss_fwd = 1;
		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;
		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
					 ta_txrx_peer->vdev->vdev_id,
					 nbuf_copy,
					 &tx_exc_metadata)) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.fail, 1,
						      len);
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.pkts, 1,
						      len);
			tid_stats->intrabss_cnt++;
		}
		return true;
	}
	return false;
}
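/*
 * dp_rx_intrabss_fwd_be() below is the per-packet intra-BSS entry point:
 * mcbc frames take the clone-and-forward path, EAPOL towards the AP is
 * filtered by dp_rx_intrabss_eapol_drop_check(), and unicast goes through
 * the chip/vdev checks in dp_rx_intrabss_ucast_check_be().
 */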
/**
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
 *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata)
{
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
					tid_stats.tid_rx_stats[ring_id][tid];
	bool ret = false;
	struct dp_be_intrabss_params params;

	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
					       nbuf, tid_stats);
	}

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	params.dest_soc = soc;
	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer,
					  &msdu_metadata, &params)) {
		ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
					       params.tx_vdev_id,
					       rx_tlv_hdr, nbuf, tid_stats);
	}

	return ret;
}
#endif

bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!dp_pdev) {
		dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return mpdu_done;
	}
	/* if invalid peer SG list has max values free the buffers in list
	 * and treat current buffer as start of list
	 *
	 * current logic to detect the last buffer from attn_tlv is not reliable
	 * in OFDMA UL scenario hence add max buffers check to avoid list pile
	 * up
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
		(dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			dp_rx_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
	}

	if (qdf_nbuf_is_rx_chfrag_end(nbuf) &&
	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		qdf_assert_always(dp_pdev->first_nbuf);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; the check is added for debugging purposes
	 * in case of some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}