/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_be_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_be_rx.h"
#include "hal_api.h"
#include "hal_be_api.h"
#include "qdf_nbuf.h"
#include "hal_be_rx_tlv.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
static inline void
dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_set_rx_flow_idx_invalid(nbuf,
				hal_rx_msdu_flow_idx_invalid_be(rx_tlv_hdr));
	qdf_nbuf_set_rx_flow_idx_timeout(nbuf,
				hal_rx_msdu_flow_idx_timeout_be(rx_tlv_hdr));
}
#else
static inline void
dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
}
#endif

#ifndef AST_OFFLOAD_ENABLE
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *txrx_peer,
		qdf_nbuf_t nbuf,
		struct hal_rx_msdu_metadata msdu_metadata)
{
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc,
					rx_tlv_hdr,
					txrx_peer,
					nbuf,
					msdu_metadata);
}
#else
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_peer_learn_be() - function to send event to control
 * path on receiving 1st 4-address frame from backhaul.
 * @soc: DP soc
 * @ta_txrx_peer: WDS repeater txrx peer
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: RX packet buffer
 *
 * Return: void
 */
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr,
					    qdf_nbuf_t nbuf)
{
	uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
	struct dp_peer *ta_base_peer;

	/* instead of checking addr4 is valid or not in per packet path
	 * check for init bit, which will be set on reception of
	 * first addr4 valid packet.
	 */
	if (!ta_txrx_peer->vdev->wds_ext_enabled ||
	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
				&ta_txrx_peer->wds_ext.init))
		return;

	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)) {
		qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
					    &ta_txrx_peer->wds_ext.init);

		ta_base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
						     DP_MOD_ID_RX);

		if (!ta_base_peer)
			return;

		qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);

		soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
						soc->ctrl_psoc,
						ta_txrx_peer->peer_id,
						ta_txrx_peer->vdev->vdev_id,
						wds_ext_src_mac);
	}
}
#else
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr,
					    qdf_nbuf_t nbuf)
{
}
#endif
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *ta_txrx_peer,
		qdf_nbuf_t nbuf,
		struct hal_rx_msdu_metadata msdu_metadata)
{
	dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
}
#endif

#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
	uint8_t lmac_id;

	lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
	qdf_nbuf_set_lmac_id(nbuf, lmac_id);
}
#else
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
#endif

/**
 * dp_rx_process_be() - Brain of the Rx processing functionality
 *		       Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_ring_desc_t last_prefetched_hw_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	uint32_t num_pending = 0;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	bool enh_flag;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t num_entries_avail = 0;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	int max_reap_limit, ring_near_full;
	struct dp_soc *replenish_soc;
	uint8_t chip_id;
	uint64_t current_time = 0;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
	dp_runtime_pm_mark_last_busy(soc);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	ring_near_full = 0;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

	ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
							    &max_reap_limit);

	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);

	if (!num_pending)
		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	if (num_pending > quota)
		num_pending = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
							    hal_ring_hdl,
							    num_pending);
	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(num_pending)) {
		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc))
			break;

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = (struct dp_rx_desc *)
				hal_rx_get_reo_desc_va(ring_desc);
		dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);

		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				rx_desc->unmapped = 1;
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
					&head[rx_desc->chip_id][rx_desc->pool_id],
					&tail[rx_desc->chip_id][rx_desc->pool_id],
					rx_desc);
			}
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */

		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);

		/* Set the end bit to identify the last buffer in MPDU */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* In dp_rx_sg_create() until the last buffer,
			 * end bit should not be set. As continuation bit set,
			 * this is not a last buffer.
			 */
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 0);

			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* Get number of entries available in HW ring */
				num_entries_avail =
					hal_srng_dst_num_valid(hal_soc,
							       hal_ring_hdl, 1);

				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (RX_DATA_BUFFER_SIZE -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								     ring_desc);
					/* As we are going to break out of the
					 * loop because of unavailability of
					 * descs to form complete SG, we need to
					 * reset the TP in the REO destination
					 * ring.
					 */
					hal_srng_dst_dec_tp(hal_soc,
							    hal_ring_hdl);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    !(msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION))
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;

		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
		dp_rx_set_msdu_lmac_id(rx_desc->nbuf, peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
			qdf_nbuf_set_intra_bss(rx_desc->nbuf, 1);

		if (qdf_likely(mpdu_desc_info.mpdu_flags &
			       HAL_MPDU_F_QOS_CONTROL_VALID))
			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);

		/* set sw exception */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				hal_rx_sw_exception_get_be(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * move unmap after scattered msdu waiting break logic
		 * in case double skb unmap happened.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		rx_desc->unmapped = 1;
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);

		quota -= 1;
		num_pending -= 1;

		dp_rx_add_to_free_desc_list
			(&head[rx_desc->chip_id][rx_desc->pool_id],
			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc,
						       num_pending,
						       hal_ring_hdl,
						       &last_prefetched_hw_desc,
						       &last_prefetched_sw_desc);

		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
	qdf_dsb();

	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
			/*
			 * continue with next mac_id if no pkts were reaped
			 * from that pool
			 */
			if (!rx_bufs_reaped[chip_id][mac_id])
				continue;

			replenish_soc = dp_rx_replensih_soc_get(soc, chip_id);

			dp_rxdma_srng =
				&replenish_soc->rx_refill_buf_ring[mac_id];

			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
					       dp_rxdma_srng,
					       rx_desc_pool,
					       rx_bufs_reaped[chip_id][mac_id],
					       &head[chip_id][mac_id],
					       &tail[chip_id][mac_id]);
		}
	}

	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data_be(nbuf, next);
		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		tid = qdf_nbuf_get_tid_val(nbuf);
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
			DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
								 peer_id,
								 &txrx_ref_handle,
								 pkt_capture_offload,
								 &vdev,
								 &rx_pdev, &dsf,
								 &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
			enh_flag = rx_pdev->enhanced_stats_en;
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
								 peer_id,
								 &txrx_ref_handle,
								 pkt_capture_offload,
								 &vdev,
								 &rx_pdev, &dsf,
								 &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
			enh_flag = rx_pdev->enhanced_stats_en;
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		if (qdf_likely(old_tid != tid)) {
			tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
				 !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) {
			dp_err("MSDU DONE failure");
			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
			dp_rx_nbuf_free(nbuf);
			qdf_assert(0);
			nbuf = next;
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr,
							   &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len);
			} else {
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);

				if (!dp_rx_is_sg_supported()) {
					dp_rx_nbuf_free(nbuf);
					dp_info_rl("sg msdu len %d, dropped",
						   msdu_len);
					nbuf = next;
					continue;
				}
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop, 1);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
		dp_rx_update_flow_info(nbuf, rx_tlv_hdr);

		if (qdf_unlikely(!rx_pdev->rx_fast_flag)) {
			/*
			 * process frame for multipass phase processing
			 */
			if (qdf_unlikely(vdev->multipass_en)) {
				if (dp_rx_multipass_process(txrx_peer, nbuf,
							    tid) == false) {
					DP_PEER_PER_PKT_STATS_INC
						(txrx_peer,
						 rx.multipass_rx_pkt_drop, 1);
					dp_rx_nbuf_free(nbuf);
					nbuf = next;
					continue;
				}
			}
			if (qdf_unlikely(txrx_peer &&
					 (txrx_peer->nawds_enabled) &&
					 (qdf_nbuf_is_da_mcbc(nbuf)) &&
					 (hal_rx_get_mpdu_mac_ad4_valid_be
						(rx_tlv_hdr) == false))) {
				tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.nawds_mcast_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}

			/* Update the protocol tag in SKB based on CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  reo_ring_num, false, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);

			if (qdf_likely(vdev->rx_decap_type ==
				       htt_cmn_pkt_type_ethernet) &&
			    qdf_likely(!vdev->mesh_vdev)) {
				dp_rx_wds_learn(soc, vdev,
						rx_tlv_hdr,
						txrx_peer,
						nbuf,
						msdu_metadata);
			}

			if (qdf_unlikely(vdev->mesh_vdev)) {
				if (dp_rx_filter_mesh_packets(vdev, nbuf,
							      rx_tlv_hdr)
						== QDF_STATUS_SUCCESS) {
					dp_rx_info("%pK: mesh pkt filtered",
						   soc);
					tid_stats->fail_cnt[MESH_FILTER_DROP]++;
					DP_STATS_INC(vdev->pdev,
						     dropped.mesh_filter, 1);

					dp_rx_nbuf_free(nbuf);
					nbuf = next;
					continue;
				}
				dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
						      txrx_peer);
			}
		}

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats);

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);

		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
					  QDF_NBUF_CB_RX_PKT_LEN(nbuf),
					  enh_flag);
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	/*
	 * If we are processing in near-full condition, there are 3 scenarios
	 * 1) Ring entries have reached critical state
	 * 2) Ring entries are still near high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
	if (ring_near_full && quota)
		goto more_data;

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_pool_init_be_cc() - initial RX desc pool for cookie conversion
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   uint32_t pool_id)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_spt_page_desc *page_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;

	if (!rx_desc_pool->pool_size) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	page_desc = &cc_ctx->page_desc_base[0];
	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each RX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 &rx_desc_elem->rx_desc);
		rx_desc_elem->rx_desc.cookie =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		rx_desc_elem->rx_desc.chip_id = dp_mlo_get_chip_id(soc);
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;

		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   uint32_t pool_id)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;
	int i = 0;

	if (!rx_desc_pool->pool_size) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	page_desc = &cc_ctx->page_desc_base[0];
	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];

		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each RX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 &rx_desc_pool->array[i].rx_desc);
		rx_desc_pool->array[i].rx_desc.cookie =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
		rx_desc_pool->array[i].rx_desc.chip_id =
			dp_mlo_get_chip_id(soc);

		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}
	return QDF_STATUS_SUCCESS;
}
#endif

static void
dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t pool_id)
{
	struct dp_spt_page_desc *page_desc;
	struct dp_soc_be *be_soc;
	int i = 0;
	struct dp_hw_cookie_conversion_t *cc_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	for (i = 0; i < cc_ctx->total_page_num; i++) {
		page_desc = &cc_ctx->page_desc_base[i];
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
	}
}

QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* Only regular RX buffer desc pool use HW cookie conversion */
	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE) {
		dp_info("rx_desc_buf pool init");
		status = dp_rx_desc_pool_init_be_cc(soc,
						    rx_desc_pool,
						    pool_id);
	} else {
		dp_info("non_rx_desc_buf_pool init");
		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
						      pool_id);
	}

	return status;
}

void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE)
		dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
}

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
		/* HW cookie conversion done */
		*r_rx_desc = (struct dp_rx_desc *)
				hal_rx_wbm_get_desc_va(ring_desc);
	} else {
		/* SW do cookie conversion */
		uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc, cookie);
	}

	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	*r_rx_desc = (struct dp_rx_desc *)
			hal_rx_wbm_get_desc_va(ring_desc);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	/* SW do cookie conversion */
	uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

	*r_rx_desc = (struct dp_rx_desc *)
			dp_cc_desc_find(soc, cookie);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */

struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie)
{
	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
}

#if defined(WLAN_FEATURE_11BE_MLO)
#if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
#define DP_RANDOM_MAC_ID_BIT_MASK	0xC0
#define DP_RANDOM_MAC_OFFSET	1
#define DP_MAC_LOCAL_ADMBIT_MASK	0x2
#define DP_MAC_LOCAL_ADMBIT_OFFSET	0
static inline void dp_rx_dummy_src_mac(struct dp_vdev *vdev,
				       qdf_nbuf_t nbuf)
{
	uint8_t random_mac[QDF_MAC_ADDR_SIZE] = {0};
	qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	qdf_mem_copy(random_mac, &vdev->mld_mac_addr.raw[0], QDF_MAC_ADDR_SIZE);
	random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] =
		random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] |
		DP_MAC_LOCAL_ADMBIT_MASK;
	random_mac[DP_RANDOM_MAC_OFFSET] =
		random_mac[DP_RANDOM_MAC_OFFSET] ^ DP_RANDOM_MAC_ID_BIT_MASK;

	qdf_mem_copy(&eh->ether_shost[0], random_mac, QDF_MAC_ADDR_SIZE);
}

#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
{
	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
}
#else
static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
{
	return false;
}
#endif

bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf)
{
	struct dp_vdev *mcast_primary_vdev = NULL;
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &peer->vdev->pdev->stats.
					tid_stats.tid_rx_wbm_stats[0][tid];

	if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
	      qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
		return false;

	if (qdf_unlikely(vdev->multipass_en)) {
		if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
			DP_PEER_PER_PKT_STATS_INC(peer,
						  rx.multipass_rx_pkt_drop, 1);
			return false;
		}
	}

	if (!peer->bss_peer) {
		if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf, tid_stats))
			dp_rx_err("forwarding failed");
	}

	/*
	 * In the case of ME6, Backhaul WDS, NAWDS
	 * send the igmp pkt on the same link where it received,
	 * as these features will use peer based tcl metadata
	 */

	qdf_nbuf_set_next(nbuf, NULL);

	if (vdev->mcast_enhancement_en || be_vdev->mcast_primary ||
	    peer->nawds_enabled)
		goto send_pkt;

	if (qdf_unlikely(dp_rx_mlo_igmp_wds_ext_handler(peer)))
		goto send_pkt;

	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc, be_vdev,
							   DP_MOD_ID_RX);
	if (!mcast_primary_vdev) {
		dp_rx_debug("Non mlo vdev");
		goto send_pkt;
	}

	if (qdf_unlikely(vdev->wrap_vdev)) {
		/* In the case of qwrap repeater send the original
		 * packet on the interface where it received,
		 * packet with dummy src on the mcast primary interface.
		 */
		qdf_nbuf_t nbuf_copy;

		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (qdf_likely(nbuf_copy))
			dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_copy,
					       NULL);
	}

	if (qdf_nbuf_is_ipv4_igmp_leave_pkt(nbuf) ||
	    qdf_nbuf_is_ipv6_igmp_leave_pkt(nbuf)) {
		qdf_nbuf_free(nbuf);
		dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
				     mcast_primary_vdev,
				     DP_MOD_ID_RX);
		return true;
	}

	dp_rx_dummy_src_mac(vdev, nbuf);
	dp_rx_deliver_to_stack(mcast_primary_vdev->pdev->soc,
			       mcast_primary_vdev,
			       peer,
			       nbuf,
			       NULL);
	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
			     mcast_primary_vdev,
			     DP_MOD_ID_RX);
	return true;
send_pkt:
	dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
			       &be_vdev->vdev,
			       peer,
			       nbuf,
			       NULL);
	return true;
}
#else
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	struct dp_soc *soc = int_ctx->soc;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, rx_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&rx_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
 * @ta_peer: transmitter peer handle
 * @da_peer: destination peer handle
 *
 * Return: true - MLO forwarding case, false: not
 */
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
			     struct dp_txrx_peer *da_peer)
{
	/* TA peer and DA peer's vdev should be partner MLO vdevs */
	if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
				      &da_peer->vdev->mld_mac_addr))
		return false;

	return true;
}
#else
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
			     struct dp_txrx_peer *da_peer)
{
	return false;
}
#endif

#ifdef INTRA_BSS_FWD_OFFLOAD
/**
 * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
 *				     for unicast frame
 * @nbuf: RX packet buffer
 * @ta_peer: transmitter DP peer handle
 * @rx_tlv_hdr: Rx TLV header
 * @msdu_metadata: MSDU meta data info
 * @params: params to be filled in
 *
 * Return: true - intrabss allowed
 *	   false - not allow
 */
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      uint8_t *rx_tlv_hdr,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint8_t dest_chip_id, dest_chip_pmac_id;
	struct dp_vdev_be *be_vdev =
		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
	struct dp_soc_be *be_soc =
		dp_get_be_soc_from_dp_soc(params->dest_soc);

	if (!qdf_nbuf_is_intra_bss(nbuf))
		return false;

	hal_rx_tlv_get_dest_chip_pmac_id(rx_tlv_hdr,
					 &dest_chip_id,
					 &dest_chip_pmac_id);
	qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));

	if (dest_chip_id == be_soc->mlo_chip_id) {
		/* TODO: adding to self list is better */
		params->tx_vdev_id = ta_peer->vdev->vdev_id;
		return true;
	}

	params->dest_soc =
		dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
					      dest_chip_id);
	if (!params->dest_soc)
		return false;

	params->tx_vdev_id =
		be_vdev->partner_vdev_list[dest_chip_id][dest_chip_pmac_id];

	return true;
}
#else
#ifdef WLAN_MLO_MULTI_CHIP
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      uint8_t *rx_tlv_hdr,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	bool ret = false;
	uint8_t dest_chip_id;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev_be *be_vdev =
		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
	struct dp_soc_be *be_soc =
		dp_get_be_soc_from_dp_soc(params->dest_soc);

	if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
		return false;

	dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
	qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);

	/* use dest chip id when TA is MLD peer and DA is legacy */
	if (be_soc->mlo_enabled &&
	    ta_peer->mld_peer &&
	    !(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
		/* validate chip_id, get a ref, and re-assign soc */
		params->dest_soc =
			dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
						      dest_chip_id);
		if (!params->dest_soc)
			return false;

		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
						     da_peer_id,
						     &txrx_ref_handle,
						     DP_MOD_ID_RX);
		if (!da_peer)
			return false;

	} else {
		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
						     da_peer_id,
						     &txrx_ref_handle,
						     DP_MOD_ID_RX);
		if (!da_peer)
			return false;

		params->dest_soc = da_peer->vdev->pdev->soc;
		if (!params->dest_soc)
			goto rel_da_peer;

	}

	params->tx_vdev_id = da_peer->vdev->vdev_id;

	/* If the source or destination peer is in the isolation
	 * list, then don't forward; instead push to bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer)) {
		ret = false;
		goto rel_da_peer;
	}

	if (da_peer->bss_peer || (da_peer == ta_peer)) {
		ret = false;
		goto rel_da_peer;
	}

	/* Same vdev, support Intra-BSS */
	if (da_peer->vdev == ta_peer->vdev) {
		ret = true;
		goto rel_da_peer;
	}

	/* MLO specific Intra-BSS check */
	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
		/* use dest chip id for legacy dest peer */
		if (!(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
			if (!(be_vdev->partner_vdev_list[dest_chip_id][0] ==
			      params->tx_vdev_id) &&
			    !(be_vdev->partner_vdev_list[dest_chip_id][1] ==
			      params->tx_vdev_id)) {
				/*dp_soc_unref_delete(soc);*/
				goto rel_da_peer;
			}
		}
		ret = true;
	}

rel_da_peer:
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return ret;
}
#else
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      uint8_t *rx_tlv_hdr,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	bool ret = false;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
							params->dest_soc,
							msdu_metadata->da_idx);

	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	params->tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation
	 * list, then don't forward; instead push to bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer))
		goto rel_da_peer;

	if (da_peer->bss_peer || da_peer == ta_peer)
		goto rel_da_peer;

	/* Same vdev, support Intra-BSS */
	if (da_peer->vdev == ta_peer->vdev) {
		ret = true;
		goto rel_da_peer;
	}

	/* MLO specific Intra-BSS check */
	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
		ret = true;
		goto rel_da_peer;
	}

rel_da_peer:
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return ret;
}
#endif /* WLAN_MLO_MULTI_CHIP */
#endif /* INTRA_BSS_FWD_OFFLOAD */

#if defined(QCA_MONITOR_2_0_SUPPORT) || defined(CONFIG_WORD_BASED_TLV)
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter)
{
	struct htt_rx_ring_tlv_filter *tlv_filter =
				(struct htt_rx_ring_tlv_filter *)rx_filter;

	if (!msg_word || !tlv_filter)
		return;

	/* if word mask is zero, FW will set the default values */
	if (!(tlv_filter->rx_mpdu_start_wmask > 0 &&
	      tlv_filter->rx_msdu_end_wmask > 0)) {
		msg_word += 4;
		*msg_word = 0;
		goto config_mon;
	}

	HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACTION_ENABLE_SET(*msg_word, 1);

	/* word 14 */
	msg_word += 3;
	*msg_word = 0;

	HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_WORD_MASK_SET(
				*msg_word,
				tlv_filter->rx_mpdu_start_wmask);

	/* word 15 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_WORD_MASK_SET(
				*msg_word,
				tlv_filter->rx_msdu_end_wmask);
config_mon:
	msg_word--;
	dp_mon_rx_wmask_subscribe(soc, msg_word, tlv_filter);
}
#else
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter)
{
}
#endif

#if defined(WLAN_MCAST_MLO) && defined(CONFIG_MLO_SINGLE_DEV)
static inline
bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
				 qdf_nbuf_t nbuf_copy)
{
	struct dp_vdev *mcast_primary_vdev = NULL;
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct cdp_tx_exception_metadata tx_exc_metadata = {0};

	if (!vdev->mlo_vdev)
		return false;

	tx_exc_metadata.is_mlo_mcast = 1;
	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc,
							   be_vdev,
							   DP_MOD_ID_RX);

	if (!mcast_primary_vdev)
		return false;

	nbuf_copy = dp_tx_send_exception((struct cdp_soc_t *)
					 mcast_primary_vdev->pdev->soc,
					 mcast_primary_vdev->vdev_id,
					 nbuf_copy, &tx_exc_metadata);

	if (nbuf_copy)
		qdf_nbuf_free(nbuf_copy);

	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
			     mcast_primary_vdev, DP_MOD_ID_RX);
	return true;
}
#else
static inline
bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
				 qdf_nbuf_t nbuf_copy)
{
	return false;
}
#endif
/**
 * dp_rx_intrabss_mcast_handler_be() - handler for mcast packets
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
				struct dp_txrx_peer *ta_txrx_peer,
				qdf_nbuf_t nbuf_copy,
				struct cdp_tid_rx_stats *tid_stats)
{
	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);

		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
		tx_exc_metadata.is_intrabss_fwd = 1;
		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;

		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
					 ta_txrx_peer->vdev->vdev_id,
					 nbuf_copy,
					 &tx_exc_metadata)) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.fail, 1,
						      len);
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.pkts, 1,
						      len);
			tid_stats->intrabss_cnt++;
		}
		return true;
	}

	if (dp_rx_intrabss_mlo_mcbc_fwd(soc, ta_txrx_peer->vdev,
					nbuf_copy))
		return true;

	return false;
}

/*
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
 *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata)
{
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
					tid_stats.tid_rx_stats[ring_id][tid];
	bool ret = false;
	struct dp_be_intrabss_params params;

	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
					       nbuf, tid_stats);
	}

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	params.dest_soc = soc;
	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer, rx_tlv_hdr,
					  &msdu_metadata, &params)) {
		ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
					       params.tx_vdev_id,
					       rx_tlv_hdr, nbuf, tid_stats);
	}

	return ret;
}
#endif

bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!dp_pdev) {
		dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return mpdu_done;
	}
	/* if invalid peer SG list has max values free the buffers in list
	 * and treat current buffer as start of list
	 *
	 * current logic to detect the last buffer from attn_tlv is not reliable
	 * in OFDMA UL scenario hence add max buffers check to avoid list pile
	 * up
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
		(dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			dp_rx_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
	}

	if (qdf_nbuf_is_rx_chfrag_end(nbuf) &&
	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		qdf_assert_always(dp_pdev->first_nbuf);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here, add the checking for debugging purpose
	 * in case some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}