/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_be_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_be_rx.h"
#include "hal_api.h"
#include "hal_be_api.h"
#include "qdf_nbuf.h"
#include "hal_be_rx_tlv.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

#ifndef AST_OFFLOAD_ENABLE
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *txrx_peer,
		qdf_nbuf_t nbuf,
		struct hal_rx_msdu_metadata msdu_metadata)
{
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc,
					rx_tlv_hdr,
					txrx_peer,
					nbuf,
					msdu_metadata);
}
#else
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_peer_learn_be() - function to send event to control
 * path on receiving 1st 4-address frame from backhaul.
 * @soc: DP soc
 * @ta_txrx_peer: WDS repeater txrx peer
 * @rx_tlv_hdr: start address of rx tlvs
 *
 * Return: void
 */
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr)
{
	uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
	struct dp_peer *ta_base_peer;

	/* instead of checking addr4 is valid or not in per packet path
	 * check for init bit, which will be set on reception of
	 * first addr4 valid packet.
	 */
	if (!ta_txrx_peer->vdev->wds_ext_enabled ||
	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
				&ta_txrx_peer->wds_ext.init))
		return;

	if (hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)) {
		qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
					    &ta_txrx_peer->wds_ext.init);

		ta_base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
						     DP_MOD_ID_RX);

		if (!ta_base_peer)
			return;

		qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);

		soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
						soc->ctrl_psoc,
						ta_txrx_peer->peer_id,
						ta_txrx_peer->vdev->vdev_id,
						wds_ext_src_mac);
	}
}
#else
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr)
{
}
#endif
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *ta_txrx_peer,
		qdf_nbuf_t nbuf,
		struct hal_rx_msdu_metadata msdu_metadata)
{
	dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr);
}
#endif

/**
 * dp_rx_process_be() - Brain of the Rx processing functionality
 *			Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t num_pending;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	bool enh_flag;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t core_id = 0;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t num_entries_avail = 0;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	int max_reap_limit, ring_near_full;
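	/* SOC whose RXDMA refill rings are replenished below; resolved per
	 * REO ring and may differ from the reaping SOC (e.g. in multi-chip
	 * MLO configurations) - descriptive note.
	 */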
	struct dp_soc *replenish_soc;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	hif_pm_runtime_mark_dp_rx_busy(scn);
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	ring_near_full = 0;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));

	ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
							    &max_reap_limit);

	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(quota &&
			  (ring_desc = hal_srng_dst_peek(hal_soc,
							 hal_ring_hdl)))) {
		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = (struct dp_rx_desc *)
				hal_rx_get_reo_desc_va(ring_desc);
		dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);

		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				rx_desc->unmapped = 1;
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
							&head[rx_desc->pool_id],
							&tail[rx_desc->pool_id],
							rx_desc);
			}
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
296 */ 297 298 if (qdf_unlikely(!rx_desc->in_use)) { 299 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); 300 dp_info_rl("Reaping rx_desc not in use!"); 301 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 302 ring_desc, rx_desc); 303 /* ignore duplicate RX desc and continue to process */ 304 /* Pop out the descriptor */ 305 hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 306 continue; 307 } 308 309 status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc); 310 if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { 311 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); 312 dp_info_rl("Nbuf sanity check failure!"); 313 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 314 ring_desc, rx_desc); 315 rx_desc->in_err_state = 1; 316 hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 317 continue; 318 } 319 320 if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) { 321 dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie); 322 DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1); 323 dp_rx_dump_info_and_assert(soc, hal_ring_hdl, 324 ring_desc, rx_desc); 325 } 326 327 /* Get MPDU DESC info */ 328 hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info); 329 330 /* Get MSDU DESC info */ 331 hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info); 332 333 if (qdf_unlikely(msdu_desc_info.msdu_flags & 334 HAL_MSDU_F_MSDU_CONTINUATION)) { 335 /* previous msdu has end bit set, so current one is 336 * the new MPDU 337 */ 338 if (is_prev_msdu_last) { 339 /* Get number of entries available in HW ring */ 340 num_entries_avail = 341 hal_srng_dst_num_valid(hal_soc, 342 hal_ring_hdl, 1); 343 344 /* For new MPDU check if we can read complete 345 * MPDU by comparing the number of buffers 346 * available and number of buffers needed to 347 * reap this MPDU 348 */ 349 if ((msdu_desc_info.msdu_len / 350 (RX_DATA_BUFFER_SIZE - 351 soc->rx_pkt_tlv_size) + 1) > 352 num_entries_avail) { 353 DP_STATS_INC(soc, 354 rx.msdu_scatter_wait_break, 355 1); 356 dp_rx_cookie_reset_invalid_bit( 357 ring_desc); 358 break; 359 } 360 is_prev_msdu_last = false; 361 } 362 } 363 core_id = smp_processor_id(); 364 DP_STATS_INC(soc, rx.ring_packets[core_id][reo_ring_num], 1); 365 366 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT) 367 qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1); 368 369 if (qdf_unlikely(mpdu_desc_info.mpdu_flags & 370 HAL_MPDU_F_RAW_AMPDU)) 371 qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1); 372 373 if (!is_prev_msdu_last && 374 msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) 375 is_prev_msdu_last = true; 376 377 /* Pop out the descriptor*/ 378 hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 379 380 rx_bufs_reaped[rx_desc->pool_id]++; 381 peer_mdata = mpdu_desc_info.peer_meta_data; 382 QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) = 383 dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata); 384 QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) = 385 dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata); 386 387 /* to indicate whether this msdu is rx offload */ 388 pkt_capture_offload = 389 DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata); 390 391 /* 392 * save msdu flags first, last and continuation msdu in 393 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and 394 * length to nbuf->cb. This ensures the info required for 395 * per pkt processing is always in the same cache line. 396 * This helps in improving throughput for smaller pkt 397 * sizes. 
398 */ 399 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU) 400 qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1); 401 402 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) 403 qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1); 404 405 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) 406 qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1); 407 408 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC) 409 qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1); 410 411 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID) 412 qdf_nbuf_set_da_valid(rx_desc->nbuf, 1); 413 414 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID) 415 qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1); 416 417 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS) 418 qdf_nbuf_set_intra_bss(rx_desc->nbuf, 1); 419 420 if (qdf_likely(mpdu_desc_info.mpdu_flags & 421 HAL_MPDU_F_QOS_CONTROL_VALID)) 422 qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid); 423 424 /* set sw exception */ 425 qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt( 426 rx_desc->nbuf, 427 hal_rx_sw_exception_get_be(ring_desc)); 428 429 QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len; 430 431 QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num; 432 433 /* 434 * move unmap after scattered msdu waiting break logic 435 * in case double skb unmap happened. 436 */ 437 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 438 dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num); 439 rx_desc->unmapped = 1; 440 DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head, 441 ebuf_tail, rx_desc); 442 /* 443 * if continuation bit is set then we have MSDU spread 444 * across multiple buffers, let us not decrement quota 445 * till we reap all buffers of that MSDU. 446 */ 447 if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) 448 quota -= 1; 449 450 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 451 &tail[rx_desc->pool_id], rx_desc); 452 num_rx_bufs_reaped++; 453 /* 454 * only if complete msdu is received for scatter case, 455 * then allow break. 456 */ 457 if (is_prev_msdu_last && 458 dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped, 459 max_reap_limit)) 460 break; 461 } 462 done: 463 dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl); 464 465 replenish_soc = dp_rx_replensih_soc_get(soc, reo_ring_num); 466 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 467 /* 468 * continue with next mac_id if no pkts were reaped 469 * from that pool 470 */ 471 if (!rx_bufs_reaped[mac_id]) 472 continue; 473 474 dp_rxdma_srng = &replenish_soc->rx_refill_buf_ring[mac_id]; 475 476 rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id]; 477 478 dp_rx_buffers_replenish(replenish_soc, mac_id, dp_rxdma_srng, 479 rx_desc_pool, rx_bufs_reaped[mac_id], 480 &head[mac_id], &tail[mac_id]); 481 } 482 483 dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]); 484 /* Peer can be NULL is case of LFR */ 485 if (qdf_likely(txrx_peer)) 486 vdev = NULL; 487 488 /* 489 * BIG loop where each nbuf is dequeued from global queue, 490 * processed and queued back on a per vdev basis. These nbufs 491 * are sent to stack as and when we run out of nbufs 492 * or a new nbuf dequeued from global queue has a different 493 * vdev when compared to previous nbuf. 
494 */ 495 nbuf = nbuf_head; 496 while (nbuf) { 497 next = nbuf->next; 498 dp_rx_prefetch_nbuf_data_be(nbuf, next); 499 if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) { 500 nbuf = next; 501 DP_STATS_INC(soc, rx.err.raw_frm_drop, 1); 502 continue; 503 } 504 505 rx_tlv_hdr = qdf_nbuf_data(nbuf); 506 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); 507 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 508 509 if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer, 510 peer_id, vdev_id)) { 511 dp_rx_deliver_to_stack(soc, vdev, txrx_peer, 512 deliver_list_head, 513 deliver_list_tail); 514 deliver_list_head = NULL; 515 deliver_list_tail = NULL; 516 } 517 518 /* Get TID from struct cb->tid_val, save to tid */ 519 if (qdf_nbuf_is_rx_chfrag_start(nbuf)) 520 tid = qdf_nbuf_get_tid_val(nbuf); 521 522 if (qdf_unlikely(!txrx_peer)) { 523 txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, 524 &txrx_ref_handle, 525 DP_MOD_ID_RX); 526 } else if (txrx_peer && txrx_peer->peer_id != peer_id) { 527 dp_txrx_peer_unref_delete(txrx_ref_handle, 528 DP_MOD_ID_RX); 529 txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, 530 &txrx_ref_handle, 531 DP_MOD_ID_RX); 532 } 533 534 if (txrx_peer) { 535 QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false; 536 qdf_dp_trace_set_track(nbuf, QDF_RX); 537 QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1; 538 QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) = 539 QDF_NBUF_RX_PKT_DATA_TRACK; 540 } 541 542 rx_bufs_used++; 543 544 if (qdf_likely(txrx_peer)) { 545 vdev = txrx_peer->vdev; 546 } else { 547 nbuf->next = NULL; 548 dp_rx_deliver_to_pkt_capture_no_peer( 549 soc, nbuf, pkt_capture_offload); 550 551 if (!pkt_capture_offload) 552 dp_rx_deliver_to_stack_no_peer(soc, nbuf); 553 nbuf = next; 554 continue; 555 } 556 557 if (qdf_unlikely(!vdev)) { 558 dp_rx_nbuf_free(nbuf); 559 nbuf = next; 560 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 561 continue; 562 } 563 564 /* when hlos tid override is enabled, save tid in 565 * skb->priority 566 */ 567 if (qdf_unlikely(vdev->skip_sw_tid_classification & 568 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) 569 qdf_nbuf_set_priority(nbuf, tid); 570 571 rx_pdev = vdev->pdev; 572 DP_RX_TID_SAVE(nbuf, tid); 573 if (qdf_unlikely(rx_pdev->delay_stats_flag) || 574 qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled( 575 soc->wlan_cfg_ctx)) || 576 dp_rx_pkt_tracepoints_enabled()) 577 qdf_nbuf_set_timestamp(nbuf); 578 579 enh_flag = rx_pdev->enhanced_stats_en; 580 581 tid_stats = 582 &rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid]; 583 584 /* 585 * Check if DMA completed -- msdu_done is the last bit 586 * to be written 587 */ 588 if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) && 589 !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) { 590 dp_err("MSDU DONE failure"); 591 DP_STATS_INC(soc, rx.err.msdu_done_fail, 1); 592 hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr, 593 QDF_TRACE_LEVEL_INFO); 594 tid_stats->fail_cnt[MSDU_DONE_FAILURE]++; 595 dp_rx_nbuf_free(nbuf); 596 qdf_assert(0); 597 nbuf = next; 598 continue; 599 } 600 601 DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id); 602 /* 603 * First IF condition: 604 * 802.11 Fragmented pkts are reinjected to REO 605 * HW block as SG pkts and for these pkts we only 606 * need to pull the RX TLVS header length. 607 * Second IF condition: 608 * The below condition happens when an MSDU is spread 609 * across multiple buffers. This can happen in two cases 610 * 1. The nbuf size is smaller then the received msdu. 611 * ex: we have set the nbuf size to 2048 during 612 * nbuf_alloc. 
		 *        but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr,
							   &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len);
			} else {
				dp_rx_nbuf_free(nbuf);
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
				dp_info_rl("scatter msdu len %d, dropped",
					   msdu_len);
				nbuf = next;
				continue;
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		/*
		 * process frame for multipass phase processing
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(txrx_peer, nbuf,
						    tid) == false) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.multipass_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop, 1);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)
				  == false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.nawds_mcast_drop, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				dp_rx_info("%pK: mesh pkt filtered", soc);
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
					      txrx_peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			dp_rx_wds_learn(soc, vdev,
					rx_tlv_hdr,
					txrx_peer,
					nbuf,
					msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);
		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);

		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
					  QDF_NBUF_CB_RX_PKT_LEN(nbuf),
					  enh_flag);
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	if (qdf_likely(deliver_list_head)) {
		if (qdf_likely(txrx_peer)) {
			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
						     pkt_capture_offload,
						     deliver_list_head);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
						       deliver_list_head,
						       deliver_list_tail);
		} else {
			nbuf = deliver_list_head;
			while (nbuf) {
				next = nbuf->next;
				nbuf->next = NULL;
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
				nbuf = next;
			}
		}
	}

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	/*
	 * If we are processing in near-full condition, there are 3 scenarios
	 * 1) Ring entries have reached critical state
	 * 2) Ring entries are still near high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
	if (ring_near_full && quota)
		goto more_data;

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

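				/* Re-enter the reap loop if HIF does not ask
				 * this context to yield; a near-full ring
				 * forces one more pass even when HIF asks to
				 * yield (descriptive note).
				 */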
				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_pool_init_be_cc() - initialize RX desc pool for cookie conversion
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   uint32_t pool_id)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_spt_page_desc *page_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;

	if (!rx_desc_pool->pool_size) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	page_desc = &cc_ctx->page_desc_base[0];
	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each RX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 &rx_desc_elem->rx_desc);
		rx_desc_elem->rx_desc.cookie =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;

		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   uint32_t pool_id)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;
	int i = 0;

	if (!rx_desc_pool->pool_size) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	page_desc = &cc_ctx->page_desc_base[0];
	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];

		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each RX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 &rx_desc_pool->array[i].rx_desc);
		rx_desc_pool->array[i].rx_desc.cookie =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;

		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}
	return QDF_STATUS_SUCCESS;
}
#endif

static void
dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t pool_id)
{
	struct dp_spt_page_desc *page_desc;
	struct dp_soc_be *be_soc;
	int i = 0;
	struct dp_hw_cookie_conversion_t *cc_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	for (i = 0; i < cc_ctx->total_page_num; i++) {
		page_desc = &cc_ctx->page_desc_base[i];
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
	}
}

QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* Only regular RX buffer desc pool uses HW cookie conversion */
	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE) {
		dp_info("rx_desc_buf pool init");
		status = dp_rx_desc_pool_init_be_cc(soc,
						    rx_desc_pool,
						    pool_id);
	} else {
		dp_info("non_rx_desc_buf_pool init");
		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
						      pool_id);
	}

	return status;
}

void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE)
		dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
}

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
		/* HW cookie conversion done */
		*r_rx_desc = (struct dp_rx_desc *)
				hal_rx_wbm_get_desc_va(ring_desc);
	} else {
		/* SW do cookie conversion */
		uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc, cookie);
	}

	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	*r_rx_desc = (struct dp_rx_desc *)
			hal_rx_wbm_get_desc_va(ring_desc);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	/* SW do cookie conversion */
	uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

	*r_rx_desc = (struct dp_rx_desc *)
			dp_cc_desc_find(soc, cookie);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */

struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie)
{
	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
}

#if defined(WLAN_FEATURE_11BE_MLO)
#if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
static inline void dp_rx_dummy_src_mac(qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	eh->ether_shost[0] = 0x4d; /* M */
	eh->ether_shost[1] = 0x4c; /* L */
	eh->ether_shost[2] = 0x4d; /* M */
	eh->ether_shost[3] = 0x43; /* C */
	eh->ether_shost[4] = 0x41; /* A */
	eh->ether_shost[5] = 0x53; /* S */
}

bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf)
{
	struct dp_vdev *mcast_primary_vdev = NULL;
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
	      qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
		return false;

	if (vdev->mcast_enhancement_en || be_vdev->mcast_primary)
		goto send_pkt;

	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc, be_vdev,
							   DP_MOD_ID_RX);
	if (!mcast_primary_vdev) {
		dp_rx_debug("Non mlo vdev");
		goto send_pkt;
	}
	dp_rx_dummy_src_mac(nbuf);
	dp_rx_deliver_to_stack(mcast_primary_vdev->pdev->soc,
			       mcast_primary_vdev,
			       peer,
			       nbuf,
			       NULL);
	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
			     mcast_primary_vdev,
			     DP_MOD_ID_RX);
	return true;
send_pkt:
	dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
			       &be_vdev->vdev,
			       peer,
			       nbuf,
			       NULL);
	return true;
}
#else
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_peer *peer,
			    qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	struct dp_soc *soc = int_ctx->soc;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, rx_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&rx_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
 * @ta_peer: transmitter peer handle
 * @da_peer: destination peer handle
 *
 * Return: true - MLO forwarding case, false: not
 */
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
			     struct dp_txrx_peer *da_peer)
{
	/* at least one of the TA/DA peers should belong to an MLO
	 * connection, i.e. be an MLD peer
	 */
	if (!IS_MLO_DP_MLD_TXRX_PEER(ta_peer) &&
	    !IS_MLO_DP_MLD_TXRX_PEER(da_peer))
		return false;

	/* TA peer and DA peer's vdev should be partner MLO vdevs */
	if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
				      &da_peer->vdev->mld_mac_addr))
		return false;

	return true;
}
#else
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
			     struct dp_txrx_peer *da_peer)
{
	return false;
}
#endif

#ifdef INTRA_BSS_FWD_OFFLOAD
/**
 * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
 *				     for unicast frame
 * @nbuf: RX packet buffer
 * @ta_peer: transmitter DP peer handle
 * @msdu_metadata: MSDU meta data info
 * @params: Intra-BSS params carrying the dest_soc and returning the vdev id
 *          for Intra-BSS TX
 *
 * Return: true - intrabss allowed
 *	   false - not allow
 */
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_intra_bss(nbuf))
		return false;

	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
						params->dest_soc,
						msdu_metadata->da_idx);
	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;
	params->tx_vdev_id = da_peer->vdev->vdev_id;
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	return true;
}
#else
#ifdef WLAN_MLO_MULTI_CHIP
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	bool ret = false;
	uint8_t dest_chip_id;
	uint8_t soc_idx;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev_be *be_vdev =
		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
	struct dp_soc_be *be_soc =
		dp_get_be_soc_from_dp_soc(params->dest_soc);

	if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
		return false;

	dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
	qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));

	if (be_soc->mlo_enabled) {
		/* validate chip_id, get a ref, and re-assign soc */
		params->dest_soc =
			dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
						      dest_chip_id);
		if (!params->dest_soc)
			return false;
	}

	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(params->dest_soc,
							msdu_metadata->da_idx);
	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;
	/* soc unref if needed */

	params->tx_vdev_id = da_peer->vdev->vdev_id;

	/* If the source or destination peer is in the isolation
	 * list, then don't forward; instead push to bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer))
		goto rel_da_peer;

	if (da_peer->bss_peer || da_peer == ta_peer)
		goto rel_da_peer;

	/* Same vdev, support Intra-BSS */
	if (da_peer->vdev == ta_peer->vdev) {
		ret = true;
		goto rel_da_peer;
	}

	/* MLO specific Intra-BSS check */
	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
		/* index of soc in the array */
		soc_idx = dest_chip_id << DP_MLO_DEST_CHIP_ID_SHIFT;
		if (!(be_vdev->partner_vdev_list[soc_idx][0] ==
		      params->tx_vdev_id) &&
		    !(be_vdev->partner_vdev_list[soc_idx][1] ==
		      params->tx_vdev_id)) {
			/*dp_soc_unref_delete(soc);*/
			goto rel_da_peer;
		}
		ret = true;
	}

rel_da_peer:
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return ret;
}
#else
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	bool ret = false;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
						params->dest_soc,
						msdu_metadata->da_idx);

	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	params->tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation
	 * list, then don't forward; instead push to bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer))
		goto rel_da_peer;

	if (da_peer->bss_peer || da_peer == ta_peer)
		goto rel_da_peer;

	/* Same vdev, support Intra-BSS */
	if (da_peer->vdev == ta_peer->vdev) {
		ret = true;
		goto rel_da_peer;
	}

	/* MLO specific Intra-BSS check */
	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
		ret = true;
		goto rel_da_peer;
	}

rel_da_peer:
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return ret;
}
#endif /* WLAN_MLO_MULTI_CHIP */
#endif /* INTRA_BSS_FWD_OFFLOAD */

/*
 * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc,
			       struct dp_txrx_peer *ta_txrx_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats)
{
	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);

		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
		tx_exc_metadata.is_intrabss_fwd = 1;
		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;
		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
					 ta_txrx_peer->vdev->vdev_id,
					 nbuf_copy,
					 &tx_exc_metadata)) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.fail, 1,
						      len);
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.pkts, 1,
						      len);
			tid_stats->intrabss_cnt++;
		}
		return true;
	}
	return false;
}

/*
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
 *			     pkt with DA not equal to vdev mac addr,
 *			     fwd is not allowed.
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata)
{
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
					tid_stats.tid_rx_stats[ring_id][tid];
	bool ret = false;
	struct dp_be_intrabss_params params;

	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack.
	 * Note: how do we handle multicast pkts? Do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
					       nbuf, tid_stats);
	}

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	params.dest_soc = soc;
	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer,
					  &msdu_metadata, &params)) {
		ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
					       params.tx_vdev_id,
					       rx_tlv_hdr, nbuf, tid_stats);
	}

	return ret;
}
#endif