/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_be_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_be_rx.h"
#include "hal_api.h"
#include "hal_be_api.h"
#include "qdf_nbuf.h"
#include "hal_be_rx_tlv.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

#ifndef AST_OFFLOAD_ENABLE
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *txrx_peer,
		qdf_nbuf_t nbuf,
		struct hal_rx_msdu_metadata msdu_metadata)
{
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc,
					rx_tlv_hdr,
					txrx_peer,
					nbuf,
					msdu_metadata);
}
#else
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_peer_learn_be() - function to send event to control
 * path on receiving 1st 4-address frame from backhaul.
 * @soc: DP soc
 * @ta_txrx_peer: WDS repeater txrx peer
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: RX packet buffer
 *
 * Return: void
 */
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr,
					    qdf_nbuf_t nbuf)
{
	uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
	struct dp_peer *ta_base_peer;

	/* instead of checking addr4 is valid or not in per packet path
	 * check for init bit, which will be set on reception of
	 * first addr4 valid packet.
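	 * Once the bit is set the whole addr4 check below is skipped for
	 * this peer, so the control-path learn event is not raised again.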
	 */
	if (!ta_txrx_peer->vdev->wds_ext_enabled ||
	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
				&ta_txrx_peer->wds_ext.init))
		return;

	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)) {
		qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
					    &ta_txrx_peer->wds_ext.init);

		ta_base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
						     DP_MOD_ID_RX);

		if (!ta_base_peer)
			return;

		qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);

		soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
						soc->ctrl_psoc,
						ta_txrx_peer->peer_id,
						ta_txrx_peer->vdev->vdev_id,
						wds_ext_src_mac);
	}
}
#else
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr,
					    qdf_nbuf_t nbuf)
{
}
#endif
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *ta_txrx_peer,
		qdf_nbuf_t nbuf,
		struct hal_rx_msdu_metadata msdu_metadata)
{
	dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
}
#endif

#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
	uint8_t lmac_id;

	lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
	qdf_nbuf_set_lmac_id(nbuf, lmac_id);
}
#else
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
#endif

/**
 * dp_rx_process_be() - Brain of the Rx processing functionality
 *			Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	uint32_t num_pending;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	bool enh_flag;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t core_id = 0;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t num_entries_avail = 0;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	int max_reap_limit, ring_near_full;
	struct dp_soc *replenish_soc;
	uint8_t chip_id;
	uint64_t current_time = 0;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
	dp_runtime_pm_mark_last_busy(soc);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	ring_near_full = 0;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));

	dp_pkt_get_timestamp(&current_time);

	ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
							    &max_reap_limit);

	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(quota &&
			  (ring_desc = hal_srng_dst_peek(hal_soc,
							 hal_ring_hdl)))) {
		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = (struct dp_rx_desc *)
				hal_rx_get_reo_desc_va(ring_desc);
		dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);

		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				rx_desc->unmapped = 1;
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
					&head[rx_desc->chip_id][rx_desc->pool_id],
					&tail[rx_desc->chip_id][rx_desc->pool_id],
					rx_desc);
			}
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */

		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* Get number of entries available in HW ring */
				num_entries_avail =
					hal_srng_dst_num_valid(hal_soc,
							       hal_ring_hdl, 1);

				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (RX_DATA_BUFFER_SIZE -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_entries_avail) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								ring_desc);
					break;
				}
				is_prev_msdu_last = false;
			}
		}
		core_id = smp_processor_id();
		DP_STATS_INC(soc, rx.ring_packets[core_id][reo_ring_num], 1);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			is_prev_msdu_last = true;

		/* Pop out the descriptor*/
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
		dp_rx_set_msdu_lmac_id(rx_desc->nbuf, peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
			qdf_nbuf_set_intra_bss(rx_desc->nbuf, 1);

		if (qdf_likely(mpdu_desc_info.mpdu_flags &
			       HAL_MPDU_F_QOS_CONTROL_VALID))
			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);

		/* set sw exception */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				hal_rx_sw_exception_get_be(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * move unmap after scattered msdu waiting break logic
		 * in case double skb unmap happened.
		 */
		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		rx_desc->unmapped = 1;
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);
		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;

		dp_rx_add_to_free_desc_list
			(&head[rx_desc->chip_id][rx_desc->pool_id],
			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;
		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
			/*
			 * continue with next mac_id if no pkts were reaped
			 * from that pool
			 */
			if (!rx_bufs_reaped[chip_id][mac_id])
				continue;

			replenish_soc = dp_rx_replensih_soc_get(soc, chip_id);

			dp_rxdma_srng =
				&replenish_soc->rx_refill_buf_ring[mac_id];

			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(replenish_soc, mac_id,
						dp_rxdma_srng, rx_desc_pool,
						rx_bufs_reaped[chip_id][mac_id],
						&head[chip_id][mac_id],
						&tail[chip_id][mac_id]);
		}
	}

	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data_be(nbuf, next);
		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
			tid = qdf_nbuf_get_tid_val(nbuf);

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
							       &txrx_ref_handle,
							       DP_MOD_ID_RX);
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);
			txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
							       &txrx_ref_handle,
							       DP_MOD_ID_RX);
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		if (qdf_likely(txrx_peer)) {
			vdev = txrx_peer->vdev;
		} else {
			nbuf->next = NULL;
			dp_rx_deliver_to_pkt_capture_no_peer(
					soc, nbuf, pkt_capture_offload);

			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!vdev)) {
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			continue;
		}

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		rx_pdev = vdev->pdev;
		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(rx_pdev->delay_stats_flag) ||
		    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
				 soc->wlan_cfg_ctx)) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		enh_flag = rx_pdev->enhanced_stats_en;

		tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
				 !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) {
			dp_err("MSDU DONE failure");
			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
			dp_rx_nbuf_free(nbuf);
			qdf_assert(0);
			nbuf = next;
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr,
							   &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len);
			} else {
				dp_rx_nbuf_free(nbuf);
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
				dp_info_rl("scatter msdu len %d, dropped",
					   msdu_len);
				nbuf = next;
				continue;
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		/*
		 * process frame for multipass processing
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(txrx_peer, nbuf,
						    tid) == false) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.multipass_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop, 1);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)
				  == false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.nawds_mcast_drop, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				dp_rx_info("%pK: mesh pkt filtered", soc);
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
					      txrx_peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			dp_rx_wds_learn(soc, vdev,
					rx_tlv_hdr,
					txrx_peer,
					nbuf,
					msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);

		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
					  QDF_NBUF_CB_RX_PKT_LEN(nbuf),
					  enh_flag);
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	/*
	 * If we are processing in near-full condition, there are 3 scenarios
	 * 1) Ring entries have reached critical state
	 * 2) Ring entries are still near high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
	if (ring_near_full && quota)
		goto more_data;

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdevs */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_pool_init_be_cc() - initialize RX desc pool for cookie conversion
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handle
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   uint32_t pool_id)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_spt_page_desc *page_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;

	if (!rx_desc_pool->pool_size) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	page_desc = &cc_ctx->page_desc_base[0];
	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each RX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 &rx_desc_elem->rx_desc);
		rx_desc_elem->rx_desc.cookie =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		rx_desc_elem->rx_desc.chip_id = dp_mlo_get_chip_id(soc);
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;

		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   uint32_t pool_id)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;
	int i = 0;

	if (!rx_desc_pool->pool_size) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	page_desc = &cc_ctx->page_desc_base[0];
	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];

		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each RX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 &rx_desc_pool->array[i].rx_desc);
		rx_desc_pool->array[i].rx_desc.cookie =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
		rx_desc_pool->array[i].rx_desc.chip_id =
					dp_mlo_get_chip_id(soc);

		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}
	return QDF_STATUS_SUCCESS;
}
#endif

static void
dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t pool_id)
{
	struct dp_spt_page_desc *page_desc;
	struct dp_soc_be *be_soc;
	int i = 0;
	struct dp_hw_cookie_conversion_t *cc_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	for (i = 0; i < cc_ctx->total_page_num; i++) {
		page_desc = &cc_ctx->page_desc_base[i];
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
	}
}

QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* Only regular RX buffer desc pool uses HW cookie conversion */
	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE) {
		dp_info("rx_desc_buf pool init");
		status = dp_rx_desc_pool_init_be_cc(soc,
						    rx_desc_pool,
						    pool_id);
	} else {
		dp_info("non_rx_desc_buf_pool init");
		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
						      pool_id);
	}

	return status;
}

void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE)
		dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
}

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
		/* HW cookie conversion done */
		*r_rx_desc = (struct dp_rx_desc *)
				hal_rx_wbm_get_desc_va(ring_desc);
	} else {
		/* SW do cookie conversion */
		uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc, cookie);
	}

	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	*r_rx_desc = (struct dp_rx_desc *)
			hal_rx_wbm_get_desc_va(ring_desc);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	/* SW do cookie conversion */
	uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

	*r_rx_desc = (struct dp_rx_desc *)
			dp_cc_desc_find(soc, cookie);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */

struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie)
{
	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
}

#if defined(WLAN_FEATURE_11BE_MLO)
#if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
#define DP_RANDOM_MAC_ID_BIT_MASK	0xC0
#define DP_RANDOM_MAC_OFFSET	1
#define DP_MAC_LOCAL_ADMBIT_MASK	0x2
#define DP_MAC_LOCAL_ADMBIT_OFFSET	0
static inline void
dp_rx_dummy_src_mac(struct dp_vdev *vdev,
		    qdf_nbuf_t nbuf)
{
	uint8_t random_mac[QDF_MAC_ADDR_SIZE] = {0};
	qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	qdf_mem_copy(random_mac, &vdev->mld_mac_addr.raw[0], QDF_MAC_ADDR_SIZE);
	random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] =
		random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] |
		DP_MAC_LOCAL_ADMBIT_MASK;
	random_mac[DP_RANDOM_MAC_OFFSET] =
		random_mac[DP_RANDOM_MAC_OFFSET] ^ DP_RANDOM_MAC_ID_BIT_MASK;

	qdf_mem_copy(&eh->ether_shost[0], random_mac, QDF_MAC_ADDR_SIZE);
}

#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
{
	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
}
#else
static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
{
	return false;
}
#endif

bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf)
{
	struct dp_vdev *mcast_primary_vdev = NULL;
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
	      qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
		return false;
	/*
	 * In the case of ME6, Backhaul WDS and NAWDS, send the igmp pkt
	 * on the same link where it was received, as these features will
	 * use peer based tcl metadata.
	 */

	qdf_nbuf_set_next(nbuf, NULL);

	if (vdev->mcast_enhancement_en || be_vdev->mcast_primary ||
	    peer->nawds_enabled)
		goto send_pkt;

	if (qdf_unlikely(dp_rx_mlo_igmp_wds_ext_handler(peer)))
		goto send_pkt;

	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc, be_vdev,
							   DP_MOD_ID_RX);
	if (!mcast_primary_vdev) {
		dp_rx_debug("Non mlo vdev");
		goto send_pkt;
	}

	if (qdf_unlikely(vdev->wrap_vdev)) {
		/* In the case of a qwrap repeater, send the original
		 * packet on the interface where it was received, and a
		 * packet with a dummy src on the mcast primary interface.
		 */
		qdf_nbuf_t nbuf_copy;

		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (qdf_likely(nbuf_copy))
			dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_copy,
					       NULL);
	}

	dp_rx_dummy_src_mac(vdev, nbuf);
	dp_rx_deliver_to_stack(mcast_primary_vdev->pdev->soc,
			       mcast_primary_vdev,
			       peer,
			       nbuf,
			       NULL);
	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
			     mcast_primary_vdev,
			     DP_MOD_ID_RX);
	return true;
send_pkt:
	dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
			       &be_vdev->vdev,
			       peer,
			       nbuf,
			       NULL);
	return true;
}
#else
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	struct dp_soc *soc = int_ctx->soc;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, rx_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&rx_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
 * @ta_peer: transmitter peer handle
 * @da_peer: destination peer handle
 *
 * Return: true - MLO forwarding case, false: not
 */
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
			     struct dp_txrx_peer *da_peer)
{
	/* one of TA/DA peer should belong to MLO connection peer,
	 * only MLD peer type is as expected
	 */
	if (!IS_MLO_DP_MLD_TXRX_PEER(ta_peer) &&
	    !IS_MLO_DP_MLD_TXRX_PEER(da_peer))
		return false;

	/* TA peer and DA peer's vdev should be partner MLO vdevs */
	if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
				      &da_peer->vdev->mld_mac_addr))
		return false;

	return true;
}
#else
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
			     struct dp_txrx_peer *da_peer)
{
	return false;
}
#endif

#ifdef INTRA_BSS_FWD_OFFLOAD
/**
 * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
 *				     for unicast frame
 * @nbuf: RX packet buffer
 * @ta_peer: transmitter DP peer handle
 * @msdu_metadata: MSDU meta data info
 * @params: Intra-BSS params used to return the vdev id for Intra-BSS TX
 *
 * Return: true - intrabss allowed
 *	   false - not allowed
 */
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_intra_bss(nbuf))
		return false;

	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
						params->dest_soc,
						msdu_metadata->da_idx);
	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;
	params->tx_vdev_id = da_peer->vdev->vdev_id;
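	/* The DA peer reference was only needed to look up the TX vdev id;
	 * release it before handing the frame to the forwarding path.
	 */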
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	return true;
}
#else
#ifdef WLAN_MLO_MULTI_CHIP
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	bool ret = false;
	uint8_t dest_chip_id;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev_be *be_vdev =
		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
	struct dp_soc_be *be_soc =
		dp_get_be_soc_from_dp_soc(params->dest_soc);

	if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
		return false;

	dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
	qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);

	/* use dest chip id when TA is MLD peer and DA is legacy */
	if (be_soc->mlo_enabled &&
	    ta_peer->mld_peer &&
	    !(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
		/* validate chip_id, get a ref, and re-assign soc */
		params->dest_soc =
			dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
						      dest_chip_id);
		if (!params->dest_soc)
			return false;

		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
						     da_peer_id,
						     &txrx_ref_handle,
						     DP_MOD_ID_RX);
		if (!da_peer)
			return false;

	} else {
		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
						     da_peer_id,
						     &txrx_ref_handle,
						     DP_MOD_ID_RX);
		if (!da_peer)
			return false;

		params->dest_soc = da_peer->vdev->pdev->soc;
		if (!params->dest_soc)
			goto rel_da_peer;

	}

	params->tx_vdev_id = da_peer->vdev->vdev_id;

	/* If the source or destination peer is in the isolation
	 * list, then don't forward; instead push to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer)) {
		ret = false;
		goto rel_da_peer;
	}

	if (da_peer->bss_peer || (da_peer == ta_peer)) {
		ret = false;
		goto rel_da_peer;
	}

	/* Same vdev, support Intra-BSS */
	if (da_peer->vdev == ta_peer->vdev) {
		ret = true;
		goto rel_da_peer;
	}

	/* MLO specific Intra-BSS check */
	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
		/* use dest chip id for legacy dest peer */
		if (!(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
			if (!(be_vdev->partner_vdev_list[dest_chip_id][0] ==
			      params->tx_vdev_id) &&
			    !(be_vdev->partner_vdev_list[dest_chip_id][1] ==
			      params->tx_vdev_id)) {
				/*dp_soc_unref_delete(soc);*/
				goto rel_da_peer;
			}
		}
		ret = true;
	}

rel_da_peer:
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return ret;
}
#else
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	bool ret = false;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
						params->dest_soc,
						msdu_metadata->da_idx);

	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	params->tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation
	 * list, then don't forward; instead push to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer))
		goto rel_da_peer;

	if (da_peer->bss_peer || da_peer == ta_peer)
		goto rel_da_peer;

	/* Same vdev, support Intra-BSS */
	if (da_peer->vdev == ta_peer->vdev) {
		ret = true;
		goto rel_da_peer;
	}

	/* MLO specific Intra-BSS check */
	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
		ret = true;
		goto rel_da_peer;
	}

rel_da_peer:
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return ret;
}
#endif /* WLAN_MLO_MULTI_CHIP */
#endif /* INTRA_BSS_FWD_OFFLOAD */

/*
 * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc,
			       struct dp_txrx_peer *ta_txrx_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats)
{
	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);

		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
		tx_exc_metadata.is_intrabss_fwd = 1;
		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;
		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
					 ta_txrx_peer->vdev->vdev_id,
					 nbuf_copy,
					 &tx_exc_metadata)) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.fail, 1,
						      len);
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.pkts, 1,
						      len);
			tid_stats->intrabss_cnt++;
		}
		return true;
	}
	return false;
}

/*
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
 *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata)
{
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
					tid_stats.tid_rx_stats[ring_id][tid];
	bool ret = false;
	struct dp_be_intrabss_params params;

	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
					       nbuf, tid_stats);
	}

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	params.dest_soc = soc;
	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer,
					  &msdu_metadata, &params)) {
		ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
					       params.tx_vdev_id,
					       rx_tlv_hdr, nbuf, tid_stats);
	}

	return ret;
}
#endif