/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_be_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_be_rx.h"
#include "hal_api.h"
#include "hal_be_api.h"
#include "qdf_nbuf.h"
#include "hal_be_rx_tlv.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

#ifndef AST_OFFLOAD_ENABLE
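/**
 * dp_rx_wds_learn() - Run WDS source port learning on a received MSDU
 * @soc: core txrx main context
 * @vdev: vdev on which the MSDU was received
 * @rx_tlv_hdr: start address of rx tlvs
 * @txrx_peer: txrx peer handle of the transmitting peer
 * @nbuf: RX packet buffer
 * @msdu_metadata: MSDU metadata parsed from the rx tlvs
 *
 * Host-based learning path: hands the frame to dp_rx_wds_srcport_learn()
 * when WDS is enabled on the vdev.
 *
 * Return: void
 */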
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *txrx_peer,
		qdf_nbuf_t nbuf,
		struct hal_rx_msdu_metadata msdu_metadata)
{
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc,
					rx_tlv_hdr,
					txrx_peer,
					nbuf,
					msdu_metadata);
}
#else
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_peer_learn_be() - function to send event to control
 * path on receiving 1st 4-address frame from backhaul.
 * @soc: DP soc
 * @ta_txrx_peer: WDS repeater txrx peer
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: RX packet buffer
 *
 * Return: void
 */
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr,
					    qdf_nbuf_t nbuf)
{
	uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
	struct dp_peer *ta_base_peer;

	/* instead of checking addr4 is valid or not in per packet path
	 * check for init bit, which will be set on reception of
	 * first addr4 valid packet.
	 */
	if (!ta_txrx_peer->vdev->wds_ext_enabled ||
	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
				&ta_txrx_peer->wds_ext.init))
		return;

	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)) {
		qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
					    &ta_txrx_peer->wds_ext.init);

		ta_base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
						     DP_MOD_ID_RX);

		if (!ta_base_peer)
			return;

		qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);

		soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
						soc->ctrl_psoc,
						ta_txrx_peer->peer_id,
						ta_txrx_peer->vdev->vdev_id,
						wds_ext_src_mac);
	}
}
#else
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr,
					    qdf_nbuf_t nbuf)
{
}
#endif
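/**
 * dp_rx_wds_learn() - WDS learning hook when AST offload is enabled
 * @soc: core txrx main context
 * @vdev: vdev on which the MSDU was received
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_txrx_peer: txrx peer handle of the transmitting peer
 * @nbuf: RX packet buffer
 * @msdu_metadata: MSDU metadata parsed from the rx tlvs
 *
 * With AST offload the target handles source port learning, so the
 * host side only raises the WDS-ext peer learn event for the first
 * 4-address frame from a backhaul peer.
 *
 * Return: void
 */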
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *ta_txrx_peer,
		qdf_nbuf_t nbuf,
		struct hal_rx_msdu_metadata msdu_metadata)
{
	dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
}
#endif

#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
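/**
 * dp_rx_set_msdu_lmac_id() - Save the lmac_id from the REO peer
 *			      metadata into the nbuf control block
 * @nbuf: RX packet buffer
 * @peer_mdata: peer metadata from the REO ring descriptor
 *
 * Helper for per-LMAC packet stats; compiled to a no-op when
 * DP_PKT_STATS_PER_LMAC or WLAN_FEATURE_11BE_MLO is not defined.
 */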
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
	uint8_t lmac_id;

	lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
	qdf_nbuf_set_lmac_id(nbuf, lmac_id);
}
#else
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
#endif

/**
 * dp_rx_process_be() - Brain of the Rx processing functionality
 *			Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_ring_desc_t last_prefetched_hw_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	uint32_t num_pending = 0;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	bool enh_flag;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t num_entries_avail = 0;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	int max_reap_limit, ring_near_full;
	struct dp_soc *replenish_soc;
	uint8_t chip_id;
	uint64_t current_time = 0;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
	dp_runtime_pm_mark_last_busy(soc);
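
	/*
	 * Re-entry point for another reap pass: the tail of this function
	 * jumps back here while quota remains and the ring is near full
	 * or still has pending entries.
	 */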
more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	ring_near_full = 0;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

	ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
							    &max_reap_limit);

	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);

	if (!num_pending)
		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	if (num_pending > quota)
		num_pending = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
								    hal_ring_hdl,
								    num_pending);
	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(num_pending)) {
		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc))
			break;

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = (struct dp_rx_desc *)
				hal_rx_get_reo_desc_va(ring_desc);
		dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);

		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				rx_desc->unmapped = 1;
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
					&head[rx_desc->chip_id][rx_desc->pool_id],
					&tail[rx_desc->chip_id][rx_desc->pool_id],
					rx_desc);
			}
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */

		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);

		/* Set the end bit to identify the last buffer in MPDU */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* In dp_rx_sg_create() until the last buffer,
			 * end bit should not be set. As continuation bit set,
			 * this is not a last buffer.
			 */
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 0);

			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* Get number of entries available in HW ring */
				num_entries_avail =
					hal_srng_dst_num_valid(hal_soc,
							       hal_ring_hdl, 1);

				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (RX_DATA_BUFFER_SIZE -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								ring_desc);
					/* As we are going to break out of the
					 * loop because of unavailability of
					 * descs to form complete SG, we need to
					 * reset the TP in the REO destination
					 * ring.
					 */
					hal_srng_dst_dec_tp(hal_soc,
							    hal_ring_hdl);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    !(msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION))
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;

		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
		dp_rx_set_msdu_lmac_id(rx_desc->nbuf, peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
			qdf_nbuf_set_intra_bss(rx_desc->nbuf, 1);

		if (qdf_likely(mpdu_desc_info.mpdu_flags &
			       HAL_MPDU_F_QOS_CONTROL_VALID))
			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);

		/* set sw exception */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				hal_rx_sw_exception_get_be(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * move unmap after scattered msdu waiting break logic
		 * in case double skb unmap happened.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		rx_desc->unmapped = 1;
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);

		quota -= 1;
		num_pending -= 1;

		dp_rx_add_to_free_desc_list
			(&head[rx_desc->chip_id][rx_desc->pool_id],
			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc,
						       num_pending,
						       hal_ring_hdl,
						       &last_prefetched_hw_desc,
						       &last_prefetched_sw_desc);

		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
	qdf_dsb();

	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
			/*
			 * continue with next mac_id if no pkts were reaped
			 * from that pool
			 */
			if (!rx_bufs_reaped[chip_id][mac_id])
				continue;

			replenish_soc = dp_rx_replensih_soc_get(soc, chip_id);

			dp_rxdma_srng =
				&replenish_soc->rx_refill_buf_ring[mac_id];

			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
					       dp_rxdma_srng,
					       rx_desc_pool,
					       rx_bufs_reaped[chip_id][mac_id],
					       &head[chip_id][mac_id],
					       &tail[chip_id][mac_id]);
		}
	}

	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data_be(nbuf, next);
		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		tid = qdf_nbuf_get_tid_val(nbuf);
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
			DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
								 peer_id,
								 &txrx_ref_handle,
								 pkt_capture_offload,
								 &vdev,
								 &rx_pdev, &dsf,
								 &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
			enh_flag = rx_pdev->enhanced_stats_en;
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
								 peer_id,
								 &txrx_ref_handle,
								 pkt_capture_offload,
								 &vdev,
								 &rx_pdev, &dsf,
								 &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
			enh_flag = rx_pdev->enhanced_stats_en;
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		if (qdf_likely(old_tid != tid)) {
			tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
				 !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) {
			dp_err("MSDU DONE failure");
			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
			dp_rx_nbuf_free(nbuf);
			qdf_assert(0);
			nbuf = next;
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr,
							   &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len);
			} else {
				dp_rx_nbuf_free(nbuf);
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
				dp_info_rl("scatter msdu len %d, dropped",
					   msdu_len);
				nbuf = next;
				continue;
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop, 1);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		if (qdf_unlikely(!rx_pdev->rx_fast_flag)) {
			/*
			 * process frame for multipass phase processing
			 */
			if (qdf_unlikely(vdev->multipass_en)) {
				if (dp_rx_multipass_process(txrx_peer, nbuf,
							    tid) == false) {
					DP_PEER_PER_PKT_STATS_INC
						(txrx_peer,
						 rx.multipass_rx_pkt_drop, 1);
					dp_rx_nbuf_free(nbuf);
					nbuf = next;
					continue;
				}
			}
			if (qdf_unlikely(txrx_peer &&
					 (txrx_peer->nawds_enabled) &&
					 (qdf_nbuf_is_da_mcbc(nbuf)) &&
					 (hal_rx_get_mpdu_mac_ad4_valid_be
						(rx_tlv_hdr) == false))) {
				tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.nawds_mcast_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}

			/* Update the protocol tag in SKB based on CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  reo_ring_num, false, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);

			if (qdf_likely(vdev->rx_decap_type ==
				       htt_cmn_pkt_type_ethernet) &&
			    qdf_likely(!vdev->mesh_vdev)) {
				dp_rx_wds_learn(soc, vdev,
						rx_tlv_hdr,
						txrx_peer,
						nbuf,
						msdu_metadata);
			}

			if (qdf_unlikely(vdev->mesh_vdev)) {
				if (dp_rx_filter_mesh_packets(vdev, nbuf,
							      rx_tlv_hdr)
						== QDF_STATUS_SUCCESS) {
					dp_rx_info("%pK: mesh pkt filtered",
						   soc);
					tid_stats->fail_cnt[MESH_FILTER_DROP]++;
					DP_STATS_INC(vdev->pdev,
						     dropped.mesh_filter, 1);

					dp_rx_nbuf_free(nbuf);
					nbuf = next;
					continue;
				}
				dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
						      txrx_peer);
			}
		}

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats);

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);

		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
					  QDF_NBUF_CB_RX_PKT_LEN(nbuf),
					  enh_flag);
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	/*
	 * If we are processing in near-full condition, there are 3 scenarios
	 * 1) Ring entries has reached critical state
	 * 2) Ring entries are still near high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
	if (ring_near_full && quota)
		goto more_data;

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_pool_init_be_cc() - initial RX desc pool for cookie conversion
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   uint32_t pool_id)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_spt_page_desc *page_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;

	if (!rx_desc_pool->pool_size) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	page_desc = &cc_ctx->page_desc_base[0];
	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each RX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 &rx_desc_elem->rx_desc);
		rx_desc_elem->rx_desc.cookie =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		rx_desc_elem->rx_desc.chip_id = dp_mlo_get_chip_id(soc);
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;

		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   uint32_t pool_id)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;
	int i = 0;

	if (!rx_desc_pool->pool_size) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	page_desc = &cc_ctx->page_desc_base[0];
	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];

		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each RX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 &rx_desc_pool->array[i].rx_desc);
		rx_desc_pool->array[i].rx_desc.cookie =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
		rx_desc_pool->array[i].rx_desc.chip_id =
			dp_mlo_get_chip_id(soc);

		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}
	return QDF_STATUS_SUCCESS;
}
#endif
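
/**
 * dp_rx_desc_pool_deinit_be_cc() - de-initialize the cookie conversion
 *				    context for an RX desc pool
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Clears every secondary page table page that was populated with RX
 * descriptor VAs for this pool.
 */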
static void
dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t pool_id)
{
	struct dp_spt_page_desc *page_desc;
	struct dp_soc_be *be_soc;
	int i = 0;
	struct dp_hw_cookie_conversion_t *cc_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->rx_cc_ctx[pool_id];

	for (i = 0; i < cc_ctx->total_page_num; i++) {
		page_desc = &cc_ctx->page_desc_base[i];
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
	}
}
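
/**
 * dp_rx_desc_pool_init_be() - Initialize an RX descriptor pool
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Regular RX buffer pools are initialized with HW cookie conversion;
 * all other pool types fall back to the generic init path.
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */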
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* Only regular RX buffer desc pool use HW cookie conversion */
	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE) {
		dp_info("rx_desc_buf pool init");
		status = dp_rx_desc_pool_init_be_cc(soc,
						    rx_desc_pool,
						    pool_id);
	} else {
		dp_info("non_rx_desc_buf_pool init");
		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
						      pool_id);
	}

	return status;
}
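
/**
 * dp_rx_desc_pool_deinit_be() - De-initialize an RX descriptor pool
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Tears down the cookie conversion context for regular RX buffer
 * pools; other pool types need no BE-specific de-initialization.
 */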
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE)
		dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
}
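
/**
 * dp_wbm_get_rx_desc_from_hal_desc_be() - Get the SW RX descriptor
 *					   referenced by a WBM ring descriptor
 * @soc: Handle to DP Soc structure
 * @ring_desc: WBM release ring descriptor
 * @r_rx_desc: pointer to be filled with the SW RX descriptor
 *
 * Depending on the cookie conversion feature flags, either uses the
 * HW-converted VA carried in the ring descriptor or converts the SW
 * cookie to a descriptor via dp_cc_desc_find().
 *
 * Return: QDF_STATUS_SUCCESS
 */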
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
		/* HW cookie conversion done */
		*r_rx_desc = (struct dp_rx_desc *)
				hal_rx_wbm_get_desc_va(ring_desc);
	} else {
		/* SW do cookie conversion */
		uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc, cookie);
	}

	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	*r_rx_desc = (struct dp_rx_desc *)
			hal_rx_wbm_get_desc_va(ring_desc);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc)
{
	/* SW do cookie conversion */
	uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

	*r_rx_desc = (struct dp_rx_desc *)
			dp_cc_desc_find(soc, cookie);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
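
/**
 * dp_rx_desc_cookie_2_va_be() - Convert an RX SW cookie to the
 *				 corresponding RX descriptor VA
 * @soc: Handle to DP Soc structure
 * @cookie: SW cookie carried in the ring descriptor
 *
 * Return: RX descriptor virtual address
 */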
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie)
{
	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
}

#if defined(WLAN_FEATURE_11BE_MLO)
#if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
#define DP_RANDOM_MAC_ID_BIT_MASK	0xC0
#define DP_RANDOM_MAC_OFFSET	1
#define DP_MAC_LOCAL_ADMBIT_MASK	0x2
#define DP_MAC_LOCAL_ADMBIT_OFFSET	0
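/**
 * dp_rx_dummy_src_mac() - Replace the source MAC in the ethernet header
 *			   with a locally administered variant of the
 *			   vdev's MLD MAC address
 * @vdev: vdev handle
 * @nbuf: RX packet buffer
 *
 * Used by dp_rx_mlo_igmp_handler() before delivering the packet on the
 * mcast primary interface with a dummy source address.
 */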
static inline void dp_rx_dummy_src_mac(struct dp_vdev *vdev,
				       qdf_nbuf_t nbuf)
{
	uint8_t random_mac[QDF_MAC_ADDR_SIZE] = {0};
	qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	qdf_mem_copy(random_mac, &vdev->mld_mac_addr.raw[0], QDF_MAC_ADDR_SIZE);
	random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] =
		random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] |
		DP_MAC_LOCAL_ADMBIT_MASK;
	random_mac[DP_RANDOM_MAC_OFFSET] =
		random_mac[DP_RANDOM_MAC_OFFSET] ^ DP_RANDOM_MAC_ID_BIT_MASK;

	qdf_mem_copy(&eh->ether_shost[0], random_mac, QDF_MAC_ADDR_SIZE);
}

#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
{
	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
}
#else
static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
{
	return false;
}
#endif
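
/**
 * dp_rx_mlo_igmp_handler() - Handle IGMP packets received on an MLO vdev
 * @soc: core txrx main context
 * @vdev: vdev on which the packet was received
 * @peer: txrx peer handle
 * @nbuf: RX packet buffer
 *
 * For ME6, backhaul WDS and NAWDS the packet is delivered on the link
 * it arrived on; otherwise it is steered to the mcast primary vdev
 * with the source MAC replaced by a dummy address (and, for qwrap
 * repeaters, a copy also delivered on the receiving interface).
 *
 * Return: true if the packet was consumed, false if it is not an
 *	   IGMP packet
 */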
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf)
{
	struct dp_vdev *mcast_primary_vdev = NULL;
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &peer->vdev->pdev->stats.
					tid_stats.tid_rx_wbm_stats[0][tid];

	if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
	      qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
		return false;

	if (!peer->bss_peer) {
		if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf, tid_stats))
			dp_rx_err("forwarding failed");
	}

	/*
	 * In the case of ME6, Backhaul WDS, NAWDS
	 * send the igmp pkt on the same link where it received,
	 * as these features will use peer based tcl metadata
	 */

	qdf_nbuf_set_next(nbuf, NULL);

	if (vdev->mcast_enhancement_en || be_vdev->mcast_primary ||
	    peer->nawds_enabled)
		goto send_pkt;

	if (qdf_unlikely(dp_rx_mlo_igmp_wds_ext_handler(peer)))
		goto send_pkt;

	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc, be_vdev,
							   DP_MOD_ID_RX);
	if (!mcast_primary_vdev) {
		dp_rx_debug("Non mlo vdev");
		goto send_pkt;
	}

	if (qdf_unlikely(vdev->wrap_vdev)) {
		/* In the case of qwrap repeater send the original
		 * packet on the interface where it received,
		 * packet with dummy src on the mcast primary interface.
		 */
		qdf_nbuf_t nbuf_copy;

		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (qdf_likely(nbuf_copy))
			dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_copy,
					       NULL);
	}

	dp_rx_dummy_src_mac(vdev, nbuf);
	dp_rx_deliver_to_stack(mcast_primary_vdev->pdev->soc,
			       mcast_primary_vdev,
			       peer,
			       nbuf,
			       NULL);
	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
			     mcast_primary_vdev,
			     DP_MOD_ID_RX);
	return true;
send_pkt:
	dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
			       &be_vdev->vdev,
			       peer,
			       nbuf,
			       NULL);
	return true;
}
#else
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
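/**
 * dp_rx_nf_process() - Near-full state handler for a REO dest ring
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring being serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring
 * @quota: No. of units that can be serviced in one shot (unused here)
 *
 * Marks the ring near-full when its fill level crosses
 * DP_SRNG_THRESH_NEAR_FULL so the regular Rx path can switch to
 * near-full processing.
 *
 * Return: work done (0 when the ring is below the threshold)
 */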
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	struct dp_soc *soc = int_ctx->soc;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, rx_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&rx_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
 * @ta_peer: transmitter peer handle
 * @da_peer: destination peer handle
 *
 * Return: true - MLO forwarding case, false: not
 */
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
			     struct dp_txrx_peer *da_peer)
{
	/* one of TA/DA peer should belong to MLO connection peer,
	 * only MLD peer type is as expected
	 */
	if (!IS_MLO_DP_MLD_TXRX_PEER(ta_peer) &&
	    !IS_MLO_DP_MLD_TXRX_PEER(da_peer))
		return false;

	/* TA peer and DA peer's vdev should be partner MLO vdevs */
	if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
				      &da_peer->vdev->mld_mac_addr))
		return false;

	return true;
}
#else
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
			     struct dp_txrx_peer *da_peer)
{
	return false;
}
#endif

#ifdef INTRA_BSS_FWD_OFFLOAD
/**
 * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
 *				     for unicast frame
 * @nbuf: RX packet buffer
 * @ta_peer: transmitter DP peer handle
 * @msdu_metadata: MSDU meta data info
 * @params: Intra-BSS forwarding parameters; tx_vdev_id is filled with
 *	    the vdev id for the Intra-BSS TX
 *
 * Return: true - intrabss allowed
 *	   false - not allow
 */
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_intra_bss(nbuf))
		return false;

	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
						params->dest_soc,
						msdu_metadata->da_idx);
	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;
	params->tx_vdev_id = da_peer->vdev->vdev_id;
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	return true;
}
#else
#ifdef WLAN_MLO_MULTI_CHIP
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	bool ret = false;
	uint8_t dest_chip_id;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev_be *be_vdev =
		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
	struct dp_soc_be *be_soc =
		dp_get_be_soc_from_dp_soc(params->dest_soc);

	if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
		return false;

	dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
	qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);

	/* use dest chip id when TA is MLD peer and DA is legacy */
	if (be_soc->mlo_enabled &&
	    ta_peer->mld_peer &&
	    !(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
		/* validate chip_id, get a ref, and re-assign soc */
		params->dest_soc =
			dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
						      dest_chip_id);
		if (!params->dest_soc)
			return false;

		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
						     da_peer_id,
						     &txrx_ref_handle,
						     DP_MOD_ID_RX);
		if (!da_peer)
			return false;

	} else {
		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
						     da_peer_id,
						     &txrx_ref_handle,
						     DP_MOD_ID_RX);
		if (!da_peer)
			return false;

		params->dest_soc = da_peer->vdev->pdev->soc;
		if (!params->dest_soc)
			goto rel_da_peer;
	}

	params->tx_vdev_id = da_peer->vdev->vdev_id;

	/* If the source or destination peer is in the isolation
	 * list, then don't forward; instead push to bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer)) {
		ret = false;
		goto rel_da_peer;
	}

	if (da_peer->bss_peer || (da_peer == ta_peer)) {
		ret = false;
		goto rel_da_peer;
	}

	/* Same vdev, support Intra-BSS */
	if (da_peer->vdev == ta_peer->vdev) {
		ret = true;
		goto rel_da_peer;
	}

	/* MLO specific Intra-BSS check */
	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
		/* use dest chip id for legacy dest peer */
		if (!(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
			if (!(be_vdev->partner_vdev_list[dest_chip_id][0] ==
			      params->tx_vdev_id) &&
			    !(be_vdev->partner_vdev_list[dest_chip_id][1] ==
			      params->tx_vdev_id)) {
				/*dp_soc_unref_delete(soc);*/
				goto rel_da_peer;
			}
		}
		ret = true;
	}

rel_da_peer:
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return ret;
}
#else
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      struct dp_be_intrabss_params *params)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	bool ret = false;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
						params->dest_soc,
						msdu_metadata->da_idx);

	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	params->tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation
	 * list, then don't forward; instead push to bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer))
		goto rel_da_peer;

	if (da_peer->bss_peer || da_peer == ta_peer)
		goto rel_da_peer;

	/* Same vdev, support Intra-BSS */
	if (da_peer->vdev == ta_peer->vdev) {
		ret = true;
		goto rel_da_peer;
	}

	/* MLO specific Intra-BSS check */
	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
		ret = true;
		goto rel_da_peer;
	}

rel_da_peer:
	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return ret;
}
#endif /* WLAN_MLO_MULTI_CHIP */
#endif /* INTRA_BSS_FWD_OFFLOAD */

#if defined(QCA_MONITOR_2_0_SUPPORT) || defined(CONFIG_WORD_BASED_TLV)
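/**
 * dp_rx_word_mask_subscribe_be() - Program the RX TLV word masks in the
 *				    HTT RX ring selection config message
 * @soc: core txrx main context
 * @msg_word: pointer into the HTT message buffer
 * @rx_filter: opaque pointer to struct htt_rx_ring_tlv_filter
 *
 * When both the rx_mpdu_start and rx_msdu_end word masks are non-zero,
 * enables word mask compaction and writes the masks into words 14 and
 * 15 of the message; otherwise leaves the FW defaults in place. The
 * monitor word masks are then subscribed via dp_mon_rx_wmask_subscribe().
 */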
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter)
{
	struct htt_rx_ring_tlv_filter *tlv_filter =
				(struct htt_rx_ring_tlv_filter *)rx_filter;

	if (!msg_word || !tlv_filter)
		return;

	/* if word mask is zero, FW will set the default values */
	if (!(tlv_filter->rx_mpdu_start_wmask > 0 &&
	      tlv_filter->rx_msdu_end_wmask > 0)) {
		msg_word += 4;
		*msg_word = 0;
		goto config_mon;
	}

	HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACTION_ENABLE_SET(*msg_word, 1);

	/* word 14 */
	msg_word += 3;
	*msg_word = 0;

	HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_WORD_MASK_SET(
				*msg_word,
				tlv_filter->rx_mpdu_start_wmask);

	/* word 15 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_WORD_MASK_SET(
				*msg_word,
				tlv_filter->rx_msdu_end_wmask);
config_mon:
	msg_word--;
	dp_mon_rx_wmask_subscribe(soc, msg_word, tlv_filter);
}
#else
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter)
{
}
#endif

/**
 * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc,
			       struct dp_txrx_peer *ta_txrx_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats)
{
	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);

		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
		tx_exc_metadata.is_intrabss_fwd = 1;
		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;
		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
					 ta_txrx_peer->vdev->vdev_id,
					 nbuf_copy,
					 &tx_exc_metadata)) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.fail, 1,
						      len);
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.pkts, 1,
						      len);
			tid_stats->intrabss_cnt++;
		}
		return true;
	}
	return false;
}

/**
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL pkt with DA
 *			     not equal to vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata)
{
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
					tid_stats.tid_rx_stats[ring_id][tid];
	bool ret = false;
	struct dp_be_intrabss_params params;

	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
					       nbuf, tid_stats);
	}

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	params.dest_soc = soc;
	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer,
					  &msdu_metadata, &params)) {
		ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
					       params.tx_vdev_id,
					       rx_tlv_hdr, nbuf, tid_stats);
	}

	return ret;
}
#endif