/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_li_rx.h"
#include "hal_api.h"
#include "hal_li_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#include "dp_li.h"

/**
 * is_sa_da_idx_valid() - check if the SA/DA AST indices are within bounds
 * @max_ast: maximum AST index configured for this SoC
 * @nbuf: rx buffer carrying the sa_valid/da_valid flags
 * @msdu_info: MSDU metadata carrying sa_idx/da_idx from the rx TLVs
 *
 * Return: false if a valid SA index or a valid unicast DA index exceeds
 *	   @max_ast, true otherwise
 */
static inline
bool is_sa_da_idx_valid(uint32_t max_ast,
			qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info)
{
	if ((qdf_nbuf_is_sa_valid(nbuf) && (msdu_info.sa_idx > max_ast)) ||
	    (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) &&
	     (msdu_info.da_idx > max_ast)))
		return false;

	return true;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#if defined(FEATURE_MCL_REPEATER) && defined(FEATURE_MEC)
/**
 * dp_rx_mec_check_wrapper() - wrapper to dp_rx_mcast_echo_check
 * @soc: core DP main context
 * @txrx_peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf);
}
#else
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
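/**
 * dp_rx_intrabss_ucast_check_li() - check if intrabss unicast fwd is allowed
 * @soc: core txrx main context
 * @nbuf: frame that is a candidate for intra-BSS forwarding
 * @ta_txrx_peer: source (transmitting) peer entry
 * @msdu_metadata: MSDU metadata carrying the DA AST index
 * @p_tx_vdev_id: out param to hold the vdev id of the destination peer
 *
 * Looks up the destination peer through the AST table and allows
 * forwarding only when the source and destination peers belong to the
 * same vdev, neither peer is in the isolation list, and the destination
 * is not the BSS peer.
 *
 * Return: true if the frame should be intra-BSS forwarded, else false
 */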
static bool
dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_txrx_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      uint8_t *p_tx_vdev_id)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	ast_entry = soc->ast_table[msdu_metadata->da_idx];
	if (!ast_entry)
		return false;

	if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
		ast_entry->is_active = TRUE;
		return false;
	}

	da_peer_id = ast_entry->peer_id;
	/* The TA peer cannot be the same as the peer (DA) on which the AST
	 * is present; this indicates a change in topology and that the AST
	 * entries are yet to be updated.
	 */
	if (da_peer_id == ta_txrx_peer->peer_id ||
	    da_peer_id == HTT_INVALID_PEER)
		return false;

	da_peer = dp_txrx_peer_get_ref_by_id(soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	*p_tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation list,
	 * don't forward; instead push the frame up the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_txrx_peer) ||
	    dp_get_peer_isolation(da_peer) ||
	    da_peer->vdev->vdev_id != ta_txrx_peer->vdev->vdev_id) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	if (da_peer->bss_peer) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return true;
}

/**
 * dp_rx_intrabss_fwd_li() - Implements the Intra-BSS forwarding logic
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: MSDU metadata extracted from the rx TLVs
 * @tid_stats: per-TID rx stats context to be updated
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd_li(struct dp_soc *soc,
		      struct dp_txrx_peer *ta_txrx_peer,
		      uint8_t *rx_tlv_hdr,
		      qdf_nbuf_t nbuf,
		      struct hal_rx_msdu_metadata msdu_metadata,
		      struct cdp_tid_rx_stats *tid_stats)
{
	uint8_t tx_vdev_id;

	/* If it is a broadcast pkt (e.g. ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and the original pkt up the network stack.
	 * Note: how do we handle multicast pkts? Do we forward
	 * all multicast pkts as is, or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement?
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_txrx_peer->bss_peer)
		return dp_rx_intrabss_mcbc_fwd(soc, ta_txrx_peer, rx_tlv_hdr,
					       nbuf, tid_stats);

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_txrx_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_txrx_peer,
					  &msdu_metadata, &tx_vdev_id))
		return dp_rx_intrabss_ucast_fwd(soc, ta_txrx_peer, tx_vdev_id,
						rx_tlv_hdr, nbuf, tid_stats);

	return false;
}
#endif

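/**
 * dp_rx_process_li() - Brain of the Rx processing functionality
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: REO ring number on which the packets are received
 * @quota: No. of units (packets) that can be serviced in one shot
 *
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ); reaps rx buffers
 * from the REO destination ring, replenishes them, and delivers the
 * resulting nbufs to the stack on a per-vdev basis.
 *
 * Return: uint32_t: No. of elements processed
 */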
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_ring_desc_t last_prefetched_hw_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t num_pending = 0;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	int max_reap_limit;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;
	uint32_t max_ast;
	uint64_t current_time = 0;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
	dp_runtime_pm_mark_last_busy(soc);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	max_ast = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	if (!num_pending)
		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);

	if (num_pending > quota)
		num_pending = quota;

	last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
						       num_pending);

	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
	max_ast = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(num_pending)) {
		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc))
			break;

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				rx_desc->unmapped = 1;
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
						&head[rx_desc->pool_id],
						&tail[rx_desc->pool_id],
						rx_desc);
			}
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_li(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_li(ring_desc, &msdu_desc_info);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (RX_DATA_BUFFER_SIZE -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								ring_desc);
					/* As we are going to break out of the
					 * loop because of unavailability of
					 * descs to form complete SG, we need to
					 * reset the TP in the REO destination
					 * ring.
					 */
					hal_srng_dst_dec_tp(hal_soc,
							    hal_ring_hdl);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->pool_id]++;
		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_li(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			DP_PEER_METADATA_VDEV_ID_GET_LI(peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_LI(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));

		/* set reo dest indication */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * move unmap after scattered msdu waiting break logic
		 * in case double skb unmap happened.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		rx_desc->unmapped = 1;
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);

		quota -= 1;
		num_pending -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		dp_rx_prefetch_hw_sw_nbuf_desc(soc, hal_soc, num_pending,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish_simple(soc, mac_id, dp_rxdma_srng,
					       rx_desc_pool,
					       rx_bufs_reaped[mac_id],
					       &head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data(nbuf, next);

		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
			tid = qdf_nbuf_get_tid_val(nbuf);

		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
			DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		if (qdf_likely(old_tid != tid)) {
			tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
			if (qdf_unlikely(!hal_rx_attn_msdu_done_get_li(
								rx_tlv_hdr))) {
				dp_err_rl("MSDU DONE failure");
				DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
				hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
						     QDF_TRACE_LEVEL_INFO);
				tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
				qdf_assert(0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			} else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get_li(
								rx_tlv_hdr))) {
				DP_STATS_INC(soc, rx.err.msdu_len_err, 1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len);
			} else {
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);

				if (!dp_rx_is_sg_supported()) {
					dp_rx_nbuf_free(nbuf);
					dp_info_rl("sg msdu len %d, dropped",
						   msdu_len);
					nbuf = next;
					continue;
				}
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		/*
		 * process frame for multipass phrase processing
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(txrx_peer, nbuf,
						    tid) == false) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.multipass_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop, 1);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
								rx_tlv_hdr) ==
				  false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.nawds_mcast_drop, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				dp_rx_info("%pK: mesh pkt filtered", soc);
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
					      txrx_peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* Due to HW issue, sometimes we see that the sa_idx
			 * and da_idx are invalid with sa_valid and da_valid
			 * bits set
			 *
			 * in this case we also see that value of
			 * sa_sw_peer_id is set as 0
			 *
			 * Drop the packet if sa_idx and da_idx OOB or
			 * sa_sw_peerid is 0
			 */
			if (!is_sa_da_idx_valid(max_ast, nbuf,
						msdu_metadata)) {
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				continue;
			}
			if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
								 txrx_peer,
								 rx_tlv_hdr,
								 nbuf))) {
				/* this is a looped back MCBC pkt, drop it */
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.mec_drop, 1,
							      QDF_NBUF_CB_RX_PKT_LEN(nbuf));
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			/* WDS Source Port Learning */
			if (qdf_likely(vdev->wds_enabled))
				dp_rx_wds_srcport_learn(soc,
							rx_tlv_hdr,
							txrx_peer,
							nbuf,
							msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_li(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata,
							  tid_stats)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1,
					   QDF_NBUF_CB_RX_PKT_LEN(nbuf));
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	return dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}

void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc)
{
	struct hal_buf_info buf_info = {0};
	hal_soc_handle_t hal_soc = soc->hal_soc;

	/* only cookie and rbm will be valid in buf_info */
	hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
				  &buf_info);

	if (qdf_unlikely(buf_info.rbm !=
			 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) {
		/* TODO */
		/* Call appropriate handler */
		DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
		dp_rx_err("%pK: Invalid RBM %d", soc, buf_info.rbm);
		return QDF_STATUS_E_INVAL;
	}

	if (!dp_rx_is_sw_cookie_valid(soc, buf_info.sw_cookie)) {
		dp_rx_err("invalid sw_cookie 0x%x", buf_info.sw_cookie);
		return QDF_STATUS_E_INVAL;
	}

	*r_rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);

	return QDF_STATUS_SUCCESS;
}

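/**
 * dp_rx_chain_msdus_li() - chain the msdus of an mpdu on the invalid peer list
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id of the lmac on which the buffer was received
 *
 * Accumulates the msdus of an mpdu received for an invalid peer on the
 * pdev's invalid peer msdu list until the last msdu of that mpdu is seen.
 *
 * Return: true if the complete mpdu has been received, else false
 */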
bool dp_rx_chain_msdus_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!dp_pdev) {
		dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return mpdu_done;
	}
	/* if invalid peer SG list has max values free the buffers in list
	 * and treat current buffer as start of list
	 *
	 * current logic to detect the last buffer from attn_tlv is not
	 * reliable in OFDMA UL scenario hence add max buffers check to
	 * avoid list pile up
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
	     (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
						      rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			dp_rx_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(soc->hal_soc,
							    rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; keep the check for debugging purposes,
	 * in case of corner cases.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}

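/**
 * dp_rx_wbm_err_reap_desc_li() - reap rx descriptors from the WBM error
 *				  release ring
 * @int_ctx: per interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: opaque pointer to the HAL WBM error release ring
 * @quota: No. of units (packets) that can be serviced in one shot
 * @rx_bufs_used: out param to hold the number of buffers reaped
 *
 * Return: head of the nbuf list of error frames to be processed further
 */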
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota,
			   uint32_t *rx_bufs_used)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t
		*head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
	union dp_rx_desc_list_elem_t
		*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
	uint8_t buf_type;
	uint8_t mac_id;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t msdu_continuation = 0;
	bool process_sg_buf = false;
	uint32_t wbm_err_src;
	QDF_STATUS status;
	struct dp_soc *replenish_soc;
	uint8_t chip_id;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };

	qdf_assert(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
			      soc, hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
		if (qdf_unlikely(!ring_desc))
			break;

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));

		if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
								   ring_desc,
								   &rx_desc)) {
			dp_rx_err_err("get rx desc from hal_desc failed");
			continue;
		}

		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			dp_rx_err_err("%pK: Invalid rx_desc %pK",
				      soc, rx_desc);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			continue;
		}

		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		nbuf = rx_desc->nbuf;

		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
							  ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
				   nbuf);
			rx_desc->in_err_state = 1;
			rx_desc->unmapped = 1;
			rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;

			dp_rx_add_to_free_desc_list(
				&head[rx_desc->chip_id][rx_desc->pool_id],
				&tail[rx_desc->chip_id][rx_desc->pool_id],
				rx_desc);
			continue;
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);

		if (qdf_likely(mpdu_desc_info.mpdu_flags &
			       HAL_MPDU_F_QOS_CONTROL_VALID))
			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);

		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		if (qdf_unlikely(
			soc->wbm_release_desc_rx_sg_support &&
			dp_rx_is_sg_formation_required(&wbm_err_info))) {
			/* SG is detected from continuation bit */
			msdu_continuation =
				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
								     ring_desc);
			if (msdu_continuation &&
			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
				/* Update length from first buffer in SG */
				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
					hal_rx_msdu_start_msdu_len_get(
						soc->hal_soc,
						qdf_nbuf_data(nbuf));
				soc->wbm_sg_param.wbm_is_first_msdu_in_sg =
					true;
			}

			if (msdu_continuation) {
				/* MSDU continued packets */
				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
			} else {
				/* This is the terminal packet in SG */
				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
				process_sg_buf = true;
			}
		}

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_priv_info_set_in_tlv(soc->hal_soc,
					    qdf_nbuf_data(nbuf),
					    (uint8_t *)&wbm_err_info,
					    sizeof(wbm_err_info));

		dp_rx_err_tlv_invalidate(soc, nbuf);
		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;

		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
					  nbuf);
			if (process_sg_buf) {
				if (!dp_rx_buffer_pool_refill(
					soc,
					soc->wbm_sg_param.wbm_sg_nbuf_head,
					rx_desc->pool_id))
					DP_RX_MERGE_TWO_LIST(
						nbuf_head, nbuf_tail,
						soc->wbm_sg_param.wbm_sg_nbuf_head,
						soc->wbm_sg_param.wbm_sg_nbuf_tail);
				dp_rx_wbm_sg_list_last_msdu_war(soc);
				dp_rx_wbm_sg_list_reset(soc);
				process_sg_buf = false;
			}
		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
						     rx_desc->pool_id)) {
			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
		}

		dp_rx_add_to_free_desc_list
			(&head[rx_desc->chip_id][rx_desc->pool_id],
			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!msdu_continuation))
			quota -= 1;
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
			/*
			 * continue with next mac_id if no pkts were reaped
			 * from that pool
			 */
			if (!rx_bufs_reaped[chip_id][mac_id])
				continue;

			replenish_soc =
			soc->arch_ops.dp_rx_replenish_soc_get(soc, chip_id);

			dp_rxdma_srng =
				&replenish_soc->rx_refill_buf_ring[mac_id];

			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(replenish_soc, mac_id,
						dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[chip_id][mac_id],
						&head[chip_id][mac_id],
						&tail[chip_id][mac_id], false);
			*rx_bufs_used += rx_bufs_reaped[chip_id][mac_id];
		}
	}
	return nbuf_head;
}

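/**
 * dp_rx_null_q_desc_handle_li() - Function to handle NULL ring queue
 *				   descriptor violation
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @txrx_peer: txrx peer handle
 * @is_reo_exception: flag to indicate if rx is from REO ring or exception ring
 *
 * Handles NULL queue descriptor violations arising out of a missing
 * REO queue for a given peer or TID. This may typically happen if a
 * packet is received on a QoS-enabled TID before the ADDBA negotiation
 * for that TID has set up the TID queue.
 *
 * Return: QDF_STATUS_SUCCESS if nbuf was handled successfully,
 *	   QDF_STATUS_E_FAILURE otherwise
 */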
QDF_STATUS
dp_rx_null_q_desc_handle_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			    uint8_t *rx_tlv_hdr, uint8_t pool_id,
			    struct dp_txrx_peer *txrx_peer,
			    bool is_reo_exception)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;
	struct hal_rx_msdu_metadata msdu_metadata;
	uint16_t sa_idx = 0;
	bool is_eapol = 0;
	bool enh_flag;

	qdf_nbuf_set_rx_chfrag_start(
				nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!txrx_peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!txrx_peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (!pdev) {
			dp_err_rl("pdev is null for pool_id = %d", pool_id);
			return QDF_STATUS_E_FAILURE;
		}

		dp_err_rl("txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		/* QCN9000 has the support enabled */
		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
			mpdu_done = true;
			nbuf->next = NULL;
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
							   nbuf,
							   mpdu_done,
							   pool_id);
		} else {
			mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
								    rx_tlv_hdr,
								    pool_id);
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(
					soc,
					pdev->invalid_peer_head_msdu,
					mpdu_done, pool_id);
		}

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}

		return QDF_STATUS_E_FAILURE;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  soc->rx_pkt_tlv_size));

	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1);
		goto drop_nbuf;
	}

	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
			goto drop_nbuf;
		}
	}

	if ((!soc->mec_fw_offload) &&
	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely(txrx_peer->nawds_enabled &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
		goto drop_nbuf;
	}
	/* WDS Source Port Learning */
	if (!soc->ast_offload_support &&
	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
					msdu_metadata);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		struct dp_peer *peer;
		struct dp_rx_tid *rx_tid;

		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
					     DP_MOD_ID_RX_ERR);
		if (peer) {
			rx_tid = &peer->rx_tid[tid];
			qdf_spin_lock_bh(&rx_tid->tid_lock);
			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
				dp_rx_tid_setup_wifi3(peer, tid, 1,
						      IEEE80211_SEQ_MAX);
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		}
	}

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	if (!txrx_peer->authorize) {
		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
			   qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

		if (is_eapol) {
			if (!dp_rx_err_match_dhost(eh, vdev))
				goto drop_nbuf;
		} else {
			goto drop_nbuf;
		}
	}

	/*
	 * Drop packets in this path if cce_match is found. Packets will come
	 * in following path depending on whether tidQ is setup.
	 * 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
	 *    cce_match = 1
	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
	 *    dropped.
	 * 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
	 *    cce_match = 1
	 *    These packets need to be dropped and should not get delivered
	 *    to stack.
	 */
	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr)))
		goto drop_nbuf;

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
	} else {
		enh_flag = vdev->pdev->enhanced_stats_en;
		qdf_nbuf_set_next(nbuf, NULL);
		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					  enh_flag);
		/*
		 * Update the protocol tag in SKB based on
		 * CCE metadata
		 */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID,
					  true, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf,
				      rx_tlv_hdr, true);

		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					soc->hal_soc, rx_tlv_hdr) &&
				 (vdev->rx_decap_type ==
				  htt_cmn_pkt_type_ethernet))) {
			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					    enh_flag);

			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
						    qdf_nbuf_len(nbuf),
						    enh_flag);
		}

		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    is_eapol);
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	dp_rx_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}