/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_li_rx.h"
#include "hal_api.h"
#include "hal_li_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#include "dp_li.h"

static inline
bool is_sa_da_idx_valid(uint32_t max_ast,
			qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info)
{
	if ((qdf_nbuf_is_sa_valid(nbuf) && (msdu_info.sa_idx > max_ast)) ||
	    (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) &&
	     (msdu_info.da_idx > max_ast)))
		return false;

	return true;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#if defined(FEATURE_MCL_REPEATER) && defined(FEATURE_MEC)
/**
 * dp_rx_mec_check_wrapper() - wrapper to dp_rx_mcast_echo_check
 * @soc: core DP main context
 * @txrx_peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf);
}
#else
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLE
static bool
dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_txrx_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      uint8_t *p_tx_vdev_id)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	ast_entry = soc->ast_table[msdu_metadata->da_idx];
	if (!ast_entry)
		return false;

	if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
		ast_entry->is_active = TRUE;
		return false;
	}

	da_peer_id = ast_entry->peer_id;
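	/* ast_entry->peer_id identifies the candidate destination peer */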
	/* TA peer cannot be same as peer(DA) on which AST is present
	 * this indicates a change in topology and that AST entries
	 * are yet to be updated.
	 */
	if (da_peer_id == ta_txrx_peer->peer_id ||
	    da_peer_id == HTT_INVALID_PEER)
		return false;

	da_peer = dp_txrx_peer_get_ref_by_id(soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	*p_tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation
	 * list, then don't forward; instead push to bridge stack.
	 */
	if (dp_get_peer_isolation(ta_txrx_peer) ||
	    dp_get_peer_isolation(da_peer) ||
	    da_peer->vdev->vdev_id != ta_txrx_peer->vdev->vdev_id) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	if (da_peer->bss_peer) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return true;
}

/*
 * dp_rx_intrabss_fwd_li() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: parsed rx msdu metadata
 * @tid_stats: per-TID rx stats for the current tid
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd_li(struct dp_soc *soc,
		      struct dp_txrx_peer *ta_txrx_peer,
		      uint8_t *rx_tlv_hdr,
		      qdf_nbuf_t nbuf,
		      struct hal_rx_msdu_metadata msdu_metadata,
		      struct cdp_tid_rx_stats *tid_stats)
{
	uint8_t tx_vdev_id;

	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
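	 * For now, MC/BC frames from a non bss_peer are simply handed to
	 * dp_rx_intrabss_mcbc_fwd() below.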
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_txrx_peer->bss_peer)
		return dp_rx_intrabss_mcbc_fwd(soc, ta_txrx_peer, rx_tlv_hdr,
					       nbuf, tid_stats, 0);

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_txrx_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_txrx_peer,
					  &msdu_metadata, &tx_vdev_id))
		return dp_rx_intrabss_ucast_fwd(soc, ta_txrx_peer, tx_vdev_id,
						rx_tlv_hdr, nbuf, tid_stats,
						0);

	return false;
}
#endif

uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_ring_desc_t last_prefetched_hw_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t num_pending = 0;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	int max_reap_limit;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;
	uint32_t max_ast;
	uint64_t current_time = 0;
	uint16_t buf_size;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
	dp_runtime_pm_mark_last_busy(soc);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	max_ast = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

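	/* Start SRNG access; on failure log the error and skip this reap. */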
	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	if (!num_pending)
		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);

	if (num_pending > quota)
		num_pending = quota;

	last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
						       num_pending);

	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
	max_ast = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(num_pending)) {
		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc))
			break;

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
						&head[rx_desc->pool_id],
						&tail[rx_desc->pool_id],
						rx_desc);
			}
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */

		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_li(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_li(ring_desc, &msdu_desc_info);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (buf_size -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								ring_desc);
					/* As we are going to break out of the
					 * loop because of unavailability of
					 * descs to form complete SG, we need to
					 * reset the TP in the REO destination
					 * ring.
					 */
					hal_srng_dst_dec_tp(hal_soc,
							    hal_ring_hdl);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    !(msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION))
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->pool_id]++;
		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_li(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			DP_PEER_METADATA_VDEV_ID_GET_LI(peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_LI(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
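		/* these cb fields drive the per-msdu delivery loop below */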
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));

		/* set reo dest indication */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * move unmap after scattered msdu waiting break logic
		 * in case double skb unmap happened.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);

		quota -= 1;
		num_pending -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		dp_rx_prefetch_hw_sw_nbuf_desc(soc, hal_soc, num_pending,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish_simple(soc, mac_id, dp_rxdma_srng,
					       rx_desc_pool,
					       rx_bufs_reaped[mac_id],
					       &head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
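	/* the txrx_peer reference taken in this loop is released only after
	 * the final deliver-to-stack call below
	 */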
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data(nbuf, next);

		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
			tid = qdf_nbuf_get_tid_val(nbuf);

		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
			DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		if (qdf_likely(old_tid != tid)) {
			tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
			if (qdf_unlikely(!hal_rx_attn_msdu_done_get_li(
								rx_tlv_hdr))) {
				dp_err_rl("MSDU DONE failure");
				DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
				hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
						     QDF_TRACE_LEVEL_INFO);
				tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
				qdf_assert(0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			} else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get_li(
								rx_tlv_hdr))) {
				DP_STATS_INC(soc, rx.err.msdu_len_err, 1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len,
							      0);
			} else {
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);

				if (!dp_rx_is_sg_supported()) {
					dp_rx_nbuf_free(nbuf);
					dp_info_rl("sg msdu len %d, dropped",
						   msdu_len);
					nbuf = next;
					continue;
				}
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		/*
		 * process frame for multipass phrase processing
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(txrx_peer, nbuf,
						    tid) == false) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.multipass_rx_pkt_drop,
							  1, 0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop,
						  1, 0);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
								rx_tlv_hdr) ==
				  false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.nawds_mcast_drop,
						  1, 0);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
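		/* EAPOL/WAPI frames are still let through so that the key
		 * handshake can complete
		 */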
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1, 0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats, 0);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			    == QDF_STATUS_SUCCESS) {
				dp_rx_info("%pK: mesh pkt filtered", soc);
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
					      txrx_peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* Due to HW issue, sometimes we see that the sa_idx
			 * and da_idx are invalid with sa_valid and da_valid
			 * bits set
			 *
			 * in this case we also see that value of
			 * sa_sw_peer_id is set as 0
			 *
			 * Drop the packet if sa_idx and da_idx OOB or
			 * sa_sw_peerid is 0
			 */
			if (!is_sa_da_idx_valid(max_ast, nbuf,
						msdu_metadata)) {
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				continue;
			}
			if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
								 txrx_peer,
								 rx_tlv_hdr,
								 nbuf))) {
				/* this is a looped back MCBC pkt, drop it */
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.mec_drop, 1,
							      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
							      0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			/* WDS Source Port Learning */
			if (qdf_likely(vdev->wds_enabled))
				dp_rx_wds_srcport_learn(soc,
							rx_tlv_hdr,
							txrx_peer,
							nbuf,
							msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_li(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata,
							  tid_stats)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1,
					   QDF_NBUF_CB_RX_PKT_LEN(nbuf));
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
					      rx.rx_success, 1,
					      QDF_NBUF_CB_RX_PKT_LEN(nbuf), 0);
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
						      0);

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

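	/* release the peer reference taken in the delivery loop above */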
	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	return dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}

void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc)
{
	struct hal_buf_info buf_info = {0};
	hal_soc_handle_t hal_soc = soc->hal_soc;

	/* only cookie and rbm will be valid in buf_info */
	hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
				  &buf_info);

	if (qdf_unlikely(buf_info.rbm !=
			 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) {
		/* TODO */
		/* Call appropriate handler */
		DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
		dp_rx_err("%pK: Invalid RBM %d", soc, buf_info.rbm);
		return QDF_STATUS_E_INVAL;
	}

	if (!dp_rx_is_sw_cookie_valid(soc, buf_info.sw_cookie)) {
		dp_rx_err("invalid sw_cookie 0x%x", buf_info.sw_cookie);
		return QDF_STATUS_E_INVAL;
	}

	*r_rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);

	return QDF_STATUS_SUCCESS;
}

bool dp_rx_chain_msdus_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!dp_pdev) {
		dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return mpdu_done;
	}
	/* if invalid peer SG list has max values free the buffers in list
	 * and treat current buffer as start of list
	 *
	 * current logic to detect the last buffer from attn_tlv is not reliable
	 * in OFDMA UL scenario hence add max buffers check to avoid list pile
	 * up
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
	     (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
						      rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			dp_rx_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(soc->hal_soc,
							     rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; add the check for debugging purposes
	 * in case of some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}

static struct dp_soc *dp_rx_replensih_soc_get_li(struct dp_soc *soc,
						 uint8_t chip_id)
{
	return soc;
}

qdf_nbuf_t
dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota,
			   uint32_t *rx_bufs_used)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type;
	uint8_t mac_id;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf;
	union hal_wbm_err_info_u wbm_err_info = { 0 };
	uint8_t msdu_continuation = 0;
	bool process_sg_buf = false;
	uint32_t wbm_err_src;
	QDF_STATUS status;
	struct dp_soc *replenish_soc;
	uint8_t chip_id = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
	uint8_t *rx_tlv_hdr;
	uint32_t peer_mdata;

	qdf_assert(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
			      soc, hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
		if (qdf_unlikely(!ring_desc))
			break;

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		if (dp_assert_always_internal_stat(
				buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF,
				soc, rx.err.wbm_err_buf_rel_type))
			continue;

		wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));

		if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
								   ring_desc,
								   &rx_desc)) {
			dp_rx_err_err("get rx desc from hal_desc failed");
			continue;
		}

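		/* validate the reaped rx_desc (NULL, magic word, stale reap)
		 * before touching its nbuf
		 */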
		if (dp_assert_always_internal_stat(rx_desc, soc,
						   rx.err.rx_desc_null))
			continue;

		if (!dp_rx_desc_check_magic(rx_desc)) {
			dp_rx_err_err("%pK: Invalid rx_desc %pK",
				      soc, rx_desc);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			continue;
		}

		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info.info_bit,
					hal_soc);
		nbuf = rx_desc->nbuf;

		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
							  ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
				   nbuf);
			rx_desc->in_err_state = 1;
			rx_desc->unmapped = 1;
			rx_bufs_reaped[rx_desc->pool_id]++;

			dp_rx_add_to_free_desc_list(
						&head[rx_desc->pool_id],
						&tail[rx_desc->pool_id],
						rx_desc);
			continue;
		}

		/* Update peer_id in nbuf cb */
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
							   rx_tlv_hdr);
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);

		if (qdf_likely(mpdu_desc_info.mpdu_flags &
			       HAL_MPDU_F_QOS_CONTROL_VALID))
			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);

		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		if (qdf_unlikely(
			soc->wbm_release_desc_rx_sg_support &&
			dp_rx_is_sg_formation_required(&wbm_err_info.info_bit))) {
			/* SG is detected from continuation bit */
			msdu_continuation =
				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
								     ring_desc);
			if (msdu_continuation &&
			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
				/* Update length from first buffer in SG */
				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
					hal_rx_msdu_start_msdu_len_get(
						soc->hal_soc,
						qdf_nbuf_data(nbuf));
				soc->wbm_sg_param.wbm_is_first_msdu_in_sg =
									true;
			}

			if (msdu_continuation) {
				/* MSDU continued packets */
				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
			} else {
				/* This is the terminal packet in SG */
				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
				process_sg_buf = true;
			}
		}

		/*
		 * save the wbm desc info in nbuf CB/TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		wbm_err_info.info_bit.pool_id = rx_desc->pool_id;
		dp_rx_set_wbm_err_info_in_nbuf(soc, nbuf, wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
					  nbuf);
			if (process_sg_buf) {
				if (!dp_rx_buffer_pool_refill(
					soc,
					soc->wbm_sg_param.wbm_sg_nbuf_head,
					rx_desc->pool_id))
					DP_RX_MERGE_TWO_LIST(
						nbuf_head, nbuf_tail,
						soc->wbm_sg_param.wbm_sg_nbuf_head,
						soc->wbm_sg_param.wbm_sg_nbuf_tail);
				dp_rx_wbm_sg_list_last_msdu_war(soc);
				dp_rx_wbm_sg_list_reset(soc);
				process_sg_buf = false;
			}
		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
						     rx_desc->pool_id)) {
			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
		}

		dp_rx_add_to_free_desc_list
			(&head[rx_desc->pool_id],
			 &tail[rx_desc->pool_id], rx_desc);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!msdu_continuation))
			quota -= 1;
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		replenish_soc =
			dp_rx_replensih_soc_get_li(soc, chip_id);
		dp_rxdma_srng =
			&replenish_soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish_simple(
					replenish_soc, mac_id,
					dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped[mac_id],
					&head[mac_id],
					&tail[mac_id]);
		*rx_bufs_used += rx_bufs_reaped[mac_id];
	}
	return nbuf_head;
}

QDF_STATUS
dp_rx_null_q_desc_handle_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			    uint8_t *rx_tlv_hdr, uint8_t pool_id,
			    struct dp_txrx_peer *txrx_peer,
			    bool is_reo_exception,
			    uint8_t link_id)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;
	struct hal_rx_msdu_metadata msdu_metadata;
	uint16_t sa_idx = 0;
	bool is_eapol = 0;
	bool enh_flag;
	uint16_t buf_size;

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	qdf_nbuf_set_rx_chfrag_start(
				nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
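		/* (clamped to the RX buffer size for the non-frag case) */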
		qdf_nbuf_set_pktlen(nbuf, qdf_min(pkt_len, (uint32_t)buf_size));
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!txrx_peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!txrx_peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (!pdev) {
			dp_err_rl("pdev is null for pool_id = %d", pool_id);
			return QDF_STATUS_E_FAILURE;
		}

		dp_err_rl("txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		/* QCN9000 has the support enabled */
		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
			mpdu_done = true;
			nbuf->next = NULL;
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
							   nbuf,
							   mpdu_done,
							   pool_id);
		} else {
			mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
								    rx_tlv_hdr,
								    pool_id);
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(
					soc,
					pdev->invalid_peer_head_msdu,
					mpdu_done, pool_id);
		}

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}

		return QDF_STATUS_E_FAILURE;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  soc->rx_pkt_tlv_size));

	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1,
					  0);
		goto drop_nbuf;
	}

	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
			goto drop_nbuf;
		}
	}

	if ((!soc->mec_fw_offload) &&
	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf), 0);
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf), 0);
		goto drop_nbuf;
	}

	if (qdf_unlikely(txrx_peer->nawds_enabled &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1,
					  0);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1,
					  0);
		goto drop_nbuf;
	}
	/* WDS Source Port Learning */
	if (!soc->ast_offload_support &&
	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
					msdu_metadata);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		struct dp_peer *peer;
		struct dp_rx_tid *rx_tid;

		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
					     DP_MOD_ID_RX_ERR);
		if (peer) {
			rx_tid = &peer->rx_tid[tid];
			qdf_spin_lock_bh(&rx_tid->tid_lock);
			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
				/* For Mesh peer, if on one of the mesh AP the
				 * mesh peer is not deleted, the new addition of
				 * mesh peer on other mesh AP doesn't do BA
				 * negotiation leading to mismatch in BA windows.
				 * To avoid this send max BA window during init.
				 */
				if (qdf_unlikely(vdev->mesh_vdev) ||
				    qdf_unlikely(txrx_peer->nawds_enabled))
					dp_rx_tid_setup_wifi3(
						peer, tid,
						hal_get_rx_max_ba_window(soc->hal_soc, tid),
						IEEE80211_SEQ_MAX);
				else
					dp_rx_tid_setup_wifi3(peer, tid, 1,
							      IEEE80211_SEQ_MAX);
			}
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		}
	}

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	if (!txrx_peer->authorize) {
		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);

		if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
			if (!dp_rx_err_match_dhost(eh, vdev))
				goto drop_nbuf;
		} else {
			goto drop_nbuf;
		}
	}

	/*
	 * Drop packets in this path if cce_match is found. Packets will come
	 * in following path depending on whether tidQ is setup.
	 * 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
	 *    cce_match = 1
	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
	 *    dropped.
	 * 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
	 *    cce_match = 1
	 *    These packets need to be dropped and should not get delivered
	 *    to stack.
	 */
	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr)))
		goto drop_nbuf;

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_raw_frame(nbuf, 1);
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, 0);
	} else {
		enh_flag = vdev->pdev->enhanced_stats_en;
		qdf_nbuf_set_next(nbuf, NULL);
		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					  enh_flag);

		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
					      rx.rx_success, 1,
					      qdf_nbuf_len(nbuf), 0);
		/*
		 * Update the protocol tag in SKB based on
		 * CCE metadata
		 */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID,
					  true, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf,
				      rx_tlv_hdr, true);

		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					 soc->hal_soc, rx_tlv_hdr) &&
				 (vdev->rx_decap_type ==
				  htt_cmn_pkt_type_ethernet))) {
			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					    enh_flag, 0);

			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
						    qdf_nbuf_len(nbuf),
						    enh_flag, 0);
		} else {
			DP_PEER_UC_INCC_PKT(txrx_peer, 1,
					    qdf_nbuf_len(nbuf),
					    enh_flag,
					    0);
		}

		qdf_nbuf_set_exc_frame(nbuf, 1);

		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(txrx_peer, nbuf,
						    tid) == false) {
				DP_PEER_PER_PKT_STATS_INC
					(txrx_peer,
					 rx.multipass_rx_pkt_drop,
					 1, link_id);
				goto drop_nbuf;
			}
		}

		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    is_eapol);
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	dp_rx_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}