/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_li_rx.h"
#include "hal_api.h"
#include "hal_li_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#include "dp_li.h"

static inline
bool is_sa_da_idx_valid(uint32_t max_ast,
			qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info)
{
	if ((qdf_nbuf_is_sa_valid(nbuf) && (msdu_info.sa_idx > max_ast)) ||
	    (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) &&
	     (msdu_info.da_idx > max_ast)))
		return false;

	return true;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#if defined(FEATURE_MCL_REPEATER) && defined(FEATURE_MEC)
/**
 * dp_rx_mec_check_wrapper() - wrapper to dp_rx_mcast_echo_check
 * @soc: core DP main context
 * @txrx_peer: DP txrx peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf);
}
#else
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLE
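/**
 * dp_rx_intrabss_ucast_check_li() - Check whether a unicast frame is
 *				     eligible for intra-BSS forwarding
 * @soc: core txrx main context
 * @nbuf: frame received from the REO ring
 * @ta_txrx_peer: transmitter (source) peer entry
 * @msdu_metadata: parsed MSDU metadata, used for the DA AST index lookup
 * @p_tx_vdev_id: filled with the vdev id to transmit on when forwarding
 *
 * Return: true if the frame can be forwarded within the BSS, else false
 */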
static bool
dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_txrx_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      uint8_t *p_tx_vdev_id)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	ast_entry = soc->ast_table[msdu_metadata->da_idx];
	if (!ast_entry)
		return false;

	if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
		ast_entry->is_active = TRUE;
		return false;
	}

	da_peer_id = ast_entry->peer_id;
	/* TA peer cannot be the same as the peer (DA) on which the AST is
	 * present; this indicates a change in topology and that AST entries
	 * are yet to be updated.
	 */
	if (da_peer_id == ta_txrx_peer->peer_id ||
	    da_peer_id == HTT_INVALID_PEER)
		return false;

	da_peer = dp_txrx_peer_get_ref_by_id(soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	*p_tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation list,
	 * don't forward; instead push the frame to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_txrx_peer) ||
	    dp_get_peer_isolation(da_peer) ||
	    da_peer->vdev->vdev_id != ta_txrx_peer->vdev->vdev_id) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	if (da_peer->bss_peer) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return true;
}

/**
 * dp_rx_intrabss_fwd_li() - Implements the Intra-BSS forwarding logic
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: parsed MSDU metadata
 * @tid_stats: per TID rx stats
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd_li(struct dp_soc *soc,
		      struct dp_txrx_peer *ta_txrx_peer,
		      uint8_t *rx_tlv_hdr,
		      qdf_nbuf_t nbuf,
		      struct hal_rx_msdu_metadata msdu_metadata,
		      struct cdp_tid_rx_stats *tid_stats)
{
	uint8_t tx_vdev_id;

	/* If it is a broadcast pkt (e.g. ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and the original pkt up the network stack.
	 * Note: how do we handle multicast pkts? Do we forward
	 * all multicast pkts as is, or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_txrx_peer->bss_peer)
		return dp_rx_intrabss_mcbc_fwd(soc, ta_txrx_peer, rx_tlv_hdr,
					       nbuf, tid_stats);

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_txrx_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_txrx_peer,
					  &msdu_metadata, &tx_vdev_id))
		return dp_rx_intrabss_ucast_fwd(soc, ta_txrx_peer, tx_vdev_id,
						rx_tlv_hdr, nbuf, tid_stats);

	return false;
}
#endif

/**
 * dp_rx_process_li() - Brain of the Rx processing functionality
 *			Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_ring_desc_t last_prefetched_hw_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t num_pending = 0;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	int max_reap_limit;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;
	uint32_t max_ast;
	uint64_t current_time = 0;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
	dp_runtime_pm_mark_last_busy(soc);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	max_ast = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	if (!num_pending)
		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);

	if (num_pending > quota)
		num_pending = quota;

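	/* Prefetch the first batch of HW descriptors so the reap loop below
	 * starts with warm cache lines; the return value tracks the last
	 * prefetched descriptor so the loop can keep prefetching ahead.
	 */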
	last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
						       num_pending);

	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
	max_ast = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(num_pending)) {
		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc))
			break;

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				rx_desc->unmapped = 1;
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
						&head[rx_desc->pool_id],
						&tail[rx_desc->pool_id],
						rx_desc);
			}
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */

		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_li(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_li(ring_desc, &msdu_desc_info);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (RX_DATA_BUFFER_SIZE -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								ring_desc);
					/* As we are going to break out of the
					 * loop because of unavailability of
					 * descs to form complete SG, we need to
					 * reset the TP in the REO destination
					 * ring.
					 */
					hal_srng_dst_dec_tp(hal_soc,
							    hal_ring_hdl);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->pool_id]++;
		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_li(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			DP_PEER_METADATA_VDEV_ID_GET_LI(peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_LI(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));

		/* set reo dest indication */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * move unmap after scattered msdu waiting break logic
		 * in case double skb unmap happened.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		rx_desc->unmapped = 1;
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);

		quota -= 1;
		num_pending -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		dp_rx_prefetch_hw_sw_nbuf_desc(soc, hal_soc, num_pending,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish_simple(soc, mac_id, dp_rxdma_srng,
					       rx_desc_pool,
					       rx_bufs_reaped[mac_id],
					       &head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data(nbuf, next);

		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf)) {
			tid = qdf_nbuf_get_tid_val(nbuf);
			if (tid >= CDP_MAX_DATA_TIDS) {
				DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		if (qdf_likely(old_tid != tid)) {
			tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
			if (qdf_unlikely(!hal_rx_attn_msdu_done_get_li(
								rx_tlv_hdr))) {
				dp_err_rl("MSDU DONE failure");
				DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
				hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
						     QDF_TRACE_LEVEL_INFO);
				tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
				qdf_assert(0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			} else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get_li(
								rx_tlv_hdr))) {
				DP_STATS_INC(soc, rx.err.msdu_len_err, 1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *	  nbuf_alloc. but we received an msdu which is
		 *	  2304 bytes in size then this msdu is spread
		 *	  across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *	  across 1st nbuf and 2nd nbuf and last MSDU is
		 *	  spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len);
			} else {
				dp_rx_nbuf_free(nbuf);
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
				dp_info_rl("scatter msdu len %d, dropped",
					   msdu_len);
				nbuf = next;
				continue;
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		/*
		 * process frame for multipass phase processing
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(txrx_peer, nbuf,
						    tid) == false) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.multipass_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop, 1);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
								rx_tlv_hdr) ==
				  false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.nawds_mcast_drop, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
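		/* EAPOL and WAPI frames are still delivered so that the key
		 * handshake can complete before the peer is authorized.
		 */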
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				dp_rx_info("%pK: mesh pkt filtered", soc);
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
					      txrx_peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* Due to HW issue, sometimes we see that the sa_idx
			 * and da_idx are invalid with sa_valid and da_valid
			 * bits set
			 *
			 * in this case we also see that value of
			 * sa_sw_peer_id is set as 0
			 *
			 * Drop the packet if sa_idx and da_idx OOB or
			 * sa_sw_peerid is 0
			 */
			if (!is_sa_da_idx_valid(max_ast, nbuf,
						msdu_metadata)) {
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				continue;
			}
			if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
								 txrx_peer,
								 rx_tlv_hdr,
								 nbuf))) {
				/* this is a looped back MCBC pkt, drop it */
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.mec_drop, 1,
							      QDF_NBUF_CB_RX_PKT_LEN(nbuf));
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			/* WDS Source Port Learning */
			if (qdf_likely(vdev->wds_enabled))
				dp_rx_wds_srcport_learn(soc,
							rx_tlv_hdr,
							txrx_peer,
							nbuf,
							msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_li(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata,
							  tid_stats)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1,
					   QDF_NBUF_CB_RX_PKT_LEN(nbuf));
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdevs */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	return dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}

void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

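/**
 * dp_wbm_get_rx_desc_from_hal_desc_li() - Get the SW rx descriptor that
 *					   corresponds to a HAL (WBM) ring
 *					   descriptor
 * @soc: core txrx main context
 * @ring_desc: HAL ring descriptor
 * @r_rx_desc: filled with the matching SW rx descriptor on success
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if the RBM or SW cookie
 *	   in the descriptor is invalid
 */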
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc)
{
	struct hal_buf_info buf_info = {0};
	hal_soc_handle_t hal_soc = soc->hal_soc;

	/* only cookie and rbm will be valid in buf_info */
	hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
				  &buf_info);

	if (qdf_unlikely(buf_info.rbm !=
			 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) {
		/* TODO */
		/* Call appropriate handler */
		DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
		dp_rx_err("%pK: Invalid RBM %d", soc, buf_info.rbm);
		return QDF_STATUS_E_INVAL;
	}

	if (!dp_rx_is_sw_cookie_valid(soc, buf_info.sw_cookie)) {
		dp_rx_err("invalid sw_cookie 0x%x", buf_info.sw_cookie);
		return QDF_STATUS_E_INVAL;
	}

	*r_rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);

	return QDF_STATUS_SUCCESS;
}