/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_li_rx.h"
#include "hal_api.h"
#include "hal_li_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#include "dp_li.h"

/**
 * is_sa_da_idx_valid() - check if the SA/DA AST indices are within range
 * @max_ast: maximum AST index supported by the SoC
 * @nbuf: pkt buffer whose SA/DA valid flags are checked
 * @msdu_info: MSDU metadata carrying the SA/DA AST indices
 *
 * Return: true if the indices are valid, false otherwise
 */
static inline
bool is_sa_da_idx_valid(uint32_t max_ast,
			qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info)
{
	if ((qdf_nbuf_is_sa_valid(nbuf) && (msdu_info.sa_idx > max_ast)) ||
	    (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) &&
	     (msdu_info.da_idx > max_ast)))
		return false;

	return true;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#if defined(FEATURE_MCL_REPEATER) && defined(FEATURE_MEC)
/**
 * dp_rx_mec_check_wrapper() - wrapper to dp_rx_mcast_echo_check
 * @soc: core DP main context
 * @txrx_peer: DP txrx peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf);
}
#else
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLE
/**
 * dp_rx_intrabss_ucast_check_li() - check if intra-BSS unicast forwarding
 *				     is allowed for this frame
 * @soc: core txrx main context
 * @nbuf: frame to be checked
 * @ta_txrx_peer: transmitter (source) peer entry
 * @msdu_metadata: MSDU metadata carrying the DA AST index
 * @p_tx_vdev_id: filled with the vdev id of the destination peer
 *
 * Return: true if the frame can be intra-BSS forwarded as unicast
 */
static bool
dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_txrx_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      uint8_t *p_tx_vdev_id)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	dp_txrx_ref_handle txrx_ref_handle;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	ast_entry = soc->ast_table[msdu_metadata->da_idx];
	if (!ast_entry)
		return false;

	if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
		ast_entry->is_active = TRUE;
		return false;
	}

	da_peer_id = ast_entry->peer_id;
	/* The TA peer cannot be the same as the peer (DA) on which the AST
	 * entry is present; this indicates a change in topology and that
	 * the AST entries are yet to be updated.
	 */
	if (da_peer_id == ta_txrx_peer->peer_id ||
	    da_peer_id == HTT_INVALID_PEER)
		return false;

	da_peer = dp_txrx_peer_get_ref_by_id(soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	*p_tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation list,
	 * then don't forward; instead push the frame to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_txrx_peer) ||
	    dp_get_peer_isolation(da_peer) ||
	    da_peer->vdev->vdev_id != ta_txrx_peer->vdev->vdev_id) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	if (da_peer->bss_peer) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return true;
}

/**
 * dp_rx_intrabss_fwd_li() - Implements the Intra-BSS forwarding logic
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: MSDU metadata extracted from the rx TLVs
 * @tid_stats: per-TID rx stats of the REO ring being processed
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd_li(struct dp_soc *soc,
		      struct dp_txrx_peer *ta_txrx_peer,
		      uint8_t *rx_tlv_hdr,
		      qdf_nbuf_t nbuf,
		      struct hal_rx_msdu_metadata msdu_metadata,
		      struct cdp_tid_rx_stats *tid_stats)
{
	uint8_t tx_vdev_id;

	/* If it is a broadcast pkt (e.g. ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and the original pkt up the network stack.
	 * Note: how do we handle multicast pkts? Do we forward
	 * all multicast pkts as is, or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_txrx_peer->bss_peer)
		return dp_rx_intrabss_mcbc_fwd(soc, ta_txrx_peer, rx_tlv_hdr,
					       nbuf, tid_stats);

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_txrx_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_txrx_peer,
					  &msdu_metadata, &tx_vdev_id))
		return dp_rx_intrabss_ucast_fwd(soc, ta_txrx_peer, tx_vdev_id,
						rx_tlv_hdr, nbuf, tid_stats);

	return false;
}
#endif

/**
 * dp_rx_process_li() - Brain of the Rx processing functionality
 *			Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_ring_desc_t last_prefetched_hw_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t num_pending = 0;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	int max_reap_limit;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;
	uint32_t max_ast;
	uint64_t current_time = 0;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	hif_pm_runtime_mark_dp_rx_busy(scn);
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	max_ast = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	if (!num_pending)
		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);

	if (num_pending > quota)
		num_pending = quota;

	last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
						       num_pending);

	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
	max_ast = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(num_pending)) {
		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc))
			break;

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				rx_desc->unmapped = 1;
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
						&head[rx_desc->pool_id],
						&tail[rx_desc->pool_id],
						rx_desc);
			}
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */

		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_li(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_li(ring_desc, &msdu_desc_info);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (RX_DATA_BUFFER_SIZE -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								ring_desc);
					/* As we are going to break out of the
					 * loop because of unavailability of
					 * descs to form complete SG, we need to
					 * reset the TP in the REO destination
					 * ring.
					 */
					hal_srng_dst_dec_tp(hal_soc,
							    hal_ring_hdl);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->pool_id]++;
		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_li(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			DP_PEER_METADATA_VDEV_ID_GET_LI(peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_LI(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));

		/* set reo dest indication */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * move unmap after scattered msdu waiting break logic
		 * in case double skb unmap happened.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		rx_desc->unmapped = 1;
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);
		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) {
			quota -= 1;
			num_pending -= 1;
		}

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		dp_rx_prefetch_hw_sw_nbuf_desc(soc, hal_soc, num_pending,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish_simple(soc, mac_id, dp_rxdma_srng,
					       rx_desc_pool,
					       rx_bufs_reaped[mac_id],
					       &head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data(nbuf, next);

		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf)) {
			tid = qdf_nbuf_get_tid_val(nbuf);
			if (tid >= CDP_MAX_DATA_TIDS) {
				DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		if (qdf_likely(old_tid != tid)) {
			tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
			if (qdf_unlikely(!hal_rx_attn_msdu_done_get_li(
								rx_tlv_hdr))) {
				dp_err_rl("MSDU DONE failure");
				DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
				hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
						     QDF_TRACE_LEVEL_INFO);
				tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
				qdf_assert(0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			} else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get_li(
								rx_tlv_hdr))) {
				DP_STATS_INC(soc, rx.err.msdu_len_err, 1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases:
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len);
			} else {
				dp_rx_nbuf_free(nbuf);
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
				dp_info_rl("scatter msdu len %d, dropped",
					   msdu_len);
				nbuf = next;
				continue;
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		/*
		 * process frame for multipass phase processing
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(txrx_peer, nbuf,
						    tid) == false) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.multipass_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop, 1);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
								rx_tlv_hdr) ==
				  false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.nawds_mcast_drop, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				dp_rx_info("%pK: mesh pkt filtered", soc);
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
					      txrx_peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* Due to HW issue, sometimes we see that the sa_idx
			 * and da_idx are invalid with sa_valid and da_valid
			 * bits set
			 *
			 * in this case we also see that value of
			 * sa_sw_peer_id is set as 0
			 *
			 * Drop the packet if sa_idx or da_idx is OOB or
			 * sa_sw_peer_id is 0
			 */
			if (!is_sa_da_idx_valid(max_ast, nbuf,
						msdu_metadata)) {
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				continue;
			}
			if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
								 txrx_peer,
								 rx_tlv_hdr,
								 nbuf))) {
				/* this is a looped back MCBC pkt, drop it */
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.mec_drop, 1,
							      QDF_NBUF_CB_RX_PKT_LEN(nbuf));
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			/* WDS Source Port Learning */
			if (qdf_likely(vdev->wds_enabled))
				dp_rx_wds_srcport_learn(soc,
							rx_tlv_hdr,
							txrx_peer,
							nbuf,
							msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_li(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata,
							  tid_stats)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1,
					   QDF_NBUF_CB_RX_PKT_LEN(nbuf));
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	if (qdf_likely(deliver_list_head)) {
		if (qdf_likely(txrx_peer)) {
			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
						     pkt_capture_offload,
						     deliver_list_head);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
						       deliver_list_head,
						       deliver_list_tail);
		} else {
			nbuf = deliver_list_head;
			while (nbuf) {
				next = nbuf->next;
				nbuf->next = NULL;
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
				nbuf = next;
			}
		}
	}

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_desc_pool_init_li() - Initialize the rx descriptor pool
 * @soc: Handle to DP SoC structure
 * @rx_desc_pool: rx descriptor pool pointer
 * @pool_id: pool id to be initialized
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	return dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}

/**
 * dp_rx_desc_pool_deinit_li() - De-initialize the rx descriptor pool
 * @soc: Handle to DP SoC structure
 * @rx_desc_pool: rx descriptor pool pointer
 * @pool_id: pool id to be de-initialized
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

/**
 * dp_wbm_get_rx_desc_from_hal_desc_li() - Get the software rx descriptor
 *					   corresponding to a WBM ring
 *					   descriptor
 * @soc: Handle to DP SoC structure
 * @ring_desc: WBM ring descriptor
 * @r_rx_desc: pointer to be filled with the rx descriptor address
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL if the RBM
 *	   in the descriptor is invalid
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc)
{
	struct hal_buf_info buf_info = {0};
	hal_soc_handle_t hal_soc = soc->hal_soc;

	/* only cookie and rbm will be valid in buf_info */
	hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
				  &buf_info);

	if (qdf_unlikely(buf_info.rbm !=
			 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) {
		/* TODO */
		/* Call appropriate handler */
		DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
		dp_rx_err("%pK: Invalid RBM %d", soc, buf_info.rbm);
		return QDF_STATUS_E_INVAL;
	}

	*r_rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);

	return QDF_STATUS_SUCCESS;
}