/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_li_rx.h"
#include "hal_api.h"
#include "hal_li_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

static inline
bool is_sa_da_idx_valid(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
			qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info)
{
	if ((qdf_nbuf_is_sa_valid(nbuf) &&
	     (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
	    (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) &&
	     (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
		return false;

	return true;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#if defined(FEATURE_MCL_REPEATER) && defined(FEATURE_MEC)
/**
 * dp_rx_mec_check_wrapper() - wrapper to dp_rx_mcast_echo_check
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_peer *peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf);
}
#else
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_peer *peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
static bool
dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      struct dp_peer *ta_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata)
{
	uint16_t da_peer_id;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;

	if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
		return false;

	ast_entry = soc->ast_table[msdu_metadata->da_idx];
	if (!ast_entry)
		return false;

	if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
		ast_entry->is_active = TRUE;
		return false;
	}

	da_peer_id = ast_entry->peer_id;
	/* The TA peer cannot be the same as the peer (DA) on which the
	 * AST entry is present; this indicates a change in topology and
	 * that the AST entries are yet to be updated.
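	 * For example, the client may have roamed and the host has not
	 * yet processed the corresponding AST update event, so do not
	 * intra-BSS forward in this state.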
	 */
	if ((da_peer_id == ta_peer->peer_id) ||
	    (da_peer_id == HTT_INVALID_PEER))
		return false;

	da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
					DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	/* If the source or destination peer is in the isolation
	 * list, then don't forward; instead push to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer) ||
	    dp_get_peer_isolation(da_peer) ||
	    (da_peer->vdev->vdev_id != ta_peer->vdev->vdev_id)) {
		dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
		return false;
	}

	if (da_peer->bss_peer) {
		dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
		return false;
	}

	dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
	return true;
}

/**
 * dp_rx_intrabss_fwd_li() - Implements the intra-BSS forwarding logic
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intra-BSS forwarded
 * @msdu_metadata: MSDU metadata extracted from the rx TLVs
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd_li(struct dp_soc *soc,
		      struct dp_peer *ta_peer,
		      uint8_t *rx_tlv_hdr,
		      qdf_nbuf_t nbuf,
		      struct hal_rx_msdu_metadata msdu_metadata)
{
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
		tid_stats.tid_rx_stats[ring_id][tid];

	/* If it is a broadcast pkt (e.g. ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra-BSS forwarding and the original pkt up the network stack.
	 * Note: how do we handle multicast pkts? Do we forward
	 * all multicast pkts as is, or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer)
		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
					       nbuf, tid_stats);

	if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_peer, &msdu_metadata))
		return dp_rx_intrabss_ucast_fwd(soc, ta_peer, rx_tlv_hdr,
						nbuf, tid_stats);

	return false;
}
#endif

/**
 * dp_rx_process_li() - Brain of the Rx processing functionality
 *			Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring
 * @quota: No. of units (packets) that can be serviced in one shot
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t num_pending;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_peer *peer;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t core_id = 0;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t num_entries_avail = 0;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	int max_reap_limit;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	hif_pm_runtime_mark_dp_rx_busy(scn);
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));

	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	/*
	 * Start reaping the buffers from the reo ring and queue
	 * them in a per-vdev queue.
	 * Process the received pkts in a different per-vdev loop.
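	 * Reaping and delivery are split so that the reaped buffers can
	 * be replenished back to RXDMA (at the 'done' label) before the
	 * per-nbuf processing below begins.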
	 */
	while (qdf_likely(quota &&
			  (ring_desc = hal_srng_dst_peek(hal_soc,
							 hal_ring_hdl)))) {
		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_ipa_reo_ctx_buf_mapping_lock(soc,
								reo_ring_num);
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc,
							rx_desc->nbuf,
							RX_DATA_BUFFER_SIZE,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev,
							rx_desc->nbuf,
							QDF_DMA_FROM_DEVICE,
							RX_DATA_BUFFER_SIZE);
				rx_desc->unmapped = 1;
				dp_ipa_reo_ctx_buf_mapping_unlock(soc,
								  reo_ring_num);
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
							&head[rx_desc->pool_id],
							&tail[rx_desc->pool_id],
							rx_desc);
			}
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case the host will dump the last 128 descriptors,
		 * including the software descriptor rx_desc, and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_li(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_li(ring_desc, &msdu_desc_info);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* Get number of entries available in HW ring */
				num_entries_avail =
					hal_srng_dst_num_valid(hal_soc,
							       hal_ring_hdl, 1);

				/* For a new MPDU, check if we can read the
				 * complete MPDU by comparing the number of
				 * buffers available and the number of buffers
				 * needed to reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (RX_DATA_BUFFER_SIZE -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_entries_avail) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								ring_desc);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		core_id = smp_processor_id();
		DP_STATS_INC(soc, rx.ring_packets[core_id][reo_ring_num], 1);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			is_prev_msdu_last = true;

		/* Pop out the descriptor */
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		rx_bufs_reaped[rx_desc->pool_id]++;
		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_li(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			DP_PEER_METADATA_VDEV_ID_GET_LI(peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_LI(peer_mdata);

		/*
		 * Save the msdu flags (first, last and continuation) in
		 * nbuf->cb; also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));

		/* set reo dest indication */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * Move unmap after the scattered msdu waiting-break logic
		 * in case a double skb unmap happened.
		 */
		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
						  rx_desc_pool->buf_size,
						  false);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		rx_desc->unmapped = 1;
		dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);
		/*
		 * If the continuation bit is set then we have an MSDU spread
		 * across multiple buffers; do not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;
		/*
		 * For the scatter case, only allow the break once the
		 * complete msdu has been received.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from the global queue,
	 * processed and queued back on a per-vdev basis. These nbufs
	 * are sent to the stack as and when we run out of nbufs
	 * or a new nbuf dequeued from the global queue has a different
	 * vdev than the previous nbuf.
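	 * A reference on the current peer is held across iterations and
	 * only switched when the peer_id changes, so the common case
	 * avoids a per-packet reference get/put.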
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data(nbuf, next);

		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
			tid = qdf_nbuf_get_tid_val(nbuf);

		if (qdf_unlikely(!peer)) {
			peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_RX);
		} else if (peer && peer->peer_id != peer_id) {
			dp_peer_unref_delete(peer, DP_MOD_ID_RX);
			peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_RX);
		}

		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		if (qdf_likely(peer)) {
			vdev = peer->vdev;
		} else {
			nbuf->next = NULL;
			dp_rx_deliver_to_pkt_capture_no_peer(
					soc, nbuf, pkt_capture_offload);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!vdev)) {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			continue;
		}

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		rx_pdev = vdev->pdev;
		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(rx_pdev->delay_stats_flag) ||
		    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
				 soc->wlan_cfg_ctx)))
			qdf_nbuf_set_timestamp(nbuf);

		tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
			if (qdf_unlikely(!hal_rx_attn_msdu_done_get_li(
							 rx_tlv_hdr))) {
				dp_err_rl("MSDU DONE failure");
				DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
				hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
						     QDF_TRACE_LEVEL_INFO);
				tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
				qdf_assert(0);
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			} else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get_li(
							rx_tlv_hdr))) {
				DP_STATS_INC(soc, rx.err.msdu_len_err, 1);
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 fragmented pkts are reinjected to the REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVs header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases:
		 * 1. The nbuf size is smaller than the received msdu,
		 *    e.g. the nbuf size was set to 2048 during nbuf_alloc
		 *    but an msdu of 2304 bytes was received; this msdu
		 *    is then spread across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    e.g. the 1st MSDU is in the 1st nbuf, the 2nd MSDU is
		 *    spread across the 1st and 2nd nbufs, and the last MSDU
		 *    is spread across the 2nd and 3rd nbufs.
		 *
		 * For these scenarios, create an skb frag_list and
		 * append the buffers till the last MSDU of the AMSDU.
		 * Third condition:
		 * This is the most likely case: we receive 802.3 pkts
		 * decapsulated by HW; here we need to set the pkt length.
		 */
		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
			} else {
				qdf_nbuf_free(nbuf);
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
				dp_info_rl("scatter msdu len %d, dropped",
					   msdu_len);
				nbuf = next;
				continue;
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		/*
		 * Process the frame for multipass processing
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
								rx_tlv_hdr) ==
				  false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from an unauthorized peer.
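		 * EAPOL and WAPI frames are still allowed through so that
		 * the security handshake can complete before the peer is
		 * marked authorized.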
		 */
		if (qdf_likely(peer) && qdf_unlikely(!peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_STATS_INC(soc,
					     rx.err.peer_unauth_rx_pkt_drop,
					     1);
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
					reo_ring_num, tid_stats);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				dp_rx_info("%pK: mesh pkt filtered", soc);
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* WDS Destination Address Learning */
			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);

			/* Due to HW issue, sometimes we see that the sa_idx
			 * and da_idx are invalid with sa_valid and da_valid
			 * bits set
			 *
			 * in this case we also see that value of
			 * sa_sw_peer_id is set as 0
			 *
			 * Drop the packet if sa_idx and da_idx OOB or
			 * sa_sw_peerid is 0
			 */
			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf,
						msdu_metadata)) {
				qdf_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				continue;
			}
			if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
								 peer,
								 rx_tlv_hdr,
								 nbuf))) {
				/* this is a looped back MCBC pkt, drop it */
				DP_STATS_INC_PKT(peer, rx.mec_drop, 1,
						 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			/* WDS Source Port Learning */
			if (qdf_likely(vdev->wds_enabled))
				dp_rx_wds_srcport_learn(soc,
							rx_tlv_hdr,
							peer,
							nbuf,
							msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_li(soc, peer, rx_tlv_hdr,
							  nbuf,
							  msdu_metadata)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_update_stats(soc, nbuf);
		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
		if (qdf_unlikely(peer->in_twt))
			DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,
					 QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	if (qdf_likely(deliver_list_head)) {
		if (qdf_likely(peer)) {
			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
						     pkt_capture_offload,
						     deliver_list_head);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack(soc, vdev, peer,
						       deliver_list_head,
						       deliver_list_tail);
		} else {
			nbuf = deliver_list_head;
			while (nbuf) {
				next = nbuf->next;
				nbuf->next = NULL;
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
				nbuf = next;
			}
		}
	}

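	/* Release the peer reference taken while walking the nbuf list */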
	if (qdf_likely(peer))
		dp_peer_unref_delete(peer, DP_MOD_ID_RX);

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdevs */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	return dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}

void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc)
{
	struct hal_buf_info buf_info = {0};
	hal_soc_handle_t hal_soc = soc->hal_soc;

	/* only cookie and rbm will be valid in buf_info */
	hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
				  &buf_info);

	if (qdf_unlikely(buf_info.rbm !=
			 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) {
		/* TODO */
		/* Call appropriate handler */
		DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
		dp_rx_err("%pK: Invalid RBM %d", soc, buf_info.rbm);
		return QDF_STATUS_E_INVAL;
	}

	*r_rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);

	return QDF_STATUS_SUCCESS;
}