/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_li_rx.h"
#include "hal_api.h"
#include "hal_li_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

/**
 * is_sa_da_idx_valid() - check if the SA/DA AST indices are within range
 * @soc: core DP main context
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 * @msdu_info: MSDU metadata parsed from the rx TLVs
 *
 * Return: false if the SA index (when SA is valid) or the DA index (when DA
 *	   is valid and not mcast/bcast) exceeds the maximum AST index
 *	   configured for the SoC, true otherwise
 */
static inline
bool is_sa_da_idx_valid(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
			qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info)
{
	if ((qdf_nbuf_is_sa_valid(nbuf) &&
	     (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
	    (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) &&
	     (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
		return false;

	return true;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#if defined(FEATURE_MCL_REPEATER) && defined(FEATURE_MEC)
/**
 * dp_rx_mec_check_wrapper() - wrapper to dp_rx_mcast_echo_check
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_peer *peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf);
}
#else
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_peer *peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

/**
 * dp_rx_process_li() - Brain of the Rx processing functionality
 *			Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t num_pending;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_peer *peer;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t core_id = 0;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t num_entries_avail = 0;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	int max_reap_limit;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	hif_pm_runtime_mark_dp_rx_busy(scn);
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));

	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(quota &&
			  (ring_desc = hal_srng_dst_peek(hal_soc,
							 hal_ring_hdl)))) {
		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_ipa_reo_ctx_buf_mapping_lock(soc,
								reo_ring_num);
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc,
							rx_desc->nbuf,
							RX_DATA_BUFFER_SIZE,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev,
							rx_desc->nbuf,
							QDF_DMA_FROM_DEVICE,
							RX_DATA_BUFFER_SIZE);
				rx_desc->unmapped = 1;
				dp_ipa_reo_ctx_buf_mapping_unlock(soc,
								  reo_ring_num);
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
							&head[rx_desc->pool_id],
							&tail[rx_desc->pool_id],
							rx_desc);
			}
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */

		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_li(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_li(ring_desc, &msdu_desc_info);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* Get number of entries available in HW ring */
				num_entries_avail =
				hal_srng_dst_num_valid(hal_soc,
						       hal_ring_hdl, 1);

				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (RX_DATA_BUFFER_SIZE -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_entries_avail) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								     ring_desc);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		core_id = smp_processor_id();
		DP_STATS_INC(soc, rx.ring_packets[core_id][reo_ring_num], 1);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			is_prev_msdu_last = true;

		/* Pop out the descriptor*/
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		rx_bufs_reaped[rx_desc->pool_id]++;
		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			DP_PEER_METADATA_VDEV_ID_GET(peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
		qdf_nbuf_set_rx_reo_dest_ind(
				rx_desc->nbuf,
				HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * move unmap after scattered msdu waiting break logic
		 * in case double skb unmap happened.
		 */
		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
						  rx_desc_pool->buf_size,
						  false);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		rx_desc->unmapped = 1;
		dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);
		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;
		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
			tid = qdf_nbuf_get_tid_val(nbuf);

		if (qdf_unlikely(!peer)) {
			peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_RX);
		} else if (peer && peer->peer_id != peer_id) {
			dp_peer_unref_delete(peer, DP_MOD_ID_RX);
			peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_RX);
		}

		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		if (qdf_likely(peer)) {
			vdev = peer->vdev;
		} else {
			nbuf->next = NULL;
			dp_rx_deliver_to_pkt_capture_no_peer(
					soc, nbuf, pkt_capture_offload);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!vdev)) {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			continue;
		}

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		rx_pdev = vdev->pdev;
		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(rx_pdev->delay_stats_flag) ||
		    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
				 soc->wlan_cfg_ctx)))
			qdf_nbuf_set_timestamp(nbuf);

		tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
			if (qdf_unlikely(!hal_rx_attn_msdu_done_get_li(
								 rx_tlv_hdr))) {
				dp_err_rl("MSDU DONE failure");
				DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
				hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
						     QDF_TRACE_LEVEL_INFO);
				tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
				qdf_assert(0);
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			} else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get_li(
								 rx_tlv_hdr))) {
				DP_STATS_INC(soc, rx.err.msdu_len_err, 1);
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
			} else {
				qdf_nbuf_free(nbuf);
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
				dp_info_rl("scatter msdu len %d, dropped",
					   msdu_len);
				nbuf = next;
				continue;
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		/*
		 * process frame for multipass phrase processing
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
								rx_tlv_hdr) ==
				  false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(peer) && qdf_unlikely(!peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_STATS_INC(soc,
					     rx.err.peer_unauth_rx_pkt_drop,
					     1);
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
					reo_ring_num, tid_stats);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				dp_rx_info("%pK: mesh pkt filtered", soc);
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* WDS Destination Address Learning */
			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);

			/* Due to HW issue, sometimes we see that the sa_idx
			 * and da_idx are invalid with sa_valid and da_valid
			 * bits set
			 *
			 * in this case we also see that value of
			 * sa_sw_peer_id is set as 0
			 *
			 * Drop the packet if sa_idx and da_idx OOB or
			 * sa_sw_peerid is 0
			 */
			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf,
						msdu_metadata)) {
				qdf_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				continue;
			}
			if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
								 peer,
								 rx_tlv_hdr,
								 nbuf))) {
				/* this is a looped back MCBC pkt, drop it */
				DP_STATS_INC_PKT(peer, rx.mec_drop, 1,
						 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			/* WDS Source Port Learning */
			if (qdf_likely(vdev->wds_enabled))
				dp_rx_wds_srcport_learn(soc,
							rx_tlv_hdr,
							peer,
							nbuf,
							msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr,
						       nbuf, msdu_metadata)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_update_stats(soc, nbuf);
		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
		if (qdf_unlikely(peer->in_twt))
			DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,
					 QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	if (qdf_likely(deliver_list_head)) {
		if (qdf_likely(peer)) {
			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
						     pkt_capture_offload,
						     deliver_list_head);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack(soc, vdev, peer,
						       deliver_list_head,
						       deliver_list_tail);
		} else {
			nbuf = deliver_list_head;
			while (nbuf) {
				next = nbuf->next;
				nbuf->next = NULL;
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
				nbuf = next;
			}
		}
	}
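
	/* Release the peer reference taken while processing the nbuf list */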
	if (qdf_likely(peer))
		dp_peer_unref_delete(peer, DP_MOD_ID_RX);

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_desc_pool_init_li() - Initialize the rx descriptor pool
 * @soc: Handle to DP SoC structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	return dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}

/**
 * dp_rx_desc_pool_deinit_li() - De-initialize the rx descriptor pool
 * @soc: Handle to DP SoC structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

/**
 * dp_wbm_get_rx_desc_from_hal_desc_li() - Get the rx descriptor
 *					   corresponding to a WBM ring
 *					   descriptor
 * @soc: Handle to DP SoC structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of rx descriptor
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc)
{
	struct hal_buf_info buf_info = {0};
	hal_soc_handle_t hal_soc = soc->hal_soc;

	/* only cookie and rbm will be valid in buf_info */
	hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
				  &buf_info);

	if (qdf_unlikely(buf_info.rbm !=
			 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) {
		/* TODO */
		/* Call appropriate handler */
		DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
		dp_rx_err("%pK: Invalid RBM %d", soc, buf_info.rbm);
		return QDF_STATUS_E_INVAL;
	}

	*r_rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);

	return QDF_STATUS_SUCCESS;
}