/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_be_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_be_rx.h"
#include "hal_api.h"
#include "hal_be_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

/**
 * dp_rx_process_be() - Brain of the Rx processing functionality
 *                      Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the REO ring
 * @quota: no. of units (packets) that can be serviced in one shot
 *
 * This function implements the core of Rx functionality. It is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: no. of elements processed
 */
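/*
 * Implementation note, summarizing the flow below: each invocation runs
 * three stages:
 * 1) Reap completed entries from the REO destination ring into a local
 *    nbuf list, unmapping each buffer and collecting the SW descriptors
 *    on per-pool free lists.
 * 2) Replenish the reaped buffers back to the RXDMA refill ring(s).
 * 3) Walk the nbuf list, build per-vdev/per-peer delivery lists and hand
 *    them to the stack (or to the pkt-capture/no-peer paths).
 */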
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
                          hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
                          uint32_t quota)
{
        hal_ring_desc_t ring_desc;
        hal_soc_handle_t hal_soc;
        struct dp_rx_desc *rx_desc = NULL;
        qdf_nbuf_t nbuf, next;
        bool near_full;
        union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
        union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
        uint32_t num_pending;
        uint32_t rx_bufs_used = 0, rx_buf_cookie;
        uint16_t msdu_len = 0;
        uint16_t peer_id;
        uint8_t vdev_id;
        struct dp_peer *peer;
        struct dp_vdev *vdev;
        uint32_t pkt_len = 0;
        struct hal_rx_mpdu_desc_info mpdu_desc_info;
        struct hal_rx_msdu_desc_info msdu_desc_info;
        enum hal_reo_error_status error;
        uint32_t peer_mdata;
        uint8_t *rx_tlv_hdr;
        uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
        uint8_t mac_id = 0;
        struct dp_pdev *rx_pdev;
        struct dp_srng *dp_rxdma_srng;
        struct rx_desc_pool *rx_desc_pool;
        struct dp_soc *soc = int_ctx->soc;
        uint8_t core_id = 0;
        struct cdp_tid_rx_stats *tid_stats;
        qdf_nbuf_t nbuf_head;
        qdf_nbuf_t nbuf_tail;
        qdf_nbuf_t deliver_list_head;
        qdf_nbuf_t deliver_list_tail;
        uint32_t num_rx_bufs_reaped = 0;
        uint32_t intr_id;
        struct hif_opaque_softc *scn;
        int32_t tid = 0;
        bool is_prev_msdu_last = true;
        uint32_t num_entries_avail = 0;
        uint32_t rx_ol_pkt_cnt = 0;
        uint32_t num_entries = 0;
        struct hal_rx_msdu_metadata msdu_metadata;
        QDF_STATUS status;
        qdf_nbuf_t ebuf_head;
        qdf_nbuf_t ebuf_tail;
        uint8_t pkt_capture_offload = 0;
        struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
        int max_reap_limit, ring_near_full;

        DP_HIST_INIT();

        qdf_assert_always(soc && hal_ring_hdl);
        hal_soc = soc->hal_soc;
        qdf_assert_always(hal_soc);

        scn = soc->hif_handle;
        hif_pm_runtime_mark_dp_rx_busy(scn);
        intr_id = int_ctx->dp_intr_id;
        num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);

more_data:
        /* reset local variables here to be re-used in the function */
        nbuf_head = NULL;
        nbuf_tail = NULL;
        deliver_list_head = NULL;
        deliver_list_tail = NULL;
        peer = NULL;
        vdev = NULL;
        num_rx_bufs_reaped = 0;
        ebuf_head = NULL;
        ebuf_tail = NULL;
        ring_near_full = 0;
        max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

        qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
        qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
        qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
        qdf_mem_zero(head, sizeof(head));
        qdf_mem_zero(tail, sizeof(tail));

        ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
                                                            &max_reap_limit);

        if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
                /*
                 * Need API to convert from hal_ring pointer to
                 * Ring Type / Ring Id combo
                 */
                DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
                goto done;
        }

        /*
         * Start reaping the buffers from the REO ring and queue
         * them in a per-vdev queue.
         * Process the received pkts in a separate per-vdev loop.
         */
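        /*
         * Exit conditions for the reap loop below, as implemented: quota
         * exhausted, ring empty, a stale/invalid cookie, not enough ring
         * entries left to reap a complete scattered MPDU, or the per-loop
         * reap limit being hit.
         */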
        while (qdf_likely(quota &&
                          (ring_desc = hal_srng_dst_peek(hal_soc,
                                                         hal_ring_hdl)))) {
                error = HAL_RX_ERROR_STATUS_GET(ring_desc);

                if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
                        dp_rx_err("%pK: HAL RING 0x%pK:error %d",
                                  soc, hal_ring_hdl, error);
                        DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
                                     1);
                        /* Don't know how to deal with this -- assert */
                        qdf_assert(0);
                }

                dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
                rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
                status = dp_rx_cookie_check_and_invalidate(ring_desc);
                if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
                        DP_STATS_INC(soc, rx.err.stale_cookie, 1);
                        break;
                }

                rx_desc = (struct dp_rx_desc *)
                                hal_rx_get_reo_desc_va(ring_desc);
                dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);

                status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
                                           ring_desc, rx_desc);
                if (QDF_IS_STATUS_ERROR(status)) {
                        if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
                                qdf_assert_always(!rx_desc->unmapped);
                                dp_ipa_reo_ctx_buf_mapping_lock(soc,
                                                                reo_ring_num);
                                dp_ipa_handle_rx_buf_smmu_mapping(
                                                        soc,
                                                        rx_desc->nbuf,
                                                        RX_DATA_BUFFER_SIZE,
                                                        false);
                                qdf_nbuf_unmap_nbytes_single(
                                                        soc->osdev,
                                                        rx_desc->nbuf,
                                                        QDF_DMA_FROM_DEVICE,
                                                        RX_DATA_BUFFER_SIZE);
                                rx_desc->unmapped = 1;
                                dp_ipa_reo_ctx_buf_mapping_unlock(
                                                        soc,
                                                        reo_ring_num);
                                dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
                                                            rx_desc->pool_id);
                                dp_rx_add_to_free_desc_list(
                                                &head[rx_desc->pool_id],
                                                &tail[rx_desc->pool_id],
                                                rx_desc);
                        }
                        hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
                        continue;
                }

                /*
                 * This is an unlikely scenario where the host is reaping
                 * a descriptor which it already reaped just a while ago
                 * but has not yet replenished back to HW.
                 * In this case the host will dump the last 128 descriptors,
                 * including the software descriptor rx_desc, and assert.
                 */
                if (qdf_unlikely(!rx_desc->in_use)) {
                        DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
                        dp_info_rl("Reaping rx_desc not in use!");
                        dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
                                                   ring_desc, rx_desc);
                        /* ignore duplicate RX desc and continue to process */
                        /* Pop out the descriptor */
                        hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
                        continue;
                }

                status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
                if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
                        DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
                        dp_info_rl("Nbuf sanity check failure!");
                        dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
                                                   ring_desc, rx_desc);
                        rx_desc->in_err_state = 1;
                        hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
                        continue;
                }

                if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
                        dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
                        DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
                        dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
                                                   ring_desc, rx_desc);
                }

                /* Get MPDU DESC info */
                hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);

                /* Get MSDU DESC info */
                hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);

                if (qdf_unlikely(msdu_desc_info.msdu_flags &
                                 HAL_MSDU_F_MSDU_CONTINUATION)) {
                        /* the previous msdu has its end bit set, so the
                         * current one starts a new MPDU
                         */
                        if (is_prev_msdu_last) {
                                /* Get number of entries available in HW ring */
                                num_entries_avail =
                                        hal_srng_dst_num_valid(hal_soc,
                                                               hal_ring_hdl, 1);

                                /* For a new MPDU, check if we can read the
                                 * complete MPDU by comparing the number of
                                 * buffers available and the number of buffers
                                 * needed to reap this MPDU. E.g. if each
                                 * buffer holds 1900 payload bytes, a 4000-byte
                                 * MSDU needs 4000 / 1900 + 1 = 3 buffers.
                                 */
                                if ((msdu_desc_info.msdu_len /
                                     (RX_DATA_BUFFER_SIZE -
                                      soc->rx_pkt_tlv_size) + 1) >
                                    num_entries_avail) {
                                        DP_STATS_INC(soc,
                                                     rx.msdu_scatter_wait_break,
                                                     1);
                                        dp_rx_cookie_reset_invalid_bit(
                                                                ring_desc);
                                        break;
                                }
                                is_prev_msdu_last = false;
                        }
                }
                core_id = smp_processor_id();
                DP_STATS_INC(soc, rx.ring_packets[core_id][reo_ring_num], 1);

                if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
                        qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

                if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
                                 HAL_MPDU_F_RAW_AMPDU))
                        qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

                if (!is_prev_msdu_last &&
                    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
                        is_prev_msdu_last = true;

                /* Pop out the descriptor */
                hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

                rx_bufs_reaped[rx_desc->pool_id]++;
                peer_mdata = mpdu_desc_info.peer_meta_data;
                QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
                        DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
                QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
                        DP_PEER_METADATA_VDEV_ID_GET(peer_mdata);

                /* to indicate whether this msdu is rx offload */
                pkt_capture_offload =
                        DP_PEER_METADATA_OFFLOAD_GET(peer_mdata);

                /*
                 * Save the first/last/continuation msdu flags in nbuf->cb;
                 * also save mcbc, is_da_valid, is_sa_valid and the length
                 * there. This keeps the info required for per-pkt processing
                 * in the same cache line, which helps throughput for smaller
                 * pkt sizes.
                 */
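                /*
                 * Note: the chfrag start/cont/end markers set below are what
                 * the scatter-gather assembly (dp_rx_sg_create() in the
                 * delivery loop further down) relies on to chain the buffers
                 * of a multi-buffer MSDU.
                 */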
                if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
                        qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

                if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
                        qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

                if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
                        qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

                if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
                        qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

                if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
                        qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

                if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
                        qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

                if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
                        qdf_nbuf_set_intra_bss(rx_desc->nbuf, 1);

                qdf_nbuf_set_tid_val(rx_desc->nbuf,
                                     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
#ifdef CONFIG_LITHIUM
                qdf_nbuf_set_rx_reo_dest_ind(
                                rx_desc->nbuf,
                                HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));
#endif

                QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

                QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

                /*
                 * The unmap is done after the scattered-msdu wait-break
                 * logic above, to avoid a double skb unmap.
                 */
                rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
                dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
                dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
                                                  rx_desc_pool->buf_size,
                                                  false);
                qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
                                             QDF_DMA_FROM_DEVICE,
                                             rx_desc_pool->buf_size);
                rx_desc->unmapped = 1;
                dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
                DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
                                   ebuf_tail, rx_desc);
                /*
                 * If the continuation bit is set then the MSDU is spread
                 * across multiple buffers; do not decrement quota until
                 * all buffers of that MSDU have been reaped.
                 */
                if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
                        quota -= 1;

                dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
                                            &tail[rx_desc->pool_id], rx_desc);
                num_rx_bufs_reaped++;
                /*
                 * For the scatter case, only allow the reap-limit break
                 * once a complete msdu has been received.
                 */
                if (is_prev_msdu_last &&
                    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
                                                  max_reap_limit))
                        break;
        }
done:
        dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

        for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
                /*
                 * continue with the next mac_id if no pkts were reaped
                 * from that pool
                 */
                if (!rx_bufs_reaped[mac_id])
                        continue;

                dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

                rx_desc_pool = &soc->rx_desc_buf[mac_id];

                dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                        rx_desc_pool, rx_bufs_reaped[mac_id],
                                        &head[mac_id], &tail[mac_id]);
        }

        dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
        /* Peer can be NULL in case of LFR */
        if (qdf_likely(peer))
                vdev = NULL;

        /*
         * BIG loop where each nbuf is dequeued from the global queue,
         * processed and queued back on a per-vdev basis. These nbufs
         * are sent to the stack as and when we run out of nbufs, or
         * when a new nbuf dequeued from the global queue has a
         * different vdev than the previous nbuf.
         */
        nbuf = nbuf_head;
        while (nbuf) {
                next = nbuf->next;
                if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
                        nbuf = next;
                        DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
                        continue;
                }

                rx_tlv_hdr = qdf_nbuf_data(nbuf);
                vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
                peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

                if (dp_rx_is_list_ready(deliver_list_head, vdev, peer,
                                        peer_id, vdev_id)) {
                        dp_rx_deliver_to_stack(soc, vdev, peer,
                                               deliver_list_head,
                                               deliver_list_tail);
                        deliver_list_head = NULL;
                        deliver_list_tail = NULL;
                }

                /* Get TID from struct cb->tid_val, save to tid */
                if (qdf_nbuf_is_rx_chfrag_start(nbuf))
                        tid = qdf_nbuf_get_tid_val(nbuf);

                if (qdf_unlikely(!peer)) {
                        peer = dp_peer_get_ref_by_id(soc, peer_id,
                                                     DP_MOD_ID_RX);
                } else if (peer && peer->peer_id != peer_id) {
                        dp_peer_unref_delete(peer, DP_MOD_ID_RX);
                        peer = dp_peer_get_ref_by_id(soc, peer_id,
                                                     DP_MOD_ID_RX);
                }

                if (peer) {
                        QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
                        qdf_dp_trace_set_track(nbuf, QDF_RX);
                        QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
                        QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
                                QDF_NBUF_RX_PKT_DATA_TRACK;
                }

                rx_bufs_used++;

                if (qdf_likely(peer)) {
                        vdev = peer->vdev;
                } else {
                        nbuf->next = NULL;
                        dp_rx_deliver_to_pkt_capture_no_peer(
                                        soc, nbuf, pkt_capture_offload);

                        if (!pkt_capture_offload)
                                dp_rx_deliver_to_stack_no_peer(soc, nbuf);
                        nbuf = next;
                        continue;
                }

                if (qdf_unlikely(!vdev)) {
                        qdf_nbuf_free(nbuf);
                        nbuf = next;
                        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
                        continue;
                }

                /* when hlos tid override is enabled, save tid in
                 * skb->priority
                 */
                if (qdf_unlikely(vdev->skip_sw_tid_classification &
                                 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
                        qdf_nbuf_set_priority(nbuf, tid);

                rx_pdev = vdev->pdev;
                DP_RX_TID_SAVE(nbuf, tid);
                if (qdf_unlikely(rx_pdev->delay_stats_flag) ||
                    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
                                 soc->wlan_cfg_ctx)))
                        qdf_nbuf_set_timestamp(nbuf);

                tid_stats =
                &rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];

                /*
                 * Check if DMA completed -- msdu_done is the last bit
                 * to be written
                 */
                if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
                                 !hal_rx_attn_msdu_done_get(hal_soc,
                                                            rx_tlv_hdr))) {
                        dp_err("MSDU DONE failure");
                        DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
                        hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
                                             QDF_TRACE_LEVEL_INFO);
                        tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
                        qdf_nbuf_free(nbuf);
                        qdf_assert(0);
                        nbuf = next;
                        continue;
                }

                DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
                /*
                 * First IF condition:
                 * 802.11 fragmented pkts are reinjected to the REO
                 * HW block as SG pkts, and for these pkts we only
                 * need to pull the RX TLVs header length.
                 * Second IF condition:
                 * The below condition happens when an MSDU is spread
                 * across multiple buffers. This can happen in two cases:
                 * 1. The nbuf size is smaller than the received msdu,
                 *    e.g. we set the nbuf size to 2048 during
                 *    nbuf_alloc but received an msdu of 2304 bytes,
                 *    so the msdu is spread across 2 nbufs.
                 * 2. AMSDUs when RAW mode is enabled,
                 *    e.g. the 1st MSDU is in the 1st nbuf, the 2nd MSDU
                 *    is spread across the 1st and 2nd nbufs, and the
                 *    last MSDU is spread across the 2nd and 3rd nbufs.
                 * For these scenarios, create an skb frag_list and
                 * append these buffers till the last MSDU of the AMSDU.
                 * Third condition:
                 * This is the most likely case: we receive 802.3 pkts
                 * decapsulated by HW, and here we need to set the pkt
                 * length.
                 */
                hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
                if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
                        bool is_mcbc, is_sa_vld, is_da_vld;

                        is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
                                                                 rx_tlv_hdr);
                        is_sa_vld =
                                hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
                                                                rx_tlv_hdr);
                        is_da_vld =
                                hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
                                                                rx_tlv_hdr);

                        qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
                        qdf_nbuf_set_da_valid(nbuf, is_da_vld);
                        qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

                        qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
                } else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
                        msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
                        nbuf = dp_rx_sg_create(soc, nbuf);
                        next = nbuf->next;

                        if (qdf_nbuf_is_raw_frame(nbuf)) {
                                DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
                                DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
                        } else {
                                qdf_nbuf_free(nbuf);
                                DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
                                dp_info_rl("scatter msdu len %d, dropped",
                                           msdu_len);
                                nbuf = next;
                                continue;
                        }
                } else {
                        msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
                        pkt_len = msdu_len +
                                  msdu_metadata.l3_hdr_pad +
                                  soc->rx_pkt_tlv_size;

                        qdf_nbuf_set_pktlen(nbuf, pkt_len);
                        dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
                }

                /*
                 * Process the frame for multipass processing.
                 */
                if (qdf_unlikely(vdev->multipass_en)) {
                        if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
                                DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
                                qdf_nbuf_free(nbuf);
                                nbuf = next;
                                continue;
                        }
                }

                if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
                        dp_rx_err("%pK: Policy Check Drop pkt", soc);
                        tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
                        /* Drop & free packet */
                        qdf_nbuf_free(nbuf);
                        /* Statistics */
                        nbuf = next;
                        continue;
                }

                if (qdf_unlikely(peer && (peer->nawds_enabled) &&
                                 (qdf_nbuf_is_da_mcbc(nbuf)) &&
                                 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
                                                                rx_tlv_hdr) ==
                                  false))) {
                        tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
                        DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
                        qdf_nbuf_free(nbuf);
                        nbuf = next;
                        continue;
                }

                /*
                 * Drop non-EAPOL frames from an unauthorized peer.
                 */
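                /*
                 * Note: the is_eapol check below also passes WAPI frames,
                 * presumably because WAPI key-handshake frames, like EAPOL,
                 * must be accepted before the peer is authorized.
                 */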
                if (qdf_likely(peer) && qdf_unlikely(!peer->authorize) &&
                    !qdf_nbuf_is_raw_frame(nbuf)) {
                        bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
                                        qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

                        if (!is_eapol) {
                                DP_STATS_INC(soc,
                                             rx.err.peer_unauth_rx_pkt_drop,
                                             1);
                                qdf_nbuf_free(nbuf);
                                nbuf = next;
                                continue;
                        }
                }

                if (soc->process_rx_status)
                        dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

                /* Update the protocol tag in SKB based on CCE metadata */
                dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
                                          reo_ring_num, false, true);

                /* Update the flow tag in SKB based on FSE metadata */
                dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

                dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
                                        reo_ring_num, tid_stats);

                if (qdf_unlikely(vdev->mesh_vdev)) {
                        if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
                                        == QDF_STATUS_SUCCESS) {
                                dp_rx_info("%pK: mesh pkt filtered", soc);
                                tid_stats->fail_cnt[MESH_FILTER_DROP]++;
                                DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
                                             1);

                                qdf_nbuf_free(nbuf);
                                nbuf = next;
                                continue;
                        }
                        dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
                }

                if (qdf_likely(vdev->rx_decap_type ==
                               htt_cmn_pkt_type_ethernet) &&
                    qdf_likely(!vdev->mesh_vdev)) {
                        /* WDS Destination Address Learning */
                        dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);

                        /* WDS Source Port Learning */
                        if (qdf_likely(vdev->wds_enabled))
                                dp_rx_wds_srcport_learn(soc,
                                                        rx_tlv_hdr,
                                                        peer,
                                                        nbuf,
                                                        msdu_metadata);

                        /* Intrabss-fwd */
                        if (dp_rx_check_ap_bridge(vdev))
                                if (dp_rx_intrabss_fwd_be(soc, peer,
                                                          rx_tlv_hdr,
                                                          nbuf,
                                                          msdu_metadata)) {
                                        nbuf = next;
                                        tid_stats->intrabss_cnt++;
                                        continue; /* Get next desc */
                                }
                }

                dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

                dp_rx_update_stats(soc, nbuf);
                DP_RX_LIST_APPEND(deliver_list_head,
                                  deliver_list_tail,
                                  nbuf);
                DP_STATS_INC_PKT(peer, rx.to_stack, 1,
                                 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
                if (qdf_unlikely(peer->in_twt))
                        DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,
                                         QDF_NBUF_CB_RX_PKT_LEN(nbuf));

                tid_stats->delivered_to_stack++;
                nbuf = next;
        }

        if (qdf_likely(deliver_list_head)) {
                if (qdf_likely(peer)) {
                        dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
                                                     pkt_capture_offload,
                                                     deliver_list_head);
                        if (!pkt_capture_offload)
                                dp_rx_deliver_to_stack(soc, vdev, peer,
                                                       deliver_list_head,
                                                       deliver_list_tail);
                } else {
                        nbuf = deliver_list_head;
                        while (nbuf) {
                                next = nbuf->next;
                                nbuf->next = NULL;
                                dp_rx_deliver_to_stack_no_peer(soc, nbuf);
                                nbuf = next;
                        }
                }
        }

        if (qdf_likely(peer))
                dp_peer_unref_delete(peer, DP_MOD_ID_RX);

        /*
         * If we are processing in the near-full condition, there are
         * three scenarios:
         * 1) Ring entries have reached the critical state
         * 2) Ring entries are still near the high threshold
         * 3) Ring entries are below the safe level
         *
         * One more loop will move the state to normal processing and yield.
         */
        if (ring_near_full)
                goto more_data;

        if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
                if (quota) {
                        num_pending =
                                dp_rx_srng_get_num_pending(hal_soc,
                                                           hal_ring_hdl,
                                                           num_entries,
                                                           &near_full);
                        if (num_pending) {
                                DP_STATS_INC(soc, rx.hp_oos2, 1);

                                if (!hif_exec_should_yield(scn, intr_id))
                                        goto more_data;
                                if (qdf_unlikely(near_full)) {
                                        DP_STATS_INC(soc, rx.near_full, 1);
                                        goto more_data;
                                }
                        }
                }

                if (vdev && vdev->osif_fisa_flush)
                        vdev->osif_fisa_flush(soc, reo_ring_num);

                if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
                        vdev->osif_gro_flush(vdev->osif_vdev,
                                             reo_ring_num);
                }
        }

        /* Update histogram statistics by looping through pdevs */
        DP_RX_HIST_STATS_PER_PDEV();

        return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_pool_init_be_cc() - initialize RX desc pool for cookie conversion
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handle
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
                           struct rx_desc_pool *rx_desc_pool,
                           uint32_t pool_id)
{
        struct dp_soc_be *be_soc;
        union dp_rx_desc_list_elem_t *rx_desc_elem;
        struct dp_spt_page_desc *page_desc;
        struct dp_spt_page_desc_list *page_desc_list;

        be_soc = dp_get_be_soc_from_dp_soc(soc);
        page_desc_list = &be_soc->rx_spt_page_desc[pool_id];

        /* allocate SPT pages from the page desc pool */
        page_desc_list->num_spt_pages =
                dp_cc_spt_page_desc_alloc(be_soc,
                                          &page_desc_list->spt_page_list_head,
                                          &page_desc_list->spt_page_list_tail,
                                          rx_desc_pool->pool_size);

        if (!page_desc_list->num_spt_pages) {
                dp_err("fail to allocate cookie conversion spt pages");
                return QDF_STATUS_E_FAILURE;
        }

        /* store each RX desc VA in the SPT pages and get the
         * corresponding ID
         */
        page_desc = page_desc_list->spt_page_list_head;
        rx_desc_elem = rx_desc_pool->freelist;
        while (rx_desc_elem) {
                DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
                                         page_desc->avail_entry_index,
                                         &rx_desc_elem->rx_desc);

                rx_desc_elem->rx_desc.cookie =
                        dp_cc_desc_id_generate(page_desc->ppt_index,
                                               page_desc->avail_entry_index);
                rx_desc_elem->rx_desc.pool_id = pool_id;
                rx_desc_elem->rx_desc.in_use = 0;
                rx_desc_elem = rx_desc_elem->next;

                page_desc->avail_entry_index++;
                if (page_desc->avail_entry_index >=
                                DP_CC_SPT_PAGE_MAX_ENTRIES)
                        page_desc = page_desc->next;
        }

        return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
                           struct rx_desc_pool *rx_desc_pool,
                           uint32_t pool_id)
{
        struct dp_soc_be *be_soc;
        struct dp_spt_page_desc *page_desc;
        struct dp_spt_page_desc_list *page_desc_list;
        int i;

        be_soc = dp_get_be_soc_from_dp_soc(soc);
        page_desc_list = &be_soc->rx_spt_page_desc[pool_id];

        /* allocate SPT pages from the page desc pool */
        page_desc_list->num_spt_pages =
                dp_cc_spt_page_desc_alloc(
                                be_soc,
                                &page_desc_list->spt_page_list_head,
                                &page_desc_list->spt_page_list_tail,
                                rx_desc_pool->pool_size);

        if (!page_desc_list->num_spt_pages) {
                dp_err("fail to allocate cookie conversion spt pages");
                return QDF_STATUS_E_FAILURE;
        }

        /* store each RX desc VA in the SPT pages and get the
         * corresponding ID
         */
        page_desc = page_desc_list->spt_page_list_head;
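        /*
         * Cookie-conversion sketch (the exact bit layout is owned by
         * dp_cc_desc_id_generate()): each SPT page holds up to
         * DP_CC_SPT_PAGE_MAX_ENTRIES descriptor VAs, and a cookie combines
         * the page's PPT index with the entry index within the page, so the
         * cookie can later be mapped back to the descriptor VA (see
         * dp_cc_desc_find()).
         */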
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (i == rx_desc_pool->pool_size - 1)
                        rx_desc_pool->array[i].next = NULL;
                else
                        rx_desc_pool->array[i].next =
                                &rx_desc_pool->array[i + 1];

                DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
                                         page_desc->avail_entry_index,
                                         &rx_desc_pool->array[i].rx_desc);

                rx_desc_pool->array[i].rx_desc.cookie =
                        dp_cc_desc_id_generate(page_desc->ppt_index,
                                               page_desc->avail_entry_index);

                rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
                rx_desc_pool->array[i].rx_desc.in_use = 0;

                page_desc->avail_entry_index++;
                if (page_desc->avail_entry_index >=
                                DP_CC_SPT_PAGE_MAX_ENTRIES)
                        page_desc = page_desc->next;
        }

        return QDF_STATUS_SUCCESS;
}
#endif

static void
dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
                             struct rx_desc_pool *rx_desc_pool,
                             uint32_t pool_id)
{
        struct dp_soc_be *be_soc;
        struct dp_spt_page_desc *page_desc;
        struct dp_spt_page_desc_list *page_desc_list;

        be_soc = dp_get_be_soc_from_dp_soc(soc);
        page_desc_list = &be_soc->rx_spt_page_desc[pool_id];

        if (!page_desc_list->num_spt_pages) {
                dp_warn("page_desc_list is empty for pool_id %d", pool_id);
                return;
        }

        /* cleanup for each page */
        page_desc = page_desc_list->spt_page_list_head;
        while (page_desc) {
                page_desc->avail_entry_index = 0;
                qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
                page_desc = page_desc->next;
        }

        /* free the page descs back to the pool */
        dp_cc_spt_page_desc_free(be_soc,
                                 &page_desc_list->spt_page_list_head,
                                 &page_desc_list->spt_page_list_tail,
                                 page_desc_list->num_spt_pages);
        page_desc_list->num_spt_pages = 0;
}

QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
                                   struct rx_desc_pool *rx_desc_pool,
                                   uint32_t pool_id)
{
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        /* Only the regular RX buffer desc pool uses HW cookie conversion */
        if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE) {
                dp_info("rx_desc_buf pool init");
                status = dp_rx_desc_pool_init_be_cc(soc,
                                                    rx_desc_pool,
                                                    pool_id);
        } else {
                dp_info("non_rx_desc_buf_pool init");
                status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
                                                      pool_id);
        }

        return status;
}

void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
                               struct rx_desc_pool *rx_desc_pool,
                               uint32_t pool_id)
{
        if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE)
                dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
}
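/*
 * dp_wbm_get_rx_desc_from_hal_desc_be() below has three compile-time
 * variants:
 * - DP_FEATURE_HW_COOKIE_CONVERSION + DP_HW_COOKIE_CONVERT_EXCEPTION:
 *   use the HW-converted VA when HW conversion is done, otherwise fall
 *   back to SW conversion via dp_cc_desc_find().
 * - DP_FEATURE_HW_COOKIE_CONVERSION only: always trust the HW VA.
 * - Neither: always do SW cookie conversion.
 */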
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
                                               void *ring_desc,
                                               struct dp_rx_desc **r_rx_desc)
{
        if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
                /* HW cookie conversion done */
                *r_rx_desc = (struct dp_rx_desc *)
                                hal_rx_wbm_get_desc_va(ring_desc);
        } else {
                /* SW does cookie conversion */
                uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

                *r_rx_desc = (struct dp_rx_desc *)
                                dp_cc_desc_find(soc, cookie);
        }

        return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
                                               void *ring_desc,
                                               struct dp_rx_desc **r_rx_desc)
{
        *r_rx_desc = (struct dp_rx_desc *)
                        hal_rx_wbm_get_desc_va(ring_desc);

        return QDF_STATUS_SUCCESS;
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
                                               void *ring_desc,
                                               struct dp_rx_desc **r_rx_desc)
{
        /* SW does cookie conversion */
        uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

        *r_rx_desc = (struct dp_rx_desc *)
                        dp_cc_desc_find(soc, cookie);

        return QDF_STATUS_SUCCESS;
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */

struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
                                             uint32_t cookie)
{
        return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
                          hal_ring_handle_t hal_ring_hdl,
                          uint8_t reo_ring_num,
                          uint32_t quota)
{
        struct dp_soc *soc = int_ctx->soc;
        struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
        uint32_t work_done = 0;

        if (dp_srng_get_near_full_level(soc, rx_ring) <
                        DP_SRNG_THRESH_NEAR_FULL)
                return 0;

        qdf_atomic_set(&rx_ring->near_full, 1);
        work_done++;

        return work_done;
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#if defined(QCA_WIFI_WCN7850) || !defined(INTRA_BSS_FW_OFFLOAD)
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_peer *ta_peer,
                           uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
                           struct hal_rx_msdu_metadata msdu_metadata)
{
        /* Hamilton V1 uses the Lithium path */
        return dp_rx_intrabss_fwd(soc, ta_peer, rx_tlv_hdr, nbuf,
                                  msdu_metadata);
}
#else
/*
 * dp_rx_intrabss_fwd_be() - API for intra-BSS fwd. For an EAPOL pkt
 *  with DA not equal to the vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intra-BSS forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_peer *ta_peer,
                           uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
                           struct hal_rx_msdu_metadata msdu_metadata)
{
        uint16_t len;
        qdf_nbuf_t nbuf_copy;
        uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
        uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
        struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
                                        tid_stats.tid_rx_stats[ring_id][tid];

        /*
         * If it is a broadcast pkt (e.g. ARP) and it is not its own
         * source, then clone the pkt and send the cloned pkt for
         * intra-BSS forwarding and the original pkt up the network stack.
         * Note: how do we handle multicast pkts? Do we forward all
         * multicast pkts as-is, or let a higher layer module like
         * igmpsnoop decide whether to forward them or not with
         * Mcast enhancement?
         */
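        /*
         * Note on the return value in the branch below: even when the clone
         * is handed to dp_tx_send(), false is returned so that the caller
         * still delivers the original broadcast/multicast frame up the
         * network stack.
         */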
        if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
                if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
                                                    nbuf))
                        return true;

                if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
                        return false;

                /* If the source peer is in the isolation list, then don't
                 * forward; instead push to the bridge stack.
                 */
                if (dp_get_peer_isolation(ta_peer))
                        return false;

                nbuf_copy = qdf_nbuf_copy(nbuf);
                if (!nbuf_copy)
                        return false;

                len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
                if (dp_tx_send((struct cdp_soc_t *)soc,
                               ta_peer->vdev->vdev_id, nbuf_copy)) {
                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
                        tid_stats->fail_cnt[INTRABSS_DROP]++;
                        qdf_nbuf_free(nbuf_copy);
                } else {
                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
                        tid_stats->intrabss_cnt++;
                }
                return false;
        }

        if (qdf_nbuf_is_intra_bss(nbuf)) {
                if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
                                                    nbuf))
                        return true;

                len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

                /* linearize the nbuf just before we send it to
                 * dp_tx_send()
                 */
                if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
                        if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
                                return false;

                        nbuf = qdf_nbuf_unshare(nbuf);
                        if (!nbuf) {
                                DP_STATS_INC_PKT(ta_peer,
                                                 rx.intra_bss.fail, 1, len);
                                /* return true even though the pkt is
                                 * not forwarded. Basically skb_unshare
                                 * failed and we want to continue with
                                 * the next nbuf.
                                 */
                                tid_stats->fail_cnt[INTRABSS_DROP]++;
                                return true;
                        }
                }

                if (!dp_tx_send((struct cdp_soc_t *)soc,
                                ta_peer->vdev->vdev_id, nbuf)) {
                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
                                         len);
                } else {
                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
                                         len);
                        tid_stats->fail_cnt[INTRABSS_DROP]++;
                        return false;
                }

                return true;
        }
        return false;
}
#endif
#endif