/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"
#include "dp_ipa.h"

#ifdef ATH_RX_PRI_SAVE
#define DP_RX_TID_SAVE(_nbuf, _tid) \
	(qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif

#ifdef CONFIG_MCL
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->opmode != wlan_op_mode_sta;
}
#else
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
#endif

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc, void *hal_ring,
				void *ring_desc, struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring);
	qdf_assert_always(0);
}

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: QDF_STATUS - success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);

	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   RX_BUFFER_SIZE,
					   RX_BUFFER_RESERVATION,
					   RX_BUFFER_ALIGNMENT,
					   FALSE);

		if (qdf_unlikely(!rx_netbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_FROM_DEVICE);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		/*
		 * check if the physical address of nbuf->data is
		 * less than 0x50000000; if so, free the nbuf and try
		 * allocating a new nbuf. We can try up to 100 times.
		 * This is a temporary WAR till we fix it properly.
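		 *
		 * Note: if check_x86_paddr() still fails after the bounded
		 * retries described above, the loop breaks out early; the
		 * entries already written to the ring are still published
		 * to HW by hal_srng_access_end() below.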
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;

		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
				 (unsigned long long)paddr,
				 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;

		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf, true);
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
			 num_req_buffers, num_desc_to_free);

	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
			 (RX_BUFFER_SIZE * num_req_buffers));

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, (struct cdp_peer *)peer);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
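 * In that window the peer lookup by peer_id fails, so the vdev is
 * recovered from the vdev_id carried in the MPDU peer metadata
 * instead (see dp_get_vdev_from_peer() below).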
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("PeerID %d not found use vdevID %d"),
				  peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("Invalid PeerID %d"),
				  peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  FL("Peer not found for peerID %d"),
			  peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

/**
 * dp_rx_da_learn() - Add AST entry based on DA lookup
 *		      This is a WAR for HK 1.0 and will
 *		      be removed in HK 2.0
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_peer: Transmitter peer entry
 * @nbuf: nbuf to retrieve destination mac for which AST will be added
 *
 */
#ifdef FEATURE_WDS
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
	/* For HKv2 DA port learning is not needed */
	if (qdf_likely(soc->ast_override_support))
		return;

	if (qdf_unlikely(!ta_peer))
		return;

	if (qdf_unlikely(ta_peer->vdev->opmode != wlan_op_mode_ap))
		return;

	if (!soc->da_war_enabled)
		return;

	if (qdf_unlikely(!qdf_nbuf_is_da_valid(nbuf) &&
			 !qdf_nbuf_is_da_mcbc(nbuf))) {
		dp_peer_add_ast(soc,
				ta_peer,
				qdf_nbuf_data(nbuf),
				CDP_TXRX_AST_TYPE_DA,
				IEEE80211_NODE_F_WDS_HM);
	}
}
#else
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
		   struct dp_peer *ta_peer,
		   uint8_t *rx_tlv_hdr,
		   qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	uint8_t is_frag;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	struct cdp_tid_rx_stats *tid_stats =
		&ta_peer->vdev->pdev->stats.tid_stats.tid_rx_stats[tid];

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
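	 *
	 * The lookup chain is: DA index from the msdu_end TLV ->
	 * soc->ast_table entry -> da_peer. Forwarding is attempted only
	 * for a unicast DA that resolves to a different peer on the
	 * same vdev.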
	 */

	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
		da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);

		ast_entry = soc->ast_table[da_idx];
		if (!ast_entry)
			return false;

		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
			ast_entry->is_active = TRUE;
			return false;
		}

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;
		/* TA peer cannot be same as peer(DA) on which AST is present
		 * this indicates a change in topology and that AST entries
		 * are yet to be updated.
		 */
		if (da_peer == ta_peer)
			return false;

		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			is_frag = qdf_nbuf_is_frag(nbuf);
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(is_frag)) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(ta_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					tid_stats->fail_cnt[INTRABSS_DROP]++;
					return true;
				}
			}

			if (!dp_tx_send(ta_peer->vdev, nbuf)) {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
						 len);
				return true;
			} else {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
						 len);
				tid_stats->fail_cnt[INTRABSS_DROP]++;
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack.
	 * Note: it is an open question how to handle multicast pkts:
	 * either forward all multicast pkts as-is, or let a higher
	 * layer module like igmpsnoop decide whether to forward or
	 * not with Mcast enhancement.
	 */
	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
			       !ta_peer->bss_peer))) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;

		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));

		if (dp_tx_send(ta_peer->vdev, nbuf_copy)) {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
			tid_stats->intrabss_cnt++;
		}
	}
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
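 * The allocated mesh_recv_hdr_s is attached to the nbuf via
 * qdf_nbuf_set_rx_fctx_type(); ownership passes to the upper layer
 * that consumes the nbuf, which must free it.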
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */

	if (!rx_info) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					   &rx_info->rs_decryptkey[0],
					   &peer->mac_addr.raw[0],
					   rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix);
}

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any of the filter-out
 * categories and drops the packet if it matches.
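 * The MESH_FILTER_OUT_* bits in vdev->mesh_rx_filter may be OR'ed
 * together; the checks below run in sequence and the first match
 * causes the drop.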
 *
 * Return: QDF_STATUS_SUCCESS if the frame matched a filter and must be
 *	   dropped, QDF_STATUS_E_FAILURE otherwise (no drop)
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr) &&
			    !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	if (!HAL_IS_DECAP_FORMAT_RAW(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "Drop decapped frames");
		goto free;
	}

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handled */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						  pdev->invalid_peer_head_msdu,
						  pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
							   &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * return: integer type
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE) == 0) {
				qdf_spin_unlock_bh(&pdev->vdev_list_lock);
				goto out;
			}
		}
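		/* wh->i_addr1 did not match any vdev on this pdev;
		 * release the vdev list lock and move on to the next pdev.
		 */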
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @rx_tlv: RX TLV for which offload information is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(uint8_t *rx_tlv)
{
	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	dp_verbose_debug("chksum 0x%x", HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	dp_verbose_debug("---------------------------------------------------------");
}

/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 *
 * Return: None
 */
static
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu)
{
	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	/* Filling up RX offload info only for TCP packets */
	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
		return;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);

	dp_rx_print_offload_info(rx_tlv);
}
#else
static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
				qdf_nbuf_t msdu)
{
}
#endif /* RECEIVE_OFFLOAD */

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len > (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
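	 *
	 * Worked example (hypothetical numbers): with a per-nbuf payload
	 * of P = RX_BUFFER_SIZE - RX_PKT_TLVS_LEN bytes, an mpdu_len of
	 * 2.5 * P spans three nbufs: the parent and the first fragment
	 * each carry P bytes, the last fragment carries the remaining
	 * 0.5 * P, and dp_rx_adjust_nbuf_len() reports last_nbuf = true
	 * on that third call.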
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}

/**
 * dp_rx_compute_delay() - Compute and fill in all timestamps
 *			   to pass in correct fields
 *
 * @vdev: vdev handle
 * @nbuf: rx buffer
 *
 * Return: none
 */
void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint32_t interframe_delay =
		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);

	dp_update_delay_stats(vdev->pdev, to_stack, tid,
			      CDP_DELAY_STATS_REAP_STACK);
	/*
	 * Update interframe delay stats calculated at deliver_data_ol point.
	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
	 * interframe delay will not be calculated correctly for the 1st frame.
	 * On the other side, this will help in avoiding an extra per packet
	 * check of vdev->prev_rx_deliver_tstamp.
	 */
	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
			      CDP_DELAY_STATS_RX_INTERFRAME);
	vdev->prev_rx_deliver_tstamp = current_ts;
}

/**
 * dp_rx_drop_nbuf_list() - drop an nbuf list
 * @pdev: dp pdev reference
 * @buf_list: buffer list to be dropped
 *
 * Return: int (number of bufs dropped)
 */
static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
				       qdf_nbuf_t buf_list)
{
	struct cdp_tid_rx_stats *stats = NULL;
	uint8_t tid = 0;
	int num_dropped = 0;
	qdf_nbuf_t buf, next_buf;

	buf = buf_list;
	while (buf) {
		next_buf = qdf_nbuf_queue_next(buf);
		tid = qdf_nbuf_get_tid_val(buf);
		stats = &pdev->stats.tid_stats.tid_rx_stats[tid];
		stats->fail_cnt[INVALID_PEER_VDEV]++;
		stats->delivered_to_stack--;
		qdf_nbuf_free(buf);
		buf = next_buf;
		num_dropped++;
	}

	return num_dropped;
}

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: flag to drop frames or forward to net stack
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
	struct dp_peer_cached_bufq *bufqi;
	struct dp_rx_cached_buf *cache_buf = NULL;
	ol_txrx_rx_fp data_rx = NULL;
	int num_buff_elem;
	QDF_STATUS status;

	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
		qdf_atomic_dec(&peer->flush_in_progress);
		return;
	}

	qdf_spin_lock_bh(&peer->peer_info_lock);
	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
		data_rx = peer->vdev->osif_rx;
	else
		drop = true;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	bufqi = &peer->bufq_info;

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_remove_front(&bufqi->cached_bufq,
			      (qdf_list_node_t **)&cache_buf);
	while (cache_buf) {
		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
								cache_buf->buf);
		bufqi->entries -= num_buff_elem;
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		if (drop) {
			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
							      cache_buf->buf);
		} else {
			/* Flush the cached frames to OSIF DEV */
			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
			if (status != QDF_STATUS_SUCCESS)
				bufqi->dropped = dp_rx_drop_nbuf_list(
							peer->vdev->pdev,
							cache_buf->buf);
		}
		qdf_mem_free(cache_buf);
		cache_buf = NULL;
		qdf_spin_lock_bh(&bufqi->bufq_lock);
		qdf_list_remove_front(&bufqi->cached_bufq,
				      (qdf_list_node_t **)&cache_buf);
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);
	qdf_atomic_dec(&peer->flush_in_progress);
}

/**
 * dp_rx_enqueue_rx() - cache rx frames
 * @peer: peer
 * @rx_buf_list: cache buffer list
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
{
	struct dp_rx_cached_buf *cache_buf;
	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
	int num_buff_elem;

	QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_TXRX, "bufq->curr %d bufq->drops %d",
			   bufqi->entries, bufqi->dropped);

	if (!peer->valid) {
		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
						      rx_buf_list);
		return QDF_STATUS_E_INVAL;
	}

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	if (bufqi->entries >= bufqi->thresh) {
		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
						      rx_buf_list);
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		return QDF_STATUS_E_RESOURCES;
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);

	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
	if (!cache_buf) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to allocate buf to cache rx frames");
		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
						      rx_buf_list);
		return QDF_STATUS_E_NOMEM;
	}

	cache_buf->buf = rx_buf_list;

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_insert_back(&bufqi->cached_bufq,
			     &cache_buf->node);
	bufqi->entries += num_buff_elem;
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

	return QDF_STATUS_SUCCESS;
}

static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return true;
}
#else
static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return false;
}

static inline QDF_STATUS
dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
{
	return QDF_STATUS_SUCCESS;
}
#endif

static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head,
					  qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
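	 *
	 * When PEER_CACHE_RX_PKTS is enabled the list is parked in the
	 * peer's cached bufq instead of being freed, to be flushed later
	 * via dp_rx_flush_rx_cached() above once a callback is available.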
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		if (dp_rx_is_peer_cache_bufq_supported())
			dp_rx_enqueue_rx(peer, nbuf_head);
		else
			dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);

		return;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail, (struct cdp_peer *)peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: dp pdev handle
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ipsumed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
				       qdf_nbuf_t nbuf,
				       uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}

/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @peer: pointer to the peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 * @tid_stats: per tid rx stats.
 *
 * update all the per msdu stats for that nbuf.
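 *
 * Note: the per-rate/per-MCS counters below are updated only when
 * soc->process_rx_status is set; otherwise equivalent stats are
 * accounted at PPDU level.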
 * Return: void
 */
static void dp_rx_msdu_stats_update(struct dp_soc *soc,
				    qdf_nbuf_t nbuf,
				    uint8_t *rx_tlv_hdr,
				    struct dp_peer *peer,
				    uint8_t ring_id,
				    struct cdp_tid_rx_stats *tid_stats)
{
	bool is_ampdu, is_not_amsdu;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	struct dp_vdev *vdev = peer->vdev;
	qdf_ether_header_t *eh;
	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
			qdf_nbuf_is_rx_chfrag_end(nbuf);

	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);

	tid_stats->msdu_cnt++;
	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
		tid_stats->mcast_msdu_cnt++;
		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
			tid_stats->bcast_msdu_cnt++;
		}
	}

	/*
	 * currently we can return from here as we have similar stats
	 * updated at per ppdu level instead of msdu level
	 */
	if (!soc->process_rx_status)
		return;

	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	tid = qdf_nbuf_get_tid_val(nbuf);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);

	DP_STATS_INC(peer, rx.bw[bw], 1);
	/*
	 * only if nss > 0 and pkt_type is 11N/AC/AX,
	 * then increase index [nss - 1] in array counter.
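	 * nss is 1-based while the counter array is 0-based, and legacy
	 * 11a/11b rates carry no meaningful nss, hence the pkt_type gate.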
	 */
	if (nss > 0 && (pkt_type == DOT11_N ||
			pkt_type == DOT11_AC ||
			pkt_type == DOT11_AX))
		DP_STATS_INC(peer, rx.nss[nss - 1], 1);

	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
	DP_STATS_INCC(peer, rx.err.mic_err, 1,
		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));

	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);

	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));

	if ((soc->process_rx_status) &&
	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
		if (!vdev->pdev)
			return;

		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
				     &peer->stats, peer->peer_ids[0],
				     UPDATE_PEER_STATS,
				     vdev->pdev->pdev_id);
#endif
	}
}

static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
				      void *rx_tlv_hdr,
				      qdf_nbuf_t nbuf)
{
	if ((qdf_nbuf_is_sa_valid(nbuf) &&
	     (hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr) >
	      wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
	    (qdf_nbuf_is_da_valid(nbuf) &&
	     (hal_rx_msdu_end_da_idx_get(soc->hal_soc,
					 rx_tlv_hdr) >
	      wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
		return false;

	return true;
}

#ifdef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_peer *peer)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;
	int rx_mcast = hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr);

	if (vdev->opmode == wlan_op_mode_ap) {
		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
			if (bss_peer->bss_peer) {
				/* if wds policy check is not enabled on this
				 * vdev, accept all frames
				 */
				if (!bss_peer->wds_ecm.wds_rx_filter) {
					return 1;
				}
				break;
			}
		}
		rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
	} else {		/* sta mode */
		if (!peer->wds_ecm.wds_rx_filter) {
			return 1;
		}
		rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *				self
	 * peer-	rx	rx-
	 * wds		ucast	mcast	dir	policy	accept	note
	 * ------------------------------------------------
	 * 1	1	0	11	x1	1	AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1	1	0	01	x1	0	AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1	1	0	10	x1	0	AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1	1	0	00	x1	0	bad frame, won't see it
	 * 1	0	1	11	1x	1	AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1	0	1	01	1x	0	AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1	0	1	10	1x	0	AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1	0	1	00	1x	0	bad frame, won't see it
	 * 1	1	0	11	x0	0	AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1	1	0	01	x0	0	AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1	1	0	10	x0	1	AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1	1	0	00	x0	0	bad frame, won't see it
	 * 1	0	1	11	0x	0	AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1	0	1	01	0x	0	AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1	0	1	10	0x	1	AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1	0	1	00	0x	0	bad frame, won't see it
	 *
	 * 0	x	x	11	xx	0	we only accept to-ds Rx frames from non-wds peers in this mode.
	 * 0	x	x	01	xx	1
	 * 0	x	x	10	xx	0
	 * 0	x	x	00	xx	0	bad frame, won't see it
	 * ------------------------------------------------
	 */

	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!peer->wds_enabled && rx_3addr && to_ds) ||
		    (peer->wds_enabled && !rx_mcast &&
		     (rx_4addr == rx_policy_ucast)) ||
		    (peer->wds_enabled && rx_mcast &&
		     (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {		/* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
		    (rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
#else
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_peer *peer)
{
	return 1;
}
#endif

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
 *				  corruption
 *
 * @ring_desc: REO ring descriptor
 * @rx_desc: Rx descriptor
 *
 * Return: NONE
 */
static inline void dp_rx_desc_nbuf_sanity_check(void *ring_desc,
						struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	qdf_assert_always((&hbi)->paddr ==
			  qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
}
#else
static inline void dp_rx_desc_nbuf_sanity_check(void *ring_desc,
						struct dp_rx_desc *rx_desc)
{
}
#endif

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	bool limit_hit = false;
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	limit_hit = num_reaped >= cfg->rx_reap_loop_pkt_limit;

	if (limit_hit)
		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
}

#else
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	return false;
}

static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}

#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
/**
 * dp_rx_process() - Brain of the Rx processing functionality
 *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context, from which the soc handle is derived
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process(struct dp_intr *int_ctx, void *hal_ring,
		       uint8_t reo_ring_num, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	struct dp_peer *peer;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *pdev;
	struct dp_pdev *rx_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t ring_id = 0;
	uint8_t core_id = 0;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t num_entries_avail = 0;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	hif_pm_runtime_mark_last_busy(soc->osdev->dev);
	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
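	 *
	 * Each iteration only peeks at the ring head; the descriptor is
	 * popped (hal_srng_dst_get_next()) after validation, so a raw
	 * A-MPDU whose buffers have not all arrived can be left on the
	 * ring for a later pass.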
	 */
	while (qdf_likely(quota &&
			  (ring_desc = hal_srng_dst_peek(hal_soc,
							 hal_ring)))) {

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		ring_id = hal_srng_ring_id_get(hal_ring);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("HAL RING 0x%pK:error %d"),
				  hal_ring, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_err("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring,
						   ring_desc, rx_desc);
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring,
						   ring_desc, rx_desc);
		}

		/* TODO */
		/*
		 * Need a separate API for unmapping based on
		 * physical address
		 */
		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
				      QDF_DMA_FROM_DEVICE);
		rx_desc->unmapped = 1;

		core_id = smp_processor_id();
		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU)) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				is_prev_msdu_last = false;
				/* Get number of entries available in HW ring */
				num_entries_avail =
					hal_srng_dst_num_valid(hal_soc,
							       hal_ring, 1);

				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if (((msdu_desc_info.msdu_len /
				      (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN) +
				      1)) > num_entries_avail)
					break;
			} else {
				if (msdu_desc_info.msdu_flags &
				    HAL_MSDU_F_LAST_MSDU_IN_MPDU)
					is_prev_msdu_last = true;
			}
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
		}

		/* Pop out the descriptor */
		hal_srng_dst_get_next(hal_soc, hal_ring);

		rx_bufs_reaped[rx_desc->pool_id]++;
		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			DP_PEER_METADATA_PEER_ID_GET(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
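		 *
		 * The flags cached here are what the per-vdev processing
		 * loop below consumes, so these fields need not be
		 * re-parsed from the rx TLVs later.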
		/*
		 * Save the msdu flags (first, last and continuation) in
		 * nbuf->cb, and also save mcbc, is_da_valid, is_sa_valid
		 * and length there. This ensures the info required for
		 * per-pkt processing is always in the same cache line,
		 * which helps throughput for smaller pkt sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);

		/*
		 * If the continuation bit is set then we have an MSDU
		 * spread across multiple buffers; do not decrement quota
		 * until all buffers of that MSDU have been reaped.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);

		num_rx_bufs_reaped++;
		if (dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
			break;
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (nbuf_tail)
		QDF_NBUF_CB_RX_FLUSH_IND(nbuf_tail) = 1;

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * Continue with the next mac_id if no pkts were reaped
		 * from that pool.
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		pdev = soc->pdev_list[mac_id];
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from the global queue,
	 * processed and queued back on a per-vdev basis. These nbufs
	 * are sent to the stack as and when we run out of nbufs, or
	 * when a newly dequeued nbuf has a different vdev than the
	 * previous one.
	 */
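	/*
	 * Delivery batching in the loop below, in outline:
	 *
	 *	for each nbuf on the global list:
	 *		validate, strip TLVs, run per-vdev checks
	 *		if the vdev changed from the previous nbuf:
	 *			flush deliver_list to the stack
	 *		append nbuf to deliver_list
	 *	flush whatever remains on deliver_list
	 */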
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
			tid = qdf_nbuf_get_tid_val(nbuf);

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		rx_pdev = soc->pdev_list[rx_desc->pool_id];
		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(rx_pdev->delay_stats_flag))
			qdf_nbuf_set_timestamp(nbuf);

		tid_stats = &rx_pdev->stats.tid_stats.tid_rx_stats[tid];
		if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
			dp_err("MSDU DONE failure");
			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
			qdf_nbuf_free(nbuf);
			qdf_assert(0);
			nbuf = next;
			continue;
		}

		peer_mdata = QDF_NBUF_CB_RX_PEER_ID(nbuf);
		peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
		peer = dp_peer_find_by_id(soc, peer_id);

		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		if (deliver_list_head && peer && (vdev != peer->vdev)) {
			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		if (qdf_likely(peer)) {
			vdev = peer->vdev;
		} else {
			DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
					 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
			tid_stats->fail_cnt[INVALID_PEER_VDEV]++;
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!vdev)) {
			tid_stats->fail_cnt[INVALID_PEER_VDEV]++;
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 fragmented pkts are reinjected to the REO
		 * HW block as SG pkts; for these pkts we only need
		 * to pull the RX TLVs header length.
		 * Second IF condition:
		 * This condition is hit when an MSDU is spread across
		 * multiple buffers, which can happen in two cases:
		 * 1. The nbuf size is smaller than the received msdu,
		 *    e.g. we set the nbuf size to 2048 during
		 *    nbuf_alloc but received an msdu of 2304 bytes,
		 *    so that msdu is spread across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled,
		 *    e.g. the 1st MSDU is in the 1st nbuf, the 2nd MSDU
		 *    is spread across the 1st and 2nd nbufs, and the
		 *    last MSDU is spread across the 2nd and 3rd nbufs.
		 *
		 * For these scenarios we create an skb frag_list and
		 * append the buffers till the last MSDU of the AMSDU.
		 * Third condition:
		 * This is the most likely case: we receive 802.3 pkts
		 * decapsulated by HW; here we need to set the pkt length.
		 */
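		/*
		 * Worked example for the third (802.3 decap) case,
		 * assuming a hypothetical RX_PKT_TLVS_LEN of 384 bytes
		 * and an l2_hdr_offset of 2 (both values are
		 * target/build dependent): for a 1400 byte MSDU,
		 * pkt_len = 1400 + 2 + 384 = 1786, so the nbuf length
		 * is set to 1786 and 386 bytes are then pulled so that
		 * nbuf->data points at the ethernet header.
		 */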
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr);
			is_sa_vld = hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr);
			is_da_vld = hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		} else if (qdf_nbuf_is_raw_frame(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);

			DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
			DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);

			next = nbuf->next;
		} else {
			l2_hdr_offset =
				hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);

			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			qdf_nbuf_pull_head(nbuf,
					   RX_PKT_TLVS_LEN +
					   l2_hdr_offset);
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("Policy Check Drop pkt"));
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) ==
				  false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
					ring_id, tid_stats);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			    == QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_MED,
					  FL("mesh pkt filtered"));
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				qdf_nbuf_free(nbuf);
				nbuf = next;
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* WDS Destination Address Learning */
			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);

			/*
			 * Due to a HW issue, sometimes the sa_idx and
			 * da_idx are invalid even though the sa_valid
			 * and da_valid bits are set; in this case
			 * sa_sw_peer_id is also seen as 0.
			 *
			 * Drop the packet if sa_idx or da_idx is OOB
			 * or sa_sw_peer_id is 0.
			 */
			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf)) {
				qdf_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
			/* WDS Source Port Learning */
			if (qdf_likely(vdev->wds_enabled))
				dp_rx_wds_srcport_learn(soc, rx_tlv_hdr,
							peer, nbuf);

			/* Intra-BSS forwarding */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd(soc,
						       peer,
						       rx_tlv_hdr,
						       nbuf)) {
					nbuf = next;
					dp_peer_unref_del_find_by_id(peer);
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf);
		qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
		dp_peer_unref_del_find_by_id(peer);
	}

	if (deliver_list_head)
		dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
				       deliver_list_tail);

	if (dp_rx_enable_eol_data_check(soc)) {
		if (quota &&
		    hal_srng_dst_peek_sync_locked(soc, hal_ring)) {
			DP_STATS_INC(soc, rx.hp_oos2, 1);
			if (!hif_exec_should_yield(scn, intr_id))
				goto more_data;
		}
	}
	/* Update histogram statistics by looping through pdevs */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_pdev_detach() - detach dp rx
 * @pdev: core txrx pdev context
 *
 * This function detaches DP RX from the main device context
 * and frees the DP RX resources.
 *
 * Return: void
 */
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];

	if (rx_desc_pool->pool_size != 0) {
		if (!dp_is_soc_reinit(soc))
			dp_rx_desc_nbuf_and_pool_free(soc, pdev_id,
						      rx_desc_pool);
		else
			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	}
}

static QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers,
			  union dp_rx_desc_list_elem_t **desc_list,
			  union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	void *rxdma_srng = dp_rxdma_srng->hal_srng;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;
	void **rx_nbuf_arr;
	uint32_t nr_descs;
	uint32_t nr_nbuf;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int i;

	if (qdf_unlikely(!rxdma_srng)) {
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %u RX buffers for driver attach",
		  num_req_buffers);

	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
					    num_req_buffers, desc_list, tail);
	if (!nr_descs) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "got %u RX descs for driver attach", nr_descs);

	rx_nbuf_arr = qdf_mem_malloc(nr_descs * sizeof(*rx_nbuf_arr));
	if (!rx_nbuf_arr) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "failed to allocate nbuf array");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	for (nr_nbuf = 0; nr_nbuf < nr_descs; nr_nbuf++) {
		nbuf = qdf_nbuf_alloc(dp_soc->osdev, RX_BUFFER_SIZE,
				      RX_BUFFER_RESERVATION,
				      RX_BUFFER_ALIGNMENT,
				      FALSE);
		if (!nbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "nbuf alloc failed");
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, nbuf,
					  QDF_DMA_FROM_DEVICE);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "nbuf map failed");
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);

		ret = check_x86_paddr(dp_soc, &nbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			qdf_nbuf_unmap_single(dp_soc->osdev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "nbuf check x86 failed");
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		rx_nbuf_arr[nr_nbuf] = (void *)nbuf;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "allocated %u nbuf for driver attach", nr_nbuf);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

	for (i = 0; i < nr_nbuf; i++) {
		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;
		nbuf = rx_nbuf_arr[i];
		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);

		dp_rx_desc_prep(&((*desc_list)->rx_desc), nbuf);
		(*desc_list)->rx_desc.in_use = 1;

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, nbuf, true);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "filled %u RX buffers for driver attach", nr_nbuf);
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, RX_BUFFER_SIZE *
			 nr_nbuf);

	qdf_mem_free(rx_nbuf_arr);

	return QDF_STATUS_SUCCESS;
}
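/*
 * Note on the structure of dp_pdev_rx_buffers_attach() above: nbuf
 * allocation, DMA mapping and the x86 address check happen in a first
 * pass that only fills rx_nbuf_arr, while the SRNG entries are
 * programmed in a second pass inside a single access_start/access_end
 * session. Keeping the allocations outside that session keeps the
 * ring lock hold time short during driver attach.
 */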
/**
 * dp_rx_pdev_attach() - attach DP RX
 * @pdev: core txrx pdev context
 *
 * This function attaches a DP RX instance into the main
 * device (SOC) context, allocating and initializing the
 * DP RX resources.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "nss-wifi<4> skip Rx refill %d", pdev_id);
		return QDF_STATUS_SUCCESS;
	}

	pdev = soc->pdev_list[pdev_id];
	dp_rxdma_srng = &pdev->rx_refill_buf_ring;
	rxdma_entries = dp_rxdma_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];
	dp_rx_desc_pool_alloc(soc, pdev_id,
			      DP_RX_DESC_ALLOC_MULTIPLIER * rxdma_entries,
			      rx_desc_pool);

	rx_desc_pool->owner = DP_WBM2SW_RBM;
	/* For Rx buffers, the WBM release ring is SW RING 3 for all pdevs */

	return dp_pdev_rx_buffers_attach(soc, pdev_id, dp_rxdma_srng,
					 rx_desc_pool, rxdma_entries - 1,
					 &desc_list, &tail);
}

/*
 * dp_rx_nbuf_prepare() - prepare RX nbuf
 * @soc: core txrx main context
 * @pdev: core txrx pdev context
 *
 * This function allocates and maps an nbuf for RX DMA usage, retrying
 * on failure until it either succeeds or the retry count reaches the
 * max threshold.
 *
 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
 */
qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint8_t *buf;
	int32_t nbuf_retry_count;
	QDF_STATUS ret;
	qdf_nbuf_t nbuf = NULL;

	for (nbuf_retry_count = 0; nbuf_retry_count <
	     QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
	     nbuf_retry_count++) {
		/* Allocate a new skb */
		nbuf = qdf_nbuf_alloc(soc->osdev,
				      RX_BUFFER_SIZE,
				      RX_BUFFER_RESERVATION,
				      RX_BUFFER_ALIGNMENT,
				      FALSE);

		if (!nbuf) {
			DP_STATS_INC(pdev,
				     replenish.nbuf_alloc_fail, 1);
			continue;
		}

		buf = qdf_nbuf_data(nbuf);

		memset(buf, 0, RX_BUFFER_SIZE);

		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
					  QDF_DMA_FROM_DEVICE);

		/* nbuf map failed */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(pdev, replenish.map_err, 1);
			continue;
		}
		/* qdf_nbuf alloc and map succeeded */
		break;
	}

	/* nbuf alloc or map failed even after all retries */
	if (qdf_unlikely(nbuf_retry_count >=
			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
		return NULL;

	return nbuf;
}
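/*
 * Usage sketch for dp_rx_nbuf_prepare() (a hypothetical caller, not
 * part of this file): a replenish-style path could lean on it to hide
 * the alloc+map retry handling, e.g.
 *
 *	qdf_nbuf_t nbuf = dp_rx_nbuf_prepare(soc, pdev);
 *	qdf_dma_addr_t paddr;
 *
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;	(give up for this cycle)
 *	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *	(... program the ring entry with paddr and a descriptor cookie ...)
 */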