/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"

#ifdef RX_DESC_DEBUG_CHECK
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
}
#else
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
}
#endif

#ifdef CONFIG_WIN
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
#else
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	if (vdev->opmode != wlan_op_mode_sta)
		return true;
	else
		return false;
}
#endif

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc, void *hal_ring,
				void *ring_desc, struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring);
	qdf_assert_always(rx_desc->in_use);
}

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;

	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);

	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   RX_BUFFER_SIZE,
					   RX_BUFFER_RESERVATION,
					   RX_BUFFER_ALIGNMENT,
					   FALSE);

		if (rx_netbuf == NULL) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_BIDIRECTIONAL);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		/*
		 * check if the physical address of nbuf->data is
		 * less than 0x50000000; if so, free the nbuf and try
		 * allocating a new nbuf. We can retry up to 100 times.
		 * This is a temporary WAR till we fix it properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
		(*desc_list)->rx_desc.in_use = 1;

		dp_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
			 rx_netbuf, qdf_nbuf_data(rx_netbuf),
			 (unsigned long long)paddr,
			 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_debug("replenished buffers %d, rx desc added back to free list %u",
		 num_req_buffers, num_desc_to_free);

	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
			 (RX_BUFFER_SIZE * num_req_buffers));

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, (struct cdp_peer *)peer);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}
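/*
 * Worked example for the entry accounting in dp_rx_buffers_replenish()
 * above. The numbers are illustrative only; actual ring sizes are
 * target specific. Assume a refill ring with num_entries = 4096:
 *
 * - init/out-of-buffer path (*desc_list == NULL): with
 *   num_entries_avail = 3500 > (4096 * 3) / 4 = 3072, num_req_buffers
 *   is raised to 3500 so the ring is topped up in one shot.
 * - dp_rx_process path (*desc_list != NULL): if the caller asks for 64
 *   buffers but only num_entries_avail = 40 entries are free,
 *   num_req_buffers is clipped to 40 and the remaining
 *   num_desc_to_free = 24 descriptors are returned to the free list
 *   under the free_descs label.
 */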
#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("PeerID %d not found use vdevID %d"),
				  peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("Invalid PeerID %d"),
				  peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  FL("Peer not found for peerID %d"),
			  peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

/**
 * dp_rx_da_learn() - Add AST entry based on DA lookup
 *		      This is a WAR for HK 1.0 and will
 *		      be removed in HK 2.0
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_peer: Transmitter peer entry
 * @nbuf: nbuf to retrieve destination mac for which AST will be added
 *
 */
#ifdef FEATURE_WDS
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
	/* For HKv2 DA port learning is not needed */
	if (qdf_likely(soc->ast_override_support))
		return;

	if (qdf_unlikely(!ta_peer))
		return;

	if (qdf_unlikely(ta_peer->vdev->opmode != wlan_op_mode_ap))
		return;

	if (!soc->da_war_enabled)
		return;

	if (qdf_unlikely(!hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
			 !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		dp_peer_add_ast(soc,
				ta_peer,
				qdf_nbuf_data(nbuf),
				CDP_TXRX_AST_TYPE_DA,
				IEEE80211_NODE_F_WDS_HM);
	}
}
#else
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
		   struct dp_peer *ta_peer,
		   uint8_t *rx_tlv_hdr,
		   qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */

	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
	     !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);

		ast_entry = soc->ast_table[da_idx];
		if (!ast_entry)
			return false;

		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
			ast_entry->is_active = TRUE;
			return false;
		}

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;
		/* TA peer cannot be same as peer(DA) on which AST is present
		 * this indicates a change in topology and that AST entries
		 * are yet to be updated.
		 */
		if (da_peer == ta_peer)
			return false;

		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
			len = qdf_nbuf_len(nbuf);

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(ta_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					return true;
				}
			}

			if (!dp_tx_send(ta_peer->vdev, nbuf)) {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
						 len);
				return true;
			} else {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
						 len);
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack.
	 * Note: open question on multicast pkts: do we forward all
	 * multicast pkts as is, or let a higher layer module like
	 * igmpsnoop decide whether to forward or not (Mcast enhancement)?
	 */
	else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			       !ta_peer->bss_peer))) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
		len = qdf_nbuf_len(nbuf_copy);

		if (dp_tx_send(ta_peer->vdev, nbuf_copy)) {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
		}
	}
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}
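/*
 * Return convention of dp_rx_intrabss_fwd() above, traced on an assumed
 * topology (STA1 and STA2 associated to the same AP vdev):
 *
 * - unicast STA1 -> STA2: the DA lookup hits STA2's AST entry on the
 *   same vdev, the nbuf is handed to dp_tx_send() and the function
 *   returns true, so the caller skips stack delivery for this nbuf.
 * - broadcast (e.g. ARP) from STA1: the nbuf is cloned, the clone is
 *   transmitted back into the BSS and the function returns false, so
 *   the original frame still goes up the network stack.
 */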
#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. It stores the memory address in skb cb.
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */

	if (rx_info == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					   &rx_info->rs_decryptkey[0],
					   &peer->mac_addr.raw[0],
					   rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
			       (bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix);

}
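/*
 * Illustration of the rs_ratephy1 packing done in dp_rx_fill_mesh_stats()
 * above (the field values below are made up for the example):
 *
 *   rate_mcs = 7, nss = 2, pkt_type = 3, bw = 1
 *   rs_ratephy1 = 7 | (2 << 8) | (3 << 16) | (1 << 24) = 0x01030207
 *
 * i.e. byte 0 carries the MCS, byte 1 the NSS, byte 2 the pkt/preamble
 * type and byte 3 the bandwidth.
 */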
/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any of the filter-out
 * categories and drops the packet if it does.
 *
 * Return: status (QDF_STATUS_SUCCESS indicates the packet is to be
 *	   dropped, QDF_STATUS_E_FAILURE indicates no drop)
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
			    && !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef CONFIG_WIN
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}
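/*
 * For reference, the direction check in dp_rx_nac_filter() above uses
 * the standard 802.11 To-DS/From-DS encoding of fc[1] &
 * IEEE80211_FC1_DIR_MASK:
 *
 *   IEEE80211_FC1_DIR_NODS   (0x00)  STA -> STA
 *   IEEE80211_FC1_DIR_TODS   (0x01)  STA -> AP
 *   IEEE80211_FC1_DIR_FROMDS (0x02)  AP -> STA
 *   IEEE80211_FC1_DIR_DSTODS (0x03)  4-address (WDS)
 *
 * Only To-DS frames can come from a not-yet-associated client, so the
 * filter bails out early for every other direction.
 */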
/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handled */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						  pdev->invalid_peer_head_msdu,
						  pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
							   &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * return: integer type
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				qdf_spin_unlock_bh(&pdev->vdev_list_lock);
				goto out;
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	if (NULL == vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @rx_tlv: RX TLV for which offload information is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(uint8_t *rx_tlv)
{
	dp_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	dp_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	dp_debug("chksum 0x%x", HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	dp_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	dp_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	dp_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	dp_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	dp_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	dp_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	dp_debug("---------------------------------------------------------");
}

/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 *
 * Return: None
 */
static
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu)
{
	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	/* Filling up RX offload info only for TCP packets */
	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
		return;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);

	dp_rx_print_offload_info(rx_tlv);
}
#else
static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
				qdf_nbuf_t msdu)
{
}
#endif /* RECEIVE_OFFLOAD */

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len > (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}
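/*
 * Worked example for dp_rx_adjust_nbuf_len() above. The numbers are
 * illustrative (RX_BUFFER_SIZE and RX_PKT_TLVS_LEN are target
 * specific): assume RX_BUFFER_SIZE = 2048 and RX_PKT_TLVS_LEN = 384,
 * i.e. 1664 payload bytes per buffer, and *mpdu_len = 3000 on entry:
 *
 *   call 1: 3000 > 1664, so pktlen = 2048, last_nbuf = false,
 *           *mpdu_len becomes 3000 - 1664 = 1336
 *   call 2: 1336 <= 1664, so pktlen = 1336 + 384 = 1720,
 *           last_nbuf = true
 *
 * Note that *mpdu_len is decremented unconditionally and therefore
 * underflows on the last call; callers only rely on the returned
 * last_nbuf indication.
 */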
/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}

static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head,
					  qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		qdf_nbuf_t nbuf;
		do {
			nbuf = nbuf_head;
			nbuf_head = nbuf_head->next;
			qdf_nbuf_free(nbuf);
		} while (nbuf_head);

		return;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail, (struct cdp_peer *)peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);

}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: core txrx pdev context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The skb's rx checksum field is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
				       qdf_nbuf_t nbuf,
				       uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}
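/*
 * Effect of dp_rx_cksum_offload() above in skb terms: when neither the
 * IP nor the TCP/UDP checksum failed in hardware, the nbuf is marked
 * QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY (the qdf analogue of
 * CHECKSUM_UNNECESSARY), so the network stack skips software checksum
 * validation. On failure the nbuf is left unmarked and only the pdev
 * error counters are bumped; the stack then verifies the checksum
 * itself.
 */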
/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @peer: pointer to the peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 *
 * update all the per msdu stats for that nbuf.
 *
 * Return: void
 */
static void dp_rx_msdu_stats_update(struct dp_soc *soc,
				    qdf_nbuf_t nbuf,
				    uint8_t *rx_tlv_hdr,
				    struct dp_peer *peer,
				    uint8_t ring_id)
{
	bool is_ampdu, is_not_amsdu;
	uint16_t peer_id;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	struct dp_vdev *vdev = peer->vdev;
	qdf_ether_header_t *eh;
	uint16_t msdu_len = qdf_nbuf_len(nbuf);

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
			hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr));

	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
		       qdf_nbuf_is_rx_chfrag_end(nbuf);

	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);

		}
	}

	/*
	 * currently we can return from here as we have similar stats
	 * updated at per ppdu level instead of msdu level
	 */
	if (!soc->process_rx_status)
		return;

	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);

	DP_STATS_INC(peer, rx.bw[bw], 1);
	DP_STATS_INC(peer, rx.nss[nss], 1);
	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
	DP_STATS_INCC(peer, rx.err.mic_err, 1,
		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));

	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);

	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));

	if ((soc->process_rx_status) &&
	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
		if (!vdev->pdev)
			return;

		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
				     &peer->stats, peer_id,
				     UPDATE_PEER_STATS,
				     vdev->pdev->pdev_id);
#endif

	}
}

static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
				      void *rx_tlv_hdr)
{
	if ((hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr) &&
	     (hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr) >
	      wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
	    (hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
	     (hal_rx_msdu_end_da_idx_get(soc->hal_soc,
					 rx_tlv_hdr) >
	      wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
		return false;

	return true;
}

#ifdef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;

	if (vdev->opmode == wlan_op_mode_ap) {
		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
			if (bss_peer->bss_peer) {
				/* if wds policy check is not enabled on this
				 * vdev, accept all frames
				 */
				if (!bss_peer->wds_ecm.wds_rx_filter) {
					return 1;
				}
				break;
			}
		}
		rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
	} else {		/* sta mode */
		if (!peer->wds_ecm.wds_rx_filter) {
			return 1;
		}
		rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *                        self
	 * peer-             rx   rx-
	 * wds   ucast mcast dir  policy accept note
	 * ------------------------------------------------
	 * 1     1     0     11   x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     01   x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10   x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     00   x1     0      bad frame, won't see it
	 * 1     0     1     11   1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     01   1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10   1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     00   1x     0      bad frame, won't see it
	 * 1     1     0     11   x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     01   x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10   x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     00   x0     0      bad frame, won't see it
	 * 1     0     1     11   0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     01   0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10   0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     00   0x     0      bad frame, won't see it
	 *
	 * 0     x     x     11   xx     0      we only accept to-ds Rx frames from non-wds peers in this mode
	 * 0     x     x     01   xx     1
	 * 0     x     x     10   xx     0
	 * 0     x     x     00   xx     0      bad frame, won't see it
	 * ------------------------------------------------
	 */

	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!peer->wds_enabled && rx_3addr && to_ds) ||
		    (peer->wds_enabled && !rx_mcast &&
		     (rx_4addr == rx_policy_ucast)) ||
		    (peer->wds_enabled && rx_mcast &&
		     (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {		/* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
		    (rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
#else
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	return 1;
}
#endif

/**
 * dp_rx_process() - Brain of the Rx processing functionality
 *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process(struct dp_intr *int_ctx, void *hal_ring,
		       uint8_t reo_ring_num, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	struct dp_peer *peer = NULL;
	struct dp_vdev *vdev = NULL;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
	struct hal_rx_msdu_desc_info msdu_desc_info = { 0 };
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t ring_id = 0;
	uint8_t core_id = 0;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	int32_t tid = 0;
	uint32_t dst_num_valid = 0;

	DP_HIST_INIT();
	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	hif_pm_runtime_mark_last_busy(soc->osdev->dev);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		hal_srng_access_end(hal_soc, hal_ring);
		goto done;
	}

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);

		/*
		 * in case HW has updated hp after we cached the hp,
		 * ring_desc can be NULL even when there are entries
		 * available in the ring. Update the cached_hp
		 * and reap the buffers available to read a complete
		 * mpdu in one reap.
		 *
		 * This is needed for RAW mode, where we have to read all
		 * msdus corresponding to an amsdu in one reap to create
		 * the SG list properly, but due to a mismatch between
		 * cached_hp and the actual hp we are sometimes unable to
		 * read the complete mpdu in one reap.
		 */
		if (qdf_unlikely(!ring_desc)) {
			dst_num_valid = hal_srng_dst_num_valid(hal_soc,
							       hal_ring,
							       true);
			if (dst_num_valid) {
				DP_STATS_INC(soc, rx.hp_oos, 1);
				hal_srng_access_end_unlocked(hal_soc,
							     hal_ring);
				continue;
			} else {
				break;
			}
		}

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		ring_id = hal_srng_ring_id_get(hal_ring);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("HAL RING 0x%pK:error %d"), hal_ring, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring,
						   ring_desc, rx_desc);
		}

		rx_bufs_reaped[rx_desc->pool_id]++;

		/* TODO */
		/*
		 * Need a separate API for unmapping based on
		 * physical address
		 */
		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
				      QDF_DMA_BIDIRECTIONAL);

		core_id = smp_processor_id();
		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf),
					       mpdu_desc_info.peer_meta_data);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (nbuf_tail)
		QDF_NBUF_CB_RX_FLUSH_IND(nbuf_tail) = 1;

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		pdev = soc->pdev_list[mac_id];
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
	}

	/* Peer can be NULL in case of LFR */
	if (qdf_likely(peer != NULL))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("MSDU DONE failure"));
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			qdf_assert(0);
		}

		peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
		peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
		peer = dp_peer_find_by_id(soc, peer_id);

		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		if (deliver_list_head && peer && (vdev != peer->vdev)) {
			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		if (qdf_likely(peer != NULL)) {
			vdev = peer->vdev;
		} else {
			DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
					 qdf_nbuf_len(nbuf));
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(vdev == NULL)) {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *	  nbuf_alloc, but we received an msdu of 2304 bytes;
		 *	  this msdu is then spread across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *	  across 1st nbuf and 2nd nbuf and last MSDU is
		 *	  spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf)))
			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		else if (qdf_unlikely(vdev->rx_decap_type ==
				      htt_cmn_pkt_type_raw)) {
			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
			nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);

			DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
			DP_STATS_INC_PKT(peer, rx.raw, 1,
					 msdu_len);

			next = nbuf->next;
		} else {
			l2_hdr_offset =
				hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);

			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
			pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			qdf_nbuf_pull_head(nbuf,
					   RX_PKT_TLVS_LEN +
					   l2_hdr_offset);
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
					    hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("Policy Check Drop pkt"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		if (qdf_unlikely(peer && peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("received pkt with same src MAC"));
			DP_STATS_INC_PKT(peer, rx.mec_drop, 1, msdu_len);

			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
				 (hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) {
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		dp_set_rx_queue(nbuf, ring_id);

		/*
		 * HW structures call this L3 header padding --
		 * even though this is actually the offset from
		 * the buffer beginning where the L2 header
		 * begins.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("rxhash: flow id toeplitz: 0x%x"),
			  hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf,
						      rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_MED,
					  FL("mesh pkt filtered"));
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				qdf_nbuf_free(nbuf);
				nbuf = next;
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
		}

#ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "p_id %d msdu_len %d hdr_off %d",
			  peer_id, msdu_len, l2_hdr_offset);

		print_hex_dump(KERN_ERR,
			       "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
			       qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* WDS Destination Address Learning */
			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);

			/* Due to HW issue, sometimes we see that the sa_idx
			 * and da_idx are invalid with sa_valid and da_valid
			 * bits set
			 *
			 * in this case we also see that value of
			 * sa_sw_peer_id is set as 0
			 *
			 * Drop the packet if sa_idx and da_idx OOB or
			 * sa_sw_peerid is 0
			 */
			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr)) {
				qdf_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				/* release the peer reference taken above */
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
			/* WDS Source Port Learning */
			if (vdev->wds_enabled)
				dp_rx_wds_srcport_learn(soc, rx_tlv_hdr,
							peer, nbuf);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd(soc,
						       peer,
						       rx_tlv_hdr,
						       nbuf)) {
					nbuf = next;
					dp_peer_unref_del_find_by_id(peer);
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf);
		qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);

		/* Get TID from first msdu per MPDU, save to skb->priority */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							rx_tlv_hdr);
		DP_RX_TID_SAVE(nbuf, tid);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 qdf_nbuf_len(nbuf));

		nbuf = next;
		dp_peer_unref_del_find_by_id(peer);
	}

	if (deliver_list_head)
		dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
				       deliver_list_tail);

	return rx_bufs_used; /* Assume no scale factor for now */
}
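/*
 * Illustrative call site for dp_rx_process() (a sketch of the usual
 * interrupt-context usage; the actual caller lives in the interrupt
 * service path, e.g. dp_service_srngs(), not in this file):
 *
 *	if (int_ctx->rx_ring_mask & (1 << ring))
 *		work_done = dp_rx_process(int_ctx,
 *					  soc->reo_dest_ring[ring].hal_srng,
 *					  ring, remaining_quota);
 *
 * The returned count feeds back into the budget accounting so that one
 * REO ring cannot starve the others.
 */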
/**
 * dp_rx_pdev_detach() - detach dp rx
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX from the main device context
 * and free the DP Rx resources.
 *
 * Return: void
 */
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];

	if (rx_desc_pool->pool_size != 0) {
		dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
	}

	return;
}

/**
 * dp_rx_pdev_attach() - attach DP RX
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context. Will allocate dp rx resources and
 * initialize them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "nss-wifi<4> skip Rx refill %d", pdev_id);
		return QDF_STATUS_SUCCESS;
	}

	pdev = soc->pdev_list[pdev_id];
	dp_rxdma_srng = &pdev->rx_refill_buf_ring;
	rxdma_entries = dp_rxdma_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];
	dp_rx_desc_pool_alloc(soc, pdev_id,
			      DP_RX_DESC_ALLOC_MULTIPLIER * rxdma_entries,
			      rx_desc_pool);

	rx_desc_pool->owner = DP_WBM2SW_RBM;
	/* For Rx buffers, WBM release ring is SW RING 3, for all pdev's */

	dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool,
				0, &desc_list, &tail);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_nbuf_prepare() - prepare RX nbuf
 * @soc: core txrx main context
 * @pdev: core txrx pdev context
 *
 * This function allocates and maps an nbuf for RX dma usage, retrying
 * on failure until the retry count reaches the max threshold or the
 * alloc/map succeeds.
 *
 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
 */
qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint8_t *buf;
	int32_t nbuf_retry_count;
	QDF_STATUS ret;
	qdf_nbuf_t nbuf = NULL;

	for (nbuf_retry_count = 0; nbuf_retry_count <
		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
			nbuf_retry_count++) {
		/* Allocate a new skb */
		nbuf = qdf_nbuf_alloc(soc->osdev,
				      RX_BUFFER_SIZE,
				      RX_BUFFER_RESERVATION,
				      RX_BUFFER_ALIGNMENT,
				      FALSE);

		if (nbuf == NULL) {
			DP_STATS_INC(pdev,
				     replenish.nbuf_alloc_fail, 1);
			continue;
		}

		buf = qdf_nbuf_data(nbuf);

		memset(buf, 0, RX_BUFFER_SIZE);

		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
					  QDF_DMA_BIDIRECTIONAL);

		/* nbuf map failed */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(pdev, replenish.map_err, 1);
			continue;
		}
		/* qdf_nbuf alloc and map succeeded */
		break;
	}

	/* qdf_nbuf still alloc or map failed */
	if (qdf_unlikely(nbuf_retry_count >=
			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
		return NULL;

	return nbuf;
}
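/*
 * Minimal usage sketch for dp_rx_nbuf_prepare() above (illustrative
 * only; real callers are paths such as monitor mode that need a
 * guaranteed allocated and mapped buffer):
 *
 *	qdf_nbuf_t nbuf = dp_rx_nbuf_prepare(soc, pdev);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;  (retries exhausted)
 *	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *	... hand paddr and the nbuf to the target ring entry ...
 */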