/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"

#ifdef RX_DESC_DEBUG_CHECK
/*
 * dp_rx_desc_prep() - attach an nbuf to a host rx descriptor.
 * In debug builds the descriptor is also stamped with DP_RX_DESC_MAGIC
 * so later sanity checks can detect a corrupted/stale descriptor.
 */
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
}
#else
/* Non-debug variant: only record the nbuf, no magic stamping. */
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
}
#endif

#ifdef CONFIG_WIN
/*
 * dp_rx_check_ap_bridge() - whether intra-BSS (AP bridge) forwarding is
 * allowed on this vdev. WIN builds use the per-vdev configuration flag.
 */
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
#else
/*
 * Non-WIN builds: bridge on every opmode except STA (a station never
 * forwards between its own peers).
 */
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	if (vdev->opmode != wlan_op_mode_sta)
		return true;
	else
		return false;
}
#endif

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque
 * pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc, void *hal_ring,
				void *ring_desc, struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	/* Dump the descriptor and ring state before asserting so the
	 * failure context is available in the logs.
	 */
	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring);
	qdf_assert_always(rx_desc->in_use);
}

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffer to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;

	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);

	/* With no caller-supplied descriptors and the ring more than 3/4
	 * empty, top the ring all the way up; otherwise clamp the request
	 * to the available entries and remember the surplus for freeing.
	 */
	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		/* NOTE(review): num_req_buffers is 0 here, so this
		 * assignment zeroes any num_desc_to_free computed above and
		 * the buf_freelist stat under free_descs will under-count.
		 * Looks unintentional - confirm against the stats consumer.
		 */
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}


	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   RX_BUFFER_SIZE,
					   RX_BUFFER_RESERVATION,
					   RX_BUFFER_ALIGNMENT,
					   FALSE);

		if (rx_netbuf == NULL) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			/* NOTE(review): count is not advanced on failure, so
			 * sustained allocation failure spins in this loop.
			 * Verify an upper retry bound is acceptable here.
			 */
			continue;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_BIDIRECTIONAL);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		/*
		 * check if the physical address of nbuf->data is
		 * less then 0x50000000 then free the nbuf and try
		 * allocating new nbuf. We can try for 100 times.
		 * this is a temp WAR till we fix it properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		/* Bind the nbuf to the descriptor and mark it in use before
		 * handing the buffer address to the hardware ring entry.
		 */
		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
		(*desc_list)->rx_desc.in_use = 1;

		dp_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
			 rx_netbuf, qdf_nbuf_data(rx_netbuf),
			 (unsigned long long)paddr,
			 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_debug("replenished buffers %d, rx desc added back to free list %u",
		 num_req_buffers, num_desc_to_free);

	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
			 (RX_BUFFER_SIZE * num_req_buffers));

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	/* Walk the chained nbuf list, moving each nbuf onto a local
	 * delivery list and accounting it in pdev/peer raw-rx stats.
	 */
	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	/* Let the RAW-mode simulation decapsulate in place, then hand the
	 * resulting list to the OS shim rx callback.
	 */
	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, (struct cdp_peer *) peer);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}


#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	/* LFR: the peer object may not exist yet; fall back to the vdev id
	 * carried in the MPDU peer metadata when the peer id is valid.
	 */
	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				FL("PeerID %d not found use vdevID %d"),
				peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
							vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				FL("Invalid PeerID %d"),
				peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
/* Non-LFR: a missing peer simply means there is no vdev to deliver to. */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_DEBUG,
			FL("Peer not found for peerID %d"),
			peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

/**
 * dp_rx_da_learn() - Add AST entry based on DA lookup
 *			This is a WAR for HK 1.0 and will
 *			be removed in HK 2.0
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr	: start address of rx tlvs
 * @ta_peer	: Transmitter peer entry
 * @nbuf	: nbuf to retrieve destination mac for which AST will be added
 *
 */
#ifdef FEATURE_WDS
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
	/* For HKv2 DA port learing is not needed */
	if (qdf_likely(soc->ast_override_support))
		return;

	if (qdf_unlikely(!ta_peer))
		return;

	/* DA-based learning only applies on AP vdevs. */
	if (qdf_unlikely(ta_peer->vdev->opmode != wlan_op_mode_ap))
		return;

	/* Learn only unicast DAs that hardware could not resolve:
	 * da_is_valid false and not multicast/broadcast.
	 */
	if (qdf_unlikely(!hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
			 !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		dp_peer_add_ast(soc,
				ta_peer,
				qdf_nbuf_data(nbuf),
				CDP_TXRX_AST_TYPE_DA,
				IEEE80211_NODE_F_WDS_HM);
	}
}
#else
/* WDS disabled: DA learning is a no-op. */
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @ta_peer	: source peer entry
 * @rx_tlv_hdr	: start address of rx tlvs
 * @nbuf	: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
		   struct dp_peer *ta_peer,
		   uint8_t *rx_tlv_hdr,
		   qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */

	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
	     !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);

		/* NOTE(review): da_idx comes from hardware and indexes
		 * ast_table without a visible bounds check here - confirm
		 * the HAL guarantees it is within table size.
		 */
		ast_entry = soc->ast_table[da_idx];
		if (!ast_entry)
			return false;

		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
			ast_entry->is_active = TRUE;
			return false;
		}

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;
		/* TA peer cannot be same as peer(DA) on which AST is present
		 * this indicates a change in topology and that AST entries
		 * are yet to be updated.
		 */
		if (da_peer == ta_peer)
			return false;

		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
			len = qdf_nbuf_len(nbuf);

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(ta_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					return true;
				}
			}

			if (!dp_tx_send(ta_peer->vdev, nbuf)) {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
						 len);
				return true;
			} else {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
						 len);
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			       !ta_peer->bss_peer))) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
		len = qdf_nbuf_len(nbuf_copy);

		if (dp_tx_send(ta_peer->vdev, nbuf_copy)) {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
		}
	}
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocated memory for mesh receive stats and fill the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */

	if (rx_info == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	/* For decrypted frames also export the key index and, via the OS
	 * shim callback, the decryption key itself.
	 */
	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					&rx_info->rs_decryptkey[0],
					&peer->mac_addr.raw[0],
					rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
	/* Packed PHY word: mcs[7:0] | nss[15:8] | pkt_type[23:16] | bw[31:24] */
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	/* Hand ownership of rx_info to the nbuf cb; upper layers free it. */
	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
						rx_info->rs_flags,
						rx_info->rs_rssi,
						rx_info->rs_channel,
						rx_info->rs_ratephy1,
						rx_info->rs_keyix);

}

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet is matching any filter out
 * category and drop the packet if it matches.
 *
 * Return: status(0 indicates drop, 1 indicate to no drop)
 * (QDF_STATUS_SUCCESS means the frame matched a filter and should be
 * dropped; QDF_STATUS_E_FAILURE means no filter matched.)
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		/* Direction-based filters: From-DS, To-DS, and neither. */
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
				&& !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		/* Address-based filters: drop frames whose RA (addr1) or
		 * TA (addr2) equals this vdev's own MAC address.
		 */
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
					&mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					&vdev->mac_addr.raw[0],
					DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
					&mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					&vdev->mac_addr.raw[0],
					DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
/* Mesh support disabled: stats fill is a no-op. */
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

/* Mesh support disabled: never filter (E_FAILURE == no drop). */
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef CONFIG_WIN
/**
 *
 dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
		uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/* Only To-DS data frames can come from a non-associated client
	 * that is being tracked as a neighbour.
	 */
	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}


	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		/* If NAC monitoring is on, a matching non-associated client
		 * is delivered to the monitor path instead of umac.
		 */
		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handle */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						  pdev->invalid_peer_head_msdu,
						  pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}


		/* Match the frame's RA (addr1) against each vdev MAC to
		 * discover which vdev this invalid-peer frame belongs to.
		 */
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	/* Strip the rx TLVs so umac sees the 802.11 header directly. */
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
							   &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * return: integer type
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	/* Find the owning vdev by matching the frame's RA (addr1); the
	 * vdev list is walked under the pdev's vdev_list_lock.
	 */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				qdf_spin_unlock_bh(&pdev->vdev_list_lock);
				goto out;
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	if (NULL == vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/* Non-WIN variant: process unconditionally, ignoring mpdu_done. */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @rx_tlv: RX TLV for which offload information is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(uint8_t *rx_tlv)
{
	dp_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	dp_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	dp_debug("chksum 0x%x", HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	dp_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	dp_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	dp_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	dp_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	dp_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	dp_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	dp_debug("---------------------------------------------------------");
}

/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 *
 * Return: None
 */
static
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu)
{
	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	/* Filling up RX offload info only for TCP packets */
	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
		return;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		 HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		 HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		 HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);

	dp_rx_print_offload_info(rx_tlv);
}
#else
/* RECEIVE_OFFLOAD disabled: GRO info fill is a no-op. */
static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
				qdf_nbuf_t msdu)
{
}
#endif /* RECEIVE_OFFLOAD */

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else retuns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len > (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	/* NOTE(review): on the last nbuf this subtraction underflows the
	 * unsigned remainder; callers stop consuming *mpdu_len once
	 * last_nbuf is returned true, so the stale value appears unused -
	 * confirm no caller reads it afterwards.
	 */
	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		/* Detach the tail of the chain beyond the last fragment of
		 * this mpdu; it is re-attached to the parent below.
		 */
		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	/* nbuf here is the last fragment of the mpdu; clear its start bit
	 * before chaining the frag list onto the parent.
	 */
	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}

/*
 * dp_rx_deliver_to_stack() - hand a chained list of rx nbufs to the OS shim.
 * RAW / native-wifi decap vdevs get the list decapsulated by the RAW mode
 * simulation callback first; if the vdev has no rx callback registered the
 * whole list is freed.
 */
static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head,
					  qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		qdf_nbuf_t nbuf;
		do {
			nbuf = nbuf_head;
			nbuf_head = nbuf_head->next;
			qdf_nbuf_free(nbuf);
		} while (nbuf_head);

		return;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
				&nbuf_tail, (struct cdp_peer *) peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);

}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ipsumed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
1126 * 1127 * Return: void 1128 */ 1129 static inline void dp_rx_cksum_offload(struct dp_pdev *pdev, 1130 qdf_nbuf_t nbuf, 1131 uint8_t *rx_tlv_hdr) 1132 { 1133 qdf_nbuf_rx_cksum_t cksum = {0}; 1134 bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr); 1135 bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr); 1136 1137 if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) { 1138 cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY; 1139 qdf_nbuf_set_rx_cksum(nbuf, &cksum); 1140 } else { 1141 DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err); 1142 DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er); 1143 } 1144 } 1145 1146 /** 1147 * dp_rx_msdu_stats_update() - update per msdu stats. 1148 * @soc: core txrx main context 1149 * @nbuf: pointer to the first msdu of an amsdu. 1150 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 1151 * @peer: pointer to the peer object. 1152 * @ring_id: reo dest ring number on which pkt is reaped. 1153 * 1154 * update all the per msdu stats for that nbuf. 
1155 * Return: void 1156 */ 1157 static void dp_rx_msdu_stats_update(struct dp_soc *soc, 1158 qdf_nbuf_t nbuf, 1159 uint8_t *rx_tlv_hdr, 1160 struct dp_peer *peer, 1161 uint8_t ring_id) 1162 { 1163 bool is_ampdu, is_not_amsdu; 1164 uint16_t peer_id; 1165 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; 1166 struct dp_vdev *vdev = peer->vdev; 1167 struct ether_header *eh; 1168 uint16_t msdu_len = qdf_nbuf_len(nbuf); 1169 1170 peer_id = DP_PEER_METADATA_PEER_ID_GET( 1171 hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr)); 1172 1173 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 1174 qdf_nbuf_is_rx_chfrag_end(nbuf); 1175 1176 DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len); 1177 DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu); 1178 DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu); 1179 1180 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) && 1181 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 1182 eh = (struct ether_header *)qdf_nbuf_data(nbuf); 1183 DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len); 1184 if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) { 1185 DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len); 1186 1187 } 1188 } 1189 1190 /* 1191 * currently we can return from here as we have similar stats 1192 * updated at per ppdu level instead of msdu level 1193 */ 1194 if (!soc->process_rx_status) 1195 return; 1196 1197 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr); 1198 DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu); 1199 DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); 1200 1201 sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr); 1202 mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr); 1203 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr); 1204 bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr); 1205 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, 1206 rx_tlv_hdr); 1207 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); 1208 pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr); 1209 1210 /* 
Save tid to skb->priority */ 1211 DP_RX_TID_SAVE(nbuf, tid); 1212 1213 DP_STATS_INC(peer, rx.bw[bw], 1); 1214 DP_STATS_INC(peer, rx.nss[nss], 1); 1215 DP_STATS_INC(peer, rx.sgi_count[sgi], 1); 1216 DP_STATS_INCC(peer, rx.err.mic_err, 1, 1217 hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr)); 1218 DP_STATS_INCC(peer, rx.err.decrypt_err, 1, 1219 hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr)); 1220 1221 DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1); 1222 DP_STATS_INC(peer, rx.reception_type[reception_type], 1); 1223 1224 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 1225 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); 1226 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, 1227 ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A))); 1228 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 1229 ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B))); 1230 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, 1231 ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B))); 1232 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 1233 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N))); 1234 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, 1235 ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N))); 1236 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 1237 ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); 1238 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, 1239 ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); 1240 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, 1241 ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX))); 1242 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, 1243 ((mcs < MAX_MCS) && (pkt_type == DOT11_AX))); 1244 1245 if ((soc->process_rx_status) && 1246 hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) { 1247 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE 1248 if (!vdev->pdev) 1249 return; 1250 1251 
dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc, 1252 &peer->stats, peer_id, 1253 UPDATE_PEER_STATS, 1254 vdev->pdev->pdev_id); 1255 #endif 1256 1257 } 1258 } 1259 1260 #ifdef WDS_VENDOR_EXTENSION 1261 int dp_wds_rx_policy_check( 1262 uint8_t *rx_tlv_hdr, 1263 struct dp_vdev *vdev, 1264 struct dp_peer *peer, 1265 int rx_mcast 1266 ) 1267 { 1268 struct dp_peer *bss_peer; 1269 int fr_ds, to_ds, rx_3addr, rx_4addr; 1270 int rx_policy_ucast, rx_policy_mcast; 1271 1272 if (vdev->opmode == wlan_op_mode_ap) { 1273 TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) { 1274 if (bss_peer->bss_peer) { 1275 /* if wds policy check is not enabled on this vdev, accept all frames */ 1276 if (!bss_peer->wds_ecm.wds_rx_filter) { 1277 return 1; 1278 } 1279 break; 1280 } 1281 } 1282 rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr; 1283 rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr; 1284 } else { /* sta mode */ 1285 if (!peer->wds_ecm.wds_rx_filter) { 1286 return 1; 1287 } 1288 rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr; 1289 rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr; 1290 } 1291 1292 /* ------------------------------------------------ 1293 * self 1294 * peer- rx rx- 1295 * wds ucast mcast dir policy accept note 1296 * ------------------------------------------------ 1297 * 1 1 0 11 x1 1 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept 1298 * 1 1 0 01 x1 0 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop 1299 * 1 1 0 10 x1 0 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop 1300 * 1 1 0 00 x1 0 bad frame, won't see it 1301 * 1 0 1 11 1x 1 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept 1302 * 1 0 1 01 1x 0 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop 1303 * 1 0 1 10 1x 0 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not 
met; so, drop 1304 * 1 0 1 00 1x 0 bad frame, won't see it 1305 * 1 1 0 11 x0 0 AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop 1306 * 1 1 0 01 x0 0 AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop 1307 * 1 1 0 10 x0 1 AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept 1308 * 1 1 0 00 x0 0 bad frame, won't see it 1309 * 1 0 1 11 0x 0 AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop 1310 * 1 0 1 01 0x 0 AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop 1311 * 1 0 1 10 0x 1 AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept 1312 * 1 0 1 00 0x 0 bad frame, won't see it 1313 * 1314 * 0 x x 11 xx 0 we only accept td-ds Rx frames from non-wds peers in mode. 1315 * 0 x x 01 xx 1 1316 * 0 x x 10 xx 0 1317 * 0 x x 00 xx 0 bad frame, won't see it 1318 * ------------------------------------------------ 1319 */ 1320 1321 fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr); 1322 to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr); 1323 rx_3addr = fr_ds ^ to_ds; 1324 rx_4addr = fr_ds & to_ds; 1325 1326 if (vdev->opmode == wlan_op_mode_ap) { 1327 if ((!peer->wds_enabled && rx_3addr && to_ds) || 1328 (peer->wds_enabled && !rx_mcast && (rx_4addr == rx_policy_ucast)) || 1329 (peer->wds_enabled && rx_mcast && (rx_4addr == rx_policy_mcast))) { 1330 return 1; 1331 } 1332 } else { /* sta mode */ 1333 if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) || 1334 (rx_mcast && (rx_4addr == rx_policy_mcast))) { 1335 return 1; 1336 } 1337 } 1338 return 0; 1339 } 1340 #else 1341 int dp_wds_rx_policy_check( 1342 uint8_t *rx_tlv_hdr, 1343 struct dp_vdev *vdev, 1344 struct dp_peer *peer, 1345 int rx_mcast 1346 ) 1347 { 1348 return 1; 1349 } 1350 #endif 1351 1352 /** 1353 * dp_rx_process() - Brain of the Rx processing functionality 1354 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ) 1355 * @soc: core 
 *	 txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process(struct dp_intr *int_ctx, void *hal_ring,
		       uint8_t reo_ring_num, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	struct dp_peer *peer = NULL;
	struct dp_vdev *vdev = NULL;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
	struct hal_rx_msdu_desc_info msdu_desc_info = { 0 };
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t ring_id = 0;
	uint8_t core_id = 0;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;

	DP_HIST_INIT();
	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	hif_pm_runtime_mark_last_busy(soc->osdev->dev);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access Failed -- %pK"), hal_ring);
		hal_srng_access_end(hal_soc, hal_ring);
		goto done;
	}

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);

		/*
		 * in case HW has updated hp after we cached the hp
		 * ring_desc can be NULL even there are entries
		 * available in the ring. Update the cached_hp
		 * and reap the buffers available to read complete
		 * mpdu in one reap
		 *
		 * This is needed for RAW mode we have to read all
		 * msdus corresponding to amsdu in one reap to create
		 * SG list properly but due to mismatch in cached_hp
		 * and actual hp sometimes we are unable to read
		 * complete mpdu in one reap.
		 */
		if (qdf_unlikely(!ring_desc)) {
			/* re-snapshot HW head pointer and retry once */
			hal_srng_access_start_unlocked(hal_soc, hal_ring);
			ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);
			if (!ring_desc)
				break;
			DP_STATS_INC(soc, rx.hp_oos, 1);
			/*
			 * update TP here in case loop takes long,
			 * then the ring is easily full.
			 */
			hal_srng_access_end_unlocked(hal_soc, hal_ring);
		}

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		ring_id = hal_srng_ring_id_get(hal_ring);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING 0x%pK:error %d"), hal_ring, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		/*
		 * this is a unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring,
						   ring_desc, rx_desc);
		}

		rx_bufs_reaped[rx_desc->pool_id]++;

		/* TODO */
		/*
		 * Need a separate API for unmapping based on
		 * phyiscal address
		 */
		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
					QDF_DMA_BIDIRECTIONAL);

		core_id = smp_processor_id();
		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		/* stash peer metadata in the TLV area for the process loop */
		hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf),
						mpdu_desc_info.peer_meta_data);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;


		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (nbuf_tail)
		QDF_NBUF_CB_RX_FLUSH_IND(nbuf_tail) = 1;

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		pdev = soc->pdev_list[mac_id];
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
	}

	/* Peer can be NULL is case of LFR */
	if (qdf_likely(peer != NULL))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("MSDU DONE failure"));
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			qdf_assert(0);
		}

		/* peer metadata was stashed into the TLVs in the reap loop */
		peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
		peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
		/* takes a reference; released via dp_peer_unref_del_find_by_id */
		peer = dp_peer_find_by_id(soc, peer_id);

		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* vdev changed: flush the pending list to the stack first */
		if (deliver_list_head && peer && (vdev != peer->vdev)) {
			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		if (qdf_likely(peer != NULL)) {
			vdev = peer->vdev;
		} else {
			DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
					 qdf_nbuf_len(nbuf));
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(vdev == NULL)) {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller then the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf)))
			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		else if (qdf_unlikely(vdev->rx_decap_type ==
				htt_cmn_pkt_type_raw)) {
			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
			nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);

			DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
			DP_STATS_INC_PKT(peer, rx.raw, 1,
					 msdu_len);

			/* sg_create consumed trailing nbufs; resync next */
			next = nbuf->next;
		} else {
			l2_hdr_offset =
				hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);

			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
			pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			qdf_nbuf_pull_head(nbuf,
					   RX_PKT_TLVS_LEN +
					   l2_hdr_offset);
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("Policy Check Drop pkt"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		if (qdf_unlikely(peer && peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("received pkt with same src MAC"));
			DP_STATS_INC_PKT(peer, rx.mec_drop, 1, msdu_len);

			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
				 (hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) {
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		dp_set_rx_queue(nbuf, ring_id);

		/*
		 * HW structures call this L3 header padding --
		 * even though this is actually the offset from
		 * the buffer beginning where the L2 header
		 * begins.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			FL("rxhash: flow id toeplitz: 0x%x"),
			hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf,
							rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_INFO_MED,
					FL("mesh pkt filtered"));
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					1);

				qdf_nbuf_free(nbuf);
				nbuf = next;
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
		}

#ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "p_id %d msdu_len %d hdr_off %d",
			  peer_id, msdu_len, l2_hdr_offset);

		print_hex_dump(KERN_ERR,
			       "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
				qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* WDS Destination Address Learning */
			if (vdev->da_war_enabled)
				dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);

			/* WDS Source Port Learning */
			if (vdev->wds_enabled)
				dp_rx_wds_srcport_learn(soc, rx_tlv_hdr,
							peer, nbuf);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd(soc,
							peer,
							rx_tlv_hdr,
							nbuf)) {
					nbuf = next;
					dp_peer_unref_del_find_by_id(peer);
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf);
		qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);
		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 qdf_nbuf_len(nbuf));

		nbuf = next;
		dp_peer_unref_del_find_by_id(peer);
	}

	/*
	 * NOTE(review): peer's reference was already dropped above; confirm
	 * the final flush cannot race with peer deletion.
	 */
	if (deliver_list_head)
		dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
				       deliver_list_tail);

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_pdev_detach() - detach dp rx
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX into main device context
 * will free DP Rx resources.
 *
 * Return: void
 */
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];

	/* nothing to free if attach skipped pool allocation (pool_size 0) */
	if (rx_desc_pool->pool_size != 0) {
		dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
	}

	return;
}

/**
 * dp_rx_pdev_attach() - attach DP RX
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context. Will allocate dp rx resource and
 * initialize resources.
1837 * 1838 * Return: QDF_STATUS_SUCCESS: success 1839 * QDF_STATUS_E_RESOURCES: Error return 1840 */ 1841 QDF_STATUS 1842 dp_rx_pdev_attach(struct dp_pdev *pdev) 1843 { 1844 uint8_t pdev_id = pdev->pdev_id; 1845 struct dp_soc *soc = pdev->soc; 1846 uint32_t rxdma_entries; 1847 union dp_rx_desc_list_elem_t *desc_list = NULL; 1848 union dp_rx_desc_list_elem_t *tail = NULL; 1849 struct dp_srng *dp_rxdma_srng; 1850 struct rx_desc_pool *rx_desc_pool; 1851 1852 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 1853 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 1854 "nss-wifi<4> skip Rx refil %d", pdev_id); 1855 return QDF_STATUS_SUCCESS; 1856 } 1857 1858 pdev = soc->pdev_list[pdev_id]; 1859 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 1860 rxdma_entries = dp_rxdma_srng->num_entries; 1861 1862 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 1863 1864 rx_desc_pool = &soc->rx_desc_buf[pdev_id]; 1865 dp_rx_desc_pool_alloc(soc, pdev_id, 1866 DP_RX_DESC_ALLOC_MULTIPLIER * rxdma_entries, 1867 rx_desc_pool); 1868 1869 rx_desc_pool->owner = DP_WBM2SW_RBM; 1870 /* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */ 1871 1872 dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool, 1873 0, &desc_list, &tail); 1874 1875 return QDF_STATUS_SUCCESS; 1876 } 1877 1878 /* 1879 * dp_rx_nbuf_prepare() - prepare RX nbuf 1880 * @soc: core txrx main context 1881 * @pdev: core txrx pdev context 1882 * 1883 * This function alloc & map nbuf for RX dma usage, retry it if failed 1884 * until retry times reaches max threshold or succeeded. 1885 * 1886 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed. 
1887 */ 1888 qdf_nbuf_t 1889 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev) 1890 { 1891 uint8_t *buf; 1892 int32_t nbuf_retry_count; 1893 QDF_STATUS ret; 1894 qdf_nbuf_t nbuf = NULL; 1895 1896 for (nbuf_retry_count = 0; nbuf_retry_count < 1897 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD; 1898 nbuf_retry_count++) { 1899 /* Allocate a new skb */ 1900 nbuf = qdf_nbuf_alloc(soc->osdev, 1901 RX_BUFFER_SIZE, 1902 RX_BUFFER_RESERVATION, 1903 RX_BUFFER_ALIGNMENT, 1904 FALSE); 1905 1906 if (nbuf == NULL) { 1907 DP_STATS_INC(pdev, 1908 replenish.nbuf_alloc_fail, 1); 1909 continue; 1910 } 1911 1912 buf = qdf_nbuf_data(nbuf); 1913 1914 memset(buf, 0, RX_BUFFER_SIZE); 1915 1916 ret = qdf_nbuf_map_single(soc->osdev, nbuf, 1917 QDF_DMA_BIDIRECTIONAL); 1918 1919 /* nbuf map failed */ 1920 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 1921 qdf_nbuf_free(nbuf); 1922 DP_STATS_INC(pdev, replenish.map_err, 1); 1923 continue; 1924 } 1925 /* qdf_nbuf alloc and map succeeded */ 1926 break; 1927 } 1928 1929 /* qdf_nbuf still alloc or map failed */ 1930 if (qdf_unlikely(nbuf_retry_count >= 1931 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD)) 1932 return NULL; 1933 1934 return nbuf; 1935 } 1936