/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"

#ifdef RX_DESC_DEBUG_CHECK
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
}
#else
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
}
#endif

#ifdef CONFIG_WIN
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
#else
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->opmode != wlan_op_mode_sta;
}
#endif

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc, void *hal_ring,
				void *ring_desc, struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring);
	qdf_assert_always(rx_desc->in_use);
}

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: QDF_STATUS_SUCCESS on success, failure code otherwise
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;

	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);

	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   RX_BUFFER_SIZE,
					   RX_BUFFER_RESERVATION,
					   RX_BUFFER_ALIGNMENT,
					   FALSE);

		if (rx_netbuf == NULL) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_BIDIRECTIONAL);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		/*
		 * check if the physical address of nbuf->data is
		 * less than 0x50000000; if so, free the nbuf and try
		 * allocating a new nbuf. We can retry up to 100 times.
		 * This is a temp WAR till we fix it properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
		(*desc_list)->rx_desc.in_use = 1;

		dp_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
			 rx_netbuf, qdf_nbuf_data(rx_netbuf),
			 (unsigned long long)paddr,
			 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_debug("replenished buffers %d, rx desc added back to free list %u",
		 num_req_buffers, num_desc_to_free);

	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
			 (RX_BUFFER_SIZE * num_req_buffers));

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
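
/*
 * Illustrative usage sketch (not part of the original file): after reaping
 * N buffers from the REO ring, dp_rx_process() hands the freed descriptors
 * straight back for replenish, e.g.:
 *
 *	dp_rx_buffers_replenish(soc, mac_id, &pdev->rx_refill_buf_ring,
 *				&soc->rx_desc_buf[mac_id],
 *				rx_bufs_reaped[mac_id],
 *				&head[mac_id], &tail[mac_id]);
 *
 * With a non-NULL desc_list the function skips the freelist allocation and
 * reuses the descriptors collected during the reap phase.
 */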

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, (struct cdp_peer *)peer);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("PeerID %d not found use vdevID %d"),
				  peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("Invalid PeerID %d"),
				  peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  FL("Peer not found for peerID %d"),
			  peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

/**
 * dp_rx_da_learn() - Add AST entry based on DA lookup
 *			This is a WAR for HK 1.0 and will
 *			be removed in HK 2.0
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_peer: Transmitter peer entry
 * @nbuf: nbuf to retrieve destination mac for which AST will be added
 *
 */
#ifdef FEATURE_WDS
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
	/* For HKv2 DA port learning is not needed */
	if (qdf_likely(soc->ast_override_support))
		return;

	if (qdf_unlikely(!ta_peer))
		return;

	if (qdf_unlikely(ta_peer->vdev->opmode != wlan_op_mode_ap))
		return;

	if (qdf_unlikely(!hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
			 !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		dp_peer_add_ast(soc,
				ta_peer,
				qdf_nbuf_data(nbuf),
				CDP_TXRX_AST_TYPE_DA,
				IEEE80211_NODE_F_WDS_HM);
	}
}
#else
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
		   struct dp_peer *ta_peer,
		   uint8_t *rx_tlv_hdr,
		   qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */

	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
	     !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);

		ast_entry = soc->ast_table[da_idx];
		if (!ast_entry)
			return false;

		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
			ast_entry->is_active = TRUE;
			return false;
		}

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;
		/* TA peer cannot be same as peer(DA) on which AST is present
		 * this indicates a change in topology and that AST entries
		 * are yet to be updated.
		 */
		if (da_peer == ta_peer)
			return false;

		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
			len = qdf_nbuf_len(nbuf);

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(ta_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					return true;
				}
			}

			if (!dp_tx_send(ta_peer->vdev, nbuf)) {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
						 len);
				return true;
			} else {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
						 len);
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			       !ta_peer->bss_peer))) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
		len = qdf_nbuf_len(nbuf_copy);

		if (dp_tx_send(ta_peer->vdev, nbuf_copy)) {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
		}
	}
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}
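
/*
 * Illustrative decision flow for dp_rx_intrabss_fwd() (not part of the
 * original file). For a unicast frame with a valid DA in the AST table:
 *
 *	da_idx  = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);
 *	da_peer = soc->ast_table[da_idx]->peer;
 *	if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer)
 *		-> transmit via dp_tx_send() and consume the nbuf
 *
 * Multicast/broadcast frames are instead cloned: the copy is transmitted
 * back into the BSS while the original still goes up the stack, which is
 * why the function returns false on that path.
 */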

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */

	if (rx_info == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					   &rx_info->rs_decryptkey[0],
					   &peer->mac_addr.raw[0],
					   rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
			       (bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix);
}
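
/*
 * Illustrative layout of rs_ratephy1 as packed above (not part of the
 * original file):
 *
 *	bits  0..7  : rate_mcs
 *	bits  8..15 : nss
 *	bits 16..23 : pkt_type
 *	bits 24..31 : bw
 *
 * e.g. rate_mcs = 7, nss = 2, pkt_type = 3, bw = 1 packs to
 * 0x7 | (0x2 << 8) | (0x3 << 16) | (0x1 << 24) = 0x01030207.
 */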

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any filter-out
 * category and drops the packet if it matches.
 *
 * Return: QDF_STATUS_SUCCESS indicates the packet is to be dropped,
 *	   QDF_STATUS_E_FAILURE indicates the packet is not to be dropped
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
			    && !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif
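
/*
 * Illustrative example (not part of the original file): with
 *
 *	vdev->mesh_rx_filter = MESH_FILTER_OUT_FROMDS | MESH_FILTER_OUT_RA;
 *
 * dp_rx_filter_mesh_packets() returns QDF_STATUS_SUCCESS (drop) for any
 * frame with the from-DS bit set, and likewise for any frame whose
 * receiver address (addr1) equals the vdev MAC address; everything else
 * falls through and returns QDF_STATUS_E_FAILURE (deliver).
 */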

#ifdef CONFIG_WIN
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handled */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						  pdev->invalid_peer_head_msdu,
						  pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
							   &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * return: void
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				qdf_spin_unlock_bh(&pdev->vdev_list_lock);
				goto out;
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	if (NULL == vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(curr_nbuf));
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @rx_tlv: RX TLV for which offload information is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(uint8_t *rx_tlv)
{
	dp_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	dp_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	dp_debug("chksum 0x%x", HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	dp_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	dp_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	dp_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	dp_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	dp_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	dp_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	dp_debug("---------------------------------------------------------");
}

/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 *
 * Return: None
 */
static
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu)
{
	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	/* Filling up RX offload info only for TCP packets */
	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
		return;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);

	dp_rx_print_offload_info(rx_tlv);
}
#else
static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
				qdf_nbuf_t msdu)
{
}
#endif /* RECEIVE_OFFLOAD */

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len > (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}
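
/*
 * Worked example for dp_rx_adjust_nbuf_len() (illustrative, not part of the
 * original file). Assume RX_BUFFER_SIZE = 2048 and RX_PKT_TLVS_LEN = 384,
 * i.e. 1664 payload bytes per buffer, and an incoming mpdu_len of 2304:
 *
 *	1st call: 2304 > 1664 -> pktlen = 2048, mpdu_len = 640, last = false
 *	2nd call:  640 <= 1664 -> pktlen = 640 + 384 = 1024,    last = true
 *
 * so the 2304-byte MSDU spans two nbufs, matching the SG handling in
 * dp_rx_sg_create() below.
 */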

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}
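
/*
 * Illustrative result of dp_rx_sg_create() (not part of the original file)
 * for an MSDU reaped into three buffers:
 *
 *	before:  nbuf0 -> nbuf1 -> nbuf2 -> nbuf3 (next MPDU) -> ...
 *	after:   parent (nbuf0, ext_list = nbuf1 -> nbuf2) -> nbuf3 -> ...
 *
 * nbuf0 becomes the parent carrying the full MPDU length, nbuf1/nbuf2 are
 * chained as its skb frag_list with their TLV headers pulled, and the
 * parent's ->next is re-linked to the first nbuf of the following MPDU.
 */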

static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head,
					  qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		qdf_nbuf_t nbuf;

		do {
			nbuf = nbuf_head;
			nbuf_head = nbuf_head->next;
			qdf_nbuf_free(nbuf);
		} while (nbuf_head);

		return;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail, (struct cdp_peer *)peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: core txrx pdev context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ipsumed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
				       qdf_nbuf_t nbuf,
				       uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}
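
/*
 * Outcome summary for dp_rx_cksum_offload() (illustrative, not part of the
 * original file):
 *
 *	ip_csum_err  tcp_udp_csum_er  action
 *	-----------  ---------------  -----------------------------------
 *	0            0                mark L4 cksum unnecessary in skb cb
 *	1            x                bump pdev err.ip_csum_err only
 *	x            1                bump pdev err.tcp_udp_csum_err only
 *
 * On any HW-reported failure the cb is left untouched, so the stack falls
 * back to verifying the checksum in software.
 */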

/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @peer: pointer to the peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 *
 * update all the per msdu stats for that nbuf.
 *
 * Return: void
 */
static void dp_rx_msdu_stats_update(struct dp_soc *soc,
				    qdf_nbuf_t nbuf,
				    uint8_t *rx_tlv_hdr,
				    struct dp_peer *peer,
				    uint8_t ring_id)
{
	bool is_ampdu, is_not_amsdu;
	uint16_t peer_id;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	struct dp_vdev *vdev = peer->vdev;
	struct ether_header *eh;
	uint16_t msdu_len = qdf_nbuf_len(nbuf);

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
			hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr));

	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
		       qdf_nbuf_is_rx_chfrag_end(nbuf);

	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
		if (IEEE80211_IS_BROADCAST(eh->ether_dhost))
			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
	}

	/*
	 * currently we can return from here as we have similar stats
	 * updated at per ppdu level instead of msdu level
	 */
	if (!soc->process_rx_status)
		return;

	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);

	/* Save tid to skb->priority */
	DP_RX_TID_SAVE(nbuf, tid);

	DP_STATS_INC(peer, rx.bw[bw], 1);
	DP_STATS_INC(peer, rx.nss[nss], 1);
	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
	DP_STATS_INCC(peer, rx.err.mic_err, 1,
		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));

	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);

	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));

	if ((soc->process_rx_status) &&
	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
		if (!vdev->pdev)
			return;

		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
				     &peer->stats, peer_id,
				     UPDATE_PEER_STATS,
				     vdev->pdev->pdev_id);
#endif
	}
}
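
/*
 * Note on the paired DP_STATS_INCC() calls above (illustrative, not part
 * of the original file): for each PHY type the first conditional routes an
 * out-of-range MCS into the last bucket and the second counts in-range
 * values, e.g. for DOT11_A:
 *
 *	mcs >= MAX_MCS_11A  ->  mcs_count[MAX_MCS - 1]++   (overflow bucket)
 *	mcs <= MAX_MCS_11A  ->  mcs_count[mcs]++
 *
 * so an MCS value the PHY type cannot carry still lands in a valid counter
 * instead of indexing past the array (both conditions fire at the exact
 * boundary value, as written in the original code).
 */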

#ifdef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_peer *peer,
			   int rx_mcast)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;

	if (vdev->opmode == wlan_op_mode_ap) {
		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
			if (bss_peer->bss_peer) {
				/* if wds policy check is not enabled on this
				 * vdev, accept all frames
				 */
				if (!bss_peer->wds_ecm.wds_rx_filter) {
					return 1;
				}
				break;
			}
		}
		rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
	} else {             /* sta mode */
		if (!peer->wds_ecm.wds_rx_filter) {
			return 1;
		}
		rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *                        self
	 * peer-             rx   rx-
	 * wds  ucast mcast  dir  policy accept note
	 * ------------------------------------------------
	 * 1    1     0      11   x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1    1     0      01   x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1    1     0      10   x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1    1     0      00   x1     0      bad frame, won't see it
	 * 1    0     1      11   1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1    0     1      01   1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1    0     1      10   1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1    0     1      00   1x     0      bad frame, won't see it
	 * 1    1     0      11   x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1    1     0      01   x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1    1     0      10   x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1    1     0      00   x0     0      bad frame, won't see it
	 * 1    0     1      11   0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1    0     1      01   0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1    0     1      10   0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1    0     1      00   0x     0      bad frame, won't see it
	 *
	 * 0    x     x      11   xx     0      we only accept to-ds Rx frames from non-wds peers in this mode
	 * 0    x     x      01   xx     1
	 * 0    x     x      10   xx     0
	 * 0    x     x      00   xx     0      bad frame, won't see it
	 * ------------------------------------------------
	 */

	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!peer->wds_enabled && rx_3addr && to_ds) ||
		    (peer->wds_enabled && !rx_mcast &&
		     (rx_4addr == rx_policy_ucast)) ||
		    (peer->wds_enabled && rx_mcast &&
		     (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {             /* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
		    (rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
#else
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_peer *peer,
			   int rx_mcast)
{
	return 1;
}
#endif
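
/*
 * Worked example for dp_wds_rx_policy_check() (illustrative, not part of
 * the original file). AP mode, wds peer, unicast frame received with
 * fr_ds = 1 and to_ds = 1 ("11" in the table above):
 *
 *	rx_3addr = 1 ^ 1 = 0;
 *	rx_4addr = 1 & 1 = 1;
 *	accept   = peer->wds_enabled && !rx_mcast &&
 *		   (rx_4addr == rx_policy_ucast);	// 1 when policy is x1
 *
 * which matches the first "accept" row of the table; the same frame is
 * dropped when the ucast policy is x0.
 */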

/**
 * dp_rx_process() - Brain of the Rx processing functionality
 *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process(struct dp_intr *int_ctx, void *hal_ring,
		       uint8_t reo_ring_num, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	struct dp_peer *peer = NULL;
	struct dp_vdev *vdev = NULL;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
	struct hal_rx_msdu_desc_info msdu_desc_info = { 0 };
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t ring_id = 0;
	uint8_t core_id = 0;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;

	DP_HIST_INIT();
	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	hif_pm_runtime_mark_last_busy(soc->osdev->dev);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		hal_srng_access_end(hal_soc, hal_ring);
		goto done;
	}

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);

		/*
		 * in case HW has updated hp after we cached the hp,
		 * ring_desc can be NULL even when there are entries
		 * available in the ring. Update the cached_hp
		 * and reap the buffers available to read a complete
		 * mpdu in one reap.
		 *
		 * This is needed for RAW mode: we have to read all
		 * msdus corresponding to an amsdu in one reap to create
		 * the SG list properly, but due to mismatch in cached_hp
		 * and actual hp sometimes we are unable to read the
		 * complete mpdu in one reap.
		 */
		if (qdf_unlikely(!ring_desc)) {
			hal_srng_access_start_unlocked(hal_soc, hal_ring);
			ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);
			if (!ring_desc)
				break;
			DP_STATS_INC(soc, rx.hp_oos, 1);
			/*
			 * update TP here in case loop takes long,
			 * then the ring is easily full.
			 */
			hal_srng_access_end_unlocked(hal_soc, hal_ring);
		}

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		ring_id = hal_srng_ring_id_get(hal_ring);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("HAL RING 0x%pK:error %d"), hal_ring, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring,
						   ring_desc, rx_desc);
		}

		rx_bufs_reaped[rx_desc->pool_id]++;

		/* TODO */
		/*
		 * Need a separate API for unmapping based on
		 * physical address
		 */
		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
				      QDF_DMA_BIDIRECTIONAL);

		core_id = smp_processor_id();
		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf),
					       mpdu_desc_info.peer_meta_data);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (nbuf_tail)
		QDF_NBUF_CB_RX_FLUSH_IND(nbuf_tail) = 1;

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		pdev = soc->pdev_list[mac_id];
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
	}

	/* Peer can be NULL in case of LFR */
	if (qdf_likely(peer != NULL))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("MSDU DONE failure"));
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			qdf_assert(0);
		}

		peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
		peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
		peer = dp_peer_find_by_id(soc, peer_id);

		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		if (deliver_list_head && peer && (vdev != peer->vdev)) {
			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		if (qdf_likely(peer != NULL)) {
			vdev = peer->vdev;
		} else {
			DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
					 qdf_nbuf_len(nbuf));
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(vdev == NULL)) {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *	  nbuf_alloc. but we received an msdu which is
		 *	  2304 bytes in size then this msdu is spread
		 *	  across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *	  across 1st nbuf and 2nd nbuf and last MSDU is
		 *	  spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf)))
			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		else if (qdf_unlikely(vdev->rx_decap_type ==
				      htt_cmn_pkt_type_raw)) {
			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
			nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);

			DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
			DP_STATS_INC_PKT(peer, rx.raw, 1,
					 msdu_len);

			next = nbuf->next;
		} else {
			l2_hdr_offset =
				hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);

			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
			pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			qdf_nbuf_pull_head(nbuf,
					   RX_PKT_TLVS_LEN +
					   l2_hdr_offset);
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
					    hal_rx_msdu_end_da_is_mcbc_get(
							rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("Policy Check Drop pkt"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		if (qdf_unlikely(peer && peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("received pkt with same src MAC"));
			DP_STATS_INC_PKT(peer, rx.mec_drop, 1, msdu_len);

			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
				 (hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) ==
				  false))) {
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		dp_set_rx_queue(nbuf, ring_id);

		/*
		 * HW structures call this L3 header padding --
		 * even though this is actually the offset from
		 * the buffer beginning where the L2 header
		 * begins.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("rxhash: flow id toeplitz: 0x%x"),
			  hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_MED,
					  FL("mesh pkt filtered"));
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				qdf_nbuf_free(nbuf);
				nbuf = next;
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
		}

#ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "p_id %d msdu_len %d hdr_off %d",
			  peer_id, msdu_len, l2_hdr_offset);

		print_hex_dump(KERN_ERR,
			       "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
			       qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* WDS Destination Address Learning */
			if (vdev->da_war_enabled)
				dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);

			/* WDS Source Port Learning */
			if (vdev->wds_enabled)
				dp_rx_wds_srcport_learn(soc, rx_tlv_hdr,
							peer, nbuf);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd(soc,
						       peer,
						       rx_tlv_hdr,
						       nbuf)) {
					nbuf = next;
					dp_peer_unref_del_find_by_id(peer);
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf);
		qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);
		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 qdf_nbuf_len(nbuf));

		nbuf = next;
		dp_peer_unref_del_find_by_id(peer);
	}

	if (deliver_list_head)
		dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
				       deliver_list_tail);

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_pdev_detach() - detach dp rx
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX from the main device context
 * and will free DP Rx resources.
 *
 * Return: void
 */
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];

	if (rx_desc_pool->pool_size != 0)
		dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
}

/**
 * dp_rx_pdev_attach() - attach DP RX
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context. Will allocate DP RX resources and
 * initialize them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "nss-wifi<4> skip Rx refill %d", pdev_id);
		return QDF_STATUS_SUCCESS;
	}

	pdev = soc->pdev_list[pdev_id];
	dp_rxdma_srng = &pdev->rx_refill_buf_ring;
	rxdma_entries = dp_rxdma_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];
	dp_rx_desc_pool_alloc(soc, pdev_id,
			      DP_RX_DESC_ALLOC_MULTIPLIER * rxdma_entries,
			      rx_desc_pool);

	rx_desc_pool->owner = DP_WBM2SW_RBM;
	/* For Rx buffers, WBM release ring is SW RING 3, for all pdev's */

	dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool,
				0, &desc_list, &tail);

	return QDF_STATUS_SUCCESS;
}
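
/*
 * Sizing note for dp_rx_pdev_attach() (illustrative, not part of the
 * original file): the descriptor pool is over-provisioned relative to the
 * refill ring; e.g. with rxdma_entries = 4096 and an assumed
 * DP_RX_DESC_ALLOC_MULTIPLIER of 3, the pool holds 12288 descriptors, so
 * buffers still held by the stack do not starve the replenish path.
 * The initial replenish passes num_req_buffers = 0 with an empty desc_list;
 * dp_rx_buffers_replenish() then sizes the request from the ring occupancy
 * (the "3/4 empty" branch above) and pulls descriptors from the freelist.
 */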

/*
 * dp_rx_nbuf_prepare() - prepare RX nbuf
 * @soc: core txrx main context
 * @pdev: core txrx pdev context
 *
 * This function allocates and maps an nbuf for RX DMA usage, retrying on
 * failure until the retry count reaches the max threshold or the operation
 * succeeds.
 *
 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
 */
qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint8_t *buf;
	int32_t nbuf_retry_count;
	QDF_STATUS ret;
	qdf_nbuf_t nbuf = NULL;

	for (nbuf_retry_count = 0; nbuf_retry_count <
		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
			nbuf_retry_count++) {
		/* Allocate a new skb */
		nbuf = qdf_nbuf_alloc(soc->osdev,
				      RX_BUFFER_SIZE,
				      RX_BUFFER_RESERVATION,
				      RX_BUFFER_ALIGNMENT,
				      FALSE);

		if (nbuf == NULL) {
			DP_STATS_INC(pdev,
				     replenish.nbuf_alloc_fail, 1);
			continue;
		}

		buf = qdf_nbuf_data(nbuf);

		memset(buf, 0, RX_BUFFER_SIZE);

		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
					  QDF_DMA_BIDIRECTIONAL);

		/* nbuf map failed */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(pdev, replenish.map_err, 1);
			continue;
		}
		/* qdf_nbuf alloc and map succeeded */
		break;
	}

	/* qdf_nbuf alloc or map still failed after all retries */
	if (qdf_unlikely(nbuf_retry_count >=
			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
		return NULL;

	return nbuf;
}
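
/*
 * Illustrative usage sketch for dp_rx_nbuf_prepare() (not part of the
 * original file): a caller that must not silently drop a buffer can rely
 * on the bounded retry loop instead of handling alloc/map failures itself:
 *
 *	qdf_nbuf_t nbuf = dp_rx_nbuf_prepare(soc, pdev);
 *
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;	// retries exhausted
 *	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *	// ... program paddr into the ring entry ...
 */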