/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"

#ifdef RX_DESC_DEBUG_CHECK
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
}
#else
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
}
#endif

#ifdef CONFIG_WIN
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
#else
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	if (vdev->opmode != wlan_op_mode_sta)
		return true;
	else
		return false;
}
#endif
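/*
 * Sizing note for dp_rx_buffers_replenish() below (illustrative numbers,
 * not taken from any particular configuration): with a 1024-entry refill
 * ring, a call made without a pre-allocated desc_list bumps the request up
 * to the number of free ring entries whenever more than 768 (3/4 of the
 * ring) are empty; conversely, a request for 256 buffers when only 100
 * entries are free is trimmed to 100, the shortfall of 156 is accounted in
 * num_desc_to_free, and any left-over entries on desc_list are returned to
 * the free list at the end of the function.
 */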
/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);

	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   RX_BUFFER_SIZE,
					   RX_BUFFER_RESERVATION,
					   RX_BUFFER_ALIGNMENT,
					   FALSE);

		if (rx_netbuf == NULL) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_BIDIRECTIONAL);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}
		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		/*
		 * check if the physical address of nbuf->data is
		 * less than 0x50000000 then free the nbuf and try
		 * allocating new nbuf. We can try for 100 times.
		 * this is a temp WAR till we fix it properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
		(*desc_list)->rx_desc.in_use = 1;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
			  rx_netbuf, qdf_nbuf_data(rx_netbuf),
			  (unsigned long long)paddr,
			  (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "successfully replenished %d buffers", num_req_buffers);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%d rx desc added back to free list", num_desc_to_free);

	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
			 (RX_BUFFER_SIZE * num_req_buffers));

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, (struct cdp_peer *)peer);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("PeerID %d not found use vdevID %d"),
				  peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("Invalid PeerID %d"),
				  peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  FL("Peer not found for peerID %d"),
			  peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @sa_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
		   struct dp_peer *sa_peer,
		   uint8_t *rx_tlv_hdr,
		   qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;
	struct dp_vdev *vdev = sa_peer->vdev;

	/*
	 * intrabss forwarding is not applicable if
	 * vap is nawds enabled or ap_bridge is false.
	 */
	if (vdev->nawds_enabled)
		return false;

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */
	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
	     !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		da_idx = hal_rx_msdu_end_da_idx_get(rx_tlv_hdr);

		ast_entry = soc->ast_table[da_idx];
		if (!ast_entry)
			return false;

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;

		if (da_peer->vdev == sa_peer->vdev && !da_peer->bss_peer) {
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
			len = qdf_nbuf_len(nbuf);

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(sa_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					return true;
				}
			}

			if (!dp_tx_send(sa_peer->vdev, nbuf)) {
				DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts,
						 1, len);
				return true;
			} else {
				DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1,
						 len);
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			       !sa_peer->bss_peer))) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
		len = qdf_nbuf_len(nbuf_copy);

		if (dp_tx_send(sa_peer->vdev, nbuf_copy)) {
			DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1, len);
			qdf_nbuf_free(nbuf_copy);
		} else
			DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts, 1, len);
	}
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for the mesh receive stats and fills in
 * the required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */

	if (rx_info == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					   &rx_info->rs_decryptkey[0],
					   &peer->mac_addr.raw[0],
					   rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);
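	/*
	 * rs_ratephy1 layout, as packed above: bits 0-7 carry rate_mcs,
	 * bits 8-15 nss, bits 16-23 pkt_type and bits 24-31 bw. For
	 * example, rate_mcs 7, nss 2, pkt_type 3 and bw 1 pack to
	 * 0x01030207.
	 */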
	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix);
}

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks whether the received packet matches any of the configured
 * filter-out categories and drops the packet if it matches.
 *
 * Return: QDF_STATUS_SUCCESS if the packet is to be filtered out (dropped
 *	   by the caller), QDF_STATUS_E_FAILURE if it is to be delivered.
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
			    && !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef CONFIG_WIN
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}
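/*
 * Note: dp_rx_nac_filter() is invoked from dp_rx_process_invalid_peer()
 * when pdev->filter_neighbour_peers is set; a ToDS data frame whose
 * transmitter address matches a configured neighbour (NAC) entry is
 * redirected to the monitor vdev and delivered through dp_rx_mon_deliver()
 * instead of being dropped.
 */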
/**
 * dp_rx_process_nac_rssi_frames(): Store RSSI for configured NAC
 * @pdev: DP pdev handle
 * @rx_tlv_hdr: tlv hdr buf
 *
 * return: None
 */
#ifdef ATH_SUPPORT_NAC_RSSI
static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc = pdev->soc;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (pdev->nac_rssi_filtering) {
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->cdp_nac_rssi_enabled &&
			    (qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
					 wh->i_addr1, DP_MAC_ADDR_LEN) == 0)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  "RSSI updated");
				vdev->cdp_nac_rssi.vdev_id = vdev->vdev_id;
				vdev->cdp_nac_rssi.client_rssi =
					hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
				dp_wdi_event_handler(WDI_EVENT_NAC_RSSI, soc,
						     (void *)&vdev->cdp_nac_rssi,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL,
						     pdev->pdev_id);
			}
		}
	}
}
#else
static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr)
{
}
#endif

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handled */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						  pdev->invalid_peer_head_msdu,
						  pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}

		dp_rx_process_nac_rssi_frames(pdev, rx_tlv_hdr);

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->osif_pdev,
							   &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * return: void
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;

	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		/* Drop and free packet */
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(curr_nbuf));
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}
	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* To avoid compiler warning */
	mpdu_done = mpdu_done;

	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif

#if defined(FEATURE_LRO)
static void dp_rx_print_lro_info(uint8_t *rx_tlv)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("----------------------RX DESC LRO----------------------\n"));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("lro_eligible 0x%x"), HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("pure_ack 0x%x"), HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("chksum 0x%x"), HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("TCP seq num 0x%x"), HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("TCP ack num 0x%x"), HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("TCP window 0x%x"), HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("TCP protocol 0x%x"), HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("TCP offset 0x%x"), HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("toeplitz 0x%x"), HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("---------------------------------------------------------\n"));
}

/**
 * dp_rx_lro() - LRO related processing
 * @rx_tlv: TLV data extracted from the rx packet
 * @peer: destination peer of the msdu
 * @msdu: network buffer
 * @ctx: LRO context
 *
 * This function performs the LRO related processing of the msdu
 *
 * Return: void
 */
static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
		      qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
{
	if (!peer || !peer->vdev || !peer->vdev->lro_enable) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("no peer, no vdev or LRO disabled"));
		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0;
		return;
	}
	qdf_assert(rx_tlv);
	dp_rx_print_lro_info(rx_tlv);
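	/*
	 * Copy the HW-parsed TCP/flow fields from the Rx TLVs into the
	 * nbuf control block so that the LRO layer can make its
	 * aggregation decision without re-parsing the packet headers.
	 */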
	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
	QDF_NBUF_CB_RX_LRO_CTX(msdu) = (unsigned char *)ctx;
}
#else
static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
		      qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
{
}
#endif

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}
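/*
 * Example of the accounting above, using the 2048-byte buffer and
 * 2304-byte msdu sizes mentioned in dp_rx_process(): the first nbuf can
 * hold (2048 - RX_PKT_TLVS_LEN) bytes of payload, so the first call sets
 * its pktlen to the full 2048 and returns false; the residual
 * (2304 - (2048 - RX_PKT_TLVS_LEN)) bytes land in the second nbuf, whose
 * pktlen is set to residual + RX_PKT_TLVS_LEN and for which the function
 * returns true (last buffer of the msdu).
 */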
/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
		qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}

static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head,
					  qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		qdf_nbuf_t nbuf;

		do {
			nbuf = nbuf_head;
			nbuf_head = nbuf_head->next;
			qdf_nbuf_free(nbuf);
		} while (nbuf_head);

		return;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail, (struct cdp_peer *)peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: DP pdev handle
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ip_summed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
				       qdf_nbuf_t nbuf,
				       uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}
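/*
 * Note on the above: the checksum-verified result is only set on the nbuf
 * when HW reports both the IP and TCP/UDP checksums as good; in every other
 * case the nbuf is left unmarked so the network stack re-validates the
 * checksums in software, and the per-pdev error counters record which
 * check failed.
 */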
/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @peer: pointer to the peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 *
 * update all the per msdu stats for that nbuf.
 *
 * Return: void
 */
static void dp_rx_msdu_stats_update(struct dp_soc *soc,
				    qdf_nbuf_t nbuf,
				    uint8_t *rx_tlv_hdr,
				    struct dp_peer *peer,
				    uint8_t ring_id)
{
	bool is_ampdu, is_not_amsdu;
	uint16_t peer_id;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	struct dp_vdev *vdev = peer->vdev;
	struct ether_header *eh;
	uint16_t msdu_len = qdf_nbuf_len(nbuf);

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
			hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr));

	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
			qdf_nbuf_is_rx_chfrag_end(nbuf);

	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
		} else {
			DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
		}
	}

	/*
	 * currently we can return from here as we have similar stats
	 * updated at per ppdu level instead of msdu level
	 */
	if (!soc->process_rx_status)
		return;

	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);

	/* Save tid to skb->priority */
	DP_RX_TID_SAVE(nbuf, tid);

	DP_STATS_INC(vdev->pdev, rx.bw[bw], 1);
	DP_STATS_INC(vdev->pdev, rx.reception_type[reception_type], 1);
	DP_STATS_INC(peer, rx.nss[nss], 1);
	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
	DP_STATS_INCC(peer, rx.err.mic_err, 1,
		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));

	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_STATS_INC(peer, rx.bw[bw], 1);
	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
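	/*
	 * Per-MCS histogram: for each PHY generation the pair of counters
	 * below counts MCS indices at or above that PHY's limit in the
	 * MAX_MCS bucket, while indices at or below the limit increment
	 * their own mcs_count[] entry (the bound checked differs per
	 * pkt_type).
	 */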
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS) && (pkt_type == DOT11_AX)));

	if ((soc->process_rx_status) &&
	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
		if (soc->cdp_soc.ol_ops->update_dp_stats) {
			soc->cdp_soc.ol_ops->update_dp_stats(
					vdev->pdev->osif_pdev,
					&peer->stats,
					peer_id,
					UPDATE_PEER_STATS);
		}
	}
}

#ifdef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;

	if (vdev->opmode == wlan_op_mode_ap) {
		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
			if (bss_peer->bss_peer) {
				/* if wds policy check is not enabled on this
				 * vdev, accept all frames
				 */
				if (!bss_peer->wds_ecm.wds_rx_filter) {
					return 1;
				}
				break;
			}
		}
		rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
	} else {		/* sta mode */
		if (!peer->wds_ecm.wds_rx_filter) {
			return 1;
		}
		rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *                               self
	 * peer-   rx      rx-
	 * wds     ucast   mcast   dir   policy  accept  note
	 * ------------------------------------------------
	 * 1       1       0       11    x1      1       AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1       1       0       01    x1      0       AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1       1       0       10    x1      0       AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1       1       0       00    x1      0       bad frame, won't see it
	 * 1       0       1       11    1x      1       AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1       0       1       01    1x      0       AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1       0       1       10    1x      0       AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1       0       1       00    1x      0       bad frame, won't see it
	 * 1       1       0       11    x0      0       AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1       1       0       01    x0      0       AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1       1       0       10    x0      1       AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1       1       0       00    x0      0       bad frame, won't see it
	 * 1       0       1       11    0x      0       AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1       0       1       01    0x      0       AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1       0       1       10    0x      1       AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1       0       1       00    0x      0       bad frame, won't see it
	 *
	 * 0       x       x       11    xx      0       we only accept to-ds Rx frames from non-wds peers in this mode.
	 * 0       x       x       01    xx      1
	 * 0       x       x       10    xx      0
	 * 0       x       x       00    xx      0       bad frame, won't see it
	 * ------------------------------------------------
	 */
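	/*
	 * Worked example against the first table row: a unicast frame from a
	 * wds-enabled peer received as 4-address (fr_ds = to_ds = 1, so
	 * rx_4addr = 1) on an AP vdev whose wds_rx_ucast_4addr policy is 1
	 * satisfies (rx_4addr == rx_policy_ucast) below and is accepted.
	 */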
	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!peer->wds_enabled && rx_3addr && to_ds) ||
		    (peer->wds_enabled && !rx_mcast &&
		     (rx_4addr == rx_policy_ucast)) ||
		    (peer->wds_enabled && rx_mcast &&
		     (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {		/* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
		    (rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
#else
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	return 1;
}
#endif

/**
 * dp_rx_process() - Brain of the Rx processing functionality
 *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len;
	uint16_t peer_id;
	struct dp_peer *peer = NULL;
	struct dp_vdev *vdev = NULL;
	uint32_t pkt_len;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
	struct hal_rx_msdu_desc_info msdu_desc_info = { 0 };
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t ring_id = 0;
	uint8_t core_id = 0;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;

	DP_HIST_INIT();
	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	hif_pm_runtime_mark_last_busy(soc->osdev->dev);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		hal_srng_access_end(hal_soc, hal_ring);
		goto done;
	}

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(quota && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		ring_id = hal_srng_ring_id_get(hal_ring);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("HAL RING 0x%pK:error %d"),
				  hal_ring, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		qdf_assert(rx_desc);
		rx_bufs_reaped[rx_desc->pool_id]++;

		/* TODO */
		/*
		 * Need a separate API for unmapping based on
		 * physical address
		 */
		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
				      QDF_DMA_BIDIRECTIONAL);

		core_id = smp_processor_id();
		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf),
					       mpdu_desc_info.peer_meta_data);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		pdev = soc->pdev_list[mac_id];
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
	}

	/* Peer can be NULL in case of LFR */
	if (qdf_likely(peer != NULL))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
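	/*
	 * Each nbuf below is matched back to its peer via the peer metadata
	 * that was stamped into its TLV area during the reap loop above;
	 * frames for the same vdev are batched on deliver_list_head/tail and
	 * flushed to the stack whenever the vdev changes.
	 */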
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("MSDU DONE failure"));
			hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
			qdf_assert(0);
		}

		peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
		peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
		peer = dp_peer_find_by_id(soc, peer_id);

		rx_bufs_used++;

		if (deliver_list_head && peer && (vdev != peer->vdev)) {
			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		if (qdf_likely(peer != NULL)) {
			vdev = peer->vdev;
		} else {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(vdev == NULL)) {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *	  nbuf_alloc. but we received an msdu which is
		 *	  2304 bytes in size then this msdu is spread
		 *	  across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *	  across 1st nbuf and 2nd nbuf and last MSDU is
		 *	  spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 */
		if (qdf_unlikely(vdev->rx_decap_type ==
				 htt_cmn_pkt_type_raw)) {

			DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
			DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));

			nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);
			next = nbuf->next;
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("Policy Check Drop pkt"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(peer && peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("received pkt with same src MAC"));
			DP_STATS_INC(vdev->pdev, dropped.mec, 1);

			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
			(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
			(hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) {
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		dp_set_rx_queue(nbuf, ring_id);

		/*
		 * HW structures call this L3 header padding --
		 * even though this is actually the offset from
		 * the buffer beginning where the L2 header
		 * begins.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("rxhash: flow id toeplitz: 0x%x\n"),
			  hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));

		/* L2 header offset will not be set in raw mode */
		if (qdf_likely(vdev->rx_decap_type !=
			       htt_cmn_pkt_type_raw)) {
			l2_hdr_offset =
				hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
		}

		msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
		pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
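		/*
		 * Trim example: for a 1400-byte msdu with 2 bytes of L3
		 * padding, pkt_len = 1400 + 2 + RX_PKT_TLVS_LEN. A
		 * frag-listed nbuf already had its lengths fixed up in
		 * dp_rx_sg_create(), so only the TLV area is pulled below;
		 * otherwise the pktlen is set to pkt_len and both the TLVs
		 * and the padding are pulled so that the nbuf data starts
		 * at the L2 header.
		 */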
		if (unlikely(qdf_nbuf_get_ext_list(nbuf)))
			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		else {
			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			qdf_nbuf_pull_head(nbuf,
					   RX_PKT_TLVS_LEN +
					   l2_hdr_offset);
		}

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf,
						      rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_MED,
					  FL("mesh pkt filtered"));
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
		}

#ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "p_id %d msdu_len %d hdr_off %d",
			  peer_id, msdu_len, l2_hdr_offset);

		print_hex_dump(KERN_ERR,
			       "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
			       qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    (qdf_likely(!vdev->mesh_vdev))) {
			/* WDS Source Port Learning */
			dp_rx_wds_srcport_learn(soc,
						rx_tlv_hdr,
						peer,
						nbuf);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd(soc,
						       peer,
						       rx_tlv_hdr,
						       nbuf)) {
					nbuf = next;
					continue; /* Get next desc */
				}
		}

		dp_rx_lro(rx_tlv_hdr, peer, nbuf, int_ctx->lro_ctx);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);

		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 qdf_nbuf_len(nbuf));

		nbuf = next;
	}

	if (deliver_list_head)
		dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
				       deliver_list_tail);

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_pdev_detach() - detach dp rx
 * @pdev: core txrx pdev context
 *
 * This function detaches DP Rx from the main device context and
 * frees the DP Rx resources.
 *
 * Return: void
 */
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];

	if (rx_desc_pool->pool_size != 0) {
		dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
	}

	return;
}

/**
 * dp_rx_pdev_attach() - attach DP RX
 * @pdev: core txrx pdev context
 *
 * This function attaches a DP Rx instance to the main device (SOC)
 * context, allocating and initializing the DP Rx resources.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng rxdma_srng;
	uint32_t rxdma_entries;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "nss-wifi<4> skip Rx refill %d", pdev_id);
		return QDF_STATUS_SUCCESS;
	}

	pdev = soc->pdev_list[pdev_id];
	rxdma_srng = pdev->rx_refill_buf_ring;
	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
	rxdma_entries = rxdma_srng.alloc_size / hal_srng_get_entrysize(
						soc->hal_soc, RXDMA_BUF);

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];

	dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries * 3, rx_desc_pool);

	rx_desc_pool->owner = DP_WBM2SW_RBM;
	/* For Rx buffers, WBM release ring is SW RING 3, for all pdev's */
	dp_rxdma_srng = &pdev->rx_refill_buf_ring;
	dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool,
				0, &desc_list, &tail);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_nbuf_prepare() - prepare RX nbuf
 * @soc: core txrx main context
 * @pdev: core txrx pdev context
 *
 * This function allocates and maps an nbuf for RX DMA usage, retrying on
 * failure until the retry count reaches the max threshold or the
 * alloc/map succeeds.
 *
 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
 */
qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint8_t *buf;
	int32_t nbuf_retry_count;
	QDF_STATUS ret;
	qdf_nbuf_t nbuf = NULL;

	for (nbuf_retry_count = 0; nbuf_retry_count <
		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
			nbuf_retry_count++) {
		/* Allocate a new skb */
		nbuf = qdf_nbuf_alloc(soc->osdev,
				      RX_BUFFER_SIZE,
				      RX_BUFFER_RESERVATION,
				      RX_BUFFER_ALIGNMENT,
				      FALSE);

		if (nbuf == NULL) {
			DP_STATS_INC(pdev,
				     replenish.nbuf_alloc_fail, 1);
			continue;
		}

		buf = qdf_nbuf_data(nbuf);

		memset(buf, 0, RX_BUFFER_SIZE);

		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
					  QDF_DMA_BIDIRECTIONAL);

		/* nbuf map failed */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(pdev, replenish.map_err, 1);
			continue;
		}
		/* qdf_nbuf alloc and map succeeded */
		break;
	}

	/* qdf_nbuf still alloc or map failed */
	if (qdf_unlikely(nbuf_retry_count >=
			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
		return NULL;

	return nbuf;
}