/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"
#ifdef RX_DESC_DEBUG_CHECK
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
}
#else
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
}
#endif

#ifdef CONFIG_WIN
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
#else
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	if (vdev->opmode != wlan_op_mode_sta)
		return true;
	else
		return false;
}
#endif
/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;

	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);

	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}


	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   RX_BUFFER_SIZE,
					   RX_BUFFER_RESERVATION,
					   RX_BUFFER_ALIGNMENT,
					   FALSE);

		if (rx_netbuf == NULL) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_BIDIRECTIONAL);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		/*
		 * check if the physical address of nbuf->data is
		 * less than 0x50000000; if so, free the nbuf and try
		 * allocating a new nbuf. We can retry up to 100 times.
		 * This is a temporary WAR till we fix it properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
		(*desc_list)->rx_desc.in_use = 1;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
			  rx_netbuf, qdf_nbuf_data(rx_netbuf),
			  (unsigned long long)paddr,
			  (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "successfully replenished %d buffers", num_req_buffers);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%d rx desc added back to free list", num_desc_to_free);

	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
			 (RX_BUFFER_SIZE * num_req_buffers));

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for a non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, (struct cdp_peer *)peer);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}


#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
			uint16_t peer_id,
			struct dp_peer *peer,
			struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("PeerID %d not found use vdevID %d"),
				  peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("Invalid PeerID %d"),
				  peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
			uint16_t peer_id,
			struct dp_peer *peer,
			struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  FL("Peer not found for peerID %d"),
			  peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @sa_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
			struct dp_peer *sa_peer,
			uint8_t *rx_tlv_hdr,
			qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */

	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
	     !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);

		ast_entry = soc->ast_table[da_idx];
		if (!ast_entry)
			return false;

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;

		if (da_peer->vdev == sa_peer->vdev && !da_peer->bss_peer) {
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
			len = qdf_nbuf_len(nbuf);

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(sa_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					return true;
				}
			}

			if (!dp_tx_send(sa_peer->vdev, nbuf)) {
				DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts,
						 1, len);
				return true;
			} else {
				DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1,
						 len);
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts.
	 * do we forward all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			       !sa_peer->bss_peer))) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
		len = qdf_nbuf_len(nbuf_copy);

		if (dp_tx_send(sa_peer->vdev, nbuf_copy)) {
			DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1, len);
			qdf_nbuf_free(nbuf_copy);
		} else
			DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts, 1, len);
	}
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible for freeing this memory */

	if (rx_info == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					   &rx_info->rs_decryptkey[0],
					   &peer->mac_addr.raw[0],
					   rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
		rx_info->rs_flags,
		rx_info->rs_rssi,
		rx_info->rs_channel,
		rx_info->rs_ratephy1,
		rx_info->rs_keyix);

}

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any filter-out
 * category and drops the packet if it matches.
 *
 * Return: status (0 indicates drop, 1 indicates no drop)
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
			    && !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef CONFIG_WIN
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
		uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr =
		hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}


	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handled */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						  pdev->invalid_peer_head_msdu,
						  pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}


		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
							   &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * Return: void
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;

	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		/* Drop and free packet */
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(curr_nbuf));
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}
	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* To avoid compiler warning */
	mpdu_done = mpdu_done;

	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif

#if defined(FEATURE_LRO)
static void dp_rx_print_lro_info(uint8_t *rx_tlv)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("----------------------RX DESC LRO----------------------"));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("lro_eligible 0x%x"), HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("pure_ack 0x%x"), HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("chksum 0x%x"), HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP seq num 0x%x"), HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP ack num 0x%x"), HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP window 0x%x"), HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP protocol 0x%x"), HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP offset 0x%x"), HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("toeplitz 0x%x"), HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("---------------------------------------------------------"));
}

/**
 * dp_rx_lro() - LRO related processing
 * @rx_tlv: TLV data extracted from the rx packet
 * @peer: destination peer of the msdu
 * @msdu: network buffer
 * @ctx: LRO context
 *
 * This function performs the LRO related processing of the msdu
 *
 * Return: void
 */
static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
		      qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
{
	if (!peer || !peer->vdev || !peer->vdev->lro_enable) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("no peer, no vdev or LRO disabled"));
		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0;
		return;
	}
	qdf_assert(rx_tlv);
	dp_rx_print_lro_info(rx_tlv);

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);

	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);

	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
	QDF_NBUF_CB_RX_LRO_CTX(msdu) = (unsigned char *)ctx;

}
#else
static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
		      qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
{
}
#endif

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}

static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head,
					  qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		qdf_nbuf_t nbuf;
		do {
			nbuf = nbuf_head;
			nbuf_head = nbuf_head->next;
			qdf_nbuf_free(nbuf);
		} while (nbuf_head);

		return;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail, (struct cdp_peer *)peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);

}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ip_summed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
				       qdf_nbuf_t nbuf,
				       uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}

/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @peer: pointer to the peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 *
 * update all the per msdu stats for that nbuf.
 *
 * Return: void
 */
static void dp_rx_msdu_stats_update(struct dp_soc *soc,
				    qdf_nbuf_t nbuf,
				    uint8_t *rx_tlv_hdr,
				    struct dp_peer *peer,
				    uint8_t ring_id)
{
	bool is_ampdu, is_not_amsdu;
	uint16_t peer_id;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	struct dp_vdev *vdev = peer->vdev;
	struct ether_header *eh;
	uint16_t msdu_len = qdf_nbuf_len(nbuf);

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
			hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr));

	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
			qdf_nbuf_is_rx_chfrag_end(nbuf);

	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
		} else {
			DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
		}
	}

	/*
	 * currently we can return from here as we have similar stats
	 * updated at per ppdu level instead of msdu level
	 */
	if (!soc->process_rx_status)
		return;

	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);

	/* Save tid to skb->priority */
	DP_RX_TID_SAVE(nbuf, tid);

	DP_STATS_INC(peer, rx.bw[bw], 1);
	DP_STATS_INC(peer, rx.nss[nss], 1);
	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
	DP_STATS_INCC(peer, rx.err.mic_err, 1,
		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));

	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);

	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS) && (pkt_type == DOT11_AX)));

	if ((soc->process_rx_status) &&
	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
		if (soc->cdp_soc.ol_ops->update_dp_stats) {
			soc->cdp_soc.ol_ops->update_dp_stats(
					vdev->pdev->ctrl_pdev,
					&peer->stats,
					peer_id,
					UPDATE_PEER_STATS);
		}
	}
}

#ifdef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;

	if (vdev->opmode == wlan_op_mode_ap) {
		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
			if (bss_peer->bss_peer) {
				/* if wds policy check is not enabled on this
				 * vdev, accept all frames
				 */
				if (!bss_peer->wds_ecm.wds_rx_filter) {
					return 1;
				}
				break;
			}
		}
		rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
	} else {		/* sta mode */
		if (!peer->wds_ecm.wds_rx_filter) {
			return 1;
		}
		rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *                       self
	 * peer-             rx  rx-
	 * wds  ucast mcast dir policy accept note
	 * ------------------------------------------------
	 * 1     1     0   11  x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0   01  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0   10  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0   00  x1     0      bad frame, won't see it
	 * 1     0     1   11  1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1   01  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1   10  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1   00  1x     0      bad frame, won't see it
	 * 1     1     0   11  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0   01  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0   10  x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0   00  x0     0      bad frame, won't see it
	 * 1     0     1   11  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1   01  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1   10  0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1   00  0x     0      bad frame, won't see it
	 *
	 * 0     x     x   11  xx     0      we only accept td-ds Rx frames from non-wds peers in mode.
	 * 0     x     x   01  xx     1
	 * 0     x     x   10  xx     0
	 * 0     x     x   00  xx     0      bad frame, won't see it
	 * ------------------------------------------------
	 */

	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!peer->wds_enabled && rx_3addr && to_ds) ||
		    (peer->wds_enabled && !rx_mcast &&
		     (rx_4addr == rx_policy_ucast)) ||
		    (peer->wds_enabled && rx_mcast &&
		     (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {		/* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
		    (rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
#else
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	return 1;
}
#endif

/**
 * dp_rx_process() - Brain of the Rx processing functionality
 *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	struct dp_peer *peer = NULL;
	struct dp_vdev *vdev = NULL;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
	struct hal_rx_msdu_desc_info msdu_desc_info = { 0 };
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t ring_id = 0;
	uint8_t core_id = 0;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;

	DP_HIST_INIT();
	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	hif_pm_runtime_mark_last_busy(soc->osdev->dev);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		hal_srng_access_end(hal_soc, hal_ring);
		goto done;
	}

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);

		/*
		 * in case HW has updated hp after we cached the hp,
		 * ring_desc can be NULL even when there are entries
		 * available in the ring. Update the cached_hp
		 * and reap the buffers available to read a complete
		 * mpdu in one reap.
		 *
		 * This is needed for RAW mode: we have to read all
		 * msdus corresponding to an amsdu in one reap to create
		 * the SG list properly, but due to mismatch in cached_hp
		 * and actual hp sometimes we are unable to read a
		 * complete mpdu in one reap.
		 */
		if (qdf_unlikely(!ring_desc)) {
			hal_srng_access_start_unlocked(hal_soc, hal_ring);
			ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);
			if (!ring_desc)
				break;
		}

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		ring_id = hal_srng_ring_id_get(hal_ring);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("HAL RING 0x%pK:error %d"),
				  hal_ring, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);


		qdf_assert(rx_desc);
		rx_bufs_reaped[rx_desc->pool_id]++;

		/* TODO */
		/*
		 * Need a separate API for unmapping based on
		 * physical address
		 */
		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
				      QDF_DMA_BIDIRECTIONAL);

		core_id = smp_processor_id();
		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf),
					       mpdu_desc_info.peer_meta_data);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;


		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		pdev = soc->pdev_list[mac_id];
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
	}

	/* Peer can be NULL in case of LFR */
	if (qdf_likely(peer != NULL))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("MSDU DONE failure"));
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			qdf_assert(0);
		}

		peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
		peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
		peer = dp_peer_find_by_id(soc, peer_id);

		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		if (deliver_list_head && peer && (vdev != peer->vdev)) {
			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		if (qdf_likely(peer != NULL)) {
			vdev = peer->vdev;
		} else {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(vdev == NULL)) {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf)))
			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		else if (qdf_unlikely(vdev->rx_decap_type ==
				htt_cmn_pkt_type_raw)) {
			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
			nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);

			DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
			DP_STATS_INC_PKT(peer, rx.raw, 1,
					 msdu_len);

			next = nbuf->next;
		} else {
			l2_hdr_offset =
				hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);

			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
			pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			qdf_nbuf_pull_head(nbuf,
					   RX_PKT_TLVS_LEN +
					   l2_hdr_offset);
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("Policy Check Drop pkt"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(peer && peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("received pkt with same src MAC"));
			DP_STATS_INC(vdev->pdev, dropped.mec, 1);

			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
			(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
			(hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) {
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		dp_set_rx_queue(nbuf, ring_id);

		/*
		 * HW structures call this L3 header padding --
		 * even though this is actually the offset from
		 * the buffer beginning where the L2 header
		 * begins.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("rxhash: flow id toeplitz: 0x%x"),
			  hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf,
						      rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_MED,
					  FL("mesh pkt filtered"));
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				qdf_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
		}

#ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "p_id %d msdu_len %d hdr_off %d",
			  peer_id, msdu_len, l2_hdr_offset);

		print_hex_dump(KERN_ERR,
			       "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
			       qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    (qdf_likely(!vdev->mesh_vdev)) &&
		    (vdev->wds_enabled)) {
			/* WDS Source Port Learning */
			dp_rx_wds_srcport_learn(soc,
						rx_tlv_hdr,
						peer,
						nbuf);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd(soc,
						       peer,
						       rx_tlv_hdr,
						       nbuf)) {
					nbuf = next;
					continue; /* Get next desc */
				}
		}

		dp_rx_lro(rx_tlv_hdr, peer, nbuf, int_ctx->lro_ctx);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);

		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 qdf_nbuf_len(nbuf));

		nbuf = next;
	}

	if (deliver_list_head)
		dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
				       deliver_list_tail);

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_pdev_detach() - detach dp rx
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX from the main device context
 * and free DP RX resources.
 *
 * Return: void
 */
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];

	if (rx_desc_pool->pool_size != 0) {
		dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
	}

	return;
}

/**
 * dp_rx_pdev_attach() - attach DP RX
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context. Will allocate dp rx resource and
 * initialize resources.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng rxdma_srng;
	uint32_t rxdma_entries;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "nss-wifi<4> skip Rx refil %d", pdev_id);
		return QDF_STATUS_SUCCESS;
	}

	pdev = soc->pdev_list[pdev_id];
	rxdma_srng = pdev->rx_refill_buf_ring;
	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
	rxdma_entries = rxdma_srng.alloc_size / hal_srng_get_entrysize(
						soc->hal_soc, RXDMA_BUF);

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];

	dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries * 3, rx_desc_pool);

	rx_desc_pool->owner = DP_WBM2SW_RBM;
	/* For Rx buffers, WBM release ring is SW RING 3, for all pdev's */
	dp_rxdma_srng = &pdev->rx_refill_buf_ring;
	dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool,
				0, &desc_list, &tail);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_nbuf_prepare() - prepare RX nbuf
 * @soc: core txrx main context
 * @pdev: core txrx pdev context
 *
 * This function allocates and maps an nbuf for RX DMA usage, retrying on
 * failure until the retry count reaches the max threshold or the operation
 * succeeds.
 *
 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
 */
qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint8_t *buf;
	int32_t nbuf_retry_count;
	QDF_STATUS ret;
	qdf_nbuf_t nbuf = NULL;

	for (nbuf_retry_count = 0; nbuf_retry_count <
		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
			nbuf_retry_count++) {
		/* Allocate a new skb */
		nbuf = qdf_nbuf_alloc(soc->osdev,
				      RX_BUFFER_SIZE,
				      RX_BUFFER_RESERVATION,
				      RX_BUFFER_ALIGNMENT,
				      FALSE);

		if (nbuf == NULL) {
			DP_STATS_INC(pdev,
				     replenish.nbuf_alloc_fail, 1);
			continue;
		}

		buf = qdf_nbuf_data(nbuf);

		memset(buf, 0, RX_BUFFER_SIZE);

		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
					  QDF_DMA_BIDIRECTIONAL);

		/* nbuf map failed */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(pdev, replenish.map_err, 1);
			continue;
		}
		/* qdf_nbuf alloc and map succeeded */
		break;
	}

	/* qdf_nbuf still alloc or map failed */
	if (qdf_unlikely(nbuf_retry_count >=
			QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
		return NULL;

	return nbuf;
}