1 /* 2 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "dp_types.h" 20 #include "dp_rx.h" 21 #include "dp_peer.h" 22 #include "hal_rx.h" 23 #include "hal_api.h" 24 #include "qdf_nbuf.h" 25 #ifdef MESH_MODE_SUPPORT 26 #include "if_meta_hdr.h" 27 #endif 28 #include "dp_internal.h" 29 #include "dp_rx_mon.h" 30 #ifdef RX_DESC_DEBUG_CHECK 31 static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf) 32 { 33 rx_desc->magic = DP_RX_DESC_MAGIC; 34 rx_desc->nbuf = nbuf; 35 } 36 #else 37 static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf) 38 { 39 rx_desc->nbuf = nbuf; 40 } 41 #endif 42 43 #ifdef CONFIG_WIN 44 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev) 45 { 46 return vdev->ap_bridge_enabled; 47 } 48 #else 49 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev) 50 { 51 if (vdev->opmode != wlan_op_mode_sta) 52 return true; 53 else 54 return false; 55 } 56 #endif 57 /* 58 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs 59 * called during dp rx initialization 60 * and at the end of dp_rx_process. 61 * 62 * @soc: core txrx main context 63 * @mac_id: mac_id which is one of 3 mac_ids 64 * @dp_rxdma_srng: dp rxdma circular ring 65 * @rx_desc_pool: Poiter to free Rx descriptor pool 66 * @num_req_buffers: number of buffer to be replenished 67 * @desc_list: list of descs if called from dp_rx_process 68 * or NULL during dp rx initialization or out of buffer 69 * interrupt. 70 * @tail: tail of descs list 71 * Return: return success or failure 72 */ 73 QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 74 struct dp_srng *dp_rxdma_srng, 75 struct rx_desc_pool *rx_desc_pool, 76 uint32_t num_req_buffers, 77 union dp_rx_desc_list_elem_t **desc_list, 78 union dp_rx_desc_list_elem_t **tail) 79 { 80 uint32_t num_alloc_desc; 81 uint16_t num_desc_to_free = 0; 82 struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id); 83 uint32_t num_entries_avail; 84 uint32_t count; 85 int sync_hw_ptr = 1; 86 qdf_dma_addr_t paddr; 87 qdf_nbuf_t rx_netbuf; 88 void *rxdma_ring_entry; 89 union dp_rx_desc_list_elem_t *next; 90 QDF_STATUS ret; 91 92 void *rxdma_srng; 93 94 rxdma_srng = dp_rxdma_srng->hal_srng; 95 96 if (!rxdma_srng) { 97 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 98 "rxdma srng not initialized"); 99 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); 100 return QDF_STATUS_E_FAILURE; 101 } 102 103 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 104 "requested %d buffers for replenish", num_req_buffers); 105 106 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); 107 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, 108 rxdma_srng, 109 sync_hw_ptr); 110 111 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 112 "no of availble entries in rxdma ring: %d", 113 num_entries_avail); 114 115 if (!(*desc_list) && (num_entries_avail > 116 ((dp_rxdma_srng->num_entries * 3) / 4))) { 117 num_req_buffers = num_entries_avail; 118 } else if (num_entries_avail < num_req_buffers) { 119 num_desc_to_free = num_req_buffers - num_entries_avail; 120 num_req_buffers = num_entries_avail; 121 } 122 123 if (qdf_unlikely(!num_req_buffers)) { 124 num_desc_to_free = num_req_buffers; 125 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 126 goto free_descs; 127 } 128 129 /* 130 * if desc_list is NULL, allocate the descs from freelist 131 */ 132 if (!(*desc_list)) { 133 num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, 134 rx_desc_pool, 135 num_req_buffers, 136 desc_list, 137 tail); 138 139 if (!num_alloc_desc) { 140 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 141 "no free rx_descs in freelist"); 142 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, 143 num_req_buffers); 144 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 145 return QDF_STATUS_E_NOMEM; 146 } 147 148 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 149 "%d rx desc allocated", num_alloc_desc); 150 num_req_buffers = num_alloc_desc; 151 } 152 153 154 count = 0; 155 156 while (count < num_req_buffers) { 157 rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev, 158 RX_BUFFER_SIZE, 159 RX_BUFFER_RESERVATION, 160 RX_BUFFER_ALIGNMENT, 161 FALSE); 162 163 if (rx_netbuf == NULL) { 164 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); 165 continue; 166 } 167 168 ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf, 169 QDF_DMA_BIDIRECTIONAL); 170 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 171 qdf_nbuf_free(rx_netbuf); 172 DP_STATS_INC(dp_pdev, replenish.map_err, 1); 173 continue; 174 } 175 176 paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0); 177 178 /* 179 * check if the physical address of nbuf->data is 180 * less then 0x50000000 then free the nbuf and try 181 * allocating new nbuf. We can try for 100 times. 182 * this is a temp WAR till we fix it properly. 183 */ 184 ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev); 185 if (ret == QDF_STATUS_E_FAILURE) { 186 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); 187 break; 188 } 189 190 count++; 191 192 rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, 193 rxdma_srng); 194 195 next = (*desc_list)->next; 196 197 dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf); 198 (*desc_list)->rx_desc.in_use = 1; 199 200 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 201 "rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d", 202 rx_netbuf, qdf_nbuf_data(rx_netbuf), 203 (unsigned long long)paddr, (*desc_list)->rx_desc.cookie); 204 205 hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr, 206 (*desc_list)->rx_desc.cookie, 207 rx_desc_pool->owner); 208 209 *desc_list = next; 210 } 211 212 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); 213 214 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 215 "successfully replenished %d buffers", num_req_buffers); 216 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 217 "%d rx desc added back to free list", num_desc_to_free); 218 219 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers, 220 (RX_BUFFER_SIZE * num_req_buffers)); 221 222 free_descs: 223 DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free); 224 /* 225 * add any available free desc back to the free list 226 */ 227 if (*desc_list) 228 dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail, 229 mac_id, rx_desc_pool); 230 231 return QDF_STATUS_SUCCESS; 232 } 233 234 /* 235 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the 236 * pkts to RAW mode simulation to 237 * decapsulate the pkt. 238 * 239 * @vdev: vdev on which RAW mode is enabled 240 * @nbuf_list: list of RAW pkts to process 241 * @peer: peer object from which the pkt is rx 242 * 243 * Return: void 244 */ 245 void 246 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, 247 struct dp_peer *peer) 248 { 249 qdf_nbuf_t deliver_list_head = NULL; 250 qdf_nbuf_t deliver_list_tail = NULL; 251 qdf_nbuf_t nbuf; 252 253 nbuf = nbuf_list; 254 while (nbuf) { 255 qdf_nbuf_t next = qdf_nbuf_next(nbuf); 256 257 DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf); 258 259 DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); 260 /* 261 * reset the chfrag_start and chfrag_end bits in nbuf cb 262 * as this is a non-amsdu pkt and RAW mode simulation expects 263 * these bit s to be 0 for non-amsdu pkt. 264 */ 265 if (qdf_nbuf_is_rx_chfrag_start(nbuf) && 266 qdf_nbuf_is_rx_chfrag_end(nbuf)) { 267 qdf_nbuf_set_rx_chfrag_start(nbuf, 0); 268 qdf_nbuf_set_rx_chfrag_end(nbuf, 0); 269 } 270 271 nbuf = next; 272 } 273 274 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head, 275 &deliver_list_tail, (struct cdp_peer*) peer); 276 277 vdev->osif_rx(vdev->osif_vdev, deliver_list_head); 278 } 279 280 281 #ifdef DP_LFR 282 /* 283 * In case of LFR, data of a new peer might be sent up 284 * even before peer is added. 285 */ 286 static inline struct dp_vdev * 287 dp_get_vdev_from_peer(struct dp_soc *soc, 288 uint16_t peer_id, 289 struct dp_peer *peer, 290 struct hal_rx_mpdu_desc_info mpdu_desc_info) 291 { 292 struct dp_vdev *vdev; 293 uint8_t vdev_id; 294 295 if (unlikely(!peer)) { 296 if (peer_id != HTT_INVALID_PEER) { 297 vdev_id = DP_PEER_METADATA_ID_GET( 298 mpdu_desc_info.peer_meta_data); 299 QDF_TRACE(QDF_MODULE_ID_DP, 300 QDF_TRACE_LEVEL_DEBUG, 301 FL("PeerID %d not found use vdevID %d"), 302 peer_id, vdev_id); 303 vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, 304 vdev_id); 305 } else { 306 QDF_TRACE(QDF_MODULE_ID_DP, 307 QDF_TRACE_LEVEL_DEBUG, 308 FL("Invalid PeerID %d"), 309 peer_id); 310 return NULL; 311 } 312 } else { 313 vdev = peer->vdev; 314 } 315 return vdev; 316 } 317 #else 318 static inline struct dp_vdev * 319 dp_get_vdev_from_peer(struct dp_soc *soc, 320 uint16_t peer_id, 321 struct dp_peer *peer, 322 struct hal_rx_mpdu_desc_info mpdu_desc_info) 323 { 324 if (unlikely(!peer)) { 325 QDF_TRACE(QDF_MODULE_ID_DP, 326 QDF_TRACE_LEVEL_DEBUG, 327 FL("Peer not found for peerID %d"), 328 peer_id); 329 return NULL; 330 } else { 331 return peer->vdev; 332 } 333 } 334 #endif 335 336 /** 337 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic 338 * 339 * @soc: core txrx main context 340 * @sa_peer : source peer entry 341 * @rx_tlv_hdr : start address of rx tlvs 342 * @nbuf : nbuf that has to be intrabss forwarded 343 * 344 * Return: bool: true if it is forwarded else false 345 */ 346 static bool 347 dp_rx_intrabss_fwd(struct dp_soc *soc, 348 struct dp_peer *sa_peer, 349 uint8_t *rx_tlv_hdr, 350 qdf_nbuf_t nbuf) 351 { 352 uint16_t da_idx; 353 uint16_t len; 354 struct dp_peer *da_peer; 355 struct dp_ast_entry *ast_entry; 356 qdf_nbuf_t nbuf_copy; 357 358 /* check if the destination peer is available in peer table 359 * and also check if the source peer and destination peer 360 * belong to the same vap and destination peer is not bss peer. 361 */ 362 363 if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) && 364 !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { 365 da_idx = hal_rx_msdu_end_da_idx_get(rx_tlv_hdr); 366 367 ast_entry = soc->ast_table[da_idx]; 368 if (!ast_entry) 369 return false; 370 371 da_peer = ast_entry->peer; 372 373 if (!da_peer) 374 return false; 375 376 if (da_peer->vdev == sa_peer->vdev && !da_peer->bss_peer) { 377 memset(nbuf->cb, 0x0, sizeof(nbuf->cb)); 378 len = qdf_nbuf_len(nbuf); 379 380 /* linearize the nbuf just before we send to 381 * dp_tx_send() 382 */ 383 if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) { 384 if (qdf_nbuf_linearize(nbuf) == -ENOMEM) 385 return false; 386 387 nbuf = qdf_nbuf_unshare(nbuf); 388 } 389 390 if (!dp_tx_send(sa_peer->vdev, nbuf)) { 391 DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts, 392 1, len); 393 return true; 394 } else { 395 DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1, 396 len); 397 return false; 398 } 399 } 400 } 401 /* if it is a broadcast pkt (eg: ARP) and it is not its own 402 * source, then clone the pkt and send the cloned pkt for 403 * intra BSS forwarding and original pkt up the network stack 404 * Note: how do we handle multicast pkts. do we forward 405 * all multicast pkts as is or let a higher layer module 406 * like igmpsnoop decide whether to forward or not with 407 * Mcast enhancement. 408 */ 409 else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) && 410 !sa_peer->bss_peer))) { 411 nbuf_copy = qdf_nbuf_copy(nbuf); 412 if (!nbuf_copy) 413 return false; 414 memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb)); 415 len = qdf_nbuf_len(nbuf_copy); 416 417 if (dp_tx_send(sa_peer->vdev, nbuf_copy)) { 418 DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1, len); 419 qdf_nbuf_free(nbuf_copy); 420 } else 421 DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts, 1, len); 422 } 423 /* return false as we have to still send the original pkt 424 * up the stack 425 */ 426 return false; 427 } 428 429 #ifdef MESH_MODE_SUPPORT 430 431 /** 432 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats 433 * 434 * @vdev: DP Virtual device handle 435 * @nbuf: Buffer pointer 436 * @rx_tlv_hdr: start of rx tlv header 437 * @peer: pointer to peer 438 * 439 * This function allocated memory for mesh receive stats and fill the 440 * required stats. Stores the memory address in skb cb. 441 * 442 * Return: void 443 */ 444 445 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 446 uint8_t *rx_tlv_hdr, struct dp_peer *peer) 447 { 448 struct mesh_recv_hdr_s *rx_info = NULL; 449 uint32_t pkt_type; 450 uint32_t nss; 451 uint32_t rate_mcs; 452 uint32_t bw; 453 454 /* fill recv mesh stats */ 455 rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s)); 456 457 /* upper layers are resposible to free this memory */ 458 459 if (rx_info == NULL) { 460 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 461 "Memory allocation failed for mesh rx stats"); 462 DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1); 463 return; 464 } 465 466 rx_info->rs_flags = MESH_RXHDR_VER1; 467 if (qdf_nbuf_is_rx_chfrag_start(nbuf)) 468 rx_info->rs_flags |= MESH_RX_FIRST_MSDU; 469 470 if (qdf_nbuf_is_rx_chfrag_end(nbuf)) 471 rx_info->rs_flags |= MESH_RX_LAST_MSDU; 472 473 if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) { 474 rx_info->rs_flags |= MESH_RX_DECRYPTED; 475 rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr); 476 if (vdev->osif_get_key) 477 vdev->osif_get_key(vdev->osif_vdev, 478 &rx_info->rs_decryptkey[0], 479 &peer->mac_addr.raw[0], 480 rx_info->rs_keyix); 481 } 482 483 rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr); 484 rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr); 485 pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr); 486 rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr); 487 bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr); 488 nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr); 489 rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) | 490 (bw << 24); 491 492 qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO); 493 494 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED, 495 FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"), 496 rx_info->rs_flags, 497 rx_info->rs_rssi, 498 rx_info->rs_channel, 499 rx_info->rs_ratephy1, 500 rx_info->rs_keyix); 501 502 } 503 504 /** 505 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets 506 * 507 * @vdev: DP Virtual device handle 508 * @nbuf: Buffer pointer 509 * @rx_tlv_hdr: start of rx tlv header 510 * 511 * This checks if the received packet is matching any filter out 512 * catogery and and drop the packet if it matches. 513 * 514 * Return: status(0 indicates drop, 1 indicate to no drop) 515 */ 516 517 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 518 uint8_t *rx_tlv_hdr) 519 { 520 union dp_align_mac_addr mac_addr; 521 522 if (qdf_unlikely(vdev->mesh_rx_filter)) { 523 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS) 524 if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)) 525 return QDF_STATUS_SUCCESS; 526 527 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS) 528 if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr)) 529 return QDF_STATUS_SUCCESS; 530 531 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS) 532 if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr) 533 && !hal_rx_mpdu_get_to_ds(rx_tlv_hdr)) 534 return QDF_STATUS_SUCCESS; 535 536 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) { 537 if (hal_rx_mpdu_get_addr1(rx_tlv_hdr, 538 &mac_addr.raw[0])) 539 return QDF_STATUS_E_FAILURE; 540 541 if (!qdf_mem_cmp(&mac_addr.raw[0], 542 &vdev->mac_addr.raw[0], 543 DP_MAC_ADDR_LEN)) 544 return QDF_STATUS_SUCCESS; 545 } 546 547 if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) { 548 if (hal_rx_mpdu_get_addr2(rx_tlv_hdr, 549 &mac_addr.raw[0])) 550 return QDF_STATUS_E_FAILURE; 551 552 if (!qdf_mem_cmp(&mac_addr.raw[0], 553 &vdev->mac_addr.raw[0], 554 DP_MAC_ADDR_LEN)) 555 return QDF_STATUS_SUCCESS; 556 } 557 } 558 559 return QDF_STATUS_E_FAILURE; 560 } 561 562 #else 563 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 564 uint8_t *rx_tlv_hdr, struct dp_peer *peer) 565 { 566 } 567 568 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 569 uint8_t *rx_tlv_hdr) 570 { 571 return QDF_STATUS_E_FAILURE; 572 } 573 574 #endif 575 576 #ifdef CONFIG_WIN 577 /** 578 * dp_rx_nac_filter(): Function to perform filtering of non-associated 579 * clients 580 * @pdev: DP pdev handle 581 * @rx_pkt_hdr: Rx packet Header 582 * 583 * return: dp_vdev* 584 */ 585 static 586 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev, 587 uint8_t *rx_pkt_hdr) 588 { 589 struct ieee80211_frame *wh; 590 struct dp_neighbour_peer *peer = NULL; 591 592 wh = (struct ieee80211_frame *)rx_pkt_hdr; 593 594 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS) 595 return NULL; 596 597 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); 598 TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, 599 neighbour_peer_list_elem) { 600 if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], 601 wh->i_addr2, DP_MAC_ADDR_LEN) == 0) { 602 QDF_TRACE( 603 QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 604 FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"), 605 peer->neighbour_peers_macaddr.raw[0], 606 peer->neighbour_peers_macaddr.raw[1], 607 peer->neighbour_peers_macaddr.raw[2], 608 peer->neighbour_peers_macaddr.raw[3], 609 peer->neighbour_peers_macaddr.raw[4], 610 peer->neighbour_peers_macaddr.raw[5]); 611 612 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); 613 614 return pdev->monitor_vdev; 615 } 616 } 617 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); 618 619 return NULL; 620 } 621 622 /** 623 * dp_rx_process_nac_rssi_frames(): Store RSSI for configured NAC 624 * @pdev: DP pdev handle 625 * @rx_tlv_hdr: tlv hdr buf 626 * 627 * return: None 628 */ 629 #ifdef ATH_SUPPORT_NAC_RSSI 630 static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr) 631 { 632 struct dp_vdev *vdev = NULL; 633 struct dp_soc *soc = pdev->soc; 634 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr); 635 struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr; 636 637 if (pdev->nac_rssi_filtering) { 638 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 639 if (vdev->cdp_nac_rssi_enabled && 640 (qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac, 641 wh->i_addr1, DP_MAC_ADDR_LEN) == 0)) { 642 QDF_TRACE(QDF_MODULE_ID_DP, 643 QDF_TRACE_LEVEL_DEBUG, "RSSI updated"); 644 vdev->cdp_nac_rssi.vdev_id = vdev->vdev_id; 645 vdev->cdp_nac_rssi.client_rssi = 646 hal_rx_msdu_start_get_rssi(rx_tlv_hdr); 647 dp_wdi_event_handler(WDI_EVENT_NAC_RSSI, soc, 648 (void *)&vdev->cdp_nac_rssi, 649 HTT_INVALID_PEER, WDI_NO_VAL, 650 pdev->pdev_id); 651 } 652 } 653 } 654 } 655 #else 656 static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr) 657 { 658 } 659 #endif 660 661 /** 662 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac 663 * @soc: DP SOC handle 664 * @mpdu: mpdu for which peer is invalid 665 * 666 * return: integer type 667 */ 668 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu) 669 { 670 struct dp_invalid_peer_msg msg; 671 struct dp_vdev *vdev = NULL; 672 struct dp_pdev *pdev = NULL; 673 struct ieee80211_frame *wh; 674 uint8_t i; 675 qdf_nbuf_t curr_nbuf, next_nbuf; 676 uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); 677 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr); 678 679 wh = (struct ieee80211_frame *)rx_pkt_hdr; 680 681 if (!DP_FRAME_IS_DATA(wh)) { 682 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 683 "NAWDS valid only for data frames"); 684 goto free; 685 } 686 687 if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { 688 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 689 "Invalid nbuf length"); 690 goto free; 691 } 692 693 694 for (i = 0; i < MAX_PDEV_CNT; i++) { 695 pdev = soc->pdev_list[i]; 696 if (!pdev) { 697 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 698 "PDEV not found"); 699 continue; 700 } 701 702 if (pdev->filter_neighbour_peers) { 703 /* Next Hop scenario not yet handle */ 704 vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr); 705 if (vdev) { 706 dp_rx_mon_deliver(soc, i, 707 pdev->invalid_peer_head_msdu, 708 pdev->invalid_peer_tail_msdu); 709 710 pdev->invalid_peer_head_msdu = NULL; 711 pdev->invalid_peer_tail_msdu = NULL; 712 713 return 0; 714 } 715 } 716 717 718 dp_rx_process_nac_rssi_frames(pdev, rx_tlv_hdr); 719 720 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 721 722 if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, 723 DP_MAC_ADDR_LEN) == 0) { 724 goto out; 725 } 726 } 727 } 728 729 if (!vdev) { 730 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 731 "VDEV not found"); 732 goto free; 733 } 734 735 out: 736 msg.wh = wh; 737 qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN); 738 msg.nbuf = mpdu; 739 msg.vdev_id = vdev->vdev_id; 740 if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) 741 pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->osif_pdev, &msg); 742 743 free: 744 /* Drop and free packet */ 745 curr_nbuf = mpdu; 746 while (curr_nbuf) { 747 next_nbuf = qdf_nbuf_next(curr_nbuf); 748 qdf_nbuf_free(curr_nbuf); 749 curr_nbuf = next_nbuf; 750 } 751 752 return 0; 753 } 754 755 /** 756 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler 757 * @soc: DP SOC handle 758 * @mpdu: mpdu for which peer is invalid 759 * @mpdu_done: if an mpdu is completed 760 * 761 * return: integer type 762 */ 763 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 764 qdf_nbuf_t mpdu, bool mpdu_done) 765 { 766 /* Only trigger the process when mpdu is completed */ 767 if (mpdu_done) 768 dp_rx_process_invalid_peer(soc, mpdu); 769 } 770 #else 771 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu) 772 { 773 qdf_nbuf_t curr_nbuf, next_nbuf; 774 struct dp_pdev *pdev; 775 uint8_t i; 776 777 curr_nbuf = mpdu; 778 while (curr_nbuf) { 779 next_nbuf = qdf_nbuf_next(curr_nbuf); 780 /* Drop and free packet */ 781 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, 782 qdf_nbuf_len(curr_nbuf)); 783 qdf_nbuf_free(curr_nbuf); 784 curr_nbuf = next_nbuf; 785 } 786 787 /* reset the head and tail pointers */ 788 for (i = 0; i < MAX_PDEV_CNT; i++) { 789 pdev = soc->pdev_list[i]; 790 if (!pdev) { 791 QDF_TRACE(QDF_MODULE_ID_DP, 792 QDF_TRACE_LEVEL_ERROR, 793 "PDEV not found"); 794 continue; 795 } 796 797 pdev->invalid_peer_head_msdu = NULL; 798 pdev->invalid_peer_tail_msdu = NULL; 799 } 800 return 0; 801 } 802 803 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 804 qdf_nbuf_t mpdu, bool mpdu_done) 805 { 806 /* To avoid compiler warning */ 807 mpdu_done = mpdu_done; 808 809 /* Process the nbuf */ 810 dp_rx_process_invalid_peer(soc, mpdu); 811 } 812 #endif 813 814 #if defined(FEATURE_LRO) 815 static void dp_rx_print_lro_info(uint8_t *rx_tlv) 816 { 817 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 818 FL("----------------------RX DESC LRO----------------------\n")); 819 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 820 FL("lro_eligible 0x%x"), HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv)); 821 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 822 FL("pure_ack 0x%x"), HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv)); 823 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 824 FL("chksum 0x%x"), HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv)); 825 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 826 FL("TCP seq num 0x%x"), HAL_RX_TLV_GET_TCP_SEQ(rx_tlv)); 827 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 828 FL("TCP ack num 0x%x"), HAL_RX_TLV_GET_TCP_ACK(rx_tlv)); 829 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 830 FL("TCP window 0x%x"), HAL_RX_TLV_GET_TCP_WIN(rx_tlv)); 831 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 832 FL("TCP protocol 0x%x"), HAL_RX_TLV_GET_TCP_PROTO(rx_tlv)); 833 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 834 FL("TCP offset 0x%x"), HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv)); 835 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 836 FL("toeplitz 0x%x"), HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv)); 837 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 838 FL("---------------------------------------------------------\n")); 839 } 840 841 /** 842 * dp_rx_lro() - LRO related processing 843 * @rx_tlv: TLV data extracted from the rx packet 844 * @peer: destination peer of the msdu 845 * @msdu: network buffer 846 * @ctx: LRO context 847 * 848 * This function performs the LRO related processing of the msdu 849 * 850 * Return: true: LRO enabled false: LRO is not enabled 851 */ 852 static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer, 853 qdf_nbuf_t msdu, qdf_lro_ctx_t ctx) 854 { 855 if (!peer || !peer->vdev || !peer->vdev->lro_enable) { 856 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 857 FL("no peer, no vdev or LRO disabled")); 858 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0; 859 return; 860 } 861 qdf_assert(rx_tlv); 862 dp_rx_print_lro_info(rx_tlv); 863 864 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 865 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv); 866 867 QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = 868 HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv); 869 870 QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) = 871 HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv); 872 QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = 873 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv); 874 QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = 875 HAL_RX_TLV_GET_TCP_ACK(rx_tlv); 876 QDF_NBUF_CB_RX_TCP_WIN(msdu) = 877 HAL_RX_TLV_GET_TCP_WIN(rx_tlv); 878 QDF_NBUF_CB_RX_TCP_PROTO(msdu) = 879 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv); 880 QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = 881 HAL_RX_TLV_GET_IPV6(rx_tlv); 882 QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = 883 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv); 884 QDF_NBUF_CB_RX_FLOW_ID(msdu) = 885 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv); 886 QDF_NBUF_CB_RX_LRO_CTX(msdu) = (unsigned char *)ctx; 887 888 } 889 #else 890 static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer, 891 qdf_nbuf_t msdu, qdf_lro_ctx_t ctx) 892 { 893 } 894 #endif 895 896 static inline void dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len) 897 { 898 if (*mpdu_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) 899 qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE); 900 else 901 qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN)); 902 903 *mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN); 904 } 905 906 /** 907 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across 908 * multiple nbufs. 909 * @nbuf: nbuf which can may be part of frag_list. 910 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 911 * @mpdu_len: mpdu length. 912 * @is_first_frag: is this the first nbuf in the fragmented MSDU. 913 * @frag_list_len: length of all the fragments combined. 914 * @head_frag_nbuf: parent nbuf 915 * @frag_list_head: pointer to the first nbuf in the frag_list. 916 * @frag_list_tail: pointer to the last nbuf in the frag_list. 917 * 918 * This function implements the creation of RX frag_list for cases 919 * where an MSDU is spread across multiple nbufs. 920 * 921 */ 922 void dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 923 uint16_t *mpdu_len, bool *is_first_frag, 924 uint16_t *frag_list_len, qdf_nbuf_t *head_frag_nbuf, 925 qdf_nbuf_t *frag_list_head, qdf_nbuf_t *frag_list_tail) 926 { 927 if (qdf_unlikely(qdf_nbuf_is_rx_chfrag_cont(nbuf))) { 928 if (!(*is_first_frag)) { 929 *is_first_frag = 1; 930 qdf_nbuf_set_rx_chfrag_start(nbuf, 1); 931 *mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 932 933 dp_rx_adjust_nbuf_len(nbuf, mpdu_len); 934 *head_frag_nbuf = nbuf; 935 } else { 936 dp_rx_adjust_nbuf_len(nbuf, mpdu_len); 937 qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); 938 *frag_list_len += qdf_nbuf_len(nbuf); 939 940 DP_RX_LIST_APPEND(*frag_list_head, 941 *frag_list_tail, 942 nbuf); 943 } 944 } else { 945 if (qdf_unlikely(*is_first_frag)) { 946 qdf_nbuf_set_rx_chfrag_start(nbuf, 0); 947 dp_rx_adjust_nbuf_len(nbuf, mpdu_len); 948 qdf_nbuf_pull_head(nbuf, 949 RX_PKT_TLVS_LEN); 950 *frag_list_len += qdf_nbuf_len(nbuf); 951 952 DP_RX_LIST_APPEND(*frag_list_head, 953 *frag_list_tail, 954 nbuf); 955 956 qdf_nbuf_append_ext_list(*head_frag_nbuf, 957 *frag_list_head, 958 *frag_list_len); 959 960 *is_first_frag = 0; 961 return; 962 } 963 *head_frag_nbuf = nbuf; 964 } 965 } 966 967 static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev, 968 struct dp_peer *peer, 969 qdf_nbuf_t nbuf_list) 970 { 971 /* 972 * highly unlikely to have a vdev without a registerd rx 973 * callback function. if so let us free the nbuf_list. 974 */ 975 if (qdf_unlikely(!vdev->osif_rx)) { 976 qdf_nbuf_t nbuf; 977 do { 978 nbuf = nbuf_list; 979 nbuf_list = nbuf_list->next; 980 qdf_nbuf_free(nbuf); 981 } while (nbuf_list); 982 983 return; 984 } 985 986 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || 987 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) 988 dp_rx_deliver_raw(vdev, nbuf_list, peer); 989 else 990 vdev->osif_rx(vdev->osif_vdev, nbuf_list); 991 992 } 993 994 /** 995 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware. 996 * @nbuf: pointer to the first msdu of an amsdu. 997 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 998 * 999 * The ipsumed field of the skb is set based on whether HW validated the 1000 * IP/TCP/UDP checksum. 1001 * 1002 * Return: void 1003 */ 1004 static inline void dp_rx_cksum_offload(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr) 1005 { 1006 qdf_nbuf_rx_cksum_t cksum = {0}; 1007 1008 if (qdf_likely(!hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr) && 1009 !hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr))) { 1010 cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY; 1011 1012 qdf_nbuf_set_rx_cksum(nbuf, &cksum); 1013 } 1014 } 1015 1016 /** 1017 * dp_rx_msdu_stats_update() - update per msdu stats. 1018 * @soc: core txrx main context 1019 * @nbuf: pointer to the first msdu of an amsdu. 1020 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 1021 * @peer: pointer to the peer object. 1022 * @ring_id: reo dest ring number on which pkt is reaped. 1023 * 1024 * update all the per msdu stats for that nbuf. 1025 * Return: void 1026 */ 1027 static void dp_rx_msdu_stats_update(struct dp_soc *soc, 1028 qdf_nbuf_t nbuf, 1029 uint8_t *rx_tlv_hdr, 1030 struct dp_peer *peer, 1031 uint8_t ring_id) 1032 { 1033 bool is_ampdu, is_not_amsdu; 1034 uint16_t peer_id; 1035 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; 1036 struct dp_vdev *vdev = peer->vdev; 1037 struct ether_header *eh; 1038 uint16_t msdu_len = qdf_nbuf_len(nbuf); 1039 1040 peer_id = DP_PEER_METADATA_PEER_ID_GET( 1041 hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr)); 1042 1043 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & 1044 qdf_nbuf_is_rx_chfrag_end(nbuf); 1045 1046 DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len); 1047 DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu); 1048 DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu); 1049 1050 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) && 1051 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { 1052 eh = (struct ether_header *)qdf_nbuf_data(nbuf); 1053 if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) { 1054 DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len); 1055 } else { 1056 DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len); 1057 } 1058 } 1059 1060 /* 1061 * currently we can return from here as we have similar stats 1062 * updated at per ppdu level instead of msdu level 1063 */ 1064 if (!soc->process_rx_status) 1065 return; 1066 1067 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr); 1068 DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu); 1069 DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); 1070 1071 sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr); 1072 mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr); 1073 tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr); 1074 bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr); 1075 reception_type = hal_rx_msdu_start_reception_type_get(rx_tlv_hdr); 1076 nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr); 1077 pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr); 1078 1079 DP_STATS_INC(vdev->pdev, rx.bw[bw], 1); 1080 DP_STATS_INC(vdev->pdev, rx.reception_type[reception_type], 1); 1081 DP_STATS_INCC(vdev->pdev, rx.nss[nss], 1, 1082 ((reception_type == REPT_MU_MIMO) || 1083 (reception_type == REPT_MU_OFDMA_MIMO))); 1084 DP_STATS_INC(peer, rx.sgi_count[sgi], 1); 1085 DP_STATS_INCC(peer, rx.err.mic_err, 1, 1086 hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr)); 1087 DP_STATS_INCC(peer, rx.err.decrypt_err, 1, 1088 hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr)); 1089 1090 DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1); 1091 DP_STATS_INC(peer, rx.bw[bw], 1); 1092 DP_STATS_INC(peer, rx.reception_type[reception_type], 1); 1093 1094 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1, 1095 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); 1096 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, 1097 ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A))); 1098 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1, 1099 ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B))); 1100 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, 1101 ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B))); 1102 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1, 1103 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N))); 1104 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, 1105 ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N))); 1106 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1, 1107 ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); 1108 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, 1109 ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); 1110 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1, 1111 ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX))); 1112 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, 1113 ((mcs <= MAX_MCS) && (pkt_type == DOT11_AX))); 1114 1115 if ((soc->process_rx_status) && 1116 hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) { 1117 if (soc->cdp_soc.ol_ops->update_dp_stats) { 1118 soc->cdp_soc.ol_ops->update_dp_stats( 1119 vdev->pdev->osif_pdev, 1120 &peer->stats, 1121 peer_id, 1122 UPDATE_PEER_STATS); 1123 } 1124 } 1125 } 1126 1127 #ifdef WDS_VENDOR_EXTENSION 1128 int dp_wds_rx_policy_check( 1129 uint8_t *rx_tlv_hdr, 1130 struct dp_vdev *vdev, 1131 struct dp_peer *peer, 1132 int rx_mcast 1133 ) 1134 { 1135 struct dp_peer *bss_peer; 1136 int fr_ds, to_ds, rx_3addr, rx_4addr; 1137 int rx_policy_ucast, rx_policy_mcast; 1138 1139 if (vdev->opmode == wlan_op_mode_ap) { 1140 TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) { 1141 if (bss_peer->bss_peer) { 1142 /* if wds policy check is not enabled on this vdev, accept all frames */ 1143 if (!bss_peer->wds_ecm.wds_rx_filter) { 1144 return 1; 1145 } 1146 break; 1147 } 1148 } 1149 rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr; 1150 rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr; 1151 } else { /* sta mode */ 1152 if (!peer->wds_ecm.wds_rx_filter) { 1153 return 1; 1154 } 1155 rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr; 1156 rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr; 1157 } 1158 1159 /* ------------------------------------------------ 1160 * self 1161 * peer- rx rx- 1162 * wds ucast mcast dir policy accept note 1163 * ------------------------------------------------ 1164 * 1 1 0 11 x1 1 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept 1165 * 1 1 0 01 x1 0 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop 1166 * 1 1 0 10 x1 0 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop 1167 * 1 1 0 00 x1 0 bad frame, won't see it 1168 * 1 0 1 11 1x 1 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept 1169 * 1 0 1 01 1x 0 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop 1170 * 1 0 1 10 1x 0 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop 1171 * 1 0 1 00 1x 0 bad frame, won't see it 1172 * 1 1 0 11 x0 0 AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop 1173 * 1 1 0 01 x0 0 AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop 1174 * 1 1 0 10 x0 1 AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept 1175 * 1 1 0 00 x0 0 bad frame, won't see it 1176 * 1 0 1 11 0x 0 AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop 1177 * 1 0 1 01 0x 0 AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop 1178 * 1 0 1 10 0x 1 AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept 1179 * 1 0 1 00 0x 0 bad frame, won't see it 1180 * 1181 * 0 x x 11 xx 0 we only accept td-ds Rx frames from non-wds peers in mode. 1182 * 0 x x 01 xx 1 1183 * 0 x x 10 xx 0 1184 * 0 x x 00 xx 0 bad frame, won't see it 1185 * ------------------------------------------------ 1186 */ 1187 1188 fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr); 1189 to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr); 1190 rx_3addr = fr_ds ^ to_ds; 1191 rx_4addr = fr_ds & to_ds; 1192 1193 if (vdev->opmode == wlan_op_mode_ap) { 1194 if ((!peer->wds_enabled && rx_3addr && to_ds) || 1195 (peer->wds_enabled && !rx_mcast && (rx_4addr == rx_policy_ucast)) || 1196 (peer->wds_enabled && rx_mcast && (rx_4addr == rx_policy_mcast))) { 1197 return 1; 1198 } 1199 } else { /* sta mode */ 1200 if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) || 1201 (rx_mcast && (rx_4addr == rx_policy_mcast))) { 1202 return 1; 1203 } 1204 } 1205 return 0; 1206 } 1207 #else 1208 int dp_wds_rx_policy_check( 1209 uint8_t *rx_tlv_hdr, 1210 struct dp_vdev *vdev, 1211 struct dp_peer *peer, 1212 int rx_mcast 1213 ) 1214 { 1215 return 1; 1216 } 1217 #endif 1218 1219 /** 1220 * dp_rx_process() - Brain of the Rx processing functionality 1221 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ) 1222 * @soc: core txrx main context 1223 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced 1224 * @quota: No. of units (packets) that can be serviced in one shot. 1225 * 1226 * This function implements the core of Rx functionality. This is 1227 * expected to handle only non-error frames. 1228 * 1229 * Return: uint32_t: No. of elements processed 1230 */ 1231 uint32_t 1232 dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota) 1233 { 1234 void *hal_soc; 1235 void *ring_desc; 1236 struct dp_rx_desc *rx_desc = NULL; 1237 qdf_nbuf_t nbuf, next; 1238 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; 1239 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; 1240 uint32_t rx_bufs_used = 0, rx_buf_cookie, l2_hdr_offset; 1241 uint16_t msdu_len; 1242 uint16_t peer_id; 1243 struct dp_peer *peer = NULL; 1244 struct dp_vdev *vdev = NULL; 1245 uint32_t pkt_len; 1246 struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 }; 1247 struct hal_rx_msdu_desc_info msdu_desc_info = { 0 }; 1248 enum hal_reo_error_status error; 1249 uint32_t peer_mdata; 1250 uint8_t *rx_tlv_hdr; 1251 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; 1252 uint8_t mac_id = 0; 1253 struct dp_pdev *pdev; 1254 struct dp_srng *dp_rxdma_srng; 1255 struct rx_desc_pool *rx_desc_pool; 1256 struct dp_soc *soc = int_ctx->soc; 1257 uint8_t ring_id = 0; 1258 uint8_t core_id = 0; 1259 bool is_first_frag = 0; 1260 uint16_t mpdu_len = 0; 1261 qdf_nbuf_t head_frag_nbuf = NULL; 1262 qdf_nbuf_t frag_list_head = NULL; 1263 qdf_nbuf_t frag_list_tail = NULL; 1264 uint16_t frag_list_len = 0; 1265 qdf_nbuf_t nbuf_head = NULL; 1266 qdf_nbuf_t nbuf_tail = NULL; 1267 qdf_nbuf_t deliver_list_head = NULL; 1268 qdf_nbuf_t deliver_list_tail = NULL; 1269 1270 DP_HIST_INIT(); 1271 /* Debug -- Remove later */ 1272 qdf_assert(soc && hal_ring); 1273 1274 hal_soc = soc->hal_soc; 1275 1276 /* Debug -- Remove later */ 1277 qdf_assert(hal_soc); 1278 1279 hif_pm_runtime_mark_last_busy(soc->osdev->dev); 1280 1281 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { 1282 1283 /* 1284 * Need API to convert from hal_ring pointer to 1285 * Ring Type / Ring Id combo 1286 */ 1287 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); 1288 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1289 FL("HAL RING Access Failed -- %pK"), hal_ring); 1290 hal_srng_access_end(hal_soc, hal_ring); 1291 goto done; 1292 } 1293 1294 /* 1295 * start reaping the buffers from reo ring and queue 1296 * them in per vdev queue. 1297 * Process the received pkts in a different per vdev loop. 1298 */ 1299 while (qdf_likely(quota && (ring_desc = 1300 hal_srng_dst_get_next(hal_soc, hal_ring)))) { 1301 1302 error = HAL_RX_ERROR_STATUS_GET(ring_desc); 1303 ring_id = hal_srng_ring_id_get(hal_ring); 1304 1305 if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) { 1306 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1307 FL("HAL RING 0x%pK:error %d"), hal_ring, error); 1308 DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1); 1309 /* Don't know how to deal with this -- assert */ 1310 qdf_assert(0); 1311 } 1312 1313 rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); 1314 1315 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); 1316 1317 1318 qdf_assert(rx_desc); 1319 rx_bufs_reaped[rx_desc->pool_id]++; 1320 1321 /* TODO */ 1322 /* 1323 * Need a separate API for unmapping based on 1324 * phyiscal address 1325 */ 1326 qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf, 1327 QDF_DMA_BIDIRECTIONAL); 1328 1329 core_id = smp_processor_id(); 1330 DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1); 1331 1332 /* Get MPDU DESC info */ 1333 hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); 1334 1335 hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf), 1336 mpdu_desc_info.peer_meta_data); 1337 1338 /* Get MSDU DESC info */ 1339 hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info); 1340 1341 /* 1342 * save msdu flags first, last and continuation msdu in 1343 * nbuf->cb 1344 */ 1345 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU) 1346 qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1); 1347 1348 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) 1349 qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1); 1350 1351 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) 1352 qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1); 1353 1354 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf); 1355 1356 /* 1357 * if continuation bit is set then we have MSDU spread 1358 * across multiple buffers, let us not decrement quota 1359 * till we reap all buffers of that MSDU. 1360 */ 1361 if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) 1362 quota -= 1; 1363 1364 1365 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], 1366 &tail[rx_desc->pool_id], 1367 rx_desc); 1368 } 1369 done: 1370 hal_srng_access_end(hal_soc, hal_ring); 1371 1372 /* Update histogram statistics by looping through pdev's */ 1373 DP_RX_HIST_STATS_PER_PDEV(); 1374 1375 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { 1376 /* 1377 * continue with next mac_id if no pkts were reaped 1378 * from that pool 1379 */ 1380 if (!rx_bufs_reaped[mac_id]) 1381 continue; 1382 1383 pdev = soc->pdev_list[mac_id]; 1384 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 1385 rx_desc_pool = &soc->rx_desc_buf[mac_id]; 1386 1387 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, 1388 rx_desc_pool, rx_bufs_reaped[mac_id], 1389 &head[mac_id], &tail[mac_id]); 1390 } 1391 1392 /* Peer can be NULL is case of LFR */ 1393 if (qdf_likely(peer != NULL)) 1394 vdev = NULL; 1395 1396 /* 1397 * BIG loop where each nbuf is dequeued from global queue, 1398 * processed and queued back on a per vdev basis. These nbufs 1399 * are sent to stack as and when we run out of nbufs 1400 * or a new nbuf dequeued from global queue has a different 1401 * vdev when compared to previous nbuf. 1402 */ 1403 nbuf = nbuf_head; 1404 while (nbuf) { 1405 next = nbuf->next; 1406 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1407 1408 /* 1409 * Check if DMA completed -- msdu_done is the last bit 1410 * to be written 1411 */ 1412 if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) { 1413 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1414 FL("MSDU DONE failure")); 1415 hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO); 1416 qdf_assert(0); 1417 } 1418 1419 peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr); 1420 peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata); 1421 peer = dp_peer_find_by_id(soc, peer_id); 1422 1423 rx_bufs_used++; 1424 1425 if (deliver_list_head && peer && (vdev != peer->vdev)) { 1426 dp_rx_deliver_to_stack(vdev, peer, deliver_list_head); 1427 deliver_list_head = NULL; 1428 deliver_list_tail = NULL; 1429 } 1430 1431 if (qdf_likely(peer != NULL)) { 1432 vdev = peer->vdev; 1433 } else { 1434 qdf_nbuf_free(nbuf); 1435 nbuf = next; 1436 continue; 1437 } 1438 1439 if (qdf_unlikely(vdev == NULL)) { 1440 qdf_nbuf_free(nbuf); 1441 nbuf = next; 1442 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 1443 continue; 1444 } 1445 1446 DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id); 1447 /* 1448 * The below condition happens when an MSDU is spread 1449 * across multiple buffers. This can happen in two cases 1450 * 1. The nbuf size is smaller then the received msdu. 1451 * ex: we have set the nbuf size to 2048 during 1452 * nbuf_alloc. but we received an msdu which is 1453 * 2304 bytes in size then this msdu is spread 1454 * across 2 nbufs. 1455 * 1456 * 2. AMSDUs when RAW mode is enabled. 1457 * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread 1458 * across 1st nbuf and 2nd nbuf and last MSDU is 1459 * spread across 2nd nbuf and 3rd nbuf. 1460 * 1461 * for these scenarios let us create a skb frag_list and 1462 * append these buffers till the last MSDU of the AMSDU 1463 */ 1464 if (qdf_unlikely(vdev->rx_decap_type == 1465 htt_cmn_pkt_type_raw)) { 1466 1467 dp_rx_sg_create(nbuf, rx_tlv_hdr, &mpdu_len, 1468 &is_first_frag, &frag_list_len, 1469 &head_frag_nbuf, 1470 &frag_list_head, 1471 &frag_list_tail); 1472 1473 if (is_first_frag) { 1474 nbuf = next; 1475 continue; 1476 } else { 1477 frag_list_head = NULL; 1478 frag_list_tail = NULL; 1479 nbuf = head_frag_nbuf; 1480 rx_tlv_hdr = qdf_nbuf_data(nbuf); 1481 } 1482 } 1483 1484 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer, 1485 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { 1486 QDF_TRACE(QDF_MODULE_ID_DP, 1487 QDF_TRACE_LEVEL_ERROR, 1488 FL("Policy Check Drop pkt")); 1489 /* Drop & free packet */ 1490 qdf_nbuf_free(nbuf); 1491 /* Statistics */ 1492 nbuf = next; 1493 continue; 1494 } 1495 1496 if (qdf_unlikely(peer && peer->bss_peer)) { 1497 QDF_TRACE(QDF_MODULE_ID_DP, 1498 QDF_TRACE_LEVEL_ERROR, 1499 FL("received pkt with same src MAC")); 1500 DP_STATS_INC(vdev->pdev, dropped.mec, 1); 1501 1502 /* Drop & free packet */ 1503 qdf_nbuf_free(nbuf); 1504 /* Statistics */ 1505 nbuf = next; 1506 continue; 1507 } 1508 1509 if (qdf_unlikely(peer && (peer->nawds_enabled == true) && 1510 (hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) && 1511 (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) { 1512 DP_STATS_INC_PKT(peer, rx.nawds_mcast_drop, 1, 1513 qdf_nbuf_len(nbuf)); 1514 qdf_nbuf_free(nbuf); 1515 nbuf = next; 1516 continue; 1517 } 1518 1519 dp_rx_cksum_offload(nbuf, rx_tlv_hdr); 1520 1521 /* 1522 * HW structures call this L3 header padding -- 1523 * even though this is actually the offset from 1524 * the buffer beginning where the L2 header 1525 * begins. 1526 */ 1527 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 1528 FL("rxhash: flow id toeplitz: 0x%x\n"), 1529 hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr)); 1530 1531 l2_hdr_offset = 1532 hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); 1533 1534 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); 1535 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; 1536 1537 if (unlikely(qdf_nbuf_get_ext_list(nbuf))) 1538 qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); 1539 else { 1540 qdf_nbuf_set_pktlen(nbuf, pkt_len); 1541 qdf_nbuf_pull_head(nbuf, 1542 RX_PKT_TLVS_LEN + 1543 l2_hdr_offset); 1544 } 1545 1546 dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id); 1547 1548 if (qdf_unlikely(vdev->mesh_vdev)) { 1549 if (dp_rx_filter_mesh_packets(vdev, nbuf, 1550 rx_tlv_hdr) 1551 == QDF_STATUS_SUCCESS) { 1552 QDF_TRACE(QDF_MODULE_ID_DP, 1553 QDF_TRACE_LEVEL_INFO_MED, 1554 FL("mesh pkt filtered")); 1555 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1556 1); 1557 1558 qdf_nbuf_free(nbuf); 1559 nbuf = next; 1560 continue; 1561 } 1562 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); 1563 } 1564 1565 #ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */ 1566 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1567 "p_id %d msdu_len %d hdr_off %d", 1568 peer_id, msdu_len, l2_hdr_offset); 1569 1570 print_hex_dump(KERN_ERR, 1571 "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4, 1572 qdf_nbuf_data(nbuf), 128, false); 1573 #endif /* NAPIER_EMULATION */ 1574 1575 if (qdf_likely(vdev->rx_decap_type == 1576 htt_cmn_pkt_type_ethernet) && 1577 (qdf_likely(!vdev->mesh_vdev))) { 1578 /* WDS Source Port Learning */ 1579 dp_rx_wds_srcport_learn(soc, 1580 rx_tlv_hdr, 1581 peer, 1582 nbuf); 1583 1584 /* Intrabss-fwd */ 1585 if (dp_rx_check_ap_bridge(vdev)) 1586 if (dp_rx_intrabss_fwd(soc, 1587 peer, 1588 rx_tlv_hdr, 1589 nbuf)) { 1590 nbuf = next; 1591 continue; /* Get next desc */ 1592 } 1593 } 1594 1595 dp_rx_lro(rx_tlv_hdr, peer, nbuf, int_ctx->lro_ctx); 1596 1597 DP_RX_LIST_APPEND(deliver_list_head, 1598 deliver_list_tail, 1599 nbuf); 1600 1601 DP_STATS_INC_PKT(peer, rx.to_stack, 1, 1602 qdf_nbuf_len(nbuf)); 1603 1604 nbuf = next; 1605 } 1606 1607 if (deliver_list_head) 1608 dp_rx_deliver_to_stack(vdev, peer, deliver_list_head); 1609 1610 return rx_bufs_used; /* Assume no scale factor for now */ 1611 } 1612 1613 /** 1614 * dp_rx_detach() - detach dp rx 1615 * @pdev: core txrx pdev context 1616 * 1617 * This function will detach DP RX into main device context 1618 * will free DP Rx resources. 1619 * 1620 * Return: void 1621 */ 1622 void 1623 dp_rx_pdev_detach(struct dp_pdev *pdev) 1624 { 1625 uint8_t pdev_id = pdev->pdev_id; 1626 struct dp_soc *soc = pdev->soc; 1627 struct rx_desc_pool *rx_desc_pool; 1628 1629 rx_desc_pool = &soc->rx_desc_buf[pdev_id]; 1630 1631 if (rx_desc_pool->pool_size != 0) { 1632 dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool); 1633 } 1634 1635 return; 1636 } 1637 1638 /** 1639 * dp_rx_attach() - attach DP RX 1640 * @pdev: core txrx pdev context 1641 * 1642 * This function will attach a DP RX instance into the main 1643 * device (SOC) context. Will allocate dp rx resource and 1644 * initialize resources. 1645 * 1646 * Return: QDF_STATUS_SUCCESS: success 1647 * QDF_STATUS_E_RESOURCES: Error return 1648 */ 1649 QDF_STATUS 1650 dp_rx_pdev_attach(struct dp_pdev *pdev) 1651 { 1652 uint8_t pdev_id = pdev->pdev_id; 1653 struct dp_soc *soc = pdev->soc; 1654 struct dp_srng rxdma_srng; 1655 uint32_t rxdma_entries; 1656 union dp_rx_desc_list_elem_t *desc_list = NULL; 1657 union dp_rx_desc_list_elem_t *tail = NULL; 1658 struct dp_srng *dp_rxdma_srng; 1659 struct rx_desc_pool *rx_desc_pool; 1660 1661 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { 1662 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1663 "nss-wifi<4> skip Rx refil %d", pdev_id); 1664 return QDF_STATUS_SUCCESS; 1665 } 1666 1667 pdev = soc->pdev_list[pdev_id]; 1668 rxdma_srng = pdev->rx_refill_buf_ring; 1669 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; 1670 rxdma_entries = rxdma_srng.alloc_size/hal_srng_get_entrysize( 1671 soc->hal_soc, RXDMA_BUF); 1672 1673 rx_desc_pool = &soc->rx_desc_buf[pdev_id]; 1674 1675 dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries*3, rx_desc_pool); 1676 1677 rx_desc_pool->owner = DP_WBM2SW_RBM; 1678 /* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */ 1679 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 1680 dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool, 1681 0, &desc_list, &tail); 1682 1683 return QDF_STATUS_SUCCESS; 1684 } 1685 1686 /* 1687 * dp_rx_nbuf_prepare() - prepare RX nbuf 1688 * @soc: core txrx main context 1689 * @pdev: core txrx pdev context 1690 * 1691 * This function alloc & map nbuf for RX dma usage, retry it if failed 1692 * until retry times reaches max threshold or succeeded. 1693 * 1694 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed. 1695 */ 1696 qdf_nbuf_t 1697 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev) 1698 { 1699 uint8_t *buf; 1700 int32_t nbuf_retry_count; 1701 QDF_STATUS ret; 1702 qdf_nbuf_t nbuf = NULL; 1703 1704 for (nbuf_retry_count = 0; nbuf_retry_count < 1705 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD; 1706 nbuf_retry_count++) { 1707 /* Allocate a new skb */ 1708 nbuf = qdf_nbuf_alloc(soc->osdev, 1709 RX_BUFFER_SIZE, 1710 RX_BUFFER_RESERVATION, 1711 RX_BUFFER_ALIGNMENT, 1712 FALSE); 1713 1714 if (nbuf == NULL) { 1715 DP_STATS_INC(pdev, 1716 replenish.nbuf_alloc_fail, 1); 1717 continue; 1718 } 1719 1720 buf = qdf_nbuf_data(nbuf); 1721 1722 memset(buf, 0, RX_BUFFER_SIZE); 1723 1724 ret = qdf_nbuf_map_single(soc->osdev, nbuf, 1725 QDF_DMA_BIDIRECTIONAL); 1726 1727 /* nbuf map failed */ 1728 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { 1729 qdf_nbuf_free(nbuf); 1730 DP_STATS_INC(pdev, replenish.map_err, 1); 1731 continue; 1732 } 1733 /* qdf_nbuf alloc and map succeeded */ 1734 break; 1735 } 1736 1737 /* qdf_nbuf still alloc or map failed */ 1738 if (qdf_unlikely(nbuf_retry_count >= 1739 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD)) 1740 return NULL; 1741 1742 return nbuf; 1743 } 1744