/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"

#ifdef RX_DESC_DEBUG_CHECK
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
}
#else
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
}
#endif

#ifdef CONFIG_WIN
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
#else
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	if (vdev->opmode != wlan_op_mode_sta)
		return true;
	else
		return false;
}
#endif

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);

	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   RX_BUFFER_SIZE,
					   RX_BUFFER_RESERVATION,
					   RX_BUFFER_ALIGNMENT,
					   FALSE);

		if (rx_netbuf == NULL) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_BIDIRECTIONAL);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		/*
		 * check if the physical address of nbuf->data is
		 * less than 0x50000000; if so, free the nbuf and try
		 * allocating a new nbuf. We can try up to 100 times.
		 * This is a temporary WAR till we fix it properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
		(*desc_list)->rx_desc.in_use = 1;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
			  rx_netbuf, qdf_nbuf_data(rx_netbuf),
			  (unsigned long long)paddr,
			  (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "successfully replenished %d buffers", num_req_buffers);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%d rx desc added back to free list", num_desc_to_free);

	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
			 (RX_BUFFER_SIZE * num_req_buffers));

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
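
/*
 * Illustrative usage (not part of the driver flow itself): during pdev
 * attach, as done in dp_rx_pdev_attach() below, the caller passes a NULL
 * desc_list/tail pair and num_req_buffers of 0, so the descriptors are
 * pulled from the free list and the ring is topped up with as many buffers
 * as there are free entries:
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_buffers_replenish(soc, pdev_id, &pdev->rx_refill_buf_ring,
 *				&soc->rx_desc_buf[pdev_id], 0,
 *				&desc_list, &tail);
 */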

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkts.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, (struct cdp_peer *)peer);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before the peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("PeerID %d not found use vdevID %d"),
				  peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("Invalid PeerID %d"),
				  peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  FL("Peer not found for peerID %d"),
			  peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

/**
 * dp_rx_da_learn() - Add AST entry based on DA lookup
 *		      This is a WAR for HK 1.0 and will
 *		      be removed in HK 2.0
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_peer: transmitter (source) peer entry
 * @nbuf: nbuf to retrieve destination mac for which AST will be added
 *
 */
#ifdef FEATURE_WDS
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
	/* For HKv2, DA port learning is not needed */
	if (qdf_likely(soc->ast_override_support))
		return;

	if (ta_peer && (ta_peer->vdev->opmode != wlan_op_mode_ap))
		return;

	if (qdf_unlikely(!hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
			 !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		dp_peer_add_ast(soc,
				ta_peer,
				qdf_nbuf_data(nbuf),
				CDP_TXRX_AST_TYPE_DA,
				IEEE80211_NODE_F_WDS_HM);
	}
}
#else
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @sa_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
		   struct dp_peer *sa_peer,
		   uint8_t *rx_tlv_hdr,
		   qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */
	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
	     !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);

		ast_entry = soc->ast_table[da_idx];
		if (!ast_entry)
			return false;

		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
			ast_entry->is_active = TRUE;
			return false;
		}

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;

		if (da_peer->vdev == sa_peer->vdev && !da_peer->bss_peer) {
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
			len = qdf_nbuf_len(nbuf);

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(sa_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					return true;
				}
			}

			if (!dp_tx_send(sa_peer->vdev, nbuf)) {
				DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts,
						 1, len);
				return true;
			} else {
				DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1,
						 len);
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			       !sa_peer->bss_peer))) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
		len = qdf_nbuf_len(nbuf_copy);

		if (dp_tx_send(sa_peer->vdev, nbuf_copy)) {
			DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1, len);
			qdf_nbuf_free(nbuf_copy);
		} else
			DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts, 1, len);
	}
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}
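
/*
 * Note on the return value above: dp_rx_intrabss_fwd() reports true only
 * when the caller should stop processing the nbuf, i.e. when the unicast
 * frame was handed to dp_tx_send() or when qdf_nbuf_unshare() failed and
 * the buffer can no longer be delivered. For the mcast/bcast case only a
 * copy is forwarded, so false is returned and the original frame still
 * goes up the stack.
 */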

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible for freeing this memory */

	if (rx_info == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					   &rx_info->rs_decryptkey[0],
					   &peer->mac_addr.raw[0],
					   rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
			       (bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix);
}
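
/*
 * Note (derived from the shifts above): rs_ratephy1 packs rate_mcs in
 * bits [7:0], nss in bits [15:8], pkt_type in bits [23:16] and bw in
 * bits [31:24].
 */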

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any of the filter-out
 * categories and drops the packet if it matches.
 *
 * Return: status (0 indicates drop, 1 indicates not to drop)
 */
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr) &&
			    !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef CONFIG_WIN
/**
 * dp_rx_nac_filter() - Function to perform filtering of non-associated
 *			clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * Return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_invalid_peer() - Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * Return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handled */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						  pdev->invalid_peer_head_msdu,
						  pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
							   &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper() - Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * Return: void
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;

		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				qdf_spin_unlock_bh(&pdev->vdev_list_lock);
				goto out;
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	if (NULL == vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(curr_nbuf));
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif

#if defined(FEATURE_LRO)
static void dp_rx_print_lro_info(uint8_t *rx_tlv)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("----------------------RX DESC LRO----------------------"));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("lro_eligible 0x%x"), HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("pure_ack 0x%x"), HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("chksum 0x%x"), HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("TCP seq num 0x%x"), HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("TCP ack num 0x%x"), HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("TCP window 0x%x"), HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("TCP protocol 0x%x"), HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("TCP offset 0x%x"), HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("toeplitz 0x%x"), HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("---------------------------------------------------------"));
}

/**
 * dp_rx_lro() - LRO related processing
 * @rx_tlv: TLV data extracted from the rx packet
 * @peer: destination peer of the msdu
 * @msdu: network buffer
 * @ctx: LRO context
 *
 * This function performs the LRO related processing of the msdu
 *
 * Return: void
 */
static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
		      qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
{
	if (!peer || !peer->vdev || !peer->vdev->lro_enable) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("no peer, no vdev or LRO disabled"));
		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0;
		return;
	}
	qdf_assert(rx_tlv);
	dp_rx_print_lro_info(rx_tlv);

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);

	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);

	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
	QDF_NBUF_CB_RX_LRO_CTX(msdu) = (unsigned char *)ctx;
}
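
/*
 * The assignments above simply mirror the TCP/flow fields from the RX TLVs
 * into the nbuf control block, so that upper layers can consume them
 * without re-parsing the TLVs.
 */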
#else
static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
		      qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
{
}
#endif

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}
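
/*
 * Worked example (using the 2048-byte nbuf size mentioned in the
 * dp_rx_process() comment below): for an mpdu_len of 2304 bytes the first
 * call consumes (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN) bytes of it, sets the
 * first nbuf length to the full RX_BUFFER_SIZE and returns false; the
 * second call sets the remaining bytes (plus the TLV header) on the next
 * nbuf and returns true, marking it as the last nbuf of that MSDU.
 */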

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}

static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head,
					  qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		qdf_nbuf_t nbuf;

		do {
			nbuf = nbuf_head;
			nbuf_head = nbuf_head->next;
			qdf_nbuf_free(nbuf);
		} while (nbuf_head);

		return;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail, (struct cdp_peer *)peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: DP pdev handle
 * @nbuf: pointer to the msdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ipsumed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
				       qdf_nbuf_t nbuf,
				       uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}

/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @peer: pointer to the peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 *
 * update all the per msdu stats for that nbuf.
 *
 * Return: void
 */
static void dp_rx_msdu_stats_update(struct dp_soc *soc,
				    qdf_nbuf_t nbuf,
				    uint8_t *rx_tlv_hdr,
				    struct dp_peer *peer,
				    uint8_t ring_id)
{
	bool is_ampdu, is_not_amsdu;
	uint16_t peer_id;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	struct dp_vdev *vdev = peer->vdev;
	struct ether_header *eh;
	uint16_t msdu_len = qdf_nbuf_len(nbuf);

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
			hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr));

	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
			qdf_nbuf_is_rx_chfrag_end(nbuf);

	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
		if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
		}
	}

	/*
	 * currently we can return from here as we have similar stats
	 * updated at per ppdu level instead of msdu level
	 */
	if (!soc->process_rx_status)
		return;

	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);

	/* Save tid to skb->priority */
	DP_RX_TID_SAVE(nbuf, tid);

	DP_STATS_INC(peer, rx.bw[bw], 1);
	DP_STATS_INC(peer, rx.nss[nss], 1);
	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
	DP_STATS_INCC(peer, rx.err.mic_err, 1,
		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));

	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);

	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS) && (pkt_type == DOT11_AX)));

	if ((soc->process_rx_status) &&
	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
		if (soc->cdp_soc.ol_ops->update_dp_stats) {
			soc->cdp_soc.ol_ops->update_dp_stats(
					vdev->pdev->ctrl_pdev,
					&peer->stats,
					peer_id,
					UPDATE_PEER_STATS);
		}
	}
}

#ifdef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;

	if (vdev->opmode == wlan_op_mode_ap) {
		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
			if (bss_peer->bss_peer) {
				/* if wds policy check is not enabled on this
				 * vdev, accept all frames
				 */
				if (!bss_peer->wds_ecm.wds_rx_filter) {
					return 1;
				}
				break;
			}
		}
		rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
	} else {		/* sta mode */
		if (!peer->wds_ecm.wds_rx_filter) {
			return 1;
		}
		rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *                        self
	 * peer-              rx  rx-
	 * wds  ucast mcast  dir policy accept note
	 * ------------------------------------------------
	 * 1     1     0     11  x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     01  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     00  x1     0      bad frame, won't see it
	 * 1     0     1     11  1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     01  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     00  1x     0      bad frame, won't see it
	 * 1     1     0     11  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     01  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10  x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     00  x0     0      bad frame, won't see it
	 * 1     0     1     11  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     01  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10  0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     00  0x     0      bad frame, won't see it
	 *
	 * 0     x     x     11  xx     0      we only accept to-ds Rx frames from non-wds peers in this mode
	 * 0     x     x     01  xx     1
	 * 0     x     x     10  xx     0
	 * 0     x     x     00  xx     0      bad frame, won't see it
	 * ------------------------------------------------
	 */

	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!peer->wds_enabled && rx_3addr && to_ds) ||
		    (peer->wds_enabled && !rx_mcast &&
		     (rx_4addr == rx_policy_ucast)) ||
		    (peer->wds_enabled && rx_mcast &&
		     (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {		/* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
		    (rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
#else
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	return 1;
}
#endif

/**
 * dp_rx_process() - Brain of the Rx processing functionality
 *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process(struct dp_intr *int_ctx, void *hal_ring,
		       uint8_t reo_ring_num, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	struct dp_peer *peer = NULL;
	struct dp_vdev *vdev = NULL;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
	struct hal_rx_msdu_desc_info msdu_desc_info = { 0 };
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t ring_id = 0;
	uint8_t core_id = 0;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;

	DP_HIST_INIT();
	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	hif_pm_runtime_mark_last_busy(soc->osdev->dev);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		hal_srng_access_end(hal_soc, hal_ring);
		goto done;
	}

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);

		/*
		 * in case HW has updated hp after we cached the hp,
		 * ring_desc can be NULL even though there are entries
		 * available in the ring. Update the cached_hp
		 * and reap the buffers available to read a complete
		 * mpdu in one reap.
		 *
		 * This is needed for RAW mode, where we have to read all
		 * msdus corresponding to an amsdu in one reap to create
		 * the SG list properly, but due to mismatch in cached_hp
		 * and actual hp sometimes we are unable to read the
		 * complete mpdu in one reap.
		 */
		if (qdf_unlikely(!ring_desc)) {
			hal_srng_access_start_unlocked(hal_soc, hal_ring);
			ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);
			if (!ring_desc)
				break;
			DP_STATS_INC(soc, rx.hp_oos, 1);
		}

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		ring_id = hal_srng_ring_id_get(hal_ring);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("HAL RING 0x%pK:error %d"),
				  hal_ring, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		qdf_assert(rx_desc);
		rx_bufs_reaped[rx_desc->pool_id]++;

		/* TODO */
		/*
		 * Need a separate API for unmapping based on
		 * physical address
		 */
		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
				      QDF_DMA_BIDIRECTIONAL);

		core_id = smp_processor_id();
		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf),
					       mpdu_desc_info.peer_meta_data);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			quota -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (nbuf_tail)
		QDF_NBUF_CB_RX_FLUSH_IND(nbuf_tail) = 1;

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		pdev = soc->pdev_list[mac_id];
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
	}

	/* Peer can be NULL in case of LFR */
	if (qdf_likely(peer != NULL))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("MSDU DONE failure"));
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			qdf_assert(0);
		}

		peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
		peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
		peer = dp_peer_find_by_id(soc, peer_id);

		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		if (deliver_list_head && peer && (vdev != peer->vdev)) {
			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		if (qdf_likely(peer != NULL)) {
			vdev = peer->vdev;
		} else {
			DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
					 qdf_nbuf_len(nbuf));
			qdf_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(vdev == NULL)) {
			qdf_nbuf_free(nbuf);
			nbuf = next;
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 Fragmented pkts are reinjected to REO
		 * HW block as SG pkts and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *	  nbuf_alloc. but we received an msdu which is
		 *	  2304 bytes in size then this msdu is spread
		 *	  across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *	  across 1st nbuf and 2nd nbuf and last MSDU is
		 *	  spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf)))
			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		else if (qdf_unlikely(vdev->rx_decap_type ==
				      htt_cmn_pkt_type_raw)) {
			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
			nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);

			DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
			DP_STATS_INC_PKT(peer, rx.raw, 1,
					 msdu_len);

			next = nbuf->next;
		} else {
			l2_hdr_offset =
				hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);

			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
			pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			qdf_nbuf_pull_head(nbuf,
					   RX_PKT_TLVS_LEN +
					   l2_hdr_offset);
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("Policy Check Drop pkt"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		if (qdf_unlikely(peer && peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("received pkt with same src MAC"));
			DP_STATS_INC_PKT(peer, rx.mec_drop, 1, msdu_len);

			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
				 (hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) ==
				  false))) {
			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
			qdf_nbuf_free(nbuf);
			nbuf = next;
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		dp_set_rx_queue(nbuf, ring_id);

		/*
		 * HW structures call this L3 header padding --
		 * even though this is actually the offset from
		 * the buffer beginning where the L2 header
		 * begins.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("rxhash: flow id toeplitz: 0x%x"),
			  hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf,
						      rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_MED,
					  FL("mesh pkt filtered"));
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				qdf_nbuf_free(nbuf);
				nbuf = next;
				dp_peer_unref_del_find_by_id(peer);
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
		}

#ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "p_id %d msdu_len %d hdr_off %d",
			  peer_id, msdu_len, l2_hdr_offset);

		print_hex_dump(KERN_ERR,
			       "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
			       qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* WDS Source Port Learning */
			if (vdev->wds_enabled) {
				dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
				dp_rx_wds_srcport_learn(soc, rx_tlv_hdr,
							peer, nbuf);
			}

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd(soc,
						       peer,
						       rx_tlv_hdr,
						       nbuf)) {
					nbuf = next;
					dp_peer_unref_del_find_by_id(peer);
					continue; /* Get next desc */
				}
		}

		dp_rx_lro(rx_tlv_hdr, peer, nbuf, int_ctx->lro_ctx);
		qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);
		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 qdf_nbuf_len(nbuf));

		nbuf = next;
		dp_peer_unref_del_find_by_id(peer);
	}

	if (deliver_list_head)
		dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
				       deliver_list_tail);

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_pdev_detach() - detach dp rx
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX from the main device context
 * and free the DP Rx resources.
 *
 * Return: void
 */
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];

	if (rx_desc_pool->pool_size != 0) {
		dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
	}

	return;
}

/**
 * dp_rx_pdev_attach() - attach DP RX
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context. Will allocate dp rx resources and
 * initialize the resources.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng rxdma_srng;
	uint32_t rxdma_entries;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "nss-wifi<4> skip Rx refill %d", pdev_id);
		return QDF_STATUS_SUCCESS;
	}

	pdev = soc->pdev_list[pdev_id];
	rxdma_srng = pdev->rx_refill_buf_ring;
	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
	rxdma_entries = rxdma_srng.alloc_size / hal_srng_get_entrysize(
						soc->hal_soc, RXDMA_BUF);

	rx_desc_pool = &soc->rx_desc_buf[pdev_id];

	dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries * 3, rx_desc_pool);

	rx_desc_pool->owner = DP_WBM2SW_RBM;
	/* For Rx buffers, WBM release ring is SW RING 3, for all pdev's */
	dp_rxdma_srng = &pdev->rx_refill_buf_ring;
	dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool,
				0, &desc_list, &tail);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_nbuf_prepare() - prepare RX nbuf
 * @soc: core txrx main context
 * @pdev: core txrx pdev context
 *
 * This function allocates and maps an nbuf for RX dma usage, retrying on
 * failure until the retry count reaches the max threshold or it succeeds.
 *
 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
 */
qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint8_t *buf;
	int32_t nbuf_retry_count;
	QDF_STATUS ret;
	qdf_nbuf_t nbuf = NULL;

	for (nbuf_retry_count = 0; nbuf_retry_count <
		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
			nbuf_retry_count++) {
		/* Allocate a new skb */
		nbuf = qdf_nbuf_alloc(soc->osdev,
				      RX_BUFFER_SIZE,
				      RX_BUFFER_RESERVATION,
				      RX_BUFFER_ALIGNMENT,
				      FALSE);

		if (nbuf == NULL) {
			DP_STATS_INC(pdev,
				     replenish.nbuf_alloc_fail, 1);
			continue;
		}

		buf = qdf_nbuf_data(nbuf);

		memset(buf, 0, RX_BUFFER_SIZE);

		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
					  QDF_DMA_BIDIRECTIONAL);

		/* nbuf map failed */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(pdev, replenish.map_err, 1);
			continue;
		}
		/* qdf_nbuf alloc and map succeeded */
		break;
	}

	/* qdf_nbuf alloc or map still failed after all retries */
	if (qdf_unlikely(nbuf_retry_count >=
			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
		return NULL;

	return nbuf;
}