/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_rx_defrag.h"
#include "dp_rh_rx.h"
#include "dp_rh_htt.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_rh_rx.h"
#include "hal_api.h"
#include "hal_rh_api.h"
#include "qdf_nbuf.h"
#include "dp_internal.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#include "dp_rh.h"

static inline uint8_t dp_rx_get_ctx_id_frm_napiid(uint8_t napi_id)
{
	/*
	 * This is the NAPI to CE to RX context id mapping.
	 * Example: CE1 is assigned napi id 2 (ce_id + 1) and
	 * CE1 maps to RX context id 0, so napi id 2 maps to
	 * RX context id 0. This needs to be optimized further.
	 */
	switch (napi_id) {
	case 2:
		return 0;
	case 11:
		return 1;
	case 12:
		return 2;
	default:
		dp_err("Invalid napi id: %u, this should not happen", napi_id);
		qdf_assert_always(0);
		break;
	}
	return 0;
}

void
dp_rx_data_flush(void *data)
{
	struct qca_napi_info *napi_info = (struct qca_napi_info *)data;
	uint8_t rx_ctx_id = dp_rx_get_ctx_id_frm_napiid(napi_info->id);
	struct dp_soc *soc = cds_get_context(QDF_MODULE_ID_SOC);
	struct dp_vdev *vdev;
	int i;

	if (rx_ctx_id == 0 && soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	/* Get the first available vdev to flush all RX packets across the soc */
	for (i = 0; i < MAX_VDEV_CNT; i++) {
		vdev = dp_vdev_get_ref_by_id(soc, i, DP_MOD_ID_RX);
		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, rx_ctx_id);

		if (vdev && vdev->osif_gro_flush) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     rx_ctx_id);
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
			return;
		}

		if (vdev)
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
	}
}

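/**
 * is_sa_da_idx_valid() - check that the SA/DA AST indices are within bounds
 * @max_ast: maximum AST index configured for the soc
 * @nbuf: pkt buffer
 * @msdu_info: MSDU metadata parsed from the rx TLVs
 *
 * Return: true if the valid SA/DA indices are within the AST table range,
 *	   false otherwise
 */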
static inline
bool is_sa_da_idx_valid(uint32_t max_ast,
			qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info)
{
	if ((qdf_nbuf_is_sa_valid(nbuf) && (msdu_info.sa_idx > max_ast)) ||
	    (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) &&
	     (msdu_info.da_idx > max_ast)))
		return false;

	return true;
}

#if defined(FEATURE_MCL_REPEATER) && defined(FEATURE_MEC)
/**
 * dp_rx_mec_check_wrapper() - wrapper to dp_rx_mcast_echo_check
 * @soc: core DP main context
 * @txrx_peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf);
}
#else
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return false;
}
#endif

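/**
 * dp_rx_intrabss_ucast_check_rh() - check whether a unicast frame is
 *				     eligible for intra-BSS forwarding
 * @soc: core txrx main context
 * @nbuf: pkt buffer
 * @ta_txrx_peer: source (transmitter address) txrx peer entry
 * @msdu_metadata: MSDU metadata parsed from the rx TLVs
 * @p_tx_vdev_id: pointer filled with the vdev id to transmit on
 *
 * Return: true if the frame can be intra-BSS forwarded, false otherwise
 */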
static bool
dp_rx_intrabss_ucast_check_rh(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_txrx_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      uint8_t *p_tx_vdev_id)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	ast_entry = soc->ast_table[msdu_metadata->da_idx];
	if (!ast_entry)
		return false;

	if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
		ast_entry->is_active = TRUE;
		return false;
	}

	da_peer_id = ast_entry->peer_id;
	/* The TA peer cannot be the same as the peer (DA) on which the AST
	 * entry is present; this indicates a change in topology and that the
	 * AST entries are yet to be updated.
	 */
	if (da_peer_id == ta_txrx_peer->peer_id ||
	    da_peer_id == HTT_INVALID_PEER)
		return false;

	da_peer = dp_txrx_peer_get_ref_by_id(soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	*p_tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation list, do not
	 * forward; instead push the frame up to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_txrx_peer) ||
	    dp_get_peer_isolation(da_peer) ||
	    da_peer->vdev->vdev_id != ta_txrx_peer->vdev->vdev_id) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	if (da_peer->bss_peer) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return true;
}

/**
 * dp_rx_intrabss_fwd_rh() - Implements the Intra-BSS forwarding logic
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: MSDU metadata parsed from the rx TLVs
 * @tid_stats: per-TID rx stats
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd_rh(struct dp_soc *soc,
		      struct dp_txrx_peer *ta_txrx_peer,
		      uint8_t *rx_tlv_hdr,
		      qdf_nbuf_t nbuf,
		      struct hal_rx_msdu_metadata msdu_metadata,
		      struct cdp_tid_rx_stats *tid_stats)
{
	uint8_t tx_vdev_id;

	/* If it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and the original pkt up the network stack.
	 * Note: how do we handle multicast pkts? Do we forward
	 * all multicast pkts as is, or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_txrx_peer->bss_peer)
		return dp_rx_intrabss_mcbc_fwd(soc, ta_txrx_peer, rx_tlv_hdr,
					       nbuf, tid_stats, 0);

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_txrx_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (dp_rx_intrabss_ucast_check_rh(soc, nbuf, ta_txrx_peer,
					  &msdu_metadata, &tx_vdev_id))
		return dp_rx_intrabss_ucast_fwd(soc, ta_txrx_peer, tx_vdev_id,
						rx_tlv_hdr, nbuf, tid_stats,
						0);

	return false;
}

#ifdef RX_DESC_DEBUG_CHECK
static
QDF_STATUS dp_rx_desc_nbuf_sanity_check_rh(struct dp_soc *soc,
					   uint32_t *msg_word,
					   struct dp_rx_desc *rx_desc)
{
	uint64_t paddr;

	paddr = (HTT_RX_DATA_MSDU_INFO_BUFFER_ADDR_LOW_GET(*msg_word) |
		 ((uint64_t)(HTT_RX_DATA_MSDU_INFO_BUFFER_ADDR_HIGH_GET(*(msg_word + 1))) << 32));

	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, paddr))
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}
#else
static inline
QDF_STATUS dp_rx_desc_nbuf_sanity_check_rh(struct dp_soc *soc,
					   uint32_t *msg_word,
					   struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef DUP_RX_DESC_WAR
static
void dp_rx_dump_info_and_assert_rh(struct dp_soc *soc,
				   uint32_t *msg_word,
				   struct dp_rx_desc *rx_desc)
{
	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			   msg_word, HTT_RX_DATA_MSDU_INFO_SIZE);
	dp_rx_desc_dump(rx_desc);
}
#else
static
void dp_rx_dump_info_and_assert_rh(struct dp_soc *soc,
				   uint32_t *msg_word,
				   struct dp_rx_desc *rx_desc)
{
	dp_rx_desc_dump(rx_desc);
	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			   msg_word, HTT_RX_DATA_MSDU_INFO_SIZE);
	qdf_assert_always(0);
}
#endif

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
static void
dp_rx_ring_record_entry_rh(struct dp_soc *soc, uint8_t ring_num,
			   uint32_t *msg_word)
{
	struct dp_buf_info_record *record;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
		return;

	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
					DP_RX_HIST_MAX);

	/* No NULL check needed for record since its an array */
	record = &soc->rx_ring_history[ring_num]->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr =
		(HTT_RX_DATA_MSDU_INFO_BUFFER_ADDR_LOW_GET(*msg_word) |
		 ((uint64_t)(HTT_RX_DATA_MSDU_INFO_BUFFER_ADDR_HIGH_GET(*(msg_word + 1))) << 32));
	record->hbi.sw_cookie =
		HTT_RX_DATA_MSDU_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));
}
#else
static inline void
dp_rx_ring_record_entry_rh(struct dp_soc *soc, uint8_t rx_ring_num,
			   uint32_t *msg_word) {}
#endif

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
static inline void
dp_rx_mark_first_packet_after_wow_wakeup_rh(struct dp_soc *soc,
					    uint32_t *msg_word,
					    qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = soc->pdev_list[0];

	if (!pdev->is_first_wakeup_packet)
		return;

	if (HTT_RX_DATA_MSDU_INFO_IS_FIRST_PKT_AFTER_WKP_GET(*(msg_word + 2))) {
		qdf_nbuf_mark_wakeup_frame(nbuf);
		dp_info("First packet after WOW Wakeup rcvd");
	}
}
#else
static inline void
dp_rx_mark_first_packet_after_wow_wakeup_rh(struct dp_soc *soc,
					    uint32_t *msg_word,
					    qdf_nbuf_t nbuf) {}
#endif

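/**
 * dp_rx_deliver_to_osif_stack_rh() - deliver a frame to the OS interface
 * @soc: core txrx main context
 * @vdev: vdev on which the frame was received
 * @txrx_peer: txrx peer the frame belongs to
 * @nbuf: pkt buffer
 * @tail: tail of the nbuf list
 * @is_eapol: true if the frame is an EAPOL frame
 *
 * Deliver EAPOL frames over the control port when that path is enabled,
 * otherwise hand the frame to the regular stack delivery path.
 *
 * Return: None
 */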
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
static void
dp_rx_deliver_to_osif_stack_rh(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *txrx_peer,
			       qdf_nbuf_t nbuf,
			       qdf_nbuf_t tail,
			       bool is_eapol)
{
	if (is_eapol && soc->eapol_over_control_port)
		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
	else
		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#else
static void
dp_rx_deliver_to_osif_stack_rh(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *txrx_peer,
			       qdf_nbuf_t nbuf,
			       qdf_nbuf_t tail,
			       bool is_eapol)
{
	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#endif

static void
dp_rx_decrypt_unecrypt_err_handler_rh(struct dp_soc *soc, qdf_nbuf_t nbuf,
				      uint8_t error_code, uint8_t mac_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	qdf_ether_header_t *eh;
	bool is_broadcast;
	uint8_t *rx_tlv_hdr;
	uint16_t peer_id;
	uint16_t buf_size;

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	rx_tlv_hdr = qdf_nbuf_data(nbuf);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (qdf_unlikely(qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
		dp_err("Unsupported MSDU format rcvd for error:%u", error_code);
		qdf_assert_always(0);
		goto free_nbuf;
	}

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
						   &txrx_ref_handle,
						   DP_MOD_ID_RX);
	if (!txrx_peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;

	if (qdf_unlikely(pkt_len > buf_size)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		goto free_nbuf;
	}

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_rx_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
			      vdev);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto free_nbuf;
	}

	/*
	 * Advance the packet start pointer by the total size of
	 * pre-header TLVs
	 */
	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);

	/*
	 * A WAPI cert AP sends rekey frames as unencrypted, so RXDMA will
	 * report an unencrypted frame error. To pass the WAPI cert case,
	 * SW needs to pass the unencrypted rekey frame to the stack.
	 *
	 * In the dynamic WEP case rekey frames are not encrypted either,
	 * similar to WAPI. Allow EAPOL when 802.1X + WEP is enabled and
	 * key install is already done.
	 */
	if ((qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) ||
	    ((vdev->sec_type == cdp_sec_type_wep104) &&
	     (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))) {
		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								rx_tlv_hdr) &&
				 (vdev->rx_decap_type ==
				  htt_cmn_pkt_type_ethernet))) {
			eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
			is_broadcast = (QDF_IS_ADDR_BROADCAST
					(eh->ether_dhost)) ? 1 : 0;
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast,
						      1, qdf_nbuf_len(nbuf), 0);
			if (is_broadcast) {
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast,
							      1, qdf_nbuf_len(nbuf), 0);
			}
		} else {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1,
						      qdf_nbuf_len(nbuf),
						      0);
		}

		if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
			dp_rx_deliver_raw(vdev, nbuf, txrx_peer, 0);
		} else {
			/* Update the protocol tag in SKB based on CCE metadata */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID, true, true);
			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
			DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
			qdf_nbuf_set_exc_frame(nbuf, 1);
			dp_rx_deliver_to_osif_stack_rh(soc, vdev, txrx_peer, nbuf, NULL,
						       qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
		}
	}

	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return;

free_nbuf:
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	dp_rx_nbuf_free(nbuf);
}

static void
dp_rx_2k_jump_oor_err_handler_rh(struct dp_soc *soc, qdf_nbuf_t nbuf,
				 uint32_t error_code)
{
	uint32_t frame_mask;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	uint8_t *rx_tlv_hdr;
	uint16_t peer_id;

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	if (qdf_unlikely(qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
		dp_err("Unsupported MSDU format rcvd for error:%u", error_code);
		qdf_assert_always(0);
		goto free_nbuf;
	}

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
						   &txrx_ref_handle,
						   DP_MOD_ID_RX);
	if (!txrx_peer) {
		dp_info_rl("peer not found");
		goto free_nbuf;
	}

	if (error_code == HTT_RXDATA_ERR_OOR) {
		frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
			FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
	} else {
		frame_mask = FRAME_MASK_IPV4_ARP;
	}

	if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		if (error_code == HTT_RXDATA_ERR_OOR) {
			DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
		} else {
			DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
		}

		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return;
	}

free_nbuf:
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	if (error_code == HTT_RXDATA_ERR_OOR) {
		DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
	} else {
		DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
	}

	dp_rx_nbuf_free(nbuf);
}

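/**
 * dp_rx_mic_err_handler_rh() - handle MSDUs received with TKIP MIC errors
 * @soc: core txrx main context
 * @nbuf: pkt buffer with the MIC failure
 *
 * For raw frames the last fragment is handed to the defrag path; for all
 * other frames the MIC failure details are reported to the control path
 * via the rx_mic_error callback.
 *
 * Return: None
 */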
static void dp_rx_mic_err_handler_rh(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct ol_if_ops *tops;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	uint16_t peer_id;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	/*
	 * Only the first msdu has a valid mpdu start description TLV;
	 * use it for the following msdus as well.
	 */
	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    qdf_nbuf_data(nbuf)))
		return;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
						   &txrx_ref_handle,
						   DP_MOD_ID_RX);
	if (!txrx_peer) {
		dp_info_rl("txrx_peer not found");
		goto fail;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_info_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_info_rl("PDEV not found");
		goto fail;
	}

	/* TODO: is raw support required for evros check */
	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
							 qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed status %d !",
				   rx_seq, fragno, status);
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX);
			return;
		}
	}

	if (qdf_unlikely(qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
		dp_err("Unsupported MSDU format rcvd in MIC error handler");
		qdf_assert_always(0);
		goto fail;
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	dp_rx_nbuf_free(nbuf);
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
}

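/**
 * dp_rx_err_handler_rh() - dispatch error MSDUs to the per-error handlers
 * @soc: core txrx main context
 * @rx_desc: rx descriptor of the errored MSDU
 * @error_code: HTT_RXDATA_ERR_* code reported in the MSDU info
 *
 * Return: QDF_STATUS_SUCCESS if the error was handled,
 *	   QDF_STATUS_E_INVAL for an unknown error code
 */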
static QDF_STATUS dp_rx_err_handler_rh(struct dp_soc *soc,
				       struct dp_rx_desc *rx_desc,
				       uint32_t error_code)
{
	switch (error_code) {
	case HTT_RXDATA_ERR_MSDU_LIMIT:
	case HTT_RXDATA_ERR_FLUSH_REQUEST:
	case HTT_RXDATA_ERR_ZERO_LEN_MSDU:
		dp_rx_nbuf_free(rx_desc->nbuf);
		dp_err_rl("MSDU rcvd with error code: %u", error_code);
		break;
	case HTT_RXDATA_ERR_TKIP_MIC:
		dp_rx_mic_err_handler_rh(soc, rx_desc->nbuf);
		break;
	case HTT_RXDATA_ERR_OOR:
	case HTT_RXDATA_ERR_2K_JUMP:
		dp_rx_2k_jump_oor_err_handler_rh(soc, rx_desc->nbuf,
						 error_code);
		break;
	case HTT_RXDATA_ERR_DECRYPT:
	case HTT_RXDATA_ERR_UNENCRYPTED:
		dp_rx_decrypt_unecrypt_err_handler_rh(soc, rx_desc->nbuf,
						      error_code,
						      rx_desc->pool_id);
		break;
	default:
		dp_err("Invalid error packet rcvd, code: %u", error_code);
		dp_rx_desc_dump(rx_desc);
		qdf_assert_always(0);
		dp_rx_nbuf_free(rx_desc->nbuf);
		return QDF_STATUS_E_INVAL;
	}

	return QDF_STATUS_SUCCESS;
}

void
dp_rx_data_indication_handler(struct dp_soc *soc, qdf_nbuf_t data_ind,
			      uint16_t vdev_id, uint16_t peer_id,
			      uint16_t msdu_count)
{
	uint8_t *data_ind_msg;
	uint32_t *msg_word;
	uint32_t rx_ctx_id;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t num_pending = msdu_count;
	uint32_t rx_buf_cookie;
	uint16_t msdu_len = 0;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t rx_ol_pkt_cnt = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;
	uint32_t max_ast;
	uint64_t current_time = 0;
	uint32_t error;
	uint32_t error_code;
	QDF_STATUS status;
	uint16_t buf_size;

	DP_HIST_INIT();

	qdf_assert_always(soc && msdu_count);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	dp_runtime_pm_mark_last_busy(soc);
	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	max_ast = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
	max_ast = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);

	data_ind_msg = qdf_nbuf_data(data_ind);
	msg_word =
		(uint32_t *)(data_ind_msg + HTT_RX_DATA_IND_HDR_SIZE);
	rx_ctx_id =
		dp_rx_get_ctx_id_frm_napiid(QDF_NBUF_CB_RX_CTX_ID(data_ind));

	while (qdf_likely(num_pending)) {
		dp_rx_ring_record_entry_rh(soc, rx_ctx_id, msg_word);
		rx_buf_cookie =
			HTT_RX_DATA_MSDU_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		if (qdf_unlikely(!rx_desc || !rx_desc->nbuf ||
				 !rx_desc->in_use)) {
			dp_err("Invalid RX descriptor");
			qdf_assert_always(0);
			/* TODO handle this if its valid case */
		}

		status = dp_rx_desc_nbuf_sanity_check_rh(soc, msg_word,
							 rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert_rh(soc, msg_word, rx_desc);
			rx_desc->in_err_state = 1;
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert_rh(soc, msg_word, rx_desc);
			continue;
		}

		msdu_len =
			HTT_RX_DATA_MSDU_INFO_MSDU_LENGTH_GET(*(msg_word + 2));

		if (qdf_unlikely(
			HTT_RX_DATA_MSDU_INFO_MSDU_CONTINUATION_GET(*(msg_word + 2)))) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* For a new MPDU check if we can read the
				 * complete MPDU by comparing the number of
				 * buffers available and the number of buffers
				 * needed to reap this MPDU
				 */
				if ((msdu_len /
				     (buf_size -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					/* This is not expected; the host cannot
					 * deal with a partial frame in a single
					 * DATA indication, so FW has to submit
					 * the full frame in a single DATA
					 * indication
					 */
					qdf_assert_always(0);
				}
				is_prev_msdu_last = false;
			}
		}

		if (HTT_RX_DATA_MSDU_INFO_MPDU_RETRY_BIT_GET(*(msg_word + 2)))
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (HTT_RX_DATA_MSDU_INFO_RAW_MPDU_FRAME_GET(*(msg_word + 2)))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		/*
		 * The last MSDU has its continuation bit set to zero; use this
		 * to detect a full MSDU
		 */
		if (!is_prev_msdu_last &&
		    !HTT_RX_DATA_MSDU_INFO_MSDU_CONTINUATION_GET(*(msg_word + 2)))
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->pool_id]++;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) = peer_id;
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) = vdev_id;
		dp_rx_mark_first_packet_after_wow_wakeup_rh(soc, msg_word,
							    rx_desc->nbuf);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (HTT_RX_DATA_MSDU_INFO_FIRST_MSDU_IN_MPDU_GET(*(msg_word + 2)))
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (HTT_RX_DATA_MSDU_INFO_MSDU_CONTINUATION_GET(*(msg_word + 2)))
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (HTT_RX_DATA_MSDU_INFO_LAST_MSDU_IN_MPDU_GET(*(msg_word + 2)))
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (HTT_RX_DATA_MSDU_INFO_DA_IS_MCBC_GET(*(msg_word + 2)))
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (HTT_RX_DATA_MSDU_INFO_DA_IS_VALID_GET(*(msg_word + 2)))
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (HTT_RX_DATA_MSDU_INFO_SA_IS_VALID_GET(*(msg_word + 2)))
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HTT_RX_DATA_MSDU_INFO_TID_INFO_GET(*(msg_word + 2)));

		/* set whether packet took offloads path */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				HTT_RX_DATA_MSDU_INFO_FW_OFFLOADS_INSPECTED_GET(*(msg_word + 1)));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = rx_ctx_id;

		/*
		 * TODO: move the unmap after the scattered msdu waiting-break
		 * logic, in case a double skb unmap happens.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, rx_ctx_id);

		error = HTT_RX_DATA_MSDU_INFO_ERROR_VALID_GET(*(msg_word + 3));
		if (qdf_unlikely(error)) {
			dp_rx_err("MSDU RX error encountered error:%u", error);
			error_code =
				HTT_RX_DATA_MSDU_INFO_ERROR_INFO_GET(*(msg_word + 3));
			dp_rx_err_handler_rh(soc, rx_desc, error_code);

		} else {
			DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
					   ebuf_tail, rx_desc);
		}

		num_pending -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		msg_word += HTT_RX_DATA_MSDU_INFO_SIZE >> 2;
	}

	dp_rx_per_core_stats_update(soc, rx_ctx_id, num_rx_bufs_reaped);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish_simple(soc, mac_id, dp_rxdma_srng,
					       rx_desc_pool,
					       rx_bufs_reaped[mac_id],
					       &head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;

		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf)) {
			tid = qdf_nbuf_get_tid_val(nbuf);
			if (tid >= CDP_MAX_DATA_TIDS) {
				DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
				 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		if (qdf_likely(old_tid != tid)) {
			tid_stats =
				&rx_pdev->stats.tid_stats.tid_rx_stats[rx_ctx_id][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
			if (qdf_unlikely(!hal_rx_attn_msdu_done_get_rh(
							 rx_tlv_hdr))) {
				dp_err_rl("MSDU DONE failure");
				DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
				hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
						     QDF_TRACE_LEVEL_INFO);
				tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
				qdf_assert(0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			} else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get_rh(
							 rx_tlv_hdr))) {
				DP_STATS_INC(soc, rx.err.msdu_len_err, 1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * This condition is valid when 802.11 fragmented pkts are
		 * reinjected back; even though this case is not valid for
		 * Rhine it is kept for sanity. Verify and remove this first
		 * if condition based on testing.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len,
							      0);
			} else {
				dp_rx_nbuf_free(nbuf);
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
				dp_info_rl("scatter msdu len %d, dropped",
					   msdu_len);
				nbuf = next;
				continue;
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop, 1, 0);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1, 0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					rx_ctx_id, tid_stats, 0);

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet)) {
			/* Due to a HW issue, sometimes we see that the sa_idx
			 * and da_idx are invalid with sa_valid and da_valid
			 * bits set
			 *
			 * in this case we also see that value of
			 * sa_sw_peer_id is set as 0
			 *
			 * Drop the packet if sa_idx and da_idx are OOB or
			 * sa_sw_peerid is 0
			 */
			if (!is_sa_da_idx_valid(max_ast, nbuf,
						msdu_metadata)) {
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				continue;
			}
			if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
								 txrx_peer,
								 rx_tlv_hdr,
								 nbuf))) {
				/* this is a looped back MCBC pkt, drop it */
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.mec_drop, 1,
							      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
							      0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			/* WDS Source Port Learning */
			if (qdf_likely(vdev->wds_enabled))
				dp_rx_wds_srcport_learn(soc,
							rx_tlv_hdr,
							txrx_peer,
							nbuf,
							msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_rh(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata,
							  tid_stats)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1,
					   QDF_NBUF_CB_RX_PKT_LEN(nbuf));
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
						      0);

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	if (vdev && vdev->osif_fisa_flush)
		vdev->osif_fisa_flush(soc, rx_ctx_id);

	if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
		vdev->osif_gro_flush(vdev->osif_vdev,
				     rx_ctx_id);
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();
}

/**
 * dp_rx_defrag_deliver_rh() - Deliver defrag packet to stack
 * @txrx_peer: Pointer to the peer
 * @tid: Traffic Identifier
 * @head: Nbuf to be delivered
 *
 * Return: None
 */
static inline void dp_rx_defrag_deliver_rh(struct dp_txrx_peer *txrx_peer,
					   unsigned int tid,
					   qdf_nbuf_t head)
{
	struct dp_vdev *vdev = txrx_peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	uint8_t *rx_tlv_hdr;

	rx_tlv_hdr = qdf_nbuf_data(head);

	QDF_NBUF_CB_RX_VDEV_ID(head) = vdev->vdev_id;
	qdf_nbuf_set_tid_val(head, tid);
	qdf_nbuf_pull_head(head, soc->rx_pkt_tlv_size);

	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail,
			  head);
	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, deliver_list_head,
			       deliver_list_tail);
}

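/**
 * dp_rx_defrag_store_fragment_rh() - store an incoming fragment for reassembly
 * @soc: core txrx main context
 * @frag: fragment nbuf received over the frag indication path
 *
 * Validates the fragment, inserts it into the per-peer, per-TID reorder
 * array and, once all fragments of the sequence are present, defragments
 * the MPDU and delivers it to the stack.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_DEFRAG_ERROR otherwise
 */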
static
QDF_STATUS dp_rx_defrag_store_fragment_rh(struct dp_soc *soc, qdf_nbuf_t frag)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
	struct dp_pdev *pdev;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	uint16_t peer_id, tid;
	uint8_t fragno, more_frag, all_frag_present = 0;
	uint16_t rxseq;
	QDF_STATUS status;
	struct dp_rx_tid_defrag *rx_tid;
	uint8_t mpdu_sequence_control_valid;
	uint8_t mpdu_frame_control_valid;
	uint8_t *rx_buf_start = qdf_nbuf_data(frag);
	uint32_t msdu_len;

	if (qdf_nbuf_len(frag) > 0) {
		dp_rx_info("Dropping unexpected packet with skb_len: %d, data len: %d",
			   (uint32_t)qdf_nbuf_len(frag), frag->data_len);
		DP_STATS_INC(soc, rx.rx_frag_err_len_error, 1);
		goto discard_frag;
	}

	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(frag);
	qdf_nbuf_set_pktlen(frag, (msdu_len + soc->rx_pkt_tlv_size));
	qdf_nbuf_append_ext_list(frag, NULL, 0);

	/* Check if the packet is from a valid peer */
	peer_id = QDF_NBUF_CB_RX_PEER_ID(frag);
	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &txrx_ref_handle,
					       DP_MOD_ID_RX_ERR);

	if (!txrx_peer) {
		/* We should not receive anything from an unknown peer;
		 * however, that might happen while we are in monitor mode.
		 * We don't need to handle that here
		 */
		dp_rx_info_rl("Unknown peer with peer_id %d, dropping fragment",
			      peer_id);
		DP_STATS_INC(soc, rx.rx_frag_err_no_peer, 1);
		goto discard_frag;
	}

	tid = qdf_nbuf_get_tid_val(frag);
	if (tid >= DP_MAX_TIDS) {
		dp_rx_info("TID out of bounds: %d", tid);
		qdf_assert_always(0);
		goto discard_frag;
	}

	mpdu_sequence_control_valid =
		hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc,
						       rx_buf_start);

	/* Invalid MPDU sequence control field, MPDU is of no use */
	if (!mpdu_sequence_control_valid) {
		dp_rx_err("Invalid MPDU seq control field, dropping MPDU");

		qdf_assert(0);
		goto discard_frag;
	}

	mpdu_frame_control_valid =
		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						    rx_buf_start);

	/* Invalid frame control field */
	if (!mpdu_frame_control_valid) {
		dp_rx_err("Invalid frame control field, dropping MPDU");

		qdf_assert(0);
		goto discard_frag;
	}

	/* Current mpdu sequence */
	more_frag = dp_rx_frag_get_more_frag_bit(soc, rx_buf_start);

	/* HW does not populate the fragment number as of now;
	 * need to get it from the 802.11 header
	 */
	fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_buf_start);
	rxseq = dp_rx_frag_get_mpdu_seq_number(soc, rx_buf_start);

	pdev = txrx_peer->vdev->pdev;
	rx_tid = &txrx_peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->defrag_tid_lock);
	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
	if (!rx_reorder_array_elem) {
		dp_err_rl("Rcvd Fragmented pkt before tid setup for peer %pK",
			  txrx_peer);
		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
		goto discard_frag;
	}

	/*
	 * !more_frag: no more fragments to be delivered
	 * !frag_no: packet is not fragmented
	 * !rx_reorder_array_elem->head: no saved fragments so far
	 */
	if (!more_frag && !fragno && !rx_reorder_array_elem->head) {
		/* We should not get into this situation here.
		 * It means an unfragmented packet with fragment flag
		 * is delivered over frag indication.
		 * Typically it follows normal rx path.
		 */
		dp_rx_err("Rcvd unfragmented pkt on fragmented path, dropping");

		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
		qdf_assert(0);
		goto discard_frag;
	}

	/* Check if the fragment is for the same sequence or a different one */
	dp_rx_debug("rx_tid %d", tid);
	if (rx_reorder_array_elem->head) {
		dp_rx_debug("rxseq %d\n", rxseq);
		if (rxseq != rx_tid->curr_seq_num) {
			dp_rx_debug("mismatch cur_seq %d rxseq %d\n",
				    rx_tid->curr_seq_num, rxseq);
			/* Drop stored fragments if an out of sequence
			 * fragment is received
			 */
			dp_rx_reorder_flush_frag(txrx_peer, tid);

			DP_STATS_INC(soc, rx.rx_frag_oor, 1);

			dp_rx_debug("cur rxseq %d\n", rxseq);
			/*
			 * The sequence number for this fragment becomes the
			 * new sequence number to be processed
			 */
			rx_tid->curr_seq_num = rxseq;
		}
	} else {
		/* Check if we are processing the first fragment; if it is
		 * not the first fragment, discard it.
		 */
		if (fragno) {
			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
			goto discard_frag;
		}
		dp_rx_debug("cur rxseq %d\n", rxseq);
		/* Start of a new sequence */
		dp_rx_defrag_cleanup(txrx_peer, tid);
		rx_tid->curr_seq_num = rxseq;
	}

	/*
	 * If the earlier sequence was dropped, this will be the fresh start.
	 * Else, continue with next fragment in a given sequence
	 */
	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
					      &rx_reorder_array_elem->head,
					      &rx_reorder_array_elem->tail,
					      frag, &all_frag_present);

	if (pdev->soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_remove(txrx_peer, tid);

	/* Yet to receive more fragments for this sequence number */
	if (!all_frag_present) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		txrx_peer->rx_tid[tid].defrag_timeout_ms =
			now_ms + pdev->soc->rx.defrag.timeout_ms;

		if (pdev->soc->rx.flags.defrag_timeout_check)
			dp_rx_defrag_waitlist_add(txrx_peer, tid);
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);

		return QDF_STATUS_SUCCESS;
	}

	dp_rx_debug("All fragments received for sequence: %d", rxseq);

	/* Process the fragments */
	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
			      rx_reorder_array_elem->tail);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_rx_err("Fragment processing failed");

		dp_rx_defrag_cleanup(txrx_peer, tid);
		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
		goto end;
	}

	dp_rx_defrag_deliver_rh(txrx_peer, tid, rx_reorder_array_elem->head);
	dp_rx_debug("Fragmented sequence successfully reinjected");

	dp_rx_defrag_cleanup(txrx_peer, tid);
	qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);

	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);

	return QDF_STATUS_SUCCESS;

discard_frag:
	dp_rx_nbuf_free(frag);
end:
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);

	DP_STATS_INC(soc, rx.rx_frag_err, 1);
	return QDF_STATUS_E_DEFRAG_ERROR;
}

void
dp_rx_frag_indication_handler(struct dp_soc *soc, qdf_nbuf_t data_ind,
			      uint16_t vdev_id, uint16_t peer_id)
{
	uint8_t *data_ind_msg;
	uint32_t *msg_word;
	uint32_t rx_ctx_id;
	qdf_nbuf_t nbuf;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint32_t rx_buf_cookie;
	struct dp_rx_desc *rx_desc;
	uint8_t mac_id = 0;

	qdf_assert(soc);

	data_ind_msg = qdf_nbuf_data(data_ind);
	msg_word =
		(uint32_t *)(data_ind_msg + HTT_RX_DATA_IND_HDR_SIZE);
	rx_ctx_id =
		dp_rx_get_ctx_id_frm_napiid(QDF_NBUF_CB_RX_CTX_ID(data_ind));

	rx_buf_cookie =
		HTT_RX_DATA_MSDU_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));
	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
	if (qdf_unlikely(!rx_desc || !rx_desc->nbuf ||
			 !rx_desc->in_use)) {
		dp_rx_err("Invalid RX descriptor");
		qdf_assert_always(0);
		/* TODO handle this if its valid case */
	}

	if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
		dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
		DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
		qdf_assert(0);
	}

	nbuf = rx_desc->nbuf;
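
	/*
	 * Populate the nbuf control block with the length, TID, peer/vdev
	 * ids and rx context from the HTT MSDU info words before the
	 * fragment is handed to the defrag path.
	 */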
	QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
		HTT_RX_DATA_MSDU_INFO_MSDU_LENGTH_GET(*(msg_word + 2));
	qdf_nbuf_set_tid_val(nbuf, HTT_RX_DATA_MSDU_INFO_TID_INFO_GET(*(msg_word + 2)));
	QDF_NBUF_CB_RX_PEER_ID(nbuf) = peer_id;
	QDF_NBUF_CB_RX_VDEV_ID(nbuf) = vdev_id;
	QDF_NBUF_CB_RX_CTX_ID(nbuf) = rx_ctx_id;

	dp_rx_nbuf_unmap(soc, rx_desc, rx_ctx_id);

	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);

	dp_rx_buffers_replenish_simple(soc, rx_desc->pool_id,
				       &soc->rx_refill_buf_ring[mac_id],
				       &soc->rx_desc_buf[rx_desc->pool_id],
				       1, &head, &tail);

	if (dp_rx_buffer_pool_refill(soc, nbuf, rx_desc->pool_id))
		/* fragment queued back to the pool, no frag to handle */
		return;

	/* Process fragment-by-fragment */
	status = dp_rx_defrag_store_fragment_rh(soc, nbuf);
	if (QDF_IS_STATUS_ERROR(status))
		dp_rx_err("Unable to handle frag ret:%u", status);
}

QDF_STATUS dp_rx_desc_pool_init_rh(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	return dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}

void dp_rx_desc_pool_deinit_rh(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}