/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_htt.h"
#include "dp_mon.h"
#include "dp_rx_mon.h"
#include "wlan_cfg.h"
#include "dp_internal.h"
#include "dp_rx_buffer_pool.h"
#include <dp_mon_1.0.h>
#include <dp_rx_mon_1.0.h>

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#endif

/*
 * PPDU id is from 0 to 64k-1. The PPDU id read from the status ring and the
 * PPDU id read from the destination ring shall track each other. If the
 * distance between two PPDU ids is less than 20000, it is assumed that no
 * wrap around occurred; otherwise, a wrap around is assumed.
 */
#define NOT_PPDU_ID_WRAP_AROUND 20000
/*
 * The destination ring processing is stuck if the destination ring is not
 * moving while status ring moves 16 ppdu. 
the destination ring processing
 * skips this destination ring ppdu as a workaround.
 */
#define MON_DEST_RING_STUCK_MAX_CNT 16

#ifdef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_handle_tx_capture() - Hand an MPDU reaped from the monitor destination
 * ring to the TX enhanced-capture feature.
 * @soc: core DP soc context
 * @pdev: core DP pdev context
 * @mon_mpdu: monitor MPDU nbuf to hand over
 *
 * No-op while TX enhanced capture is disabled. Only NDPA and BAR control
 * frame groups are forwarded to dp_handle_tx_capture_from_dest(); all other
 * frame groups are ignored here.
 */
void
dp_handle_tx_capture(struct dp_soc *soc, struct dp_pdev *pdev,
		     qdf_nbuf_t mon_mpdu)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct hal_rx_ppdu_info *ppdu_info = &mon_pdev->ppdu_info;

	if (mon_pdev->tx_capture_enabled
	    == CDP_TX_ENH_CAPTURE_DISABLED)
		return;

	if ((ppdu_info->sw_frame_group_id ==
	     HAL_MPDU_SW_FRAME_GROUP_CTRL_NDPA) ||
	    (ppdu_info->sw_frame_group_id ==
	     HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR))
		dp_handle_tx_capture_from_dest(soc, pdev, mon_mpdu);
}

#ifdef QCA_MONITOR_PKT_SUPPORT
/*
 * dp_tx_capture_get_user_id() - Cache the user id parsed from the RX
 * descriptor TLV into the per-pdev ppdu_info for the TX-capture feature.
 * @dp_pdev: core txrx pdev context
 * @rx_desc_tlv: pointer to the RX TLV header of the current buffer
 *
 * Only updates ppdu_info while TX enhanced capture is enabled.
 */
static void
dp_tx_capture_get_user_id(struct dp_pdev *dp_pdev, void *rx_desc_tlv)
{
	struct dp_mon_pdev *mon_pdev = dp_pdev->monitor_pdev;

	if (mon_pdev->tx_capture_enabled
	    != CDP_TX_ENH_CAPTURE_DISABLED)
		mon_pdev->ppdu_info.rx_info.user_id =
			hal_rx_hw_desc_mpdu_user_id(dp_pdev->soc->hal_soc,
						    rx_desc_tlv);
}
#endif
#else
/* Stub used when the TX packet-capture enhancement is compiled out */
static void
dp_tx_capture_get_user_id(struct dp_pdev *dp_pdev, void *rx_desc_tlv)
{
}
#endif

#ifdef QCA_MONITOR_PKT_SUPPORT
/**
 * dp_rx_mon_link_desc_return() - Return a MPDU link descriptor to HW
 * (WBM), following error handling
 *
 * @dp_pdev: core txrx pdev context
 * @buf_addr_info: void pointer to monitor link descriptor buf addr info
 * @mac_id: mac_id for which the link desc is released.
101 * 102 * Return: QDF_STATUS 103 */ 104 QDF_STATUS 105 dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev, 106 hal_buff_addrinfo_t buf_addr_info, int mac_id) 107 { 108 hal_ring_handle_t hal_ring_hdl; 109 hal_soc_handle_t hal_soc; 110 QDF_STATUS status = QDF_STATUS_E_FAILURE; 111 void *src_srng_desc; 112 113 hal_soc = dp_pdev->soc->hal_soc; 114 115 hal_ring_hdl = dp_monitor_get_link_desc_ring(dp_pdev->soc, mac_id); 116 117 qdf_assert(hal_ring_hdl); 118 119 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring_hdl))) { 120 121 /* TODO */ 122 /* 123 * Need API to convert from hal_ring pointer to 124 * Ring Type / Ring Id combo 125 */ 126 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 127 "%s %d : \ 128 HAL RING Access For WBM Release SRNG Failed -- %pK", 129 __func__, __LINE__, hal_ring_hdl); 130 goto done; 131 } 132 133 src_srng_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); 134 135 if (qdf_likely(src_srng_desc)) { 136 /* Return link descriptor through WBM ring (SW2WBM)*/ 137 hal_rx_mon_msdu_link_desc_set(hal_soc, 138 src_srng_desc, buf_addr_info); 139 status = QDF_STATUS_SUCCESS; 140 } else { 141 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 142 "%s %d -- Monitor Link Desc WBM Release Ring Full", 143 __func__, __LINE__); 144 } 145 done: 146 hal_srng_access_end(hal_soc, hal_ring_hdl); 147 return status; 148 } 149 150 /** 151 * dp_rx_mon_mpdu_pop() - Return a MPDU link descriptor to HW 152 * (WBM), following error handling 153 * 154 * @soc: core DP main context 155 * @mac_id: mac id which is one of 3 mac_ids 156 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info 157 * @head_msdu: head of msdu to be popped 158 * @tail_msdu: tail of msdu to be popped 159 * @npackets: number of packet to be popped 160 * @ppdu_id: ppdu id of processing ppdu 161 * @head: head of descs list to be freed 162 * @tail: tail of decs list to be freed 163 * 164 * Return: number of msdu in MPDU to be popped 165 */ 166 static inline uint32_t 167 
dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc, qdf_nbuf_t *head_msdu,
		   qdf_nbuf_t *tail_msdu, uint32_t *npackets, uint32_t *ppdu_id,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	void *rx_desc_tlv, *first_rx_desc_tlv = NULL;
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	uint32_t rx_buf_size, rx_pkt_offset;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_ppdu_id, msdu_cnt;
	uint8_t *data = NULL;
	uint32_t i;
	uint32_t total_frag_len = 0, frag_len = 0;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false, is_frag_non_raw = false;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	qdf_dma_addr_t buf_paddr = 0;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct cdp_mon_status *rs;
	struct dp_mon_pdev *mon_pdev;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return rx_bufs_used;
	}

	mon_pdev = dp_pdev->monitor_pdev;
	msdu = 0;

	last = NULL;

	/* Fetch the first link descriptor address and MSDU count for this
	 * destination ring entry.
	 */
	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

	/* If RXDMA pushed this entry for an error reason, decide whether the
	 * whole MPDU must be dropped (flush/length/overflow errors, or FCS
	 * errors while mcopy/CBF pktlog is active).
	 */
	rs = &mon_pdev->rx_mon_recv_status;
	rs->cdp_rs_rxdma_err = false;
	if ((hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc) ==
		HAL_RX_WBM_RXDMA_PSH_RSN_ERROR)) {
		uint8_t rxdma_err =
			hal_rx_reo_ent_rxdma_error_code_get(
				rxdma_dst_ring_desc);
		if (qdf_unlikely((rxdma_err == HAL_RXDMA_ERR_FLUSH_REQUEST) ||
				 (rxdma_err == HAL_RXDMA_ERR_MPDU_LENGTH) ||
				 (rxdma_err == HAL_RXDMA_ERR_OVERFLOW) ||
				 (rxdma_err == HAL_RXDMA_ERR_FCS && mon_pdev->mcopy_mode) ||
				 (rxdma_err == HAL_RXDMA_ERR_FCS &&
				  mon_pdev->rx_pktlog_cbf))) {
			drop_mpdu = true;
			mon_pdev->rx_mon_stats.dest_mpdu_drop++;
		}
		rs->cdp_rs_rxdma_err = true;
	}

	is_frag = false;
	is_first_msdu = true;

	/* Walk the chain of MSDU link descriptors for this MPDU */
	do {
		if (!msdu_cnt) {
			drop_mpdu = true;
			DP_STATS_INC(dp_pdev, invalid_msdu_cnt, 1);
		}

		/* WAR for duplicate link descriptors received from HW */
		if (qdf_unlikely(mon_pdev->mon_last_linkdesc_paddr ==
				 buf_info.paddr)) {
			mon_pdev->rx_mon_stats.dup_mon_linkdesc_cnt++;
			return rx_bufs_used;
		}

		rx_msdu_link_desc =
			dp_rx_cookie_2_mon_link_desc(dp_pdev,
						     &buf_info, mac_id);

		qdf_assert_always(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			uint16_t l2_hdr_offset;
			struct dp_rx_desc *rx_desc = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc = dp_rx_get_mon_desc(soc,
						     msdu_list.sw_cookie[i]);

			qdf_assert_always(rx_desc);

			msdu = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
			buf_paddr = dp_rx_mon_get_paddr_from_desc(rx_desc);

			/* WAR for duplicate buffers received from HW */
			if (qdf_unlikely(mon_pdev->mon_last_buf_cookie ==
				msdu_list.sw_cookie[i] ||
				DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) ||
				msdu_list.paddr[i] != buf_paddr ||
				!rx_desc->in_use)) {
				/* Skip duplicate buffer and drop subsequent
				 * buffers in this MPDU
				 */
				drop_mpdu = true;
				mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
				mon_pdev->mon_last_linkdesc_paddr =
					buf_info.paddr;
				continue;
			}

			/* DMA-unmap the buffer once before the CPU reads it */
			if (rx_desc->unmapped == 0) {
				rx_desc_pool = dp_rx_get_mon_desc_pool(soc,
								       mac_id,
								       dp_pdev->pdev_id);
				dp_rx_mon_buffer_unmap(soc, rx_desc,
						       rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}

			/* If the buffer pool cannot be refilled, drop rather
			 * than starve the RXDMA buffer ring.
			 */
			if (dp_rx_buffer_pool_refill(soc, msdu,
						     rx_desc->pool_id)) {
				drop_mpdu = true;
				msdu = NULL;
				mon_pdev->mon_last_linkdesc_paddr =
					buf_info.paddr;
				goto next_msdu;
			}

			if (drop_mpdu) {
				mon_pdev->mon_last_linkdesc_paddr =
					buf_info.paddr;
				dp_rx_mon_buffer_free(rx_desc);
				msdu = NULL;
				goto next_msdu;
			}

			data = dp_rx_mon_get_buffer_data(rx_desc);
			rx_desc_tlv = HAL_RX_MON_DEST_GET_DESC(data);

			dp_rx_mon_dest_debug("%pK: i=%d, ppdu_id=%x, num_msdus = %u",
					     soc, i, *ppdu_id, num_msdus);

			if (is_first_msdu) {
				/* First buffer of the MPDU must carry a valid
				 * MPDU-start TLV; otherwise drop the MPDU.
				 */
				if (!hal_rx_mpdu_start_tlv_tag_valid(
						soc->hal_soc,
						rx_desc_tlv)) {
					drop_mpdu = true;
					dp_rx_mon_buffer_free(rx_desc);
					msdu = NULL;
					mon_pdev->mon_last_linkdesc_paddr =
						buf_info.paddr;
					goto next_msdu;
				}

				msdu_ppdu_id = hal_rx_hw_desc_get_ppduid_get(
						soc->hal_soc,
						rx_desc_tlv,
						rxdma_dst_ring_desc);
				is_first_msdu = false;

				dp_rx_mon_dest_debug("%pK: msdu_ppdu_id=%x",
						     soc, msdu_ppdu_id);

				if (*ppdu_id > msdu_ppdu_id)
					dp_rx_mon_dest_debug("%pK: ppdu_id=%d "
							     "msdu_ppdu_id=%d", soc,
							     *ppdu_id, msdu_ppdu_id);

				/* Keep status-ring and destination-ring PPDU
				 * ids in sync: bail out (without reaping) when
				 * the destination entry belongs to a later
				 * PPDU, accounting for 16-bit wrap-around via
				 * NOT_PPDU_ID_WRAP_AROUND.
				 */
				if ((*ppdu_id < msdu_ppdu_id) && (
					(msdu_ppdu_id - *ppdu_id) <
						NOT_PPDU_ID_WRAP_AROUND)) {
					*ppdu_id = msdu_ppdu_id;
					return rx_bufs_used;
				} else if ((*ppdu_id > msdu_ppdu_id) && (
					(*ppdu_id - msdu_ppdu_id) >
						NOT_PPDU_ID_WRAP_AROUND)) {
					*ppdu_id = msdu_ppdu_id;
					return rx_bufs_used;
				}

				dp_tx_capture_get_user_id(dp_pdev,
							  rx_desc_tlv);

				if (*ppdu_id == msdu_ppdu_id)
					mon_pdev->rx_mon_stats.ppdu_id_match++;
				else
					mon_pdev->rx_mon_stats.ppdu_id_mismatch
						++;

				mon_pdev->mon_last_linkdesc_paddr =
					buf_info.paddr;

				if (dp_rx_mon_alloc_parent_buffer(head_msdu)
				    != QDF_STATUS_SUCCESS) {
					DP_STATS_INC(dp_pdev,
						     replenish.nbuf_alloc_fail,
						     1);
					qdf_frag_free(rx_desc_tlv);
					dp_rx_mon_dest_debug("failed to allocate parent buffer to hold all frag");
					drop_mpdu = true;
					goto next_msdu;
				}
			}

			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
						      rx_desc_tlv))
				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
					rx_desc_tlv,
					&mon_pdev->ppdu_info.rx_status);

			/* Extract per-MSDU length/offset/fragmentation info */
			dp_rx_mon_parse_desc_buffer(soc,
						    &(msdu_list.msdu_info[i]),
						    &is_frag,
						    &total_frag_len,
						    &frag_len,
						    &l2_hdr_offset,
						    rx_desc_tlv,
						    &first_rx_desc_tlv,
						    &is_frag_non_raw, data);
			if (!is_frag && msdu_cnt)
				msdu_cnt--;

			dp_rx_mon_dest_debug("total_len %u frag_len %u flags %u",
					     total_frag_len, frag_len,
					     msdu_list.msdu_info[i].msdu_flags);

			rx_pkt_offset = dp_rx_mon_get_rx_pkt_tlv_size(soc);

			rx_buf_size = rx_pkt_offset + l2_hdr_offset
					+ frag_len;

			dp_rx_mon_buffer_set_pktlen(msdu, rx_buf_size);
#if 0
			/* Disable it.see packet on msdu done set to 0 */
			/*
			 * Check if DMA completed -- msdu_done is the
			 * last bit to be written
			 */
			if (!hal_rx_attn_msdu_done_get(rx_desc_tlv)) {

				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s:%d: Pkt Desc",
					  __func__, __LINE__);

				QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
						   QDF_TRACE_LEVEL_ERROR,
						   rx_desc_tlv, 128);

				qdf_assert_always(0);
			}
#endif
			dp_rx_mon_dest_debug("%pK: rx_pkt_offset=%d, l2_hdr_offset=%d, msdu_len=%d, frag_len %u",
					     soc, rx_pkt_offset, l2_hdr_offset,
					     msdu_list.msdu_info[i].msdu_len,
					     frag_len);

			if (dp_rx_mon_add_msdu_to_list(soc, head_msdu, msdu,
						       &last, rx_desc_tlv,
						       frag_len, l2_hdr_offset)
			    != QDF_STATUS_SUCCESS) {
				dp_rx_mon_add_msdu_to_list_failure_handler(rx_desc_tlv,
					dp_pdev, &last, head_msdu,
					tail_msdu, __func__);
				drop_mpdu = true;
				goto next_msdu;
			}

next_msdu:
			/* The rx_desc is always recycled, whether the MSDU was
			 * kept or dropped.
			 */
			mon_pdev->mon_last_buf_cookie = msdu_list.sw_cookie[i];
			rx_bufs_used++;
			dp_rx_add_to_free_desc_list(head,
						    tail, rx_desc);
		}

		/*
		 * Store the current link buffer into to the local
		 * structure to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		/* Advance to the next link descriptor, then return the
		 * current one to the WBM idle list.
		 */
		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		if (dp_rx_monitor_link_desc_return(dp_pdev,
						   (hal_buff_addrinfo_t)
						   rx_link_buf_info,
						   mac_id,
						   bm_action)
			!= QDF_STATUS_SUCCESS)
			dp_err_rl("monitor link desc return failed");
	} while (buf_info.paddr);

	dp_rx_mon_init_tail_msdu(head_msdu, msdu, last, tail_msdu);
	dp_rx_mon_remove_raw_frame_fcs_len(soc, head_msdu, tail_msdu);

	return rx_bufs_used;
}

#if !defined(DISABLE_MON_CONFIG) && \
	(defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC) || \
	 defined(MON_ENABLE_DROP_FOR_MAC))
/**
 * dp_rx_mon_drop_one_mpdu() - Drop one mpdu from one rxdma monitor destination
 * ring.
 * @pdev: DP pdev handle
 * @mac_id: MAC id which is being currently processed
 * @rxdma_dst_ring_desc: RXDMA monitor destination ring entry
 * @head: HEAD if the rx_desc list to be freed
 * @tail: TAIL of the rx_desc list to be freed
 *
 * Return: Number of msdus which are dropped.
 */
static int dp_rx_mon_drop_one_mpdu(struct dp_pdev *pdev,
				   uint32_t mac_id,
				   hal_rxdma_desc_t rxdma_dst_ring_desc,
				   union dp_rx_desc_list_elem_t **head,
				   union dp_rx_desc_list_elem_t **tail)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_soc *soc = pdev->soc;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_buf_info buf_info;
	uint32_t msdu_count = 0;
	uint32_t rx_bufs_used = 0;
	void *rx_msdu_link_desc;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	qdf_nbuf_t nbuf;
	uint32_t i;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);
	/* First link descriptor address and MSDU count of the MPDU */
	hal_rx_reo_ent_buf_paddr_get(hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_count);

	/* Walk every link descriptor of the MPDU, freeing each buffer and
	 * returning descriptors/link-descs to their free lists.
	 */
	do {
		rx_msdu_link_desc = dp_rx_cookie_2_mon_link_desc(pdev,
								 &buf_info,
								 mac_id);
		if (qdf_unlikely(!rx_msdu_link_desc)) {
			mon_pdev->rx_mon_stats.mon_link_desc_invalid++;
			return rx_bufs_used;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			struct dp_rx_desc *rx_desc;
			qdf_dma_addr_t buf_paddr;

			rx_desc = dp_rx_get_mon_desc(soc,
						     msdu_list.sw_cookie[i]);

			if (qdf_unlikely(!rx_desc)) {
				mon_pdev->rx_mon_stats.
					mon_rx_desc_invalid++;
				continue;
			}

			nbuf = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
			buf_paddr =
				dp_rx_mon_get_paddr_from_desc(rx_desc);

			/* Sanity: skip stale/mismatched descriptors */
			if (qdf_unlikely(!rx_desc->in_use || !nbuf ||
					 msdu_list.paddr[i] !=
					 buf_paddr)) {
				mon_pdev->rx_mon_stats.
					mon_nbuf_sanity_err++;
				continue;
			}
			rx_bufs_used++;

			/* DMA-unmap once before handing memory back */
			if (!rx_desc->unmapped) {
				dp_rx_mon_buffer_unmap(soc, rx_desc,
						       rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}

			qdf_nbuf_free(nbuf);
			dp_rx_add_to_free_desc_list(head, tail, rx_desc);

			/* Continuation flag clear marks the last buffer of
			 * an MSDU.
			 */
			if (!(msdu_list.msdu_info[i].msdu_flags &
			      HAL_MSDU_F_MSDU_CONTINUATION))
				msdu_count--;
		}

		/*
		 * Store the current link buffer into to the local
		 * structure to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc,
					     rx_link_buf_info,
					     buf_info.paddr,
					     buf_info.sw_cookie,
					     buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc,
					      rx_msdu_link_desc,
					      &buf_info);
		if (dp_rx_monitor_link_desc_return(pdev,
						   (hal_buff_addrinfo_t)
						   rx_link_buf_info,
						   mac_id, bm_action) !=
		    QDF_STATUS_SUCCESS)
			dp_info_rl("monitor link desc return failed");
	} while (buf_info.paddr && msdu_count);

	return rx_bufs_used;
}
#endif

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC)
/**
 * dp_rx_mon_check_n_drop_mpdu() - Check if the current MPDU is not from the
 *				   PMAC which is being currently processed, and
 *				   if yes, drop the MPDU.
 * @pdev: DP pdev handle
 * @mac_id: MAC id which is being currently processed
 * @rxdma_dst_ring_desc: RXDMA monitor destination ring entry
 * @head: HEAD if the rx_desc list to be freed
 * @tail: TAIL of the rx_desc list to be freed
 * @rx_bufs_dropped: Number of msdus dropped
 *
 * Return: QDF_STATUS_SUCCESS, if the mpdu was to be dropped
 *	   QDF_STATUS_E_INVAL/QDF_STATUS_E_FAILURE, if the mdpu was not dropped
 */
static QDF_STATUS
dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
			    hal_rxdma_desc_t rxdma_dst_ring_desc,
			    union dp_rx_desc_list_elem_t **head,
			    union dp_rx_desc_list_elem_t **tail,
			    uint32_t *rx_bufs_dropped)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	uint8_t src_link_id;
	QDF_STATUS status;

	/* Unknown monitor channel band: cannot attribute this entry to any
	 * PMAC, so drop it unconditionally.
	 */
	if (mon_pdev->mon_chan_band == REG_BAND_UNKNOWN)
		goto drop_mpdu;

	lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];

	status = hal_rx_reo_ent_get_src_link_id(soc->hal_soc,
						rxdma_dst_ring_desc,
						&src_link_id);
	if (QDF_IS_STATUS_ERROR(status))
		return QDF_STATUS_E_INVAL;

	/* Entry belongs to the PMAC being processed: leave it alone */
	if (src_link_id == lmac_id)
		return QDF_STATUS_E_INVAL;

drop_mpdu:
	*rx_bufs_dropped = dp_rx_mon_drop_one_mpdu(pdev, mac_id,
						   rxdma_dst_ring_desc,
						   head, tail);

	return QDF_STATUS_SUCCESS;
}
#else
/* Stub: non-monitor-PMAC drop support compiled out; never drops */
static inline QDF_STATUS
dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
			    hal_rxdma_desc_t rxdma_dst_ring_desc,
			    union dp_rx_desc_list_elem_t **head,
			    union dp_rx_desc_list_elem_t **tail,
			    uint32_t *rx_bufs_dropped)
{
	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_mon_dest_process() - Reap the RXDMA monitor destination ring for one
 * MAC, deliver completed MPDUs to the monitor path and replenish buffers.
 * @soc: core DP main context
 * @int_ctx: interrupt context
 * @mac_id: MAC id which is being currently processed
 * @quota: processing quota (currently not consulted in the loop body)
 *
 * Runs under mon_lock. Processing stops when the destination-ring PPDU id
 * runs ahead of the status-ring PPDU id, so both rings stay in sync.
 */
void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			    uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint8_t pdev_id;
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *mon_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t ppdu_id;
	uint32_t rx_bufs_used;
	uint32_t mpdu_rx_bufs_used;
	int mac_for_pdev = mac_id;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev) {
		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return;
	}

	mon_pdev = pdev->monitor_pdev;
	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);

	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
		dp_rx_mon_dest_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
				   soc, mon_dst_srng);
		return;
	}

	hal_soc = soc->hal_soc;

	qdf_assert((hal_soc && pdev));

	qdf_spin_lock_bh(&mon_pdev->mon_lock);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Mon Dest Ring access Failed -- %pK",
			  __func__, __LINE__, mon_dst_srng);
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
		return;
	}

	pdev_id = pdev->pdev_id;
	/* PPDU id last seen on the status ring; destination ring must track */
	ppdu_id = mon_pdev->ppdu_info.com_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &mon_pdev->rx_mon_stats;

	while (qdf_likely(rxdma_dst_ring_desc =
		hal_srng_dst_peek(hal_soc, mon_dst_srng))) {
		qdf_nbuf_t head_msdu, tail_msdu;
		uint32_t npackets;
		uint32_t rx_bufs_dropped;

		rx_bufs_dropped = 0;
		head_msdu = (qdf_nbuf_t)NULL;
		tail_msdu = (qdf_nbuf_t)NULL;

		/* Entry from a PMAC other than the one being processed is
		 * dropped wholesale and the ring pointer advanced.
		 */
		if (QDF_STATUS_SUCCESS ==
		    dp_rx_mon_check_n_drop_mpdu(pdev, mac_id,
						rxdma_dst_ring_desc,
						&head, &tail,
						&rx_bufs_dropped)) {
			/* Increment stats */
			rx_bufs_used += rx_bufs_dropped;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		mpdu_rx_bufs_used =
			dp_rx_mon_mpdu_pop(soc, mac_id,
					   rxdma_dst_ring_desc,
					   &head_msdu, &tail_msdu,
					   &npackets, &ppdu_id,
					   &head, &tail);

		rx_bufs_used += mpdu_rx_bufs_used;

		/* Track consecutive empty reaps to detect a stuck ring */
		if (mpdu_rx_bufs_used)
			mon_pdev->mon_dest_ring_stuck_cnt = 0;
		else
			mon_pdev->mon_dest_ring_stuck_cnt++;

		/* Destination ring has not moved for too many status-ring
		 * PPDUs: force the status PPDU id to the destination's and
		 * retry, as a workaround.
		 */
		if (mon_pdev->mon_dest_ring_stuck_cnt >
		    MON_DEST_RING_STUCK_MAX_CNT) {
			dp_info("destination ring stuck");
			dp_info("ppdu_id status=%d dest=%d",
				mon_pdev->ppdu_info.com_info.ppdu_id, ppdu_id);
			rx_mon_stats->mon_rx_dest_stuck++;
			mon_pdev->ppdu_info.com_info.ppdu_id = ppdu_id;
			continue;
		}

		/* Destination PPDU ran ahead of status PPDU: record both ids
		 * in the history buffers and stop reaping for now.
		 */
		if (ppdu_id != mon_pdev->ppdu_info.com_info.ppdu_id) {
			rx_mon_stats->stat_ring_ppdu_id_hist[
				rx_mon_stats->ppdu_id_hist_idx] =
				mon_pdev->ppdu_info.com_info.ppdu_id;
			rx_mon_stats->dest_ring_ppdu_id_hist[
				rx_mon_stats->ppdu_id_hist_idx] = ppdu_id;
			rx_mon_stats->ppdu_id_hist_idx =
				(rx_mon_stats->ppdu_id_hist_idx + 1) &
				(MAX_PPDU_ID_HIST - 1);
			dp_rx_mon_dest_debug("%pK: ppdu_id %x != ppdu_info.com_info.ppdu_id %x",
					     soc, ppdu_id,
					     mon_pdev->ppdu_info.com_info.ppdu_id);
			break;
		}

		if (qdf_likely((head_msdu) && (tail_msdu))) {
			rx_mon_stats->dest_mpdu_done++;
			dp_rx_mon_deliver(soc, mac_id, head_msdu, tail_msdu);
		}

		rxdma_dst_ring_desc =
			hal_srng_dst_get_next(hal_soc,
					      mon_dst_srng);
	}

	dp_srng_access_end(int_ctx, soc, mon_dst_srng);

	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	/* Give every reaped buffer back to the RXDMA monitor buffer ring */
	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		dp_rx_buffers_replenish(soc, mac_id,
					dp_rxdma_get_mon_buf_ring(pdev,
								  mac_for_pdev),
					dp_rx_get_mon_desc_pool(soc, mac_id,
								pdev_id),
					rx_bufs_used, &head, &tail, false);
	}
}

/**
 * dp_rx_pdev_mon_buf_buffers_alloc() - Attach buffers to the RXDMA monitor
 * buffer ring, either a small delayed batch or the full ring.
 * @pdev: DP pdev handle
 * @mac_id: MAC id for which buffers are attached
 * @delayed_replenish: true to attach only delayed_replenish_entries buffers
 *
 * Return: QDF_STATUS of the attach/replenish operation
 */
QDF_STATUS
dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
				 bool delayed_replenish)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct rx_desc_pool 
*rx_desc_pool;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;

	mon_buf_ring = dp_rxdma_get_mon_buf_ring(pdev, mac_id);

	num_entries = mon_buf_ring->num_entries;

	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev_id);

	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);

	/* Replenish RXDMA monitor buffer ring with 8 buffers only
	 * delayed_replenish_entries is actually 8 but when we call
	 * dp_pdev_rx_buffers_attach() we pass 1 less than 8, hence
	 * added 1 to delayed_replenish_entries to ensure we have 8
	 * entries. Once the monitor VAP is configured we replenish
	 * the complete RXDMA monitor buffer ring.
	 */
	if (delayed_replenish) {
		num_entries = soc_cfg_ctx->delayed_replenish_entries + 1;
		status = dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring,
						   rx_desc_pool,
						   num_entries - 1);
	} else {
		union dp_rx_desc_list_elem_t *tail = NULL;
		union dp_rx_desc_list_elem_t *desc_list = NULL;

		status = dp_rx_buffers_replenish(soc, mac_id,
						 mon_buf_ring,
						 rx_desc_pool,
						 num_entries,
						 &desc_list,
						 &tail, false);
	}

	return status;
}

/**
 * dp_rx_pdev_mon_buf_desc_pool_init() - Initialize the monitor buffer RX
 * descriptor pool for a MAC and attach full-monitor-mode resources.
 * @pdev: DP pdev handle
 * @mac_id: MAC id whose pool is initialized
 *
 * No-op when the pool's freelist already exists (already initialized).
 */
void
dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_desc_pool_size;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];

	num_entries = mon_buf_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	/* If descriptor pool is already initialized, do not initialize it */
	if (rx_desc_pool->freelist)
		return;

	dp_debug("Mon RX Desc buf Pool[%d] init entries=%u",
		 pdev_id, num_entries);

	/* Pool is oversized by the configured SW descriptor weight */
	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
		num_entries;

	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id);
	rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
	rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT;
	/* Enable frag processing if feature is enabled */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, true);

	dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool);

	/* Reset the duplicate-detection WAR state */
	mon_pdev->mon_last_linkdesc_paddr = 0;

	mon_pdev->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;

	/* Attach full monitor mode resources */
	dp_full_mon_attach(pdev);
}

/**
 * dp_rx_pdev_mon_buf_desc_pool_deinit() - De-initialize the monitor buffer RX
 * descriptor pool and detach full-monitor-mode resources.
 * @pdev: DP pdev handle
 * @mac_id: MAC id whose pool is de-initialized
 */
static void
dp_rx_pdev_mon_buf_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	dp_debug("Mon RX Desc buf Pool[%d] deinit", pdev_id);

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);

	/* Detach full monitor mode resources */
	dp_full_mon_detach(pdev);
}

/**
 * dp_rx_pdev_mon_buf_desc_pool_free() - Free the monitor buffer RX descriptor
 * pool memory for a MAC.
 * @pdev: DP pdev handle
 * @mac_id: MAC id whose pool is freed
 */
static void
dp_rx_pdev_mon_buf_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	dp_debug("Mon RX Buf Desc Pool Free pdev[%d]", pdev_id);

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}

/**
 * dp_rx_pdev_mon_buf_buffers_free() - Free all buffers attached to the
 * monitor buffer RX descriptor pool.
 * @pdev: DP pdev handle
 * @mac_id: MAC id whose buffers are freed
 *
 * Frees frags or nbufs depending on whether frag processing is enabled
 * for the pool.
 */
void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	dp_debug("Mon RX Buf buffers Free pdev[%d]", pdev_id);

	if (rx_desc_pool->rx_mon_dest_frag_enable)
		dp_rx_desc_frag_free(soc, rx_desc_pool);
	else
		dp_rx_desc_nbuf_free(soc, rx_desc_pool, true);
}

/**
 * dp_rx_pdev_mon_buf_desc_pool_alloc() - Allocate the monitor buffer RX
 * descriptor pool for a MAC.
 * @pdev: DP pdev handle
 * @mac_id: MAC id whose pool is allocated
 *
 * Return: QDF_STATUS_SUCCESS if the pool exists or was allocated
 */
QDF_STATUS
dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_desc_pool_size;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;

	mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];

	num_entries = mon_buf_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u",
		 pdev_id, num_entries);

	/* Pool is oversized by the configured SW descriptor weight */
	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
		num_entries;

	/* Idempotent: an already-allocated pool is left untouched */
	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_SUCCESS)
		return QDF_STATUS_SUCCESS;

	return dp_rx_desc_pool_alloc(soc, rx_desc_pool_size, rx_desc_pool);
}

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
/**
 * dp_mon_dest_srng_drop_for_mac() - Reap and drop entries from the RXDMA
 * monitor destination ring of a MAC.
 * @pdev: DP pdev handle
 * @mac_id: MAC id whose destination ring is drained
 * @force_flush: true to drop every entry regardless of reap limit or PMAC
 *
 * Return: number of ring entries reaped
 */
uint32_t
dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
			      bool force_flush)
{
	struct dp_soc *soc = pdev->soc;
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *mon_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t reap_cnt = 0;
	uint32_t rx_bufs_dropped;
	struct dp_mon_pdev *mon_pdev;
	bool is_rxdma_dst_ring_common;

	if (qdf_unlikely(!soc || !soc->hal_soc))
		return reap_cnt;

	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_id);

	if (qdf_unlikely(!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)))
		return reap_cnt;

	hal_soc = soc->hal_soc;
	mon_pdev = pdev->monitor_pdev;

	qdf_spin_lock_bh(&mon_pdev->mon_lock);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
		return reap_cnt;
	}

	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);
	is_rxdma_dst_ring_common = dp_is_rxdma_dst_ring_common(pdev);

	/* Reap up to MON_DROP_REAP_LIMIT entries unless force_flush */
	while ((rxdma_dst_ring_desc =
		hal_srng_dst_peek(hal_soc, mon_dst_srng)) &&
		(reap_cnt < MON_DROP_REAP_LIMIT || force_flush)) {
		if (is_rxdma_dst_ring_common && !force_flush) {
			/* Shared destination ring: only drop entries that do
			 * not belong to the PMAC being processed.
			 */
			if (QDF_STATUS_SUCCESS ==
			    dp_rx_mon_check_n_drop_mpdu(pdev, mac_id,
							rxdma_dst_ring_desc,
							&head, &tail,
							&rx_bufs_dropped)) {
				/* Increment stats */
				rx_bufs_used += rx_bufs_dropped;
			} else {
				/*
				 * If the mpdu was not dropped, we need to
				 * wait for the entry to be processed, along
				 * with the status ring entry for the other
				 * mac. Hence we bail out here.
				 */
				break;
			}
		} else {
			rx_bufs_used += dp_rx_mon_drop_one_mpdu(pdev, mac_id,
							rxdma_dst_ring_desc,
							&head, &tail);
		}
		reap_cnt++;
		rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
							    mon_dst_srng);
	}

	hal_srng_access_end(hal_soc, mon_dst_srng);

	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	/* Return every reaped buffer to the RXDMA monitor buffer ring */
	if (rx_bufs_used) {
		dp_rx_buffers_replenish(soc, mac_id,
					dp_rxdma_get_mon_buf_ring(pdev, mac_id),
					rx_desc_pool,
					rx_bufs_used, &head, &tail, false);
	}

	return reap_cnt;
}
#else
#if defined(QCA_SUPPORT_FULL_MON) && defined(WIFI_MONITOR_SUPPORT)
/**
 * dp_mon_dest_srng_drop_for_mac() - Full-monitor-mode variant: drain the
 * RXDMA monitor destination ring of a MAC, freeing every buffer.
 * @pdev: DP pdev handle
 * @mac_id: MAC id whose destination ring is drained
 *
 * Return: number of ring entries reaped
 */
uint32_t
dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *mon_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	void *rx_msdu_link_desc;
	uint16_t num_msdus;
	struct hal_rx_msdu_list msdu_list;
	qdf_nbuf_t nbuf = NULL;
	uint32_t i;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct rx_desc_pool *rx_desc_pool = NULL;
	uint32_t reap_cnt = 0;
	struct dp_mon_pdev *mon_pdev;
	struct hal_rx_mon_desc_info *desc_info;

	if (qdf_unlikely(!soc || !soc->hal_soc))
		return reap_cnt;

	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_id);

	if (qdf_unlikely(!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)))
		return reap_cnt;

	hal_soc = soc->hal_soc;
	mon_pdev = pdev->monitor_pdev;
	desc_info = mon_pdev->mon_desc;

	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);

	while ((rxdma_dst_ring_desc =
		hal_srng_dst_peek(hal_soc, mon_dst_srng))) {
		/* Decode the SW monitor descriptor for this ring entry */
		qdf_mem_zero(desc_info, sizeof(struct hal_rx_mon_desc_info));
		hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
					    (void *)rxdma_dst_ring_desc,
					    (void *)desc_info);

		/* End-of-PPDU marker entries carry no buffers: skip */
		if (desc_info->end_of_ppdu) {
			rxdma_dst_ring_desc =
				hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		/* Walk the link descriptor chain, freeing every buffer */
		do {
			rx_msdu_link_desc =
				dp_rx_cookie_2_mon_link_desc(pdev,
							     &desc_info->
							     link_desc,
							     mac_id);

			if (qdf_unlikely(!rx_msdu_link_desc)) {
				mon_pdev->rx_mon_stats.mon_link_desc_invalid++;
				goto next_entry;
			}

			hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
					     &msdu_list, &num_msdus);

			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc;
				qdf_dma_addr_t buf_paddr;

				rx_desc =
					dp_rx_get_mon_desc(soc, msdu_list.
							   sw_cookie[i]);

				if (qdf_unlikely(!rx_desc)) {
					mon_pdev->rx_mon_stats.
						mon_rx_desc_invalid++;
					continue;
				}

				nbuf = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
				buf_paddr =
					dp_rx_mon_get_paddr_from_desc(rx_desc);

				/* Sanity: skip stale/mismatched descriptors */
				if (qdf_unlikely(!rx_desc->in_use || !nbuf ||
						 msdu_list.paddr[i] !=
						 buf_paddr)) {
					mon_pdev->rx_mon_stats.
						mon_nbuf_sanity_err++;
					continue;
				}
				rx_bufs_used++;

				if (!rx_desc->unmapped) {
					dp_rx_mon_buffer_unmap(soc, rx_desc,
							       rx_desc_pool->
							       buf_size);
					rx_desc->unmapped = 1;
				}

				dp_rx_mon_buffer_free(rx_desc);
				dp_rx_add_to_free_desc_list(&head, &tail,
							    rx_desc);

				/* Continuation flag clear marks the last
				 * buffer of an MSDU.
				 */
				if (!(msdu_list.msdu_info[i].msdu_flags &
				      HAL_MSDU_F_MSDU_CONTINUATION))
					desc_info->msdu_count--;
			}

			/*
			 * Store the current link buffer into to the local
			 * structure to be used for release purpose.
			 */
			hal_rxdma_buff_addr_info_set(soc->hal_soc,
						     rx_link_buf_info,
						     desc_info->link_desc.paddr,
						     desc_info->link_desc.
						     sw_cookie,
						     desc_info->link_desc.rbm);

			hal_rx_mon_next_link_desc_get(soc->hal_soc,
						      rx_msdu_link_desc,
						      &desc_info->link_desc);
			if (dp_rx_monitor_link_desc_return(pdev,
							   (hal_buff_addrinfo_t)
							   rx_link_buf_info,
							   mac_id, bm_action) !=
			    QDF_STATUS_SUCCESS)
				dp_info_rl("monitor link desc return failed");
		} while (desc_info->link_desc.paddr);

next_entry:
		reap_cnt++;
		rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
							    mon_dst_srng);
	}

	if (rx_bufs_used) {
		dp_rx_buffers_replenish(soc, mac_id,
					dp_rxdma_get_mon_buf_ring(pdev, mac_id),
					rx_desc_pool,
					rx_bufs_used, &head, &tail, false);
	}

	return reap_cnt;
}
#else
/* Stub when neither drop-for-mac nor full monitor mode is compiled in */
uint32_t
dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
{
	return 0;
}
#endif
#endif

/*
 * dp_rx_pdev_mon_dest_desc_pool_free() - Free the monitor destination
 * descriptor pool and the HW link descriptor banks for a MAC.
 * @pdev: DP pdev handle
 * @mac_for_pdev: MAC id within the pdev
 */
static void
dp_rx_pdev_mon_dest_desc_pool_free(struct dp_pdev *pdev, int mac_for_pdev)
{
	struct dp_soc *soc = pdev->soc;

	dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev);
	dp_hw_link_desc_pool_banks_free(soc, mac_for_pdev);
}

/*
 * dp_rx_pdev_mon_dest_desc_pool_deinit() - De-initialize the monitor
 * destination descriptor pool for a MAC; no-op unless RXDMA1 is enabled.
 * @pdev: DP pdev handle
 * @mac_for_pdev: MAC id within the pdev
 */
static void
dp_rx_pdev_mon_dest_desc_pool_deinit(struct dp_pdev *pdev, int mac_for_pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc->wlan_cfg_ctx->rxdma1_enable)
		return;

	dp_rx_pdev_mon_buf_desc_pool_deinit(pdev, mac_for_pdev);
}

/*
 * dp_rx_pdev_mon_dest_desc_pool_init() - Initialize the monitor destination
 * descriptor pool and replenish the link descriptor ring for a MAC; no-op
 * unless RXDMA1 and delayed monitor replenish are both enabled.
 * @pdev: DP pdev handle
 * @mac_for_pdev: MAC id within the pdev
 */
static void
dp_rx_pdev_mon_dest_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_for_pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
		return;

	dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);
	dp_link_desc_ring_replenish(soc, mac_for_pdev);
}

static void
dp_rx_pdev_mon_dest_buffers_free(struct dp_pdev *pdev, int mac_for_pdev)
{
	struct dp_soc *soc = pdev->soc;
1225 if (!soc->wlan_cfg_ctx->rxdma1_enable) 1226 return; 1227 1228 dp_rx_pdev_mon_buf_buffers_free(pdev, mac_for_pdev); 1229 } 1230 1231 static QDF_STATUS 1232 dp_rx_pdev_mon_dest_buffers_alloc(struct dp_pdev *pdev, int mac_for_pdev) 1233 { 1234 struct dp_soc *soc = pdev->soc; 1235 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx; 1236 bool delayed_replenish; 1237 QDF_STATUS status = QDF_STATUS_SUCCESS; 1238 1239 delayed_replenish = soc_cfg_ctx->delayed_replenish_entries ? 1 : 0; 1240 if (!soc->wlan_cfg_ctx->rxdma1_enable || 1241 !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) 1242 return status; 1243 1244 status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev, 1245 delayed_replenish); 1246 if (!QDF_IS_STATUS_SUCCESS(status)) 1247 dp_err("dp_rx_pdev_mon_buf_desc_pool_alloc() failed"); 1248 1249 return status; 1250 } 1251 1252 static QDF_STATUS 1253 dp_rx_pdev_mon_dest_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_for_pdev) 1254 { 1255 struct dp_soc *soc = pdev->soc; 1256 QDF_STATUS status = QDF_STATUS_SUCCESS; 1257 1258 if (!soc->wlan_cfg_ctx->rxdma1_enable || 1259 !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) 1260 return status; 1261 1262 /* Allocate sw rx descriptor pool for monitor RxDMA buffer ring */ 1263 status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev); 1264 if (!QDF_IS_STATUS_SUCCESS(status)) { 1265 dp_err("dp_rx_pdev_mon_buf_desc_pool_alloc() failed"); 1266 goto fail; 1267 } 1268 1269 /* Allocate link descriptors for the monitor link descriptor ring */ 1270 status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev); 1271 if (!QDF_IS_STATUS_SUCCESS(status)) { 1272 dp_err("dp_hw_link_desc_pool_banks_alloc() failed"); 1273 goto mon_buf_dealloc; 1274 } 1275 1276 return status; 1277 1278 mon_buf_dealloc: 1279 dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev); 1280 fail: 1281 return status; 1282 } 1283 #else 1284 static void 1285 dp_rx_pdev_mon_dest_desc_pool_free(struct dp_pdev *pdev, int mac_for_pdev) 
{
}

static void
dp_rx_pdev_mon_dest_desc_pool_deinit(struct dp_pdev *pdev, int mac_for_pdev)
{
}

static void
dp_rx_pdev_mon_dest_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_for_pdev)
{
}

static void
dp_rx_pdev_mon_dest_buffers_free(struct dp_pdev *pdev, int mac_for_pdev)
{
}

static QDF_STATUS
dp_rx_pdev_mon_dest_buffers_alloc(struct dp_pdev *pdev, int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS
dp_rx_pdev_mon_dest_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
/* Stub: no destination ring to drop from in this configuration */
uint32_t
dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
{
	return 0;
}
#endif

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC)
/* Stub: always reports "not dropped" so callers fall back to normal reap */
static QDF_STATUS
dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
			    hal_rxdma_desc_t rxdma_dst_ring_desc,
			    union dp_rx_desc_list_elem_t **head,
			    union dp_rx_desc_list_elem_t **tail,
			    uint32_t *rx_bufs_dropped)
{
	return QDF_STATUS_E_FAILURE;
}
#endif
#endif

#ifdef WLAN_SOFTUMAC_SUPPORT
/* Free the multi-page HW link descriptor bank for @mac_id (and drop its
 * minidump registration). Asserts if the page bookkeeping is missing.
 */
static void dp_mon_hw_link_desc_bank_free(struct dp_soc *soc, uint32_t mac_id)
{
	struct qdf_mem_multi_page_t *pages;

	pages = dp_monitor_get_link_desc_pages(soc, mac_id);
	if (!pages) {
		dp_err("can not get mon link desc pages");
		QDF_ASSERT(0);
		return;
	}

	if (pages->dma_pages) {
		wlan_minidump_remove((void *)
				     pages->dma_pages->page_v_addr_start,
				     pages->num_pages * pages->page_size,
				     soc->ctrl_psoc,
				     WLAN_MD_DP_SRNG_SW2RXDMA_LINK_RING,
				     "mon hw_link_desc_bank");
		dp_desc_multi_pages_mem_free(soc, QDF_DP_HW_LINK_DESC_TYPE,
					     pages, 0, false);
	}
}

/* Allocate the multi-page HW link descriptor bank for @mac_id, rounding the
 * requested entry count up to a power of two, and register it with minidump.
 * Idempotent: returns success immediately if the bank already exists.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAULT on allocation failure.
 */
static QDF_STATUS
dp_mon_hw_link_desc_bank_alloc(struct dp_soc *soc, uint32_t mac_id)
{
	struct qdf_mem_multi_page_t *pages;
	uint32_t *total_link_descs, total_mem_size;
	uint32_t num_entries;
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint8_t minidump_str[MINIDUMP_STR_SIZE];

	pages = dp_monitor_get_link_desc_pages(soc, mac_id);
	if (!pages) {
		dp_err("can not get mon link desc pages");
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAULT;
	}

	/* If link descriptor banks are allocated, return from here */
	if (pages->num_pages)
		return QDF_STATUS_SUCCESS;

	num_entries = dp_monitor_get_num_link_desc_ring_entries(soc, mac_id);
	total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
	qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
		      MINIDUMP_STR_SIZE);

	/* Round up to power of 2 */
	*total_link_descs = 1;
	while (*total_link_descs < num_entries)
		*total_link_descs <<= 1;

	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
		     soc, *total_link_descs, link_desc_size);

	/* Extra alignment bytes so the bank can be aligned after alloc */
	total_mem_size = *total_link_descs * link_desc_size;
	total_mem_size += link_desc_align;

	dp_init_info("%pK: total_mem_size: %d", soc, total_mem_size);

	dp_set_max_page_size(pages, max_alloc_size);
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_HW_LINK_DESC_TYPE,
				      pages, link_desc_size,
				      *total_link_descs, 0, false);

	if (!pages->num_pages) {
		dp_err("Multi page alloc fail for mon hw link desc pool");
		return QDF_STATUS_E_FAULT;
	}

	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
			  pages->num_pages * pages->page_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_SW2RXDMA_LINK_RING,
			  "mon hw_link_desc_bank");

	return QDF_STATUS_SUCCESS;
}

/* Thin wrapper so callers have a single replenish entry point per config */
static void
dp_mon_link_desc_ring_replenish(struct dp_soc *soc, int mac_id)
{
	dp_link_desc_ring_replenish(soc, mac_id);
}
#else
static QDF_STATUS
dp_mon_hw_link_desc_bank_alloc(struct dp_soc *soc, uint32_t mac_id)
{
	return QDF_STATUS_SUCCESS;
}

static void
dp_mon_hw_link_desc_bank_free(struct dp_soc *soc, uint32_t mac_id) {}

static void
dp_mon_link_desc_ring_replenish(struct dp_soc *soc, int mac_id) {}
#endif

/* Free status desc pool, HW link desc bank and dest desc pool for one mac */
static void
dp_rx_pdev_mon_cmn_desc_pool_free(struct dp_pdev *pdev, int mac_id)
{
	struct dp_soc *soc = pdev->soc;
	uint8_t pdev_id = pdev->pdev_id;
	int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);

	dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev);
	dp_mon_hw_link_desc_bank_free(soc, mac_for_pdev);
	dp_rx_pdev_mon_dest_desc_pool_free(pdev, mac_for_pdev);
}

void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev)
{
	int mac_id;

	for (mac_id = 0; mac_id < NUM_RXDMA_STATUS_RINGS_PER_PDEV; mac_id++)
		dp_rx_pdev_mon_cmn_desc_pool_free(pdev, mac_id);
}

/* Deinit status and dest desc pools for one mac */
static void
dp_rx_pdev_mon_cmn_desc_pool_deinit(struct dp_pdev *pdev, int mac_id)
{
	struct dp_soc *soc = pdev->soc;
	uint8_t pdev_id = pdev->pdev_id;
	int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);

	dp_rx_pdev_mon_status_desc_pool_deinit(pdev, mac_for_pdev);

	dp_rx_pdev_mon_dest_desc_pool_deinit(pdev, mac_for_pdev);
}

void
dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev)
{
	int mac_id;

	for (mac_id = 0; mac_id < NUM_RXDMA_STATUS_RINGS_PER_PDEV; mac_id++)
		dp_rx_pdev_mon_cmn_desc_pool_deinit(pdev, mac_id);
	/* mon_lock was created in dp_rx_pdev_mon_desc_pool_init() */
	qdf_spinlock_destroy(&pdev->monitor_pdev->mon_lock);
}

static void
dp_rx_pdev_mon_cmn_desc_pool_init(struct dp_pdev *pdev, int mac_id)
1483 { 1484 struct dp_soc *soc = pdev->soc; 1485 uint32_t mac_for_pdev; 1486 1487 mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id); 1488 dp_rx_pdev_mon_status_desc_pool_init(pdev, mac_for_pdev); 1489 dp_mon_link_desc_ring_replenish(soc, mac_for_pdev); 1490 1491 dp_rx_pdev_mon_dest_desc_pool_init(pdev, mac_for_pdev); 1492 } 1493 1494 void 1495 dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev) 1496 { 1497 int mac_id; 1498 1499 for (mac_id = 0; mac_id < NUM_RXDMA_STATUS_RINGS_PER_PDEV; mac_id++) 1500 dp_rx_pdev_mon_cmn_desc_pool_init(pdev, mac_id); 1501 qdf_spinlock_create(&pdev->monitor_pdev->mon_lock); 1502 } 1503 1504 void 1505 dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev) 1506 { 1507 int mac_id; 1508 int mac_for_pdev; 1509 uint8_t pdev_id = pdev->pdev_id; 1510 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = pdev->soc->wlan_cfg_ctx; 1511 1512 for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_status_rings_per_pdev; 1513 mac_id++) { 1514 mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, 1515 pdev_id); 1516 dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev); 1517 } 1518 1519 for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_dst_rings_per_pdev; 1520 mac_id++) { 1521 mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, 1522 pdev_id); 1523 dp_rx_pdev_mon_dest_buffers_free(pdev, mac_for_pdev); 1524 } 1525 pdev->monitor_pdev->pdev_mon_init = 0; 1526 } 1527 1528 QDF_STATUS 1529 dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev) 1530 { 1531 int mac_id; 1532 int mac_for_pdev; 1533 QDF_STATUS status = QDF_STATUS_SUCCESS; 1534 uint8_t pdev_id = pdev->pdev_id; 1535 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = pdev->soc->wlan_cfg_ctx; 1536 1537 for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_status_rings_per_pdev; 1538 mac_id++) { 1539 mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, 1540 pdev_id); 1541 status = dp_rx_pdev_mon_status_buffers_alloc(pdev, 1542 mac_for_pdev); 1543 if (!QDF_IS_STATUS_SUCCESS(status)) { 1544 
dp_err("dp_rx_pdev_mon_status_desc_pool_alloc() failed"); 1545 goto mon_status_buf_fail; 1546 } 1547 } 1548 1549 for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_dst_rings_per_pdev; 1550 mac_id++) { 1551 mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, 1552 pdev_id); 1553 status = dp_rx_pdev_mon_dest_buffers_alloc(pdev, mac_for_pdev); 1554 if (!QDF_IS_STATUS_SUCCESS(status)) 1555 goto mon_stat_buf_dealloc; 1556 } 1557 1558 return status; 1559 1560 mon_stat_buf_dealloc: 1561 dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev); 1562 mon_status_buf_fail: 1563 return status; 1564 } 1565 1566 static QDF_STATUS 1567 dp_rx_pdev_mon_cmn_desc_pool_alloc(struct dp_pdev *pdev, int mac_id) 1568 { 1569 struct dp_soc *soc = pdev->soc; 1570 uint8_t pdev_id = pdev->pdev_id; 1571 uint32_t mac_for_pdev; 1572 QDF_STATUS status; 1573 1574 mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); 1575 1576 /* Allocate sw rx descriptor pool for monitor status ring */ 1577 status = dp_rx_pdev_mon_status_desc_pool_alloc(pdev, mac_for_pdev); 1578 if (!QDF_IS_STATUS_SUCCESS(status)) { 1579 dp_err("dp_rx_pdev_mon_status_desc_pool_alloc() failed"); 1580 goto fail; 1581 } 1582 1583 /* Allocate hw link desc bank for monitor mode for 1584 * SOFTUMAC architecture. 
1585 */ 1586 status = dp_mon_hw_link_desc_bank_alloc(soc, mac_for_pdev); 1587 if (!QDF_IS_STATUS_SUCCESS(status)) { 1588 dp_err("dp_mon_hw_link_desc_bank_alloc() failed"); 1589 goto mon_status_dealloc; 1590 } 1591 1592 status = dp_rx_pdev_mon_dest_desc_pool_alloc(pdev, mac_for_pdev); 1593 if (!QDF_IS_STATUS_SUCCESS(status)) 1594 goto link_desc_bank_free; 1595 1596 return status; 1597 1598 link_desc_bank_free: 1599 dp_mon_hw_link_desc_bank_free(soc, mac_for_pdev); 1600 mon_status_dealloc: 1601 dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev); 1602 fail: 1603 return status; 1604 } 1605 1606 QDF_STATUS 1607 dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev) 1608 { 1609 QDF_STATUS status; 1610 int mac_id, count; 1611 1612 for (mac_id = 0; mac_id < NUM_RXDMA_STATUS_RINGS_PER_PDEV; mac_id++) { 1613 status = dp_rx_pdev_mon_cmn_desc_pool_alloc(pdev, mac_id); 1614 if (!QDF_IS_STATUS_SUCCESS(status)) { 1615 dp_rx_mon_dest_err("%pK: %d failed", 1616 pdev->soc, mac_id); 1617 1618 for (count = 0; count < mac_id; count++) 1619 dp_rx_pdev_mon_cmn_desc_pool_free(pdev, count); 1620 1621 return status; 1622 } 1623 } 1624 return status; 1625 } 1626 1627 #ifdef QCA_WIFI_MONITOR_MODE_NO_MSDU_START_TLV_SUPPORT 1628 static inline void 1629 hal_rx_populate_buf_info(struct dp_soc *soc, 1630 struct hal_rx_mon_dest_buf_info *buf_info, 1631 void *rx_desc) 1632 { 1633 hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc, 1634 (uint8_t *)buf_info, 1635 sizeof(*buf_info)); 1636 } 1637 1638 static inline uint8_t 1639 hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc *soc, 1640 struct hal_rx_mon_dest_buf_info *buf_info, 1641 void *rx_desc, bool is_first_frag) 1642 { 1643 if (is_first_frag) 1644 return buf_info->l2_hdr_pad; 1645 else 1646 return DP_RX_MON_RAW_L2_HDR_PAD_BYTE; 1647 } 1648 #else 1649 static inline void 1650 hal_rx_populate_buf_info(struct dp_soc *soc, 1651 struct hal_rx_mon_dest_buf_info *buf_info, 1652 void *rx_desc) 1653 { 1654 if (hal_rx_tlv_decap_format_get(soc->hal_soc, 
					rx_desc) ==
	    HAL_HW_RX_DECAP_FORMAT_RAW)
		buf_info->is_decap_raw = 1;

	if (hal_rx_tlv_mpdu_len_err_get(soc->hal_soc, rx_desc))
		buf_info->mpdu_len_err = 1;
}

/* Padding comes from the msdu_end TLV regardless of fragment position */
static inline uint8_t
hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc *soc,
				   struct hal_rx_mon_dest_buf_info *buf_info,
				   void *rx_desc, bool is_first_frag)
{
	return hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_desc);
}
#endif

/* Advance the nbuf data pointer past the rx pkt TLVs plus L2 header pad so
 * it points at the MSDU payload.
 */
static inline
void dp_rx_msdus_set_payload(struct dp_soc *soc, qdf_nbuf_t msdu,
			     uint8_t l2_hdr_offset)
{
	uint8_t *data;
	uint32_t rx_pkt_offset;

	/* NOTE(review): 'data' is computed but never used afterwards */
	data = qdf_nbuf_data(msdu);
	rx_pkt_offset = dp_rx_mon_get_rx_pkt_tlv_size(soc);
	qdf_nbuf_pull_head(msdu, rx_pkt_offset + l2_hdr_offset);
}

/* Re-stitch an MPDU from the chained MSDU nbufs of one monitor-dest PPDU:
 * raw frames are linked into a fraglist as-is; decapped frames get an
 * 802.11 + LLC header rebuilt from the rx TLVs. Returns the stitched nbuf
 * or NULL on drop/failure.
 */
static inline qdf_nbuf_t
dp_rx_mon_restitch_mpdu_from_msdus(struct dp_soc *soc,
				   uint32_t mac_id,
				   qdf_nbuf_t head_msdu,
				   qdf_nbuf_t last_msdu,
				   struct cdp_mon_status *rx_status)
{
	qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list;
	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
		mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
		is_amsdu, is_first_frag, amsdu_pad;
	void *rx_desc;
	char *hdr_desc;
	unsigned char *dest;
	struct ieee80211_frame *wh;
	struct ieee80211_qoscntl *qos;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_mon_pdev *mon_pdev;
	struct hal_rx_mon_dest_buf_info buf_info;
	uint8_t l2_hdr_offset;

	head_frag_list = NULL;
	mpdu_buf = NULL;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d",
				     soc, mac_id);
		return NULL;
	}

	mon_pdev = dp_pdev->monitor_pdev;

	/* The nbuf has been pulled just beyond the status and points to the
	 * payload
	 */
	if (!head_msdu)
		goto mpdu_stitch_fail;

	msdu_orig = head_msdu;

	rx_desc = qdf_nbuf_data(msdu_orig);
	qdf_mem_zero(&buf_info, sizeof(buf_info));
	hal_rx_populate_buf_info(soc, &buf_info, rx_desc);

	if (buf_info.mpdu_len_err) {
		/* It looks like there is some issue on MPDU len err */
		/* Need further investigate if drop the packet */
		DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
		return NULL;
	}

	/* FCS error status lives in the last MSDU's TLVs */
	rx_desc = qdf_nbuf_data(last_msdu);

	rx_status->cdp_rs_fcs_err = hal_rx_tlv_mpdu_fcs_err_get(soc->hal_soc,
								rx_desc);
	mon_pdev->ppdu_info.rx_status.rs_fcs_err = rx_status->cdp_rs_fcs_err;

	/* Fill out the rx_status from the PPDU start and end fields */
	/* HAL_RX_GET_PPDU_STATUS(soc, mac_id, rx_status); */

	rx_desc = qdf_nbuf_data(head_msdu);

	/* Easy case - The MSDU status indicates that this is a non-decapped
	 * packet in RAW mode.
	 */
	if (buf_info.is_decap_raw) {
		/* Note that this path might suffer from headroom unavailabilty
		 * - but the RX status is usually enough
		 */

		l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(soc,
								   &buf_info,
								   rx_desc,
								   true);
		dp_rx_msdus_set_payload(soc, head_msdu, l2_hdr_offset);

		dp_rx_mon_dest_debug("%pK: decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK",
				     soc, head_msdu, head_msdu->next,
				     last_msdu, last_msdu->next);

		mpdu_buf = head_msdu;

		prev_buf = mpdu_buf;

		frag_list_sum_len = 0;
		msdu = qdf_nbuf_next(head_msdu);
		is_first_frag = 1;

		/* Link the remaining MSDUs onto the head as an ext fraglist */
		while (msdu) {
			l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(
							soc, &buf_info,
							rx_desc, false);
			dp_rx_msdus_set_payload(soc, msdu, l2_hdr_offset);

			if (is_first_frag) {
				is_first_frag = 0;
				head_frag_list = msdu;
			}

			frag_list_sum_len += qdf_nbuf_len(msdu);

			/* Maintain the linking of the cloned MSDUS */
			qdf_nbuf_set_next_ext(prev_buf, msdu);

			/* Move to the next */
			prev_buf = msdu;
			msdu = qdf_nbuf_next(msdu);
		}

		/* Strip the FCS carried at the tail of the last buffer */
		qdf_nbuf_trim_tail(prev_buf, HAL_RX_FCS_LEN);

		/* If there were more fragments to this RAW frame */
		if (head_frag_list) {
			if (frag_list_sum_len <
				sizeof(struct ieee80211_frame_min_one)) {
				DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
				return NULL;
			}
			frag_list_sum_len -= HAL_RX_FCS_LEN;
			qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
						 frag_list_sum_len);
			qdf_nbuf_set_next(mpdu_buf, NULL);
		}

		goto mpdu_stitch_done;
	}

	/* Decap mode:
	 * Calculate the amount of header in decapped packet to knock off based
	 * on the decap type and the corresponding number of raw bytes to copy
	 * status header
	 */
	rx_desc = qdf_nbuf_data(head_msdu);

	hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc);

	dp_rx_mon_dest_debug("%pK: decap format not raw", soc);

	/* Base size */
	wifi_hdr_len = sizeof(struct ieee80211_frame);
	wh = (struct ieee80211_frame *)hdr_desc;

	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;

	/* 4-address (WDS) frames carry an extra 6-byte address */
	if (dir == IEEE80211_FC1_DIR_DSTODS)
		wifi_hdr_len += 6;

	is_amsdu = 0;
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		qos = (struct ieee80211_qoscntl *)
			(hdr_desc + wifi_hdr_len);
		wifi_hdr_len += 2;

		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
	}

	/* Calculate security header length based on 'Protected'
	 * and 'EXT_IV' flag
	 */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		char *iv = (char *)wh + wifi_hdr_len;

		if (iv[3] & KEY_EXTIV)
			sec_hdr_len = 8;
		else
			sec_hdr_len = 4;
	} else {
		sec_hdr_len = 0;
	}
	wifi_hdr_len += sec_hdr_len;

	/* MSDU related stuff LLC - AMSDU subframe header etc */
	msdu_llc_len = is_amsdu ? (14 + 8) : 8;

	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;

	/* "Decap" header to remove from MSDU buffer */
	decap_hdr_pull_bytes = 14;

	/* Allocate a new nbuf for holding the 802.11 header retrieved from the
	 * status of the now decapped first msdu. Leave enough headroom for
	 * accommodating any radio-tap /prism like PHY header
	 */
	mpdu_buf = qdf_nbuf_alloc(soc->osdev,
				  MAX_MONITOR_HEADER + mpdu_buf_len,
				  MAX_MONITOR_HEADER, 4, FALSE);

	if (!mpdu_buf)
		goto mpdu_stitch_done;

	/* Copy the MPDU related header and enc headers into the first buffer
	 * - Note that there can be a 2 byte pad between heaader and enc header
	 */

	prev_buf = mpdu_buf;
	dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
	if (!dest)
		goto mpdu_stitch_fail;

	qdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
	hdr_desc += wifi_hdr_len;

#if 0
	dest = qdf_nbuf_put_tail(prev_buf, sec_hdr_len);
	adf_os_mem_copy(dest, hdr_desc, sec_hdr_len);
	hdr_desc += sec_hdr_len;
#endif

	/* The first LLC len is copied into the MPDU buffer */
	frag_list_sum_len = 0;

	msdu_orig = head_msdu;
	is_first_frag = 1;
	amsdu_pad = 0;

	/* Per-MSDU: append LLC header (plus A-MSDU pad) to the previous
	 * buffer, then strip TLVs + decap header from the MSDU payload.
	 */
	while (msdu_orig) {
		/* TODO: intra AMSDU padding - do we need it ??? */

		msdu = msdu_orig;

		if (is_first_frag) {
			head_frag_list = msdu;
		} else {
			/* Reload the hdr ptr only on non-first MSDUs */
			rx_desc = qdf_nbuf_data(msdu_orig);
			hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc,
							     rx_desc);
		}

		/* Copy this buffers MSDU related status into the prev buffer */

		if (is_first_frag)
			is_first_frag = 0;

		/* Update protocol and flow tag for MSDU */
		dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev,
						   msdu_orig, rx_desc);

		dest = qdf_nbuf_put_tail(prev_buf,
					 msdu_llc_len + amsdu_pad);

		if (!dest)
			goto mpdu_stitch_fail;

		dest += amsdu_pad;
		qdf_mem_copy(dest, hdr_desc, msdu_llc_len);

		l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(soc,
								   &buf_info,
								   rx_desc,
								   true);
		dp_rx_msdus_set_payload(soc, msdu, l2_hdr_offset);

		/* Push the MSDU buffer beyond the decap header */
		qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
		frag_list_sum_len += msdu_llc_len + qdf_nbuf_len(msdu)
			+ amsdu_pad;

		/* Set up intra-AMSDU pad to be added to start of next buffer -
		 * AMSDU pad is 4 byte pad on AMSDU subframe
		 */
		amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3;
		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;

		/* TODO FIXME How do we handle MSDUs that have fraglist - Should
		 * probably iterate all the frags cloning them along the way and
		 * and also updating the prev_buf pointer
		 */

		/* Move to the next */
		prev_buf = msdu;
		msdu_orig = qdf_nbuf_next(msdu_orig);
	}

#if 0
	/* Add in the trailer section - encryption trailer + FCS */
	qdf_nbuf_put_tail(prev_buf, HAL_RX_FCS_LEN);
	frag_list_sum_len += HAL_RX_FCS_LEN;
#endif

	frag_list_sum_len -= msdu_llc_len;

	/* TODO: Convert this to suitable adf routines */
	qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
				 frag_list_sum_len);

	dp_rx_mon_dest_debug("%pK: mpdu_buf %pK mpdu_buf->len %u",
			     soc, mpdu_buf, mpdu_buf->len);

mpdu_stitch_done:
	/* Check if this buffer contains the PPDU end status for TSF */
	/* Need revist this code to see where we can get tsf timestamp */
#if 0
	/* PPDU end TLV will be retrieved from monitor status ring */
	last_mpdu =
		(*(((u_int32_t *)&rx_desc->attention)) &
		RX_ATTENTION_0_LAST_MPDU_MASK) >>
		RX_ATTENTION_0_LAST_MPDU_LSB;

	if (last_mpdu)
		rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;

#endif
	return mpdu_buf;

mpdu_stitch_fail:
	/* Only the decap path owns a freshly allocated head buffer */
	if ((mpdu_buf) && !buf_info.is_decap_raw) {
		dp_rx_mon_dest_err("%pK: mpdu_stitch_fail mpdu_buf %pK",
				   soc, mpdu_buf);
		/* Free the head buffer */
		qdf_nbuf_free(mpdu_buf);
	}
	return NULL;
}

#ifdef DP_RX_MON_MEM_FRAG
/**
 * dp_rx_mon_fraglist_prepare() - Prepare nbuf fraglist from chained skb
 *
 * @head_msdu: Parent SKB
 * @tail_msdu: Last skb in the chained list
 *
 * Return: Void
 */
void dp_rx_mon_fraglist_prepare(qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
{
	qdf_nbuf_t msdu, mpdu_buf, head_frag_list;
	uint32_t frag_list_sum_len;

dp_err("[%s][%d] decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK", 2014 __func__, __LINE__, head_msdu, head_msdu->next, 2015 tail_msdu, tail_msdu->next); 2016 2017 /* Single skb accommodating MPDU worth Data */ 2018 if (tail_msdu == head_msdu) 2019 return; 2020 2021 mpdu_buf = head_msdu; 2022 frag_list_sum_len = 0; 2023 2024 msdu = qdf_nbuf_next(head_msdu); 2025 /* msdu can't be NULL here as it is multiple skb case here */ 2026 2027 /* Head frag list to point to second skb */ 2028 head_frag_list = msdu; 2029 2030 while (msdu) { 2031 frag_list_sum_len += qdf_nbuf_len(msdu); 2032 msdu = qdf_nbuf_next(msdu); 2033 } 2034 2035 qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, frag_list_sum_len); 2036 2037 /* Make Parent skb next to NULL */ 2038 qdf_nbuf_set_next(mpdu_buf, NULL); 2039 } 2040 2041 /** 2042 * dp_rx_mon_frag_restitch_mpdu_from_msdus() - Restitch logic to 2043 * convert to 802.3 header and adjust frag memory pointing to 2044 * dot3 header and payload in case of Non-Raw frame. 2045 * 2046 * @soc: struct dp_soc * 2047 * @mac_id: MAC id 2048 * @head_msdu: MPDU containing all MSDU as a frag 2049 * @tail_msdu: last skb which accommodate MPDU info 2050 * @rx_status: struct cdp_mon_status * 2051 * 2052 * Return: Adjusted nbuf containing MPDU worth info. 
 */
static inline qdf_nbuf_t
dp_rx_mon_frag_restitch_mpdu_from_msdus(struct dp_soc *soc,
					uint32_t mac_id,
					qdf_nbuf_t head_msdu,
					qdf_nbuf_t tail_msdu,
					struct cdp_mon_status *rx_status)
{
	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
		mpdu_buf_len, decap_hdr_pull_bytes, dir,
		is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
	qdf_frag_t rx_desc, rx_src_desc, rx_dest_desc, frag_addr;
	char *hdr_desc;
	uint8_t num_frags, frags_iter, l2_hdr_offset;
	struct ieee80211_frame *wh;
	struct ieee80211_qoscntl *qos;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	int16_t frag_page_offset = 0;
	struct hal_rx_mon_dest_buf_info buf_info;
	uint32_t pad_byte_pholder = 0;
	qdf_nbuf_t msdu_curr;
	uint16_t rx_mon_tlv_size = soc->rx_mon_pkt_tlv_size;
	struct dp_mon_pdev *mon_pdev;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d",
				     soc, mac_id);
		return NULL;
	}

	mon_pdev = dp_pdev->monitor_pdev;
	qdf_mem_zero(&buf_info, sizeof(struct hal_rx_mon_dest_buf_info));

	if (!head_msdu || !tail_msdu)
		goto mpdu_stitch_fail;

	/* TLVs sit immediately before the first frag's payload */
	rx_desc = qdf_nbuf_get_frag_addr(head_msdu, 0) - rx_mon_tlv_size;

	if (hal_rx_tlv_mpdu_len_err_get(soc->hal_soc, rx_desc)) {
		/* It looks like there is some issue on MPDU len err */
		/* Need further investigate if drop the packet */
		DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
		return NULL;
	}

	/* Look for FCS error */
	num_frags = qdf_nbuf_get_nr_frags(tail_msdu);
	rx_desc = qdf_nbuf_get_frag_addr(tail_msdu, num_frags - 1) -
		rx_mon_tlv_size;
	rx_status->cdp_rs_fcs_err = hal_rx_tlv_mpdu_fcs_err_get(soc->hal_soc,
								rx_desc);
	mon_pdev->ppdu_info.rx_status.rs_fcs_err = rx_status->cdp_rs_fcs_err;

	rx_desc = qdf_nbuf_get_frag_addr(head_msdu, 0) - rx_mon_tlv_size;
	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc,
				      (uint8_t *)&buf_info,
				      sizeof(buf_info));

	/* Easy case - The MSDU status indicates that this is a non-decapped
	 * packet in RAW mode.
	 */
	if (buf_info.is_decap_raw == 1) {
		/* On FCS error, drop frames whose 802.11 version field is
		 * invalid (likely garbage header).
		 */
		if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.rs_fcs_err)) {
			hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc);
			wh = (struct ieee80211_frame *)hdr_desc;
			if ((wh->i_fc[0] & QDF_IEEE80211_FC0_VERSION_MASK) !=
			    QDF_IEEE80211_FC0_VERSION_0) {
				DP_STATS_INC(dp_pdev, dropped.mon_ver_err, 1);
				return NULL;
			}
		}
		dp_rx_mon_fraglist_prepare(head_msdu, tail_msdu);
		goto mpdu_stitch_done;
	}

	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;

	/* Decap mode:
	 * Calculate the amount of header in decapped packet to knock off based
	 * on the decap type and the corresponding number of raw bytes to copy
	 * status header
	 */
	hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc);

	dp_rx_mon_dest_debug("%pK: decap format not raw", soc);

	/* Base size */
	wifi_hdr_len = sizeof(struct ieee80211_frame);
	wh = (struct ieee80211_frame *)hdr_desc;

	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;

	/* 4-address (WDS) frames carry an extra 6-byte address */
	if (dir == IEEE80211_FC1_DIR_DSTODS)
		wifi_hdr_len += 6;

	is_amsdu = 0;
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		qos = (struct ieee80211_qoscntl *)
			(hdr_desc + wifi_hdr_len);
		wifi_hdr_len += 2;

		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
	}

	/*Calculate security header length based on 'Protected'
	 * and 'EXT_IV' flag
	 */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		char *iv = (char *)wh + wifi_hdr_len;

		if (iv[3] & KEY_EXTIV)
			sec_hdr_len = 8;
		else
			sec_hdr_len = 4;
	} else {
		sec_hdr_len = 0;
	}
	wifi_hdr_len += sec_hdr_len;

	/* MSDU related stuff LLC - AMSDU subframe header etc */
	msdu_llc_len = is_amsdu ? (14 + 8) : 8;

	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;

	/* "Decap" header to remove from MSDU buffer */
	decap_hdr_pull_bytes = 14;

	amsdu_pad = 0;
	tot_msdu_len = 0;

	/*
	 * keeping first MSDU ops outside of loop to avoid multiple
	 * check handling
	 */

	/* Construct src header */
	rx_src_desc = hdr_desc;

	/*
	 * Update protocol and flow tag for MSDU
	 * update frag index in ctx_idx field.
	 * Reset head pointer data of nbuf before updating.
	 */
	QDF_NBUF_CB_RX_CTX_ID(head_msdu) = 0;
	dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev, head_msdu, rx_desc);

	/* Construct destination address */
	frag_addr = qdf_nbuf_get_frag_addr(head_msdu, 0);
	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 0);
	/* We will come here in 2 scenario:
	 * 1. First MSDU of MPDU with single buffer
	 * 2. First buffer of First MSDU of MPDU with continuation
	 *
	 *  ------------------------------------------------------------
	 * | SINGLE BUFFER (<= RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN)|
	 *  ------------------------------------------------------------
	 *
	 *  ------------------------------------------------------------
	 * | First BUFFER with Continuation             | ...           |
	 * | (RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN) |               |
	 *  ------------------------------------------------------------
	 */
	pad_byte_pholder =
		(RX_MONITOR_BUFFER_SIZE - soc->rx_mon_pkt_tlv_size) - frag_size;
	/* Construct destination address
	 *  --------------------------------------------------------------
	 * | RX_PKT_TLV | L2_HDR_PAD   |   Decap HDR   |      Payload     |
	 * |            |                              /                  |
	 * |            >Frag address points here     /                   |
	 * |            \                            /                    |
	 * |             \ This bytes needs to      /                     |
	 * |              \  removed to frame pkt  /                      |
	 * |               -----------------------                       |
	 * |                                      |                       |
	 * |                                      |                       |
	 * |   WIFI +LLC HDR will be added here <-|                       |
	 * |        |                             |                       |
	 * |         >Dest addr will point        |                       |
	 * |            somewhere in this area    |                       |
	 *  --------------------------------------------------------------
	 */
	rx_dest_desc =
		(frag_addr + decap_hdr_pull_bytes + l2_hdr_offset) -
		mpdu_buf_len;
	/* Add WIFI and LLC header for 1st MSDU of MPDU */
	qdf_mem_copy(rx_dest_desc, rx_src_desc, mpdu_buf_len);

	/* Shift the frag start back so the rebuilt header is in-frag */
	frag_page_offset =
		(decap_hdr_pull_bytes + l2_hdr_offset) - mpdu_buf_len;

	qdf_nbuf_move_frag_page_offset(head_msdu, 0, frag_page_offset);

	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 0);

	if (buf_info.first_buffer && buf_info.last_buffer) {
		/* MSDU with single buffer */
		amsdu_pad = frag_size & 0x3;
		amsdu_pad = amsdu_pad ?
(4 - amsdu_pad) : 0; 2251 if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) { 2252 char *frag_addr_temp; 2253 2254 qdf_nbuf_trim_add_frag_size(head_msdu, 0, amsdu_pad, 2255 0); 2256 frag_addr_temp = 2257 (char *)qdf_nbuf_get_frag_addr(head_msdu, 0); 2258 frag_addr_temp = (frag_addr_temp + 2259 qdf_nbuf_get_frag_size_by_idx(head_msdu, 0)) - 2260 amsdu_pad; 2261 qdf_mem_zero(frag_addr_temp, amsdu_pad); 2262 amsdu_pad = 0; 2263 } 2264 } else { 2265 /* 2266 * First buffer of Continuation frame and hence 2267 * amsdu_padding doesn't need to be added 2268 * Increase tot_msdu_len so that amsdu_pad byte 2269 * will be calculated for last frame of MSDU 2270 */ 2271 tot_msdu_len = frag_size; 2272 amsdu_pad = 0; 2273 } 2274 2275 /* Here amsdu_pad byte will have some value if 1sf buffer was 2276 * Single buffer MSDU and dint had pholder to adjust amsdu padding 2277 * byte in the end 2278 * So dont initialize to ZERO here 2279 */ 2280 pad_byte_pholder = 0; 2281 for (msdu_curr = head_msdu; msdu_curr;) { 2282 /* frag_iter will start from 0 for second skb onwards */ 2283 if (msdu_curr == head_msdu) 2284 frags_iter = 1; 2285 else 2286 frags_iter = 0; 2287 2288 num_frags = qdf_nbuf_get_nr_frags(msdu_curr); 2289 2290 for (; frags_iter < num_frags; frags_iter++) { 2291 /* Construct destination address 2292 * ---------------------------------------------------------- 2293 * | RX_PKT_TLV | L2_HDR_PAD | Decap HDR | Payload | Pad | 2294 * | | (First buffer) | | | 2295 * | | / / | 2296 * | >Frag address points here / / | 2297 * | \ / / | 2298 * | \ This bytes needs to / / | 2299 * | \ removed to frame pkt/ / | 2300 * | ---------------------- / | 2301 * | | / Add | 2302 * | | / amsdu pad | 2303 * | LLC HDR will be added here <-| | Byte for | 2304 * | | | | last frame | 2305 * | >Dest addr will point | | if space | 2306 * | somewhere in this area | | available | 2307 * | And amsdu_pad will be created if | | | 2308 * | dint get added in last buffer | | | 2309 * | (First Buffer) | | | 2310 * 
---------------------------------------------------------- 2311 */ 2312 frag_addr = 2313 qdf_nbuf_get_frag_addr(msdu_curr, frags_iter); 2314 rx_desc = frag_addr - rx_mon_tlv_size; 2315 2316 /* 2317 * Update protocol and flow tag for MSDU 2318 * update frag index in ctx_idx field 2319 */ 2320 QDF_NBUF_CB_RX_CTX_ID(msdu_curr) = frags_iter; 2321 dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev, 2322 msdu_curr, rx_desc); 2323 2324 /* Read buffer info from stored data in tlvs */ 2325 hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc, 2326 (uint8_t *)&buf_info, 2327 sizeof(buf_info)); 2328 2329 frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_curr, 2330 frags_iter); 2331 2332 /* If Middle buffer, dont add any header */ 2333 if ((!buf_info.first_buffer) && 2334 (!buf_info.last_buffer)) { 2335 tot_msdu_len += frag_size; 2336 amsdu_pad = 0; 2337 pad_byte_pholder = 0; 2338 continue; 2339 } 2340 2341 /* Calculate if current buffer has placeholder 2342 * to accommodate amsdu pad byte 2343 */ 2344 pad_byte_pholder = 2345 (RX_MONITOR_BUFFER_SIZE - soc->rx_mon_pkt_tlv_size) 2346 - frag_size; 2347 /* 2348 * We will come here only only three condition: 2349 * 1. Msdu with single Buffer 2350 * 2. First buffer in case MSDU is spread in multiple 2351 * buffer 2352 * 3. Last buffer in case MSDU is spread in multiple 2353 * buffer 2354 * 2355 * First buffER | Last buffer 2356 * Case 1: 1 | 1 2357 * Case 2: 1 | 0 2358 * Case 3: 0 | 1 2359 * 2360 * In 3rd case only l2_hdr_padding byte will be Zero and 2361 * in other case, It will be 2 Bytes. 
2362 */ 2363 if (buf_info.first_buffer) 2364 l2_hdr_offset = 2365 DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE; 2366 else 2367 l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE; 2368 2369 if (buf_info.first_buffer) { 2370 /* Src addr from where llc header needs to be copied */ 2371 rx_src_desc = 2372 hal_rx_desc_get_80211_hdr(soc->hal_soc, 2373 rx_desc); 2374 2375 /* Size of buffer with llc header */ 2376 frag_size = frag_size - 2377 (l2_hdr_offset + decap_hdr_pull_bytes); 2378 frag_size += msdu_llc_len; 2379 2380 /* Construct destination address */ 2381 rx_dest_desc = frag_addr + 2382 decap_hdr_pull_bytes + l2_hdr_offset; 2383 rx_dest_desc = rx_dest_desc - (msdu_llc_len); 2384 2385 qdf_mem_copy(rx_dest_desc, rx_src_desc, 2386 msdu_llc_len); 2387 2388 /* 2389 * Calculate new page offset and create hole 2390 * if amsdu_pad required. 2391 */ 2392 frag_page_offset = l2_hdr_offset + 2393 decap_hdr_pull_bytes; 2394 frag_page_offset = frag_page_offset - 2395 (msdu_llc_len + amsdu_pad); 2396 2397 qdf_nbuf_move_frag_page_offset(msdu_curr, 2398 frags_iter, 2399 frag_page_offset); 2400 2401 tot_msdu_len = frag_size; 2402 /* 2403 * No amsdu padding required for first frame of 2404 * continuation buffer 2405 */ 2406 if (!buf_info.last_buffer) { 2407 amsdu_pad = 0; 2408 continue; 2409 } 2410 } else { 2411 tot_msdu_len += frag_size; 2412 } 2413 2414 /* Will reach to this place in only two case: 2415 * 1. Single buffer MSDU 2416 * 2. Last buffer of MSDU in case of multiple buf MSDU 2417 */ 2418 2419 /* Check size of buffer if amsdu padding required */ 2420 amsdu_pad = tot_msdu_len & 0x3; 2421 amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0; 2422 2423 /* Create placeholder if current buffer can 2424 * accommodate padding. 
2425 */ 2426 if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) { 2427 char *frag_addr_temp; 2428 2429 qdf_nbuf_trim_add_frag_size(msdu_curr, 2430 frags_iter, 2431 amsdu_pad, 0); 2432 frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_curr, 2433 frags_iter); 2434 frag_addr_temp = (frag_addr_temp + 2435 qdf_nbuf_get_frag_size_by_idx(msdu_curr, frags_iter)) - 2436 amsdu_pad; 2437 qdf_mem_zero(frag_addr_temp, amsdu_pad); 2438 amsdu_pad = 0; 2439 } 2440 2441 /* reset tot_msdu_len */ 2442 tot_msdu_len = 0; 2443 } 2444 msdu_curr = qdf_nbuf_next(msdu_curr); 2445 } 2446 2447 dp_rx_mon_fraglist_prepare(head_msdu, tail_msdu); 2448 2449 dp_rx_mon_dest_debug("%pK: head_msdu %pK head_msdu->len %u", 2450 soc, head_msdu, head_msdu->len); 2451 2452 mpdu_stitch_done: 2453 return head_msdu; 2454 2455 mpdu_stitch_fail: 2456 dp_rx_mon_dest_err("%pK: mpdu_stitch_fail head_msdu %pK", 2457 soc, head_msdu); 2458 return NULL; 2459 } 2460 #endif 2461 2462 #ifdef DP_RX_MON_MEM_FRAG 2463 qdf_nbuf_t dp_rx_mon_restitch_mpdu(struct dp_soc *soc, uint32_t mac_id, 2464 qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu, 2465 struct cdp_mon_status *rs) 2466 { 2467 if (qdf_nbuf_get_nr_frags(head_msdu)) 2468 return dp_rx_mon_frag_restitch_mpdu_from_msdus(soc, mac_id, 2469 head_msdu, 2470 tail_msdu, rs); 2471 else 2472 return dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, 2473 head_msdu, 2474 tail_msdu, rs); 2475 } 2476 #else 2477 qdf_nbuf_t dp_rx_mon_restitch_mpdu(struct dp_soc *soc, uint32_t mac_id, 2478 qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu, 2479 struct cdp_mon_status *rs) 2480 { 2481 return dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, head_msdu, 2482 tail_msdu, rs); 2483 } 2484 #endif 2485 2486 #ifdef DP_RX_MON_MEM_FRAG 2487 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\ 2488 defined(WLAN_SUPPORT_RX_FLOW_TAG) 2489 void dp_rx_mon_update_pf_tag_to_buf_headroom(struct dp_soc *soc, 2490 qdf_nbuf_t nbuf) 2491 { 2492 qdf_nbuf_t ext_list; 2493 2494 if (qdf_unlikely(!soc)) { 2495 dp_err("Soc[%pK] 
Null. Can't update pftag to nbuf headroom", 2496 soc); 2497 qdf_assert_always(0); 2498 } 2499 2500 if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx)) 2501 return; 2502 2503 if (qdf_unlikely(!nbuf)) 2504 return; 2505 2506 /* Return if it dint came from mon Path */ 2507 if (!qdf_nbuf_get_nr_frags(nbuf)) 2508 return; 2509 2510 /* Headroom must be double of PF_TAG_SIZE as we copy it 1stly to head */ 2511 if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < (DP_RX_MON_TOT_PF_TAG_LEN * 2))) { 2512 dp_err("Nbuf avail Headroom[%d] < 2 * DP_RX_MON_PF_TAG_TOT_LEN[%lu]", 2513 qdf_nbuf_headroom(nbuf), DP_RX_MON_TOT_PF_TAG_LEN); 2514 return; 2515 } 2516 2517 qdf_nbuf_push_head(nbuf, DP_RX_MON_TOT_PF_TAG_LEN); 2518 qdf_mem_copy(qdf_nbuf_data(nbuf), qdf_nbuf_head(nbuf), 2519 DP_RX_MON_TOT_PF_TAG_LEN); 2520 qdf_nbuf_pull_head(nbuf, DP_RX_MON_TOT_PF_TAG_LEN); 2521 2522 ext_list = qdf_nbuf_get_ext_list(nbuf); 2523 while (ext_list) { 2524 /* Headroom must be double of PF_TAG_SIZE 2525 * as we copy it 1stly to head 2526 */ 2527 if (qdf_unlikely(qdf_nbuf_headroom(ext_list) < (DP_RX_MON_TOT_PF_TAG_LEN * 2))) { 2528 dp_err("Fraglist Nbuf avail Headroom[%d] < 2 * DP_RX_MON_PF_TAG_TOT_LEN[%lu]", 2529 qdf_nbuf_headroom(ext_list), 2530 DP_RX_MON_TOT_PF_TAG_LEN); 2531 ext_list = qdf_nbuf_queue_next(ext_list); 2532 continue; 2533 } 2534 qdf_nbuf_push_head(ext_list, DP_RX_MON_TOT_PF_TAG_LEN); 2535 qdf_mem_copy(qdf_nbuf_data(ext_list), qdf_nbuf_head(ext_list), 2536 DP_RX_MON_TOT_PF_TAG_LEN); 2537 qdf_nbuf_pull_head(ext_list, DP_RX_MON_TOT_PF_TAG_LEN); 2538 ext_list = qdf_nbuf_queue_next(ext_list); 2539 } 2540 } 2541 #endif 2542 #endif 2543 2544 #ifdef QCA_MONITOR_PKT_SUPPORT 2545 QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc, 2546 struct dp_pdev *pdev, 2547 int mac_id, 2548 int mac_for_pdev) 2549 { 2550 QDF_STATUS status = QDF_STATUS_SUCCESS; 2551 2552 if (soc->wlan_cfg_ctx->rxdma1_enable) { 2553 status = htt_srng_setup(soc->htt_handle, mac_for_pdev, 2554 
soc->rxdma_mon_buf_ring[mac_id] 2555 .hal_srng, 2556 RXDMA_MONITOR_BUF); 2557 2558 if (status != QDF_STATUS_SUCCESS) { 2559 dp_mon_err("Failed to send htt srng setup message for Rxdma mon buf ring"); 2560 return status; 2561 } 2562 2563 status = htt_srng_setup(soc->htt_handle, mac_for_pdev, 2564 soc->rxdma_mon_dst_ring[mac_id] 2565 .hal_srng, 2566 RXDMA_MONITOR_DST); 2567 2568 if (status != QDF_STATUS_SUCCESS) { 2569 dp_mon_err("Failed to send htt srng setup message for Rxdma mon dst ring"); 2570 return status; 2571 } 2572 2573 status = htt_srng_setup(soc->htt_handle, mac_for_pdev, 2574 soc->rxdma_mon_desc_ring[mac_id] 2575 .hal_srng, 2576 RXDMA_MONITOR_DESC); 2577 2578 if (status != QDF_STATUS_SUCCESS) { 2579 dp_mon_err("Failed to send htt srng message for Rxdma mon desc ring"); 2580 return status; 2581 } 2582 } 2583 2584 return status; 2585 } 2586 #endif /* QCA_MONITOR_PKT_SUPPORT */ 2587 2588 #ifdef QCA_MONITOR_PKT_SUPPORT 2589 void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id) 2590 { 2591 struct dp_soc *soc = pdev->soc; 2592 2593 if (soc->wlan_cfg_ctx->rxdma1_enable) { 2594 dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id], 2595 RXDMA_MONITOR_BUF, 0); 2596 dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id], 2597 RXDMA_MONITOR_DST, 0); 2598 dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id], 2599 RXDMA_MONITOR_DESC, 0); 2600 } 2601 } 2602 2603 void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id) 2604 { 2605 struct dp_soc *soc = pdev->soc; 2606 2607 if (soc->wlan_cfg_ctx->rxdma1_enable) { 2608 dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]); 2609 dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]); 2610 dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]); 2611 } 2612 } 2613 2614 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id) 2615 { 2616 struct dp_soc *soc = pdev->soc; 2617 2618 if (soc->wlan_cfg_ctx->rxdma1_enable) { 2619 if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id], 2620 
RXDMA_MONITOR_BUF, 0, lmac_id)) { 2621 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ", soc); 2622 goto fail1; 2623 } 2624 2625 if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id], 2626 RXDMA_MONITOR_DST, 0, lmac_id)) { 2627 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc); 2628 goto fail1; 2629 } 2630 2631 if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id], 2632 RXDMA_MONITOR_DESC, 0, lmac_id)) { 2633 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc); 2634 goto fail1; 2635 } 2636 } 2637 return QDF_STATUS_SUCCESS; 2638 2639 fail1: 2640 return QDF_STATUS_E_NOMEM; 2641 } 2642 2643 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id) 2644 { 2645 int entries; 2646 struct dp_soc *soc = pdev->soc; 2647 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx = pdev->wlan_cfg_ctx; 2648 2649 if (soc->wlan_cfg_ctx->rxdma1_enable) { 2650 entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx); 2651 if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id], 2652 RXDMA_MONITOR_BUF, entries, 0)) { 2653 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ", soc); 2654 goto fail1; 2655 } 2656 entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx); 2657 if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id], 2658 RXDMA_MONITOR_DST, entries, 0)) { 2659 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc); 2660 goto fail1; 2661 } 2662 entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx); 2663 if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id], 2664 RXDMA_MONITOR_DESC, entries, 0)) { 2665 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc); 2666 goto fail1; 2667 } 2668 } 2669 return QDF_STATUS_SUCCESS; 2670 2671 fail1: 2672 return QDF_STATUS_E_NOMEM; 2673 } 2674 #endif 2675