1 /* 2 * Copyright (c) 2021, The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 #include <dp_types.h> 18 #include "dp_rx.h" 19 #include "dp_peer.h" 20 #include <dp_htt.h> 21 #include <dp_mon_filter.h> 22 #include <dp_mon.h> 23 #include <dp_rx_mon.h> 24 #include <dp_rx_mon_1.0.h> 25 #include <dp_mon_1.0.h> 26 #include <dp_mon_filter_1.0.h> 27 28 #include "htt_ppdu_stats.h" 29 #if defined(DP_CON_MON) 30 #ifndef REMOVE_PKT_LOG 31 #include <pktlog_ac_api.h> 32 #include <pktlog_ac.h> 33 #endif 34 #endif 35 #ifdef FEATURE_PERPKT_INFO 36 #include "dp_ratetable.h" 37 #endif 38 39 #ifdef WLAN_TX_PKT_CAPTURE_ENH 40 #include "dp_tx_capture.h" 41 #endif 42 43 extern QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng, 44 int ring_type, uint32_t num_entries, 45 bool cached); 46 extern void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng); 47 extern QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng, 48 int ring_type, int ring_num, int mac_id); 49 extern void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng, 50 int ring_type, int ring_num); 51 52 extern enum timer_yield_status 53 dp_should_timer_irq_yield(struct dp_soc 
*soc, uint32_t work_done, 54 uint64_t start_time); 55 56 #ifdef QCA_ENHANCED_STATS_SUPPORT 57 void 58 dp_mon_populate_ppdu_info_1_0(struct hal_rx_ppdu_info *hal_ppdu_info, 59 struct cdp_rx_indication_ppdu *ppdu) 60 { 61 ppdu->u.preamble = hal_ppdu_info->rx_status.preamble_type; 62 ppdu->u.bw = hal_ppdu_info->rx_status.bw; 63 ppdu->punc_bw = 0; 64 } 65 66 /* 67 * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer 68 * modes are enabled or not. 69 * @dp_pdev: dp pdev handle. 70 * 71 * Return: bool 72 */ 73 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev) 74 { 75 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 76 77 if (!mon_pdev->pktlog_ppdu_stats && !mon_pdev->tx_sniffer_enable && 78 !mon_pdev->mcopy_mode) 79 return true; 80 else 81 return false; 82 } 83 84 /** 85 * dp_mon_tx_enable_enhanced_stats_1_0() - Send HTT cmd to FW to enable stats 86 * @pdev: Datapath pdev handle 87 * 88 * Return: none 89 */ 90 static void dp_mon_tx_enable_enhanced_stats_1_0(struct dp_pdev *pdev) 91 { 92 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 93 94 if (is_ppdu_txrx_capture_enabled(pdev) && !mon_pdev->bpr_enable) { 95 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, 96 pdev->pdev_id); 97 } else if (is_ppdu_txrx_capture_enabled(pdev) && 98 mon_pdev->bpr_enable) { 99 dp_h2t_cfg_stats_msg_send(pdev, 100 DP_PPDU_STATS_CFG_BPR_ENH, 101 pdev->pdev_id); 102 } 103 } 104 105 /** 106 * dp_mon_tx_disable_enhanced_stats_1_0() - Send HTT cmd to FW to disable stats 107 * @pdev: Datapath pdev handle 108 * 109 * Return: none 110 */ 111 static void dp_mon_tx_disable_enhanced_stats_1_0(struct dp_pdev *pdev) 112 { 113 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 114 115 if (is_ppdu_txrx_capture_enabled(pdev) && !mon_pdev->bpr_enable) { 116 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); 117 } else if (is_ppdu_txrx_capture_enabled(pdev) && mon_pdev->bpr_enable) { 118 dp_h2t_cfg_stats_msg_send(pdev, 119 DP_PPDU_STATS_CFG_BPR, 120 
pdev->pdev_id); 121 } 122 } 123 #endif 124 125 #ifdef QCA_SUPPORT_FULL_MON 126 static QDF_STATUS 127 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle, 128 uint8_t val) 129 { 130 struct dp_soc *soc = (struct dp_soc *)soc_handle; 131 struct dp_mon_soc *mon_soc = soc->monitor_soc; 132 133 mon_soc->full_mon_mode = val; 134 dp_cdp_err("Configure full monitor mode val: %d ", val); 135 136 return QDF_STATUS_SUCCESS; 137 } 138 139 static QDF_STATUS 140 dp_soc_config_full_mon_mode(struct cdp_pdev *cdp_pdev, uint8_t val) 141 { 142 struct dp_pdev *pdev = (struct dp_pdev *)cdp_pdev; 143 struct dp_soc *soc = pdev->soc; 144 QDF_STATUS status = QDF_STATUS_SUCCESS; 145 struct dp_mon_soc *mon_soc = soc->monitor_soc; 146 147 if (!mon_soc->full_mon_mode) 148 return QDF_STATUS_SUCCESS; 149 150 if ((htt_h2t_full_mon_cfg(soc->htt_handle, 151 pdev->pdev_id, 152 val)) != QDF_STATUS_SUCCESS) { 153 status = QDF_STATUS_E_FAILURE; 154 } 155 156 return status; 157 } 158 #else 159 static inline QDF_STATUS 160 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle, 161 uint8_t val) 162 { 163 return 0; 164 } 165 166 static inline QDF_STATUS 167 dp_soc_config_full_mon_mode(struct cdp_pdev *cdp_pdev, 168 uint8_t val) 169 { 170 return 0; 171 } 172 #endif 173 174 #if !defined(DISABLE_MON_CONFIG) 175 void dp_flush_monitor_rings(struct dp_soc *soc) 176 { 177 struct dp_pdev *pdev = soc->pdev_list[0]; 178 hal_soc_handle_t hal_soc = soc->hal_soc; 179 uint32_t lmac_id; 180 uint32_t hp, tp; 181 int dp_intr_id; 182 int budget; 183 void *mon_dst_srng; 184 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 185 186 /* Reset monitor filters before reaping the ring*/ 187 qdf_spin_lock_bh(&mon_pdev->mon_lock); 188 dp_mon_filter_reset_mon_mode(pdev); 189 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) 190 dp_info("failed to reset monitor filters"); 191 qdf_spin_unlock_bh(&mon_pdev->mon_lock); 192 193 if (mon_pdev->mon_chan_band == REG_BAND_UNKNOWN) 194 return; 195 196 lmac_id = 
pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band]; 197 if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID)) 198 return; 199 200 dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id]; 201 if (qdf_unlikely(dp_intr_id == DP_MON_INVALID_LMAC_ID)) 202 return; 203 204 mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, lmac_id); 205 206 /* reap full ring */ 207 budget = wlan_cfg_get_dma_mon_stat_ring_size(pdev->wlan_cfg_ctx); 208 209 hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp); 210 dp_info("Before reap: Monitor DST ring HP %u TP %u", hp, tp); 211 212 dp_mon_process(soc, &soc->intr_ctx[dp_intr_id], lmac_id, budget); 213 214 hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp); 215 dp_info("After reap: Monitor DST ring HP %u TP %u", hp, tp); 216 } 217 218 static 219 void dp_mon_rings_deinit_1_0(struct dp_pdev *pdev) 220 { 221 int mac_id = 0; 222 struct dp_soc *soc = pdev->soc; 223 224 225 for (mac_id = 0; 226 mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev; 227 mac_id++) { 228 int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, 229 pdev->pdev_id); 230 231 dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id], 232 RXDMA_MONITOR_STATUS, 0); 233 234 dp_mon_dest_rings_deinit(pdev, lmac_id); 235 } 236 } 237 238 static 239 void dp_mon_rings_free_1_0(struct dp_pdev *pdev) 240 { 241 int mac_id = 0; 242 struct dp_soc *soc = pdev->soc; 243 244 245 for (mac_id = 0; 246 mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev; 247 mac_id++) { 248 int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, 249 pdev->pdev_id); 250 251 dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]); 252 253 dp_mon_dest_rings_free(pdev, lmac_id); 254 } 255 } 256 257 static 258 QDF_STATUS dp_mon_rings_init_1_0(struct dp_pdev *pdev) 259 { 260 struct dp_soc *soc = pdev->soc; 261 int mac_id = 0; 262 263 for (mac_id = 0; 264 mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev; 265 mac_id++) { 266 int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, 267 pdev->pdev_id); 268 269 if 
(dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id], 270 RXDMA_MONITOR_STATUS, 0, lmac_id)) { 271 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_status_ring", 272 soc); 273 goto fail1; 274 } 275 276 if (dp_mon_dest_rings_init(pdev, lmac_id)) 277 goto fail1; 278 } 279 return QDF_STATUS_SUCCESS; 280 281 fail1: 282 dp_mon_rings_deinit_1_0(pdev); 283 return QDF_STATUS_E_NOMEM; 284 } 285 286 static 287 QDF_STATUS dp_mon_rings_alloc_1_0(struct dp_pdev *pdev) 288 { 289 struct dp_soc *soc = pdev->soc; 290 int mac_id = 0; 291 int entries; 292 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; 293 294 pdev_cfg_ctx = pdev->wlan_cfg_ctx; 295 296 for (mac_id = 0; 297 mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev; 298 mac_id++) { 299 int lmac_id = 300 dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id); 301 entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx); 302 if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id], 303 RXDMA_MONITOR_STATUS, entries, 0)) { 304 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_status_ring", 305 soc); 306 goto fail1; 307 } 308 309 if (dp_mon_dest_rings_alloc(pdev, lmac_id)) 310 goto fail1; 311 } 312 return QDF_STATUS_SUCCESS; 313 314 fail1: 315 dp_mon_rings_free_1_0(pdev); 316 return QDF_STATUS_E_NOMEM; 317 } 318 #else 319 inline 320 void dp_flush_monitor_rings(struct dp_soc *soc) 321 { 322 } 323 324 static inline 325 void dp_mon_rings_deinit_1_0(struct dp_pdev *pdev) 326 { 327 } 328 329 static inline 330 void dp_mon_rings_free_1_0(struct dp_pdev *pdev) 331 { 332 } 333 334 static inline 335 QDF_STATUS dp_mon_rings_init_1_0(struct dp_pdev *pdev) 336 { 337 return QDF_STATUS_SUCCESS; 338 } 339 340 static inline 341 QDF_STATUS dp_mon_rings_alloc_1_0(struct dp_pdev *pdev) 342 { 343 return QDF_STATUS_SUCCESS; 344 } 345 346 #endif 347 348 #ifdef QCA_MONITOR_PKT_SUPPORT 349 QDF_STATUS dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev) 350 { 351 uint32_t mac_id; 352 uint32_t mac_for_pdev; 353 struct dp_srng *mon_buf_ring; 354 uint32_t 
num_entries; 355 struct dp_soc *soc = pdev->soc; 356 357 /* If delay monitor replenish is disabled, allocate link descriptor 358 * monitor ring buffers of ring size. 359 */ 360 if (!wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) { 361 dp_vdev_set_monitor_mode_rings(pdev, false); 362 } else { 363 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { 364 mac_for_pdev = 365 dp_get_lmac_id_for_pdev_id(pdev->soc, 366 mac_id, 367 pdev->pdev_id); 368 369 dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev, 370 FALSE); 371 mon_buf_ring = 372 &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev]; 373 /* 374 * Configure low interrupt threshld when monitor mode is 375 * configured. 376 */ 377 if (mon_buf_ring->hal_srng) { 378 num_entries = mon_buf_ring->num_entries; 379 hal_set_low_threshold(mon_buf_ring->hal_srng, 380 num_entries >> 3); 381 htt_srng_setup(pdev->soc->htt_handle, 382 pdev->pdev_id, 383 mon_buf_ring->hal_srng, 384 RXDMA_MONITOR_BUF); 385 } 386 } 387 } 388 return QDF_STATUS_SUCCESS; 389 } 390 #endif 391 392 #ifdef QCA_MONITOR_PKT_SUPPORT 393 QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev, 394 uint8_t delayed_replenish) 395 { 396 uint32_t mac_id; 397 uint32_t mac_for_pdev; 398 struct dp_soc *soc = pdev->soc; 399 QDF_STATUS status = QDF_STATUS_SUCCESS; 400 struct dp_srng *mon_buf_ring; 401 uint32_t num_entries; 402 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 403 404 405 /* If monitor rings are already initialized, return from here */ 406 if (mon_pdev->pdev_mon_init) 407 return QDF_STATUS_SUCCESS; 408 409 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { 410 mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, 411 pdev->pdev_id); 412 413 /* Allocate sw rx descriptor pool for mon RxDMA buffer ring */ 414 status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev); 415 if (!QDF_IS_STATUS_SUCCESS(status)) { 416 dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n", 417 __func__); 418 goto fail0; 419 } 420 421 
dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev); 422 423 /* If monitor buffers are already allocated, 424 * do not allocate. 425 */ 426 status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev, 427 delayed_replenish); 428 429 mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev]; 430 /* 431 * Configure low interrupt threshld when monitor mode is 432 * configured. 433 */ 434 if (mon_buf_ring->hal_srng) { 435 num_entries = mon_buf_ring->num_entries; 436 hal_set_low_threshold(mon_buf_ring->hal_srng, 437 num_entries >> 3); 438 htt_srng_setup(pdev->soc->htt_handle, 439 pdev->pdev_id, 440 mon_buf_ring->hal_srng, 441 RXDMA_MONITOR_BUF); 442 } 443 444 /* Allocate link descriptors for the mon link descriptor ring */ 445 status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev); 446 if (!QDF_IS_STATUS_SUCCESS(status)) { 447 dp_err("%s: dp_hw_link_desc_pool_banks_alloc() failed", 448 __func__); 449 goto fail0; 450 } 451 dp_link_desc_ring_replenish(soc, mac_for_pdev); 452 453 htt_srng_setup(soc->htt_handle, pdev->pdev_id, 454 soc->rxdma_mon_desc_ring[mac_for_pdev].hal_srng, 455 RXDMA_MONITOR_DESC); 456 htt_srng_setup(soc->htt_handle, pdev->pdev_id, 457 soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng, 458 RXDMA_MONITOR_DST); 459 } 460 mon_pdev->pdev_mon_init = 1; 461 462 return QDF_STATUS_SUCCESS; 463 464 fail0: 465 return QDF_STATUS_E_FAILURE; 466 } 467 #endif 468 469 /* dp_mon_vdev_timer()- timer poll for interrupts 470 * 471 * @arg: SoC Handle 472 * 473 * Return: 474 * 475 */ 476 static void dp_mon_vdev_timer(void *arg) 477 { 478 struct dp_soc *soc = (struct dp_soc *)arg; 479 struct dp_pdev *pdev = soc->pdev_list[0]; 480 enum timer_yield_status yield = DP_TIMER_NO_YIELD; 481 uint32_t work_done = 0, total_work_done = 0; 482 int budget = 0xffff; 483 uint32_t remaining_quota = budget; 484 uint64_t start_time; 485 uint32_t lmac_id = DP_MON_INVALID_LMAC_ID; 486 uint32_t lmac_iter; 487 int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx); 488 struct 
dp_mon_soc *mon_soc = soc->monitor_soc; 489 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 490 491 if (!qdf_atomic_read(&soc->cmn_init_done)) 492 return; 493 494 if (mon_pdev->mon_chan_band != REG_BAND_UNKNOWN) 495 lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band]; 496 497 start_time = qdf_get_log_timestamp(); 498 dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings); 499 500 while (yield == DP_TIMER_NO_YIELD) { 501 for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) { 502 if (lmac_iter == lmac_id) 503 work_done = dp_monitor_process( 504 soc, NULL, 505 lmac_iter, remaining_quota); 506 else 507 work_done = 508 dp_monitor_drop_packets_for_mac(pdev, 509 lmac_iter, 510 remaining_quota); 511 if (work_done) { 512 budget -= work_done; 513 if (budget <= 0) { 514 yield = DP_TIMER_WORK_EXHAUST; 515 goto budget_done; 516 } 517 remaining_quota = budget; 518 total_work_done += work_done; 519 } 520 } 521 522 yield = dp_should_timer_irq_yield(soc, total_work_done, 523 start_time); 524 total_work_done = 0; 525 } 526 527 budget_done: 528 if (yield == DP_TIMER_WORK_EXHAUST || 529 yield == DP_TIMER_TIME_EXHAUST) 530 qdf_timer_mod(&mon_soc->mon_vdev_timer, 1); 531 else 532 qdf_timer_mod(&mon_soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS); 533 } 534 535 /* MCL specific functions */ 536 #if defined(DP_CON_MON) 537 /* 538 * dp_mon_reap_timer_handler()- timer to reap monitor rings 539 * reqd as we are not getting ppdu end interrupts 540 * @arg: SoC Handle 541 * 542 * Return: 543 * 544 */ 545 static void dp_mon_reap_timer_handler(void *arg) 546 { 547 struct dp_soc *soc = (struct dp_soc *)arg; 548 struct dp_mon_soc *mon_soc = soc->monitor_soc; 549 550 dp_service_mon_rings(soc, QCA_NAPI_BUDGET); 551 552 qdf_timer_mod(&mon_soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS); 553 } 554 555 static void dp_mon_reap_timer_init(struct dp_soc *soc) 556 { 557 struct dp_mon_soc *mon_soc = soc->monitor_soc; 558 559 qdf_spinlock_create(&mon_soc->reap_timer_lock); 560 
qdf_timer_init(soc->osdev, &mon_soc->mon_reap_timer, 561 dp_mon_reap_timer_handler, (void *)soc, 562 QDF_TIMER_TYPE_WAKE_APPS); 563 qdf_mem_zero(mon_soc->mon_reap_src_bitmap, 564 sizeof(mon_soc->mon_reap_src_bitmap)); 565 mon_soc->reap_timer_init = 1; 566 } 567 #else 568 static void dp_mon_reap_timer_init(struct dp_soc *soc) 569 { 570 } 571 #endif 572 573 static void dp_mon_reap_timer_deinit(struct dp_soc *soc) 574 { 575 struct dp_mon_soc *mon_soc = soc->monitor_soc; 576 if (mon_soc->reap_timer_init) { 577 mon_soc->reap_timer_init = 0; 578 qdf_timer_free(&mon_soc->mon_reap_timer); 579 qdf_spinlock_destroy(&mon_soc->reap_timer_lock); 580 } 581 } 582 583 /** 584 * dp_mon_reap_timer_start() - start reap timer of monitor status ring 585 * @soc: point to soc 586 * @source: trigger source 587 * 588 * If the source is CDP_MON_REAP_SOURCE_ANY, skip bit set, and start timer 589 * if any bit has been set in the bitmap; while for the other sources, set 590 * the bit and start timer if the bitmap is empty before that. 591 * 592 * Return: true if timer-start is performed, false otherwise. 
593 */ 594 static bool 595 dp_mon_reap_timer_start(struct dp_soc *soc, enum cdp_mon_reap_source source) 596 { 597 struct dp_mon_soc *mon_soc = soc->monitor_soc; 598 bool do_start; 599 600 if (!mon_soc->reap_timer_init) 601 return false; 602 603 qdf_spin_lock_bh(&mon_soc->reap_timer_lock); 604 do_start = qdf_bitmap_empty(mon_soc->mon_reap_src_bitmap, 605 CDP_MON_REAP_SOURCE_NUM); 606 if (source == CDP_MON_REAP_SOURCE_ANY) 607 do_start = !do_start; 608 else 609 qdf_set_bit(source, mon_soc->mon_reap_src_bitmap); 610 qdf_spin_unlock_bh(&mon_soc->reap_timer_lock); 611 612 if (do_start) 613 qdf_timer_mod(&mon_soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS); 614 615 return do_start; 616 } 617 618 /** 619 * dp_mon_reap_timer_stop() - stop reap timer of monitor status ring 620 * @soc: point to soc 621 * @source: trigger source 622 * 623 * If the source is CDP_MON_REAP_SOURCE_ANY, skip bit clear, and stop timer 624 * if any bit has been set in the bitmap; while for the other sources, clear 625 * the bit and stop the timer if the bitmap is empty after that. 626 * 627 * Return: true if timer-stop is performed, false otherwise. 
628 */ 629 static bool 630 dp_mon_reap_timer_stop(struct dp_soc *soc, enum cdp_mon_reap_source source) 631 { 632 struct dp_mon_soc *mon_soc = soc->monitor_soc; 633 bool do_stop; 634 635 if (!mon_soc->reap_timer_init) 636 return false; 637 638 qdf_spin_lock_bh(&mon_soc->reap_timer_lock); 639 if (source != CDP_MON_REAP_SOURCE_ANY) 640 qdf_clear_bit(source, mon_soc->mon_reap_src_bitmap); 641 642 do_stop = qdf_bitmap_empty(mon_soc->mon_reap_src_bitmap, 643 CDP_MON_REAP_SOURCE_NUM); 644 if (source == CDP_MON_REAP_SOURCE_ANY) 645 do_stop = !do_stop; 646 qdf_spin_unlock_bh(&mon_soc->reap_timer_lock); 647 648 if (do_stop) 649 qdf_timer_sync_cancel(&mon_soc->mon_reap_timer); 650 651 return do_stop; 652 } 653 654 static void dp_mon_vdev_timer_init(struct dp_soc *soc) 655 { 656 struct dp_mon_soc *mon_soc = soc->monitor_soc; 657 658 qdf_timer_init(soc->osdev, &mon_soc->mon_vdev_timer, 659 dp_mon_vdev_timer, (void *)soc, 660 QDF_TIMER_TYPE_WAKE_APPS); 661 mon_soc->mon_vdev_timer_state |= MON_VDEV_TIMER_INIT; 662 } 663 664 static void dp_mon_vdev_timer_deinit(struct dp_soc *soc) 665 { 666 struct dp_mon_soc *mon_soc = soc->monitor_soc; 667 if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) { 668 qdf_timer_free(&mon_soc->mon_vdev_timer); 669 mon_soc->mon_vdev_timer_state = 0; 670 } 671 } 672 673 static void dp_mon_vdev_timer_start(struct dp_soc *soc) 674 { 675 struct dp_mon_soc *mon_soc = soc->monitor_soc; 676 if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) { 677 qdf_timer_mod(&mon_soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS); 678 mon_soc->mon_vdev_timer_state |= MON_VDEV_TIMER_RUNNING; 679 } 680 } 681 682 static bool dp_mon_vdev_timer_stop(struct dp_soc *soc) 683 { 684 struct dp_mon_soc *mon_soc = soc->monitor_soc; 685 if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING) { 686 qdf_timer_sync_cancel(&mon_soc->mon_vdev_timer); 687 mon_soc->mon_vdev_timer_state &= ~MON_VDEV_TIMER_RUNNING; 688 return true; 689 } 690 691 return false; 692 } 693 694 static void 
dp_mon_neighbour_peer_add_ast(struct dp_pdev *pdev, 695 struct dp_peer *ta_peer, 696 uint8_t *mac_addr, 697 qdf_nbuf_t nbuf, 698 uint32_t flags) 699 { 700 struct dp_neighbour_peer *neighbour_peer = NULL; 701 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 702 struct dp_soc *soc = pdev->soc; 703 704 if (mon_pdev->neighbour_peers_added) { 705 qdf_mem_copy(mac_addr, 706 (qdf_nbuf_data(nbuf) + 707 QDF_MAC_ADDR_SIZE), 708 QDF_MAC_ADDR_SIZE); 709 710 qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex); 711 TAILQ_FOREACH(neighbour_peer, 712 &mon_pdev->neighbour_peers_list, 713 neighbour_peer_list_elem) { 714 if (!qdf_mem_cmp(&neighbour_peer->neighbour_peers_macaddr, 715 mac_addr, 716 QDF_MAC_ADDR_SIZE)) { 717 dp_peer_add_ast(soc, 718 ta_peer, 719 mac_addr, 720 CDP_TXRX_AST_TYPE_WDS, 721 flags); 722 QDF_TRACE(QDF_MODULE_ID_DP, 723 QDF_TRACE_LEVEL_INFO, 724 "sa valid and nac roamed to wds"); 725 break; 726 } 727 } 728 qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 729 } 730 } 731 732 #if !defined(DISABLE_MON_CONFIG) 733 734 /** 735 * dp_mon_htt_srng_setup_1_0() - Prepare HTT messages for Monitor rings 736 * @soc: soc handle 737 * @pdev: physical device handle 738 * @mac_id: ring number 739 * @mac_for_pdev: mac_id 740 * 741 * Return: non-zero for failure, zero for success 742 */ 743 #if defined(DP_CON_MON) 744 static 745 QDF_STATUS dp_mon_htt_srng_setup_1_0(struct dp_soc *soc, 746 struct dp_pdev *pdev, 747 int mac_id, 748 int mac_for_pdev) 749 { 750 QDF_STATUS status = QDF_STATUS_SUCCESS; 751 752 status = dp_mon_htt_dest_srng_setup(soc, pdev, mac_id, mac_for_pdev); 753 if (status != QDF_STATUS_SUCCESS) 754 return status; 755 756 if (!soc->rxdma_mon_status_ring[mac_id].hal_srng) 757 return QDF_STATUS_SUCCESS; 758 759 status = htt_srng_setup(soc->htt_handle, mac_for_pdev, 760 soc->rxdma_mon_status_ring[mac_id] 761 .hal_srng, 762 RXDMA_MONITOR_STATUS); 763 764 if (status != QDF_STATUS_SUCCESS) { 765 dp_mon_err("Failed to send htt srng setup message for Rxdma mon status 
ring"); 766 return status; 767 } 768 769 return status; 770 } 771 #else 772 /* This is only for WIN */ 773 static 774 QDF_STATUS dp_mon_htt_srng_setup_1_0(struct dp_soc *soc, 775 struct dp_pdev *pdev, 776 int mac_id, 777 int mac_for_pdev) 778 { 779 QDF_STATUS status = QDF_STATUS_SUCCESS; 780 struct dp_mon_soc *mon_soc; 781 782 mon_soc = soc->monitor_soc; 783 if(!mon_soc) { 784 dp_mon_err("%pK: monitor SOC not initialized", soc); 785 return status; 786 } 787 788 if (mon_soc->monitor_mode_v2) 789 return status; 790 791 if (wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) { 792 status = dp_mon_htt_dest_srng_setup(soc, pdev, 793 mac_id, mac_for_pdev); 794 if (status != QDF_STATUS_SUCCESS) 795 return status; 796 } 797 798 if (!soc->rxdma_mon_status_ring[mac_id].hal_srng) 799 return QDF_STATUS_SUCCESS; 800 801 status = htt_srng_setup(soc->htt_handle, mac_for_pdev, 802 soc->rxdma_mon_status_ring[mac_id] 803 .hal_srng, 804 RXDMA_MONITOR_STATUS); 805 806 if (status != QDF_STATUS_SUCCESS) { 807 dp_mon_err("Failed to send htt srng setup msg for Rxdma mon status ring"); 808 return status; 809 } 810 811 return status; 812 } 813 #endif 814 #endif 815 816 /* MCL specific functions */ 817 #if defined(DP_CON_MON) 818 819 /* 820 * dp_service_mon_rings()- service monitor rings 821 * @soc: soc dp handle 822 * @quota: number of ring entry that can be serviced 823 * 824 * Return: None 825 * 826 */ 827 void dp_service_mon_rings(struct dp_soc *soc, uint32_t quota) 828 { 829 int ring = 0, work_done; 830 struct dp_pdev *pdev = NULL; 831 832 for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) { 833 pdev = dp_get_pdev_for_lmac_id(soc, ring); 834 if (!pdev) 835 continue; 836 work_done = dp_mon_process(soc, NULL, ring, quota); 837 838 dp_rx_mon_dest_debug("Reaped %d descs from Monitor rings", 839 work_done); 840 } 841 } 842 #endif 843 844 /* 845 * dp_mon_peer_tx_init() – Initialize receive TID state in monitor peer 846 * @pdev: Datapath pdev 847 * @peer: Datapath peer 848 * 849 */ 850 static 
void 851 dp_mon_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer) 852 { 853 if (!peer->monitor_peer) 854 return; 855 856 dp_peer_tid_queue_init(peer); 857 dp_peer_update_80211_hdr(peer->vdev, peer); 858 } 859 860 /* 861 * dp_mon_peer_tx_cleanup() – Deinitialize receive TID state in monitor peer 862 * @vdev: Datapath vdev 863 * @peer: Datapath peer 864 * 865 */ 866 static void 867 dp_mon_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer) 868 { 869 if (!peer->monitor_peer) 870 return; 871 872 dp_peer_tid_queue_cleanup(peer); 873 } 874 875 #ifdef QCA_SUPPORT_BPR 876 static QDF_STATUS 877 dp_set_bpr_enable_1_0(struct dp_pdev *pdev, int val) 878 { 879 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 880 881 switch (val) { 882 case CDP_BPR_DISABLE: 883 mon_pdev->bpr_enable = CDP_BPR_DISABLE; 884 if (!mon_pdev->pktlog_ppdu_stats && 885 !mon_pdev->enhanced_stats_en && 886 !mon_pdev->tx_sniffer_enable && !mon_pdev->mcopy_mode) { 887 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); 888 } else if (mon_pdev->enhanced_stats_en && 889 !mon_pdev->tx_sniffer_enable && 890 !mon_pdev->mcopy_mode && 891 !mon_pdev->pktlog_ppdu_stats) { 892 dp_h2t_cfg_stats_msg_send(pdev, 893 DP_PPDU_STATS_CFG_ENH_STATS, 894 pdev->pdev_id); 895 } 896 break; 897 case CDP_BPR_ENABLE: 898 mon_pdev->bpr_enable = CDP_BPR_ENABLE; 899 if (!mon_pdev->enhanced_stats_en && 900 !mon_pdev->tx_sniffer_enable && 901 !mon_pdev->mcopy_mode && !mon_pdev->pktlog_ppdu_stats) { 902 dp_h2t_cfg_stats_msg_send(pdev, 903 DP_PPDU_STATS_CFG_BPR, 904 pdev->pdev_id); 905 } else if (mon_pdev->enhanced_stats_en && 906 !mon_pdev->tx_sniffer_enable && 907 !mon_pdev->mcopy_mode && 908 !mon_pdev->pktlog_ppdu_stats) { 909 dp_h2t_cfg_stats_msg_send(pdev, 910 DP_PPDU_STATS_CFG_BPR_ENH, 911 pdev->pdev_id); 912 } else if (mon_pdev->pktlog_ppdu_stats) { 913 dp_h2t_cfg_stats_msg_send(pdev, 914 DP_PPDU_STATS_CFG_BPR_PKTLOG, 915 pdev->pdev_id); 916 } 917 break; 918 default: 919 break; 920 } 921 922 return QDF_STATUS_SUCCESS; 
923 } 924 #endif 925 926 #ifdef QCA_ENHANCED_STATS_SUPPORT 927 #if defined(WDI_EVENT_ENABLE) && !defined(WLAN_TX_PKT_CAPTURE_ENH) 928 /** 929 * dp_ppdu_desc_notify_1_0 - Notify upper layer for PPDU indication via WDI 930 * 931 * @pdev: Datapath pdev handle 932 * @nbuf: Buffer to be shipped 933 * 934 * Return: void 935 */ 936 static void dp_ppdu_desc_notify_1_0(struct dp_pdev *pdev, qdf_nbuf_t nbuf) 937 { 938 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 939 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 940 941 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(nbuf); 942 943 /** 944 * Deliver PPDU stats only for valid (acked) data 945 * frames if sniffer mode is not enabled. 946 * If sniffer mode is enabled, PPDU stats 947 * for all frames including mgmt/control 948 * frames should be delivered to upper layer 949 */ 950 if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) { 951 dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, 952 pdev->soc, 953 nbuf, HTT_INVALID_PEER, 954 WDI_NO_VAL, 955 pdev->pdev_id); 956 } else { 957 if (ppdu_desc->num_mpdu != 0 && 958 ppdu_desc->num_users != 0 && 959 ppdu_desc->frame_ctrl & 960 HTT_FRAMECTRL_DATATYPE) { 961 dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, 962 pdev->soc, 963 nbuf, HTT_INVALID_PEER, 964 WDI_NO_VAL, 965 pdev->pdev_id); 966 } else { 967 qdf_nbuf_free(nbuf); 968 } 969 } 970 } 971 #endif 972 973 /** 974 * dp_ppdu_stats_feat_enable_check_1_0 - Check if feature(s) is enabled to 975 * consume ppdu stats from FW 976 * 977 * @pdev: Datapath pdev handle 978 * 979 * Return: true if enabled, else return false 980 */ 981 static bool dp_ppdu_stats_feat_enable_check_1_0(struct dp_pdev *pdev) 982 { 983 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 984 985 if (!mon_pdev->enhanced_stats_en && !mon_pdev->tx_sniffer_enable && 986 !mon_pdev->mcopy_mode && !mon_pdev->bpr_enable) 987 return false; 988 else 989 return true; 990 } 991 992 /** 993 * dp_mon_tx_stats_update_1_0 - Update Tx stats from HTT PPDU completion 
path 994 * 995 * @monitor: Monitor peer 996 * @ppdu: Tx PPDU user completion info 997 */ 998 void 999 dp_mon_tx_stats_update_1_0(struct dp_mon_peer *mon_peer, 1000 struct cdp_tx_completion_ppdu_user *ppdu) 1001 { 1002 ppdu->punc_mode = NO_PUNCTURE; 1003 } 1004 #endif 1005 1006 #ifndef QCA_SUPPORT_FULL_MON 1007 /** 1008 * dp_rx_mon_process () - Core brain processing for monitor mode 1009 * 1010 * This API processes monitor destination ring followed by monitor status ring 1011 * Called from bottom half (tasklet/NET_RX_SOFTIRQ) 1012 * 1013 * @soc: datapath soc context 1014 * @int_ctx: interrupt context 1015 * @mac_id: mac_id on which interrupt is received 1016 * @quota: Number of status ring entry that can be serviced in one shot. 1017 * 1018 * @Return: Number of reaped status ring entries 1019 */ 1020 static inline uint32_t 1021 dp_rx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx, 1022 uint32_t mac_id, uint32_t quota) 1023 { 1024 return quota; 1025 } 1026 #endif 1027 1028 #ifndef DISABLE_MON_CONFIG 1029 static uint32_t 1030 dp_rx_mon_process_1_0(struct dp_soc *soc, struct dp_intr *int_ctx, 1031 uint32_t mac_id, uint32_t quota) 1032 { 1033 struct dp_mon_soc *mon_soc = soc->monitor_soc; 1034 1035 if (qdf_unlikely(mon_soc->full_mon_mode)) 1036 return dp_rx_mon_process(soc, int_ctx, mac_id, quota); 1037 1038 return dp_rx_mon_status_process(soc, int_ctx, mac_id, quota); 1039 } 1040 1041 #if defined(WDI_EVENT_ENABLE) &&\ 1042 (defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG)) 1043 static inline 1044 void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc) 1045 { 1046 mon_soc->mon_ops->mon_ppdu_stats_ind_handler = 1047 dp_ppdu_stats_ind_handler; 1048 } 1049 #else 1050 static inline 1051 void dp_mon_ppdu_stats_handler_register(struct dp_mon_soc *mon_soc) 1052 { 1053 } 1054 #endif 1055 1056 static void dp_mon_register_intr_ops_1_0(struct dp_soc *soc) 1057 { 1058 struct dp_mon_soc *mon_soc = soc->monitor_soc; 1059 1060 
mon_soc->mon_rx_process = dp_rx_mon_process_1_0; 1061 dp_mon_ppdu_stats_handler_register(mon_soc); 1062 } 1063 #endif 1064 1065 /** 1066 * dp_mon_register_feature_ops_1_0() - register feature ops 1067 * 1068 * @soc: dp soc context 1069 * 1070 * @return: void 1071 */ 1072 static void 1073 dp_mon_register_feature_ops_1_0(struct dp_soc *soc) 1074 { 1075 struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc); 1076 1077 if (!mon_ops) { 1078 dp_err("mon_ops is NULL, feature ops registration failed"); 1079 return; 1080 } 1081 1082 mon_ops->mon_config_debug_sniffer = dp_config_debug_sniffer; 1083 mon_ops->mon_peer_tx_init = dp_mon_peer_tx_init; 1084 mon_ops->mon_peer_tx_cleanup = dp_mon_peer_tx_cleanup; 1085 mon_ops->mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach; 1086 mon_ops->mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach; 1087 mon_ops->mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats; 1088 mon_ops->mon_set_bsscolor = dp_mon_set_bsscolor; 1089 mon_ops->mon_pdev_get_filter_ucast_data = 1090 dp_pdev_get_filter_ucast_data; 1091 mon_ops->mon_pdev_get_filter_mcast_data = 1092 dp_pdev_get_filter_mcast_data; 1093 mon_ops->mon_pdev_get_filter_non_data = dp_pdev_get_filter_non_data; 1094 mon_ops->mon_neighbour_peer_add_ast = dp_mon_neighbour_peer_add_ast; 1095 #ifdef WLAN_TX_PKT_CAPTURE_ENH 1096 mon_ops->mon_peer_tid_peer_id_update = dp_peer_tid_peer_id_update_1_0; 1097 mon_ops->mon_tx_capture_debugfs_init = dp_tx_capture_debugfs_init_1_0; 1098 mon_ops->mon_tx_add_to_comp_queue = dp_tx_add_to_comp_queue_1_0; 1099 mon_ops->mon_print_pdev_tx_capture_stats = 1100 dp_print_pdev_tx_capture_stats_1_0; 1101 mon_ops->mon_config_enh_tx_capture = dp_config_enh_tx_capture_1_0; 1102 mon_ops->mon_tx_peer_filter = dp_peer_set_tx_capture_enabled_1_0; 1103 mon_ops->mon_peer_tx_capture_get_stats = dp_get_peer_tx_capture_stats; 1104 mon_ops->mon_pdev_tx_capture_get_stats = dp_get_pdev_tx_capture_stats; 1105 #endif 1106 #if (defined(WIFI_MONITOR_SUPPORT) && 
!defined(WLAN_TX_PKT_CAPTURE_ENH))
	/*
	 * Tx packet capture enhancement is compiled out: leave every
	 * tx-capture related callback unset (NULL).
	 */
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
	mon_ops->mon_config_enh_tx_capture = NULL;
	mon_ops->mon_tx_peer_filter = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_config_enh_rx_capture = dp_config_enh_rx_capture;
#endif
#ifdef QCA_SUPPORT_BPR
	mon_ops->mon_set_bpr_enable = dp_set_bpr_enable_1_0;
#endif
#ifdef ATH_SUPPORT_NAC
	mon_ops->mon_set_filter_neigh_peers = dp_set_filter_neigh_peers;
#endif
#ifdef WLAN_ATF_ENABLE
	mon_ops->mon_set_atf_stats_enable = dp_set_atf_stats_enable;
#endif
#ifdef FEATURE_NAC_RSSI
	mon_ops->mon_filter_neighbour_peer = dp_filter_neighbour_peer;
#endif
#ifdef QCA_MCOPY_SUPPORT
	mon_ops->mon_filter_setup_mcopy_mode =
		dp_mon_filter_setup_mcopy_mode_1_0;
	mon_ops->mon_filter_reset_mcopy_mode =
		dp_mon_filter_reset_mcopy_mode_1_0;
	mon_ops->mon_mcopy_check_deliver = dp_mcopy_check_deliver;
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_filter_setup_enhanced_stats =
		dp_mon_filter_setup_enhanced_stats_1_0;
	mon_ops->mon_filter_reset_enhanced_stats =
		dp_mon_filter_reset_enhanced_stats_1_0;
	mon_ops->mon_tx_enable_enhanced_stats =
		dp_mon_tx_enable_enhanced_stats_1_0;
	mon_ops->mon_tx_disable_enhanced_stats =
		dp_mon_tx_disable_enhanced_stats_1_0;
	mon_ops->mon_ppdu_stats_feat_enable_check =
		dp_ppdu_stats_feat_enable_check_1_0;
#ifndef WLAN_TX_PKT_CAPTURE_ENH
	/* No tx capture: deliver PPDU descriptors through the common hook */
	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver;
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_ppdu_desc_notify = dp_ppdu_desc_notify_1_0;
#endif
#else
	/* Tx capture build: use the 1.0-specific deliver hook instead */
	mon_ops->mon_ppdu_desc_deliver = dp_ppdu_desc_deliver_1_0;
#endif
	mon_ops->mon_tx_stats_update = dp_mon_tx_stats_update_1_0;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	mon_ops->mon_filter_setup_smart_monitor =
		dp_mon_filter_setup_smart_monitor_1_0;
	mon_ops->mon_filter_reset_smart_monitor =
		dp_mon_filter_reset_smart_monitor_1_0;
#endif
	mon_ops->mon_filter_set_reset_mon_mac_filter =
		dp_mon_set_reset_mon_mac_filter_1_0;
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_filter_setup_rx_enh_capture =
		dp_mon_filter_setup_rx_enh_capture_1_0;
#endif
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3;
	mon_ops->mon_filter_setup_rx_pkt_log_full =
		dp_mon_filter_setup_rx_pkt_log_full_1_0;
	mon_ops->mon_filter_reset_rx_pkt_log_full =
		dp_mon_filter_reset_rx_pkt_log_full_1_0;
	mon_ops->mon_filter_setup_rx_pkt_log_lite =
		dp_mon_filter_setup_rx_pkt_log_lite_1_0;
	mon_ops->mon_filter_reset_rx_pkt_log_lite =
		dp_mon_filter_reset_rx_pkt_log_lite_1_0;
	mon_ops->mon_filter_setup_rx_pkt_log_cbf =
		dp_mon_filter_setup_rx_pkt_log_cbf_1_0;
	/*
	 * NOTE(review): the reset helper is spelled "rx_pktlog_cbf" while
	 * the setup helper is "rx_pkt_log_cbf" — inconsistent but matches
	 * the declared helper names; do not "fix" without renaming there.
	 */
	mon_ops->mon_filter_reset_rx_pkt_log_cbf =
		dp_mon_filter_reset_rx_pktlog_cbf_1_0;
#ifdef BE_PKTLOG_SUPPORT
	/* Hybrid pktlog filter hooks intentionally unset for this target */
	mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
	mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
#endif
#endif
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
	mon_ops->mon_pktlogmod_exit = dp_pktlogmod_exit;
#endif
	/* Rx TLV length/subscription config hooks are unused here (NULL) */
	mon_ops->rx_hdr_length_set = NULL;
	mon_ops->rx_packet_length_set = NULL;
	mon_ops->rx_mon_enable = NULL;
	mon_ops->rx_wmask_subscribe = NULL;
	mon_ops->rx_enable_mpdu_logging = NULL;
	mon_ops->rx_enable_fpmo = NULL;
	mon_ops->mon_neighbour_peers_detach = dp_neighbour_peers_detach;
	mon_ops->mon_vdev_set_monitor_mode_buf_rings =
		dp_vdev_set_monitor_mode_buf_rings;
	mon_ops->mon_vdev_set_monitor_mode_rings =
		dp_vdev_set_monitor_mode_rings;
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_rx_stats_update = NULL;
	mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
	mon_ops->mon_rx_populate_ppdu_info = dp_mon_populate_ppdu_info_1_0;
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	mon_ops->mon_config_undecoded_metadata_capture =
		dp_mon_config_undecoded_metadata_capture;
	mon_ops->mon_filter_setup_undecoded_metadata_capture =
		dp_mon_filter_setup_undecoded_metadata_capture_1_0;
	mon_ops->mon_filter_reset_undecoded_metadata_capture =
		dp_mon_filter_reset_undecoded_metadata_capture_1_0;
#endif
	mon_ops->mon_rx_print_advanced_stats = NULL;
	mon_ops->mon_mac_filter_set = dp_mon_mac_filter_set;
}

/*
 * monitor_ops_1_0 - monitor ops table for 1.0 targets.
 *
 * Designated initializer: members not listed are zero (NULL) per C
 * semantics. NULL entries denote callbacks this implementation does not
 * provide (mostly tx-monitor and lite-monitor features).
 */
struct dp_mon_ops monitor_ops_1_0 = {
	.mon_soc_cfg_init = dp_mon_soc_cfg_init,
	.mon_pdev_alloc = NULL,
	.mon_pdev_free = NULL,
	.mon_pdev_attach = dp_mon_pdev_attach,
	.mon_pdev_detach = dp_mon_pdev_detach,
	.mon_pdev_init = dp_mon_pdev_init,
	.mon_pdev_deinit = dp_mon_pdev_deinit,
	.mon_vdev_attach = dp_mon_vdev_attach,
	.mon_vdev_detach = dp_mon_vdev_detach,
	.mon_peer_attach = dp_mon_peer_attach,
	.mon_peer_detach = dp_mon_peer_detach,
	.mon_peer_get_peerstats_ctx = dp_mon_peer_get_peerstats_ctx,
	.mon_peer_reset_stats = dp_mon_peer_reset_stats,
	.mon_peer_get_stats = dp_mon_peer_get_stats,
	.mon_invalid_peer_update_pdev_stats =
				dp_mon_invalid_peer_update_pdev_stats,
	.mon_peer_get_stats_param = dp_mon_peer_get_stats_param,
	.mon_flush_rings = dp_flush_monitor_rings,
#if !defined(DISABLE_MON_CONFIG)
	.mon_pdev_htt_srng_setup = dp_mon_htt_srng_setup_1_0,
#endif
#if defined(DP_CON_MON)
	.mon_service_rings = dp_service_mon_rings,
#endif
#ifndef DISABLE_MON_CONFIG
	/* Rx processing hook is resolved elsewhere for 1.0; unset here */
	.mon_rx_process = NULL,
#endif
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
	.mon_drop_packets_for_mac = dp_mon_drop_packets_for_mac,
#endif
	.mon_vdev_timer_init = dp_mon_vdev_timer_init,
	.mon_vdev_timer_start = dp_mon_vdev_timer_start,
	.mon_vdev_timer_stop = dp_mon_vdev_timer_stop,
	.mon_vdev_timer_deinit = dp_mon_vdev_timer_deinit,
	.mon_reap_timer_init = dp_mon_reap_timer_init,
	.mon_reap_timer_start = dp_mon_reap_timer_start,
	.mon_reap_timer_stop = dp_mon_reap_timer_stop,
	.mon_reap_timer_deinit = dp_mon_reap_timer_deinit,
	.mon_filter_setup_rx_mon_mode = dp_mon_filter_setup_mon_mode_1_0,
	.mon_filter_reset_rx_mon_mode = dp_mon_filter_reset_mon_mode_1_0,
	/* Tx monitor mode/filtering is not supported on 1.0 */
	.mon_filter_setup_tx_mon_mode = NULL,
	.mon_filter_reset_tx_mon_mode = NULL,
	.rx_mon_filter_update = dp_mon_filter_update_1_0,
	.tx_mon_filter_update = NULL,
	.set_mon_mode_buf_rings_tx = NULL,
	.rx_mon_desc_pool_init = dp_rx_pdev_mon_desc_pool_init,
	.rx_mon_desc_pool_deinit = dp_rx_pdev_mon_desc_pool_deinit,
	.rx_mon_desc_pool_alloc = dp_rx_pdev_mon_desc_pool_alloc,
	.rx_mon_desc_pool_free = dp_rx_pdev_mon_desc_pool_free,
	.rx_mon_buffers_alloc = dp_rx_pdev_mon_buffers_alloc,
	.rx_mon_buffers_free = dp_rx_pdev_mon_buffers_free,
	/* No tx-monitor descriptor pools on this generation */
	.tx_mon_desc_pool_init = NULL,
	.tx_mon_desc_pool_deinit = NULL,
	.tx_mon_desc_pool_alloc = NULL,
	.tx_mon_desc_pool_free = NULL,
	.tx_mon_filter_alloc = NULL,
	.mon_rings_alloc = dp_mon_rings_alloc_1_0,
	.mon_rings_free = dp_mon_rings_free_1_0,
	.mon_rings_init = dp_mon_rings_init_1_0,
	.mon_rings_deinit = dp_mon_rings_deinit_1_0,
#if !defined(DISABLE_MON_CONFIG)
	.mon_register_intr_ops = dp_mon_register_intr_ops_1_0,
#endif
	.mon_register_feature_ops = dp_mon_register_feature_ops_1_0,
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_1_0,
	.mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_1_0,
	.mon_peer_tx_capture_filter_check = dp_peer_tx_capture_filter_check_1_0,
#endif
#if (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH))
	/* Monitor support without tx capture: explicitly NULL these hooks */
	.mon_tx_ppdu_stats_attach = NULL,
	.mon_tx_ppdu_stats_detach = NULL,
	.mon_peer_tx_capture_filter_check = NULL,
#endif
	/* Lite monitor is not implemented on 1.0 targets */
	.mon_lite_mon_alloc = NULL,
	.mon_lite_mon_dealloc = NULL,
	.mon_lite_mon_vdev_delete = NULL,
	.mon_lite_mon_disable_rx = NULL,
};

/*
 * dp_ops_mon_1_0 - cdp (external control path) monitor ops for 1.0 targets.
 * NULL entries denote cdp calls this implementation does not service.
 */
struct cdp_mon_ops dp_ops_mon_1_0 = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = dp_config_full_mon_mode,
	.soc_config_full_mon_mode = dp_soc_config_full_mon_mode,
	.get_mon_pdev_rx_stats = dp_pdev_get_rx_mon_stats,
	.txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer,
#ifdef QCA_SUPPORT_LITE_MONITOR
	/* Lite monitor config entry points are unsupported here */
	.txrx_set_lite_mon_config = NULL,
	.txrx_get_lite_mon_config = NULL,
	.txrx_set_lite_mon_peer_config = NULL,
	.txrx_get_lite_mon_peer_config = NULL,
	.txrx_is_lite_mon_enabled = NULL,
#endif
	.txrx_set_mon_pdev_params_rssi_dbm_conv =
				dp_mon_pdev_params_rssi_dbm_conv,
};

#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
/**
 * dp_mon_ops_register_1_0() - register 1.0 monitor ops on a soc
 * @mon_soc: monitor soc context to receive the ops table
 *
 * Allocates a per-soc copy of monitor_ops_1_0 and attaches it to
 * @mon_soc->mon_ops. Logs and returns without effect if ops are already
 * registered or allocation fails (no status is reported to the caller).
 */
void dp_mon_ops_register_1_0(struct dp_mon_soc *mon_soc)
{
	struct dp_mon_ops *mon_ops = NULL;

	if (mon_soc->mon_ops) {
		dp_mon_err("monitor ops is allocated");
		return;
	}

	mon_ops = qdf_mem_malloc(sizeof(struct dp_mon_ops));
	if (!mon_ops) {
		dp_mon_err("Failed to allocate memory for mon ops");
		return;
	}

	/* Copy the template so each soc owns an independent table */
	qdf_mem_copy(mon_ops, &monitor_ops_1_0, sizeof(struct dp_mon_ops));
	mon_soc->mon_ops = mon_ops;
}

/**
 * dp_mon_cdp_ops_register_1_0() - register 1.0 cdp monitor ops
 * @ops: cdp ops container to receive the monitor ops table
 *
 * Allocates a copy of dp_ops_mon_1_0 and attaches it to @ops->mon_ops.
 * Logs and returns without effect if ops are already registered or
 * allocation fails (no status is reported to the caller).
 */
void dp_mon_cdp_ops_register_1_0(struct cdp_ops *ops)
{
	struct cdp_mon_ops *mon_ops = NULL;

	if (ops->mon_ops) {
		dp_mon_err("cdp monitor ops is allocated");
		return;
	}

	mon_ops = qdf_mem_malloc(sizeof(struct cdp_mon_ops));
	if (!mon_ops) {
		dp_mon_err("Failed to allocate memory for cdp mon ops");
		return;
	}

	/* Copy the template so the container owns an independent table */
	qdf_mem_copy(mon_ops, &dp_ops_mon_1_0, sizeof(struct cdp_mon_ops));
	ops->mon_ops = mon_ops;
}
#else
/**
 * dp_mon_ops_register_1_0() - register 1.0 monitor ops on a soc
 * @mon_soc: monitor soc context to receive the ops table
 *
 * Without per-soc ops support, point directly at the shared static table.
 */
void dp_mon_ops_register_1_0(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops = &monitor_ops_1_0;
}

/**
 * dp_mon_cdp_ops_register_1_0() - register 1.0 cdp monitor ops
 * @ops: cdp ops container to receive the monitor ops table
 *
 * Without per-soc ops support, point directly at the shared static table.
 */
void dp_mon_cdp_ops_register_1_0(struct cdp_ops *ops)
{
	ops->mon_ops = &dp_ops_mon_1_0;
}
#endif