/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <dp_types.h>
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_mon_filter.h>
#include <dp_htt.h>
#include <dp_mon.h>
#include <dp_rx_mon.h>
#include <dp_internal.h>
#include "htt_ppdu_stats.h"
#include "dp_cal_client_api.h"
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif
#ifdef QCA_SUPPORT_LITE_MONITOR
#include "dp_lite_mon.h"
#endif
#include "dp_mon_1.0.h"
#ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
#include "dp_mon_2.0.h"
#include "dp_mon_filter_2.0.h"
#endif

#define DP_INTR_POLL_TIMER_MS 5
#define INVALID_FREE_BUFF 0xffffffff

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
#define MAX_STRING_LEN_PER_FIELD 6
#define DP_UNDECODED_ERR_LENGTH (MAX_STRING_LEN_PER_FIELD * CDP_PHYRX_ERR_MAX)
#endif

#ifdef QCA_MCOPY_SUPPORT
static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->mcopy_mode = M_COPY_DISABLED;
	mon_pdev->mvdev = NULL;
}

static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (mon_pdev->mcopy_mode) {
		cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
		if (cdp_ops && cdp_ops->config_full_mon_mode)
			cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
							  DP_FULL_MON_ENABLE);
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to reset AM copy mode filters"));
		}
		mon_pdev->monitor_configured = false;
	}
}

static QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops;
	struct cdp_mon_ops *cdp_ops;

	if (mon_pdev->mvdev)
		return QDF_STATUS_E_RESOURCES;

	mon_pdev->mcopy_mode = val;
	mon_pdev->tx_sniffer_enable = 0;
	mon_pdev->monitor_configured = true;

	mon_ops = dp_mon_ops_get(pdev->soc);
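	/*
	 * When monitor buffer replenish is not deferred, set up the
	 * monitor mode rings for this pdev right away.
	 */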
	if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx)) {
		if (mon_ops &&
		    mon_ops->mon_vdev_set_monitor_mode_rings)
			mon_ops->mon_vdev_set_monitor_mode_rings(pdev, true);
	}

	/*
	 * Setup the M copy mode filter.
	 */
	cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
	if (cdp_ops && cdp_ops->config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_ENABLE);
	dp_mon_filter_setup_mcopy_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set M_copy mode filters"));
		dp_mon_filter_reset_mcopy_mode(pdev);
		dp_pdev_disable_mcopy_code(pdev);
		return status;
	}

	if (!mon_pdev->pktlog_ppdu_stats)
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_SNIFFER,
					  pdev->pdev_id);

	return status;
}
#else
static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* QCA_MCOPY_SUPPORT */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
static QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->undecoded_metadata_capture) {
		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Undecoded capture filter reset failed"));
		}
	}
	mon_pdev->undecoded_metadata_capture = 0;
	return status;
}

static QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev) {
		qdf_err("monitor_pdev is NULL");
		return QDF_STATUS_E_RESOURCES;
	}

	mon_pdev->undecoded_metadata_capture = val;
	mon_pdev->monitor_configured = true;

	/* Setup the undecoded metadata capture mode filter.
	 */
	dp_mon_filter_setup_undecoded_metadata_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set Undecoded capture filters"));
		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
		return status;
	}

	return status;
}
#else
static inline QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* QCA_UNDECODED_METADATA_SUPPORT */

QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
				 uint8_t pdev_id,
				 uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	status = dp_reset_monitor_mode_unlock(soc_hdl, pdev_id,
					      special_monitor);
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	return status;
}

QDF_STATUS dp_reset_monitor_mode_unlock(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id,
					uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode) {
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_DISABLE);
		mon_pdev->hold_mon_dest_ring = false;
		mon_pdev->is_bkpressure = false;
		mon_pdev->set_reset_mon = false;
#if defined(QCA_SUPPORT_FULL_MON)
		if (mon_pdev->mon_desc)
			qdf_mem_zero(mon_pdev->mon_desc,
				     sizeof(struct hal_rx_mon_desc_info));
#endif
	}

	mon_pdev->mvdev = NULL;

	/*
	 * Lite monitor mode, smart monitor mode and monitor
	 * mode use this API for filter reset and mode disable
	 */
	if (mon_pdev->mcopy_mode) {
#if defined(QCA_MCOPY_SUPPORT)
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
#endif /* QCA_MCOPY_SUPPORT */
	} else if (special_monitor) {
#if defined(ATH_SUPPORT_NAC)
		dp_mon_filter_reset_smart_monitor(pdev);
#endif /* ATH_SUPPORT_NAC */
		/* for mon 2.0 we make use of lite mon to
		 * set filters for smart monitor use case.
		 */
		dp_monitor_lite_mon_disable_rx(pdev);
	} else if (mon_pdev->undecoded_metadata_capture) {
#ifdef QCA_UNDECODED_METADATA_SUPPORT
		dp_reset_undecoded_metadata_capture(pdev);
#endif
	} else {
		dp_mon_filter_reset_mon_mode(pdev);
	}
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
				   soc);
	}

	mon_pdev->monitor_configured = false;

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_ADVANCE_MON_FILTER_SUPPORT
QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				   struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exist in a system but only one can be up at
	 * any time
	 */
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev || !pdev->monitor_pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	vdev = mon_pdev->mvdev;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		  pdev, pdev_id, soc, vdev);

	/* Check if current pdev's monitor_vdev exists */
	if (!mon_pdev->mvdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "vdev=%pK", vdev);
		qdf_assert(vdev);
	}

	/* update filter mode, type in pdev structure */
	mon_pdev->mon_filter_mode = filter_val->mode;
	mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt;
	mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl;
	mon_pdev->fp_data_filter = filter_val->fp_data;
	mon_pdev->mo_mgmt_filter = filter_val->mo_mgmt;
	mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl;
	mon_pdev->mo_data_filter = filter_val->mo_data;

	dp_mon_filter_setup_mon_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode",
				   soc);
		dp_mon_filter_reset_mon_mode(pdev);
	}

	return status;
}
#endif

QDF_STATUS
dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	dp_deliver_mgmt_frm(pdev, nbuf);

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
/**
 * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: 0 on success, not 0 on failure
 */
static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
	mon_vdev->scan_spcl_vap_stats =
		qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats));

	if (!mon_vdev->scan_spcl_vap_stats) {
		dp_mon_err("scan spcl vap stats attach fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: void
 */
static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
	if (mon_vdev->scan_spcl_vap_stats) {
		qdf_mem_free(mon_vdev->scan_spcl_vap_stats);
		mon_vdev->scan_spcl_vap_stats = NULL;
	}
}

/**
 * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats
 * @vdev: Datapath VDEV handle
 *
 * Return: void
 */
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev;

	mon_pdev = vdev->pdev->monitor_pdev;
	if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable)
		return;

	mon_vdev = vdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
		return;

	qdf_mem_zero(mon_vdev->scan_spcl_vap_stats,
		     sizeof(struct cdp_scan_spcl_vap_stats));
}

/**
 * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev id
 * @stats: structure to hold spcl vap stats
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   struct cdp_scan_spcl_vap_stats *stats)
{
	struct dp_mon_vdev *mon_vdev = NULL;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev || !stats) {
		if (vdev)
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_INVAL;
	}

	mon_vdev = vdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_INVAL;
	}

	qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats,
		     sizeof(struct cdp_scan_spcl_vap_stats));

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
#else
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
}

static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
}
#endif

/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @dp_soc: DP soc context
 * @vdev_id: vdev ID
 * @special_monitor: Flag to denote if it is smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
				    uint8_t vdev_id,
				    uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (!pdev || !pdev->monitor_pdev) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	mon_pdev = pdev->monitor_pdev;

	mon_pdev->mvdev = vdev;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
		  pdev, pdev->pdev_id, pdev->soc, vdev);

	/*
	 * do not configure monitor buf ring and filter for smart and
	 * lite monitor
	 * for smart monitor filters are added along with first NAC
	 * for lite monitor required configuration done through
	 * dp_set_pdev_param
	 */

	if (special_monitor) {
		status = QDF_STATUS_SUCCESS;
		goto fail;
	}

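	/*
	 * If a scan special vap was configured earlier, clear its stale
	 * rx stats before this monitor vdev is brought up.
	 */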
	if (mon_pdev->scan_spcl_vap_configured)
		dp_reset_scan_spcl_vap_stats(vdev);

	/* Check if current pdev's monitor_vdev exists */
	if (mon_pdev->monitor_configured) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "monitor vap already created vdev=%pK\n", vdev);
		status = QDF_STATUS_E_RESOURCES;
		goto fail;
	}

	mon_pdev->monitor_configured = true;
	mon_pdev->phy_ppdu_id_size = hal_rx_get_phy_ppdu_id_size(soc->hal_soc);

	/* If advance monitor filter is applied using lite_mon
	 * via vap configuration, required filters are already applied
	 * hence returning SUCCESS from here.
	 */
	if (dp_monitor_lite_mon_is_rx_adv_filter_enable(pdev)) {
		status = QDF_STATUS_SUCCESS;
		goto fail;
	}
	/* disable lite mon if configured, monitor vap takes
	 * priority over lite mon when it is created. Lite mon
	 * can be configured later again.
	 */
	dp_monitor_lite_mon_disable_rx(pdev);

	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_ENABLE);
	dp_mon_filter_setup_mon_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_cdp_err("%pK: Failed to reset monitor filters", soc);
		dp_mon_filter_reset_mon_mode(pdev);
		mon_pdev->monitor_configured = false;
		mon_pdev->mvdev = NULL;
	}

fail:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}

#ifdef QCA_TX_CAPTURE_SUPPORT
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->tx_sniffer_enable = 1;
	mon_pdev->monitor_configured = false;

	if (!mon_pdev->pktlog_ppdu_stats)
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_SNIFFER,
					  pdev->pdev_id);

	return QDF_STATUS_SUCCESS;
}
#else
#ifdef QCA_MCOPY_SUPPORT
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_INVAL;
}
#endif
#endif

#if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT)
QDF_STATUS
dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/*
	 * Note: The mirror copy mode cannot co-exist with any other
	 * monitor modes. Hence disabling the filter for this mode will
	 * reset the monitor destination ring filters.
	 */
	dp_reset_mcopy_mode(pdev);
	switch (val) {
	case 0:
		mon_pdev->tx_sniffer_enable = 0;
		mon_pdev->monitor_configured = false;

		/*
		 * We don't need to reset the Rx monitor status ring or call
		 * the API dp_ppdu_ring_reset() if all debug sniffer modes are
		 * disabled. The Rx monitor status ring will be disabled when
		 * the last mode using the monitor status ring gets disabled.
		 */
		if (!mon_pdev->pktlog_ppdu_stats &&
		    !mon_pdev->enhanced_stats_en &&
		    !mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
		} else if (mon_pdev->enhanced_stats_en &&
			   !mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_ENH_STATS,
						  pdev->pdev_id);
		} else if (!mon_pdev->enhanced_stats_en &&
			   mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_ENH,
						  pdev->pdev_id);
		} else {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR,
						  pdev->pdev_id);
		}
		break;

	case 1:
		status = dp_config_tx_capture_mode(pdev);
		break;
	case 2:
	case 4:
		status = dp_config_mcopy_mode(pdev, val);
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid value, mode: %d not supported", val);
		status = QDF_STATUS_E_INVAL;
		break;
	}
	return status;
}
#endif

#ifdef QCA_UNDECODED_METADATA_SUPPORT
QDF_STATUS
dp_mon_config_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev && !mon_pdev->scan_spcl_vap_configured) {
		qdf_err("No monitor or Special vap, undecoded capture not supported");
		return QDF_STATUS_E_RESOURCES;
	}

	if (val)
		status = dp_enable_undecoded_metadata_capture(pdev, val);
	else
		status = dp_reset_undecoded_metadata_capture(pdev);

	return status;
}
#endif

/**
 * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
 *				   ring based on target
 * @soc: soc handle
 * @mac_for_pdev: WIN- pdev_id, MCL- mac id
 * @pdev: physical device handle
 * @ring_num: mac id
 * @htt_tlv_filter: tlv filter
 *
 * Return: zero on success, non-zero on failure
 */
static inline QDF_STATUS
dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
			    struct dp_pdev *pdev, uint8_t ring_num,
			    struct htt_rx_ring_tlv_filter htt_tlv_filter)
{
	QDF_STATUS status;

	if (soc->wlan_cfg_ctx->rxdma1_enable)
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     soc->rxdma_mon_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_MONITOR_BUF,
					     RX_MONITOR_BUFFER_SIZE,
					     &htt_tlv_filter);
	else
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     pdev->rx_mac_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					     &htt_tlv_filter);

	return status;
}

/**
 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device instance id
 *
 * Return: virtual interface id
 */
static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
					       uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev || !pdev->monitor_pdev ||
			 !pdev->monitor_pdev->mvdev))
		return -EINVAL;

	return pdev->monitor_pdev->mvdev->vdev_id;
}

#if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT)
#ifndef WLAN_TX_PKT_CAPTURE_ENH
void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

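	/*
	 * Hand the Tx mgmt frame to WDI consumers when the Tx sniffer or
	 * M-copy mode is active; otherwise free it unless BPR still needs it.
	 */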
	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	} else {
		if (!mon_pdev->bpr_enable)
			qdf_nbuf_free(nbuf);
	}
}
#endif
#endif

QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);

	if (!mon_pdev->ppdu_tlv_buf) {
		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
	struct ppdu_info *ppdu_info, *ppdu_info_next;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->sched_comp_list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	if (mon_pdev->ppdu_tlv_buf)
		qdf_mem_free(mon_pdev->ppdu_tlv_buf);
}

QDF_STATUS dp_pdev_get_rx_mon_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				    struct cdp_pdev_mon_stats *stats)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_copy(stats, &mon_pdev->rx_mon_stats,
		     sizeof(struct cdp_pdev_mon_stats));

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_UNDECODED_METADATA_SUPPORT
/**
 * dp_pdev_get_undecoded_capture_stats() - Get undecoded metadata captured
 *					   monitor pdev stats
 * @mon_pdev: Monitor PDEV handle
 * @rx_mon_stats: Monitor pdev status/destination ring stats
 *
 * Return: None
 */
static inline void
dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
				    struct cdp_pdev_mon_stats *rx_mon_stats)
{
	char undecoded_error[DP_UNDECODED_ERR_LENGTH];
	uint8_t index = 0, i;

	DP_PRINT_STATS("Rx Undecoded Frame count:%d",
		       rx_mon_stats->rx_undecoded_count);
	index = 0;
	for (i = 0; i < (CDP_PHYRX_ERR_MAX); i++) {
		index += qdf_snprint(&undecoded_error[index],
				     DP_UNDECODED_ERR_LENGTH - index,
				     " %d", rx_mon_stats->rx_undecoded_error[i]);
	}
	DP_PRINT_STATS("Undecoded Error (0-63):%s", undecoded_error);
}
#else
static inline void
dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
				    struct cdp_pdev_mon_stats *rx_mon_stats)
{
}
#endif

static const char *
dp_preamble_type_str[] = {
	"preamble OFDMA ",
	"preamble CCK ",
	"preamble HT ",
	"preamble VHT ",
	"preamble HE ",
	"preamble EHT ",
	"preamble NO SUPPORT",
};

static const char *
dp_reception_type_str[] = {
	"reception su ",
	"reception mu_mimo ",
	"reception ofdma ",
	"reception ofdma mimo",
};

static const char *
dp_mu_dl_ul_str[] = {
	"MU DL",
	"MU UL",
};

static inline void
dp_print_pdev_mpdu_fcs_ok_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
			      uint32_t pkt_t, uint32_t rx_t,
			      uint32_t dl_ul, uint32_t user)
{
	DP_PRINT_STATS("%s, %s, %s, user=%d, mpdu_fcs_ok=%d",
		       dp_preamble_type_str[pkt_t],
		       dp_reception_type_str[rx_t],
		       dp_mu_dl_ul_str[dl_ul],
		       user,
		       rx_mon_sts->mpdu_cnt_fcs_ok[pkt_t][rx_t][dl_ul][user]);
}

static inline void
dp_print_pdev_mpdu_fcs_err_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
			       uint32_t pkt_t, uint32_t rx_t,
			       uint32_t dl_ul, uint32_t user)
{
	DP_PRINT_STATS("%s, %s, %s, user=%d, mpdu_fcs_err=%d",
		       dp_preamble_type_str[pkt_t],
		       dp_reception_type_str[rx_t],
		       dp_mu_dl_ul_str[dl_ul],
		       user,
		       rx_mon_sts->mpdu_cnt_fcs_err[pkt_t][rx_t][dl_ul][user]);
}

static inline void
dp_print_pdev_mpdu_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
		       uint32_t pkt_t, uint32_t rx_t,
		       uint32_t dl_ul, uint32_t user)
{
	if (rx_mon_sts->mpdu_cnt_fcs_ok[pkt_t][rx_t][dl_ul][user])
		dp_print_pdev_mpdu_fcs_ok_cnt(rx_mon_sts, pkt_t, rx_t,
					      dl_ul, user);

	if (rx_mon_sts->mpdu_cnt_fcs_err[pkt_t][rx_t][dl_ul][user])
		dp_print_pdev_mpdu_fcs_err_cnt(rx_mon_sts, pkt_t, rx_t,
					       dl_ul, user);
}

static inline void
dp_print_pdev_mpdu_user(struct cdp_pdev_mon_stats *rx_mon_sts,
			uint32_t pkt_t, uint32_t rx_t,
			uint32_t dl_ul)
{
	uint32_t user;

	for (user = 0; user < CDP_MU_SNIF_USER_MAX; user++)
		dp_print_pdev_mpdu_cnt(rx_mon_sts, pkt_t, rx_t,
				       dl_ul, user);
}

static inline void
dp_print_pdev_mpdu_dl_ul(struct cdp_pdev_mon_stats *rx_mon_sts,
			 uint32_t pkt_t, uint32_t rx_t)
{
	uint32_t dl_ul;

	for (dl_ul = CDP_MU_TYPE_DL; dl_ul < CDP_MU_TYPE_MAX; dl_ul++)
		dp_print_pdev_mpdu_user(rx_mon_sts, pkt_t, rx_t,
					dl_ul);
}

static inline void
dp_print_pdev_mpdu_rx_type(struct cdp_pdev_mon_stats *rx_mon_sts,
			   uint32_t pkt_t)
{
	uint32_t rx_t;

	for (rx_t = CDP_RX_TYPE_SU; rx_t < CDP_RX_TYPE_MAX; rx_t++)
		dp_print_pdev_mpdu_dl_ul(rx_mon_sts, pkt_t, rx_t);
}

static inline void
dp_print_pdev_mpdu_pkt_type(struct cdp_pdev_mon_stats *rx_mon_sts)
{
	uint32_t pkt_t;

	for (pkt_t = CDP_PKT_TYPE_OFDM; pkt_t < CDP_PKT_TYPE_MAX; pkt_t++)
		dp_print_pdev_mpdu_rx_type(rx_mon_sts, pkt_t);
}

static inline void
print_ppdu_eht_type_mode(
	struct cdp_pdev_mon_stats *rx_mon_stats,
	uint32_t ppdu_type_mode,
	uint32_t dl_ul)
{
	DP_PRINT_STATS("type_mode=%d, dl_ul=%d, cnt=%d",
		       ppdu_type_mode,
		       dl_ul,
		       rx_mon_stats->ppdu_eht_type_mode[ppdu_type_mode][dl_ul]);
}

static inline void
print_ppdu_eth_type_mode_dl_ul(
	struct cdp_pdev_mon_stats *rx_mon_stats,
	uint32_t ppdu_type_mode
)
{
	uint32_t dl_ul;

	for (dl_ul = 0; dl_ul < CDP_MU_TYPE_MAX; dl_ul++) {
		if (rx_mon_stats->ppdu_eht_type_mode[ppdu_type_mode][dl_ul])
			print_ppdu_eht_type_mode(rx_mon_stats,
						 ppdu_type_mode, dl_ul);
	}
}

static inline void
dp_print_pdev_eht_ppdu_cnt(struct dp_pdev *pdev)
{
	struct cdp_pdev_mon_stats *rx_mon_stats;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	uint32_t ppdu_type_mode;

	rx_mon_stats = &mon_pdev->rx_mon_stats;
	DP_PRINT_STATS("Monitor EHT PPDU Count");
	for (ppdu_type_mode = 0; ppdu_type_mode < CDP_EHT_TYPE_MODE_MAX;
	     ppdu_type_mode++) {
		print_ppdu_eth_type_mode_dl_ul(rx_mon_stats,
					       ppdu_type_mode);
	}
}

static inline void
dp_print_pdev_mpdu_stats(struct dp_pdev *pdev)
{
	struct cdp_pdev_mon_stats *rx_mon_stats;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	rx_mon_stats = &mon_pdev->rx_mon_stats;
	DP_PRINT_STATS("Monitor MPDU Count");
	dp_print_pdev_mpdu_pkt_type(rx_mon_stats);
}

void
dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
	struct cdp_pdev_mon_stats *rx_mon_stats;
	uint32_t *stat_ring_ppdu_ids;
	uint32_t *dest_ring_ppdu_ids;
	int i, idx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	rx_mon_stats = &mon_pdev->rx_mon_stats;

	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");

	DP_PRINT_STATS("status_ppdu_compl_cnt = %d",
		       rx_mon_stats->status_ppdu_compl);
	DP_PRINT_STATS("status_ppdu_start_cnt = %d",
		       rx_mon_stats->status_ppdu_start);
	DP_PRINT_STATS("status_ppdu_end_cnt = %d",
		       rx_mon_stats->status_ppdu_end);
	DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d",
		       rx_mon_stats->status_ppdu_start_mis);
	DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d",
		       rx_mon_stats->status_ppdu_end_mis);

	DP_PRINT_STATS("start_user_info_cnt = %d",
		       rx_mon_stats->start_user_info_cnt);
	DP_PRINT_STATS("end_user_stats_cnt = %d",
		       rx_mon_stats->end_user_stats_cnt);

	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
		       rx_mon_stats->status_ppdu_done);
	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
		       rx_mon_stats->dest_ppdu_done);
	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
		       rx_mon_stats->dest_mpdu_done);
	DP_PRINT_STATS("tlv_tag_status_err_cnt = %u",
		       rx_mon_stats->tlv_tag_status_err);
	DP_PRINT_STATS("mon status DMA not done WAR count= %u",
		       rx_mon_stats->status_buf_done_war);
	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
		       rx_mon_stats->dest_mpdu_drop);
	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
		       rx_mon_stats->dup_mon_linkdesc_cnt);
	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
		       rx_mon_stats->dup_mon_buf_cnt);
	DP_PRINT_STATS("mon_rx_buf_reaped = %u",
		       rx_mon_stats->mon_rx_bufs_reaped_dest);
	DP_PRINT_STATS("mon_rx_buf_replenished = %u",
		       rx_mon_stats->mon_rx_bufs_replenished_dest);
	DP_PRINT_STATS("ppdu_id_mismatch = %u",
		       rx_mon_stats->ppdu_id_mismatch);
	DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d",
		       rx_mon_stats->ppdu_id_match);
	DP_PRINT_STATS("ppdus dropped frm status ring = %d",
		       rx_mon_stats->status_ppdu_drop);
	DP_PRINT_STATS("ppdus dropped frm dest ring = %d",
		       rx_mon_stats->dest_ppdu_drop);
	DP_PRINT_STATS("mpdu_ppdu_id_mismatch_drop = %u",
		       rx_mon_stats->mpdu_ppdu_id_mismatch_drop);
	DP_PRINT_STATS("mpdu_decap_type_invalid = %u",
		       rx_mon_stats->mpdu_decap_type_invalid);
	DP_PRINT_STATS("pending_desc_count = %u",
		       rx_mon_stats->pending_desc_count);
	stat_ring_ppdu_ids =
		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	dest_ring_ppdu_ids =
		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);

	if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids)
		DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n");

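	/*
	 * Snapshot both PPDU id histories under the monitor lock so that
	 * the table printed below is internally consistent.
	 */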
	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	idx = rx_mon_stats->ppdu_id_hist_idx;
	qdf_mem_copy(stat_ring_ppdu_ids,
		     rx_mon_stats->stat_ring_ppdu_id_hist,
		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	qdf_mem_copy(dest_ring_ppdu_ids,
		     rx_mon_stats->dest_ring_ppdu_id_hist,
		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	DP_PRINT_STATS("PPDU Id history:");
	DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids");
	for (i = 0; i < MAX_PPDU_ID_HIST; i++) {
		idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1);
		DP_PRINT_STATS("%*u\t%*u", 16,
			       rx_mon_stats->stat_ring_ppdu_id_hist[idx], 16,
			       rx_mon_stats->dest_ring_ppdu_id_hist[idx]);
	}
	qdf_mem_free(stat_ring_ppdu_ids);
	qdf_mem_free(dest_ring_ppdu_ids);
	DP_PRINT_STATS("mon_rx_dest_stuck = %d",
		       rx_mon_stats->mon_rx_dest_stuck);

	dp_pdev_get_undecoded_capture_stats(mon_pdev, rx_mon_stats);
	dp_mon_rx_print_advanced_stats(pdev->soc, pdev);

	dp_print_pdev_mpdu_stats(pdev);
	dp_print_pdev_eht_ppdu_cnt(pdev);
}

#ifdef QCA_SUPPORT_BPR
QDF_STATUS
dp_set_bpr_enable(struct dp_pdev *pdev, int val)
{
	struct dp_mon_ops *mon_ops;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (mon_ops && mon_ops->mon_set_bpr_enable)
		return mon_ops->mon_set_bpr_enable(pdev, val);

	return QDF_STATUS_E_FAILURE;
}
#endif

#ifdef WDI_EVENT_ENABLE
#ifdef BE_PKTLOG_SUPPORT
static bool
dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
			    struct dp_mon_pdev *mon_pdev,
			    struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_ops *mon_ops = NULL;
	uint16_t num_buffers;

	/* Nothing needs to be done if monitor mode is
	 * enabled
	 */
	if (mon_pdev->mvdev)
		return false;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_filter_err("Mon ops uninitialized");
		return QDF_STATUS_E_FAILURE;
	}

	if (!mon_pdev->pktlog_hybrid_mode) {
		mon_pdev->pktlog_hybrid_mode = true;
		soc_cfg_ctx = soc->wlan_cfg_ctx;
		num_buffers =
			wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);

		if (mon_ops && mon_ops->set_mon_mode_buf_rings_tx)
			mon_ops->set_mon_mode_buf_rings_tx(pdev, num_buffers);

		dp_mon_filter_setup_pktlog_hybrid(pdev);
		if (dp_tx_mon_filter_update(pdev) !=
		    QDF_STATUS_SUCCESS) {
			dp_cdp_err("Set hybrid filters failed");
			dp_mon_filter_reset_pktlog_hybrid(pdev);
			mon_pdev->rx_pktlog_mode =
				DP_RX_PKTLOG_DISABLED;
			return false;
		}

		dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_PKTLOG);
	}

	return true;
}

static void
dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
{
	mon_pdev->pktlog_hybrid_mode = false;
}
#else
static void
dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
{
}

static bool
dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
			    struct dp_mon_pdev *mon_pdev,
			    struct dp_soc *soc)
{
	dp_cdp_err("Hybrid mode is supported only on beryllium");
	return true;
}
#endif
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;
	struct dp_mon_ops *mon_ops;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	soc = pdev->soc;
	mon_ops = dp_mon_ops_get(soc);

	if (!mon_ops)
		return 0;

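	/*
	 * The effective number of MAC rings can change when DBS is active,
	 * so refresh max_mac_rings before walking the rings below.
	 */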
	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			/* Nothing needs to be done if monitor mode is
			 * enabled
			 */
			if (mon_pdev->mvdev)
				return 0;

			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
				break;

			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
			dp_mon_filter_setup_rx_pkt_log_full(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: Pktlog full filters set failed",
					   soc);
				dp_mon_filter_reset_rx_pkt_log_full(pdev);
				mon_pdev->rx_pktlog_mode =
					DP_RX_PKTLOG_DISABLED;
				return 0;
			}

			dp_monitor_reap_timer_start(soc,
						    CDP_MON_REAP_SOURCE_PKTLOG);
			break;

		case WDI_EVENT_LITE_RX:
			/* Nothing needs to be done if monitor mode is
			 * enabled
			 */
			if (mon_pdev->mvdev)
				return 0;

			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
				break;

			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

			/*
			 * Set the packet log lite mode filter.
			 */
			dp_mon_filter_setup_rx_pkt_log_lite(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: Pktlog lite filters set failed",
					   soc);
				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
				mon_pdev->rx_pktlog_mode =
					DP_RX_PKTLOG_DISABLED;
				return 0;
			}

			dp_monitor_reap_timer_start(soc,
						    CDP_MON_REAP_SOURCE_PKTLOG);
			break;
		case WDI_EVENT_LITE_T2H:
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
						mac_id, pdev->pdev_id);

				mon_pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		case WDI_EVENT_RX_CBF:
			/* Nothing needs to be done if monitor mode is
			 * enabled
			 */
			if (mon_pdev->mvdev)
				return 0;

			if (mon_pdev->rx_pktlog_cbf)
				break;

			mon_pdev->rx_pktlog_cbf = true;
			mon_pdev->monitor_configured = true;
			if (mon_ops->mon_vdev_set_monitor_mode_buf_rings)
				mon_ops->mon_vdev_set_monitor_mode_buf_rings(
					pdev);

			/*
			 * Set the packet log lite mode filter.
1320 */ 1321 qdf_info("Non mon mode: Enable destination ring"); 1322 1323 dp_mon_filter_setup_rx_pkt_log_cbf(pdev); 1324 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { 1325 dp_mon_err("Pktlog set CBF filters failed"); 1326 dp_mon_filter_reset_rx_pktlog_cbf(pdev); 1327 mon_pdev->rx_pktlog_mode = 1328 DP_RX_PKTLOG_DISABLED; 1329 mon_pdev->monitor_configured = false; 1330 return 0; 1331 } 1332 1333 dp_monitor_reap_timer_start(soc, 1334 CDP_MON_REAP_SOURCE_PKTLOG); 1335 break; 1336 case WDI_EVENT_HYBRID_TX: 1337 if (!dp_set_hybrid_pktlog_enable(pdev, mon_pdev, soc)) 1338 return 0; 1339 break; 1340 1341 default: 1342 /* Nothing needs to be done for other pktlog types */ 1343 break; 1344 } 1345 } else { 1346 switch (event) { 1347 case WDI_EVENT_RX_DESC: 1348 case WDI_EVENT_LITE_RX: 1349 /* Nothing needs to be done if monitor mode is 1350 * enabled 1351 */ 1352 if (mon_pdev->mvdev) 1353 return 0; 1354 1355 if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_DISABLED) 1356 break; 1357 1358 mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED; 1359 dp_mon_filter_reset_rx_pkt_log_full(pdev); 1360 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { 1361 dp_cdp_err("%pK: Pktlog filters reset failed", 1362 soc); 1363 return 0; 1364 } 1365 1366 dp_mon_filter_reset_rx_pkt_log_lite(pdev); 1367 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { 1368 dp_cdp_err("%pK: Pktlog filters reset failed", 1369 soc); 1370 return 0; 1371 } 1372 1373 dp_monitor_reap_timer_stop(soc, 1374 CDP_MON_REAP_SOURCE_PKTLOG); 1375 break; 1376 case WDI_EVENT_LITE_T2H: 1377 /* 1378 * To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW 1379 * passing value 0. Once these macros will define in htt 1380 * header file will use proper macros 1381 */ 1382 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { 1383 int mac_for_pdev = 1384 dp_get_mac_id_for_pdev(mac_id, 1385 pdev->pdev_id); 1386 1387 mon_pdev->pktlog_ppdu_stats = false; 1388 if (!mon_pdev->enhanced_stats_en && 1389 !mon_pdev->tx_sniffer_enable && 1390 !mon_pdev->mcopy_mode) { 1391 dp_h2t_cfg_stats_msg_send(pdev, 0, 1392 mac_for_pdev); 1393 } else if (mon_pdev->tx_sniffer_enable || 1394 mon_pdev->mcopy_mode) { 1395 dp_h2t_cfg_stats_msg_send(pdev, 1396 DP_PPDU_STATS_CFG_SNIFFER, 1397 mac_for_pdev); 1398 } else if (mon_pdev->enhanced_stats_en) { 1399 dp_h2t_cfg_stats_msg_send(pdev, 1400 DP_PPDU_STATS_CFG_ENH_STATS, 1401 mac_for_pdev); 1402 } 1403 } 1404 1405 break; 1406 case WDI_EVENT_RX_CBF: 1407 mon_pdev->rx_pktlog_cbf = false; 1408 break; 1409 1410 case WDI_EVENT_HYBRID_TX: 1411 dp_set_hybrid_pktlog_disable(mon_pdev); 1412 break; 1413 1414 default: 1415 /* Nothing needs to be done for other pktlog types */ 1416 break; 1417 } 1418 } 1419 return 0; 1420 } 1421 #endif 1422 1423 /* MCL specific functions */ 1424 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG) 1425 void dp_pktlogmod_exit(struct dp_pdev *pdev) 1426 { 1427 struct dp_soc *soc = pdev->soc; 1428 struct hif_opaque_softc *scn = soc->hif_handle; 1429 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1430 1431 if (!scn) { 1432 dp_mon_err("Invalid hif(scn) handle"); 1433 return; 1434 } 1435 1436 dp_monitor_reap_timer_stop(soc, CDP_MON_REAP_SOURCE_PKTLOG); 1437 pktlogmod_exit(scn); 1438 mon_pdev->pkt_log_init = false; 1439 } 1440 #endif /*DP_CON_MON*/ 1441 1442 #if defined(WDI_EVENT_ENABLE) && defined(QCA_ENHANCED_STATS_SUPPORT) 1443 #ifdef IPA_OFFLOAD 1444 static void 1445 dp_peer_get_tx_rx_stats(struct dp_peer *peer, 1446 struct cdp_interface_peer_stats *peer_stats_intf) 1447 { 1448 struct dp_rx_tid *rx_tid = 
	struct dp_rx_tid *rx_tid = NULL;
	uint8_t i = 0;

	for (i = 0; i < DP_MAX_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];
		peer_stats_intf->rx_byte_count +=
			rx_tid->rx_msdu_cnt.bytes;
		peer_stats_intf->rx_packet_count +=
			rx_tid->rx_msdu_cnt.num;
	}
	peer_stats_intf->tx_packet_count =
		peer->monitor_peer->stats.tx.tx_ucast_success.num;
	peer_stats_intf->tx_byte_count =
		peer->monitor_peer->stats.tx.tx_ucast_success.bytes;
}
#else
static void
dp_peer_get_tx_rx_stats(struct dp_peer *peer,
			struct cdp_interface_peer_stats *peer_stats_intf)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *tgt_peer = NULL;
	uint8_t inx = 0;
	uint8_t stats_arr_size;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	txrx_peer = tgt_peer->txrx_peer;
	peer_stats_intf->rx_packet_count = txrx_peer->to_stack.num;
	peer_stats_intf->rx_byte_count = txrx_peer->to_stack.bytes;
	stats_arr_size = txrx_peer->stats_arr_size;

	for (inx = 0; inx < stats_arr_size; inx++) {
		peer_stats_intf->tx_packet_count +=
			txrx_peer->stats[inx].per_pkt_stats.tx.ucast.num;
		peer_stats_intf->tx_byte_count +=
			txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.bytes;
	}
}
#endif

QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
{
	struct cdp_interface_peer_stats peer_stats_intf = {0};
	struct dp_mon_peer_stats *mon_peer_stats = NULL;
	struct dp_peer *tgt_peer = NULL;
	struct dp_txrx_peer *txrx_peer = NULL;

	if (qdf_unlikely(!peer || !peer->vdev || !peer->monitor_peer))
		return QDF_STATUS_E_FAULT;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	if (qdf_unlikely(!tgt_peer))
		return QDF_STATUS_E_FAULT;

	txrx_peer = tgt_peer->txrx_peer;
	if (!qdf_unlikely(txrx_peer))
		return QDF_STATUS_E_FAULT;

	mon_peer_stats = &peer->monitor_peer->stats;

	if (mon_peer_stats->rx.last_snr != mon_peer_stats->rx.snr)
		peer_stats_intf.rssi_changed = true;

	if ((mon_peer_stats->rx.snr && peer_stats_intf.rssi_changed) ||
	    (mon_peer_stats->tx.tx_rate &&
	     mon_peer_stats->tx.tx_rate != mon_peer_stats->tx.last_tx_rate)) {
		qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		peer_stats_intf.vdev_id = peer->vdev->vdev_id;
		peer_stats_intf.last_peer_tx_rate =
					mon_peer_stats->tx.last_tx_rate;
		peer_stats_intf.peer_tx_rate = mon_peer_stats->tx.tx_rate;
		peer_stats_intf.peer_rssi = mon_peer_stats->rx.snr;
		peer_stats_intf.ack_rssi = mon_peer_stats->tx.last_ack_rssi;
		dp_peer_get_tx_rx_stats(peer, &peer_stats_intf);
		peer_stats_intf.per = tgt_peer->stats.tx.last_per;
		peer_stats_intf.free_buff = INVALID_FREE_BUFF;
		dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc,
				     (void *)&peer_stats_intf, 0,
				     WDI_NO_VAL, dp_pdev->pdev_id);
	}

	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_nac_filter() - Function to perform filtering of non-associated
 *			clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * Return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

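	/* Only frames sent towards the DS (ToDS) are candidates for NAC matching */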
	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
			dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x",
				    pdev->soc,
				    peer->neighbour_peers_macaddr.raw[0],
				    peer->neighbour_peers_macaddr.raw[1],
				    peer->neighbour_peers_macaddr.raw[2],
				    peer->neighbour_peers_macaddr.raw[3],
				    peer->neighbour_peers_macaddr.raw[4],
				    peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);

			return mon_pdev->mvdev;
		}
	}
	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);

	return NULL;
}

QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev,
				    uint8_t *rx_pkt_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->filter_neighbour_peers) {
		/* Next Hop scenario not yet handled */
		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
		if (vdev) {
			dp_rx_mon_deliver(pdev->soc, pdev->pdev_id,
					  pdev->invalid_peer_head_msdu,
					  pdev->invalid_peer_tail_msdu);

			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
			return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_update_mon_mac_filter() - Set/reset monitor mac filter
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of virtual device object
 * @cmd: Add/Del command
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_update_mon_mac_filter(struct cdp_soc_t *soc_hdl,
					   uint8_t vdev_id, uint32_t cmd)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_mon_pdev *mon_pdev;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	if (!vdev)
		return status;

	pdev = vdev->pdev;
	if (!pdev) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return status;
	}

	mon_pdev = pdev->monitor_pdev;
	if (cmd == DP_NAC_PARAM_ADD) {
		/* first neighbour added */
		dp_mon_filter_set_reset_mon_mac_filter(pdev, true);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			dp_cdp_err("%pK: Mon mac filter set failed", soc);
			dp_mon_filter_set_reset_mon_mac_filter(pdev, false);
		}
	} else if (cmd == DP_NAC_PARAM_DEL) {
		/* last neighbour deleted */
		dp_mon_filter_set_reset_mon_mac_filter(pdev, false);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS)
			dp_cdp_err("%pK: Mon mac filter reset failed", soc);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}

bool
dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl,
			 enum cdp_mon_reap_source source,
			 bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	if (enable)
		return dp_monitor_reap_timer_start(soc, source);
	else
		return dp_monitor_reap_timer_stop(soc, source);
}

#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *handle =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!handle) {
		dp_mon_err("pdev handle is NULL");
		return;
	}

	mon_pdev = handle->monitor_pdev;

	if (mon_pdev->pkt_log_init) {
		dp_mon_err("%pK: Packet log not initialized", soc);
		return;
	}

	pktlog_sethandle(&mon_pdev->pl_dev, scn);
	pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id);
	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		mon_pdev->pkt_log_init = false;
	} else {
		mon_pdev->pkt_log_init = true;
	}
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	dp_pkt_log_init(soc_hdl, pdev_id, scn);
	pktlog_htc_attach();
}

/**
 * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: none
 */
static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev handle is NULL");
		return;
	}

	dp_pktlogmod_exit(pdev);
}

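/* Stubs for builds with pktlog support compiled out (REMOVE_PKT_LOG) */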
#else
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
}

static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
}
#endif
#endif

void dp_neighbour_peers_detach(struct dp_pdev *pdev)
{
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	TAILQ_FOREACH_SAFE(peer, &mon_pdev->neighbour_peers_list,
			   neighbour_peer_list_elem, temp_peer) {
		/* delete this peer from the list */
		TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
			     peer, neighbour_peer_list_elem);
		qdf_mem_free(peer);
	}

	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
}

#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_mon_tx_enable_enhanced_stats() - Enable enhanced Tx stats
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */
static void dp_mon_tx_enable_enhanced_stats(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_tx_enable_enhanced_stats)
		mon_ops->mon_tx_enable_enhanced_stats(pdev);
}

/**
 * dp_enable_enhanced_stats() - API to enable enhanced statistics
 * @soc: DP_SOC handle
 * @pdev_id: id of DP_PDEV handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev = NULL;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;
	struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc);

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev)
		return QDF_STATUS_E_FAILURE;

	if (mon_pdev->enhanced_stats_en == 0)
		dp_cal_client_timer_start(mon_pdev->cal_client_ctx);

	mon_pdev->enhanced_stats_en = 1;
	pdev->enhanced_stats_en = 1;
	pdev->link_peer_stats = wlan_cfg_is_peer_link_stats_enabled(
						dp_soc->wlan_cfg_ctx);

	dp_mon_filter_setup_enhanced_stats(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
		dp_mon_filter_reset_enhanced_stats(pdev);
		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
		mon_pdev->enhanced_stats_en = 0;
		pdev->enhanced_stats_en = 0;
		pdev->link_peer_stats = 0;
		return QDF_STATUS_E_FAILURE;
	}

	dp_mon_tx_enable_enhanced_stats(pdev);

	/* reset the tx fast path flag, as enhanced stats are enabled */
	pdev->tx_fast_flag &= ~DP_TX_DESC_FLAG_SIMPLE;
	if (dp_soc->hw_txrx_stats_en)
		pdev->tx_fast_flag &= ~DP_TX_DESC_FLAG_FASTPATH_SIMPLE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_mon_tx_disable_enhanced_stats() - Disable enhanced Tx stats
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */
static void dp_mon_tx_disable_enhanced_stats(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_tx_disable_enhanced_stats)
		mon_ops->mon_tx_disable_enhanced_stats(pdev);
}

/**
 * dp_disable_enhanced_stats() - API to disable enhanced statistics
 *
 * @soc: the soc handle
 * @pdev_id: pdev_id of pdev
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev || !pdev->monitor_pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->enhanced_stats_en == 1)
		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);

	mon_pdev->enhanced_stats_en = 0;
	pdev->enhanced_stats_en = 0;
	pdev->link_peer_stats = 0;

	dp_mon_tx_disable_enhanced_stats(pdev);

	dp_mon_filter_reset_enhanced_stats(pdev);
	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to reset enhanced mode filters"));
	}

	/* set the tx fast path flag, as enhanced stats are disabled */
	pdev->tx_fast_flag |= DP_TX_DESC_FLAG_SIMPLE;
	if (dp_soc->hw_txrx_stats_en)
		pdev->tx_fast_flag |= DP_TX_DESC_FLAG_FASTPATH_SIMPLE;

	return QDF_STATUS_SUCCESS;
}

#ifdef WDI_EVENT_ENABLE
QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
				    struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	struct cdp_interface_peer_qos_stats qos_stats_intf = {0};

	if (qdf_unlikely(ppdu_user->peer_id == HTT_INVALID_PEER)) {
		dp_mon_warn("Invalid peer id: %u", ppdu_user->peer_id);
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
		     QDF_MAC_ADDR_SIZE);
	qos_stats_intf.frame_control = ppdu_user->frame_control;
	qos_stats_intf.frame_control_info_valid =
			ppdu_user->frame_control_info_valid;
	qos_stats_intf.qos_control = ppdu_user->qos_control;
	qos_stats_intf.qos_control_info_valid =
			ppdu_user->qos_control_info_valid;
	qos_stats_intf.vdev_id = ppdu_user->vdev_id;
	dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc,
			     (void *)&qos_stats_intf, 0,
			     WDI_NO_VAL, dp_pdev->pdev_id);

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
			 struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#endif /* QCA_ENHANCED_STATS_SUPPORT */

/**
 * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
 *				   for pktlog
 * @soc: cdp_soc handle
 * @pdev_id: id of dp pdev handle
 * @mac_addr: Peer mac address
 * @enb_dsb: Enable or disable peer based filtering
 *
 * Return: QDF_STATUS
 */
static int
dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
			    uint8_t *mac_addr, uint8_t enb_dsb)
{
	struct dp_peer *peer;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);

	if (!peer) {
		dp_mon_err("Peer is NULL");
		return QDF_STATUS_E_FAILURE;
	}

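	/*
	 * Peer-based pktlog is tracked on the monitor peer of link/legacy
	 * peers only; MLD peers are skipped here.
	 */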
if (!IS_MLO_DP_MLD_PEER(peer) && peer->monitor_peer) { 1970 peer->monitor_peer->peer_based_pktlog_filter = enb_dsb; 1971 mon_pdev->dp_peer_based_pktlog = enb_dsb; 1972 status = QDF_STATUS_SUCCESS; 1973 } 1974 1975 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 1976 1977 return status; 1978 } 1979 1980 /** 1981 * dp_peer_update_pkt_capture_params() - Set Rx & Tx Capture flags for a peer 1982 * @soc: DP_SOC handle 1983 * @pdev_id: id of DP_PDEV handle 1984 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode 1985 * @is_tx_pkt_cap_enable: enable/disable/delete/print 1986 * Tx packet capture in monitor mode 1987 * @peer_mac: MAC address for which the above need to be enabled/disabled 1988 * 1989 * Return: Success if Rx & Tx capture is enabled for peer, false otherwise 1990 */ 1991 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) 1992 static QDF_STATUS 1993 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc, 1994 uint8_t pdev_id, 1995 bool is_rx_pkt_cap_enable, 1996 uint8_t is_tx_pkt_cap_enable, 1997 uint8_t *peer_mac) 1998 { 1999 struct dp_peer *peer; 2000 QDF_STATUS status = QDF_STATUS_E_FAILURE; 2001 struct dp_pdev *pdev = 2002 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 2003 pdev_id); 2004 if (!pdev) 2005 return QDF_STATUS_E_FAILURE; 2006 2007 peer = dp_peer_find_hash_find((struct dp_soc *)soc, 2008 peer_mac, 0, DP_VDEV_ALL, 2009 DP_MOD_ID_CDP); 2010 if (!peer) 2011 return QDF_STATUS_E_FAILURE; 2012 2013 /* we need to set tx pkt capture for non associated peer */ 2014 if (!IS_MLO_DP_MLD_PEER(peer)) { 2015 status = dp_monitor_tx_peer_filter(pdev, peer, 2016 is_tx_pkt_cap_enable, 2017 peer_mac); 2018 2019 status = dp_peer_set_rx_capture_enabled(pdev, peer, 2020 is_rx_pkt_cap_enable, 2021 peer_mac); 2022 } 2023 2024 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 2025 2026 return status; 2027 } 2028 #endif 2029 2030 #ifdef QCA_MCOPY_SUPPORT 2031 QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev, 2032 uint16_t peer_id, 2033 uint32_t ppdu_id, 2034 uint8_t first_msdu) 2035 { 2036 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 2037 2038 if (mon_pdev->mcopy_mode) { 2039 if (mon_pdev->mcopy_mode == M_COPY) { 2040 if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) && 2041 (mon_pdev->m_copy_id.tx_peer_id == peer_id)) { 2042 return QDF_STATUS_E_INVAL; 2043 } 2044 } 2045 2046 if (!first_msdu) 2047 return QDF_STATUS_E_INVAL; 2048 2049 mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id; 2050 mon_pdev->m_copy_id.tx_peer_id = peer_id; 2051 } 2052 2053 return QDF_STATUS_SUCCESS; 2054 } 2055 #endif 2056 2057 #ifdef WDI_EVENT_ENABLE 2058 #ifndef REMOVE_PKT_LOG 2059 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 2060 { 2061 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2062 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2063 2064 if (!pdev || !pdev->monitor_pdev) 2065 return NULL; 2066 2067 return pdev->monitor_pdev->pl_dev; 2068 } 2069 #else 2070 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 2071 { 2072 return NULL; 2073 } 2074 #endif 2075 #endif 2076 2077 QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc, 2078 uint32_t mac_id, 2079 uint32_t event, 2080 qdf_nbuf_t mpdu, 2081 uint32_t msdu_timestamp) 2082 { 2083 uint32_t data_size, hdr_size, ppdu_id, align4byte; 2084 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2085 uint32_t *msg_word; 2086 2087 if (!pdev) 2088 return QDF_STATUS_E_INVAL; 2089 2090 ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id; 2091 2092 
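/* Reserve headroom for the PPDU stats indication header plus the
 * rx_mgmtctrl payload TLV header; the CBF frame already held in the
 * nbuf becomes the TLV payload.
 */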
hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE 2093 + qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload); 2094 2095 data_size = qdf_nbuf_len(mpdu); 2096 2097 qdf_nbuf_push_head(mpdu, hdr_size); 2098 2099 msg_word = (uint32_t *)qdf_nbuf_data(mpdu); 2100 /* 2101 * Populate the PPDU Stats Indication header 2102 */ 2103 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND); 2104 HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id); 2105 HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id); 2106 align4byte = ((data_size + 2107 qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload) 2108 + 3) >> 2) << 2; 2109 HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte); 2110 msg_word++; 2111 HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id); 2112 msg_word++; 2113 2114 *msg_word = msdu_timestamp; 2115 msg_word++; 2116 /* Skip reserved field */ 2117 msg_word++; 2118 /* 2119 * Populate MGMT_CTRL Payload TLV first 2120 */ 2121 HTT_STATS_TLV_TAG_SET(*msg_word, 2122 HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV); 2123 2124 align4byte = ((data_size - sizeof(htt_tlv_hdr_t) + 2125 qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload) 2126 + 3) >> 2) << 2; 2127 HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte); 2128 msg_word++; 2129 2130 HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET( 2131 *msg_word, data_size); 2132 msg_word++; 2133 2134 dp_wdi_event_handler(event, soc, (void *)mpdu, 2135 HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id); 2136 2137 qdf_nbuf_pull_head(mpdu, hdr_size); 2138 2139 return QDF_STATUS_SUCCESS; 2140 } 2141 2142 #ifdef ATH_SUPPORT_EXT_STAT 2143 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 2144 /** 2145 * dp_pdev_clear_link_airtime_stats() - clear airtime stats for given pdev 2146 * @pdev: DP PDEV handle 2147 */ 2148 static inline 2149 void dp_pdev_clear_link_airtime_stats(struct dp_pdev *pdev) 2150 { 2151 uint8_t ac; 2152 2153 for (ac = 0; ac < WME_AC_MAX; ac++) 2154 pdev->stats.telemetry_stats.link_airtime[ac] = 0; 2155 } 2156 2157 /** 2158 * dp_peer_update_telemetry_stats() - update peer telemetry stats 2159 * @soc: Datapath soc 2160 * @peer: Datapath peer 2161 * @arg: argument to callback function 2162 */ 2163 static inline 2164 void dp_peer_update_telemetry_stats(struct dp_soc *soc, 2165 struct dp_peer *peer, 2166 void *arg) 2167 { 2168 struct dp_pdev *pdev; 2169 struct dp_vdev *vdev; 2170 struct dp_mon_peer *mon_peer = NULL; 2171 uint8_t ac; 2172 uint64_t current_time = qdf_get_log_timestamp(); 2173 2174 vdev = peer->vdev; 2175 if (!vdev) 2176 return; 2177 2178 pdev = vdev->pdev; 2179 if (!pdev) 2180 return; 2181 2182 mon_peer = peer->monitor_peer; 2183 if (qdf_likely(mon_peer)) { 2184 for (ac = 0; ac < WME_AC_MAX; ac++) { 2185 mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].avg_consumption_per_sec = 2186 (uint8_t)qdf_do_div((uint64_t)(mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].consumption * 100), 2187 (uint32_t)(current_time - mon_peer->stats.airtime_stats.last_update_time)); 2188 mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].avg_consumption_per_sec = 2189 (uint8_t)qdf_do_div((uint64_t)(mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].consumption * 100), 2190 (uint32_t)(current_time - mon_peer->stats.airtime_stats.last_update_time)); 2191 /* Store each peer airtime consumption in pdev 2192 * link_airtime to calculate pdev's total airtime 2193 * consumption 2194 */ 2195 DP_STATS_INC( 2196 pdev, 2197 telemetry_stats.link_airtime[ac], 2198 mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].consumption); 2199 DP_STATS_INC( 
2200 pdev, 2201 telemetry_stats.link_airtime[ac], 2202 mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].consumption); 2203 mon_peer->stats.airtime_stats.tx_airtime_consumption[ac].consumption = 0; 2204 mon_peer->stats.airtime_stats.rx_airtime_consumption[ac].consumption = 0; 2205 } 2206 mon_peer->stats.airtime_stats.last_update_time = current_time; 2207 } 2208 } 2209 2210 QDF_STATUS dp_pdev_update_telemetry_airtime_stats(struct cdp_soc_t *soc, 2211 uint8_t pdev_id) 2212 { 2213 struct dp_pdev *pdev = 2214 dp_get_pdev_from_soc_pdev_id_wifi3(cdp_soc_t_to_dp_soc(soc), 2215 pdev_id); 2216 if (!pdev) 2217 return QDF_STATUS_E_FAILURE; 2218 2219 /* Clear current airtime stats as the below API will increment the stats 2220 * for all peers on top of current value 2221 */ 2222 dp_pdev_clear_link_airtime_stats(pdev); 2223 dp_pdev_iterate_peer(pdev, dp_peer_update_telemetry_stats, NULL, 2224 DP_MOD_ID_CDP); 2225 2226 return QDF_STATUS_SUCCESS; 2227 } 2228 #endif 2229 2230 /** 2231 * dp_peer_cal_clients_stats_update() - update peer stats on cal client timer 2232 * @soc: Datapath SOC 2233 * @peer: Datapath peer 2234 * @arg: argument to iter function 2235 */ 2236 #ifdef IPA_OFFLOAD 2237 static void 2238 dp_peer_cal_clients_stats_update(struct dp_soc *soc, 2239 struct dp_peer *peer, 2240 void *arg) 2241 { 2242 struct cdp_calibr_stats_intf peer_stats_intf = {0}; 2243 struct dp_peer *tgt_peer = NULL; 2244 struct dp_txrx_peer *txrx_peer = NULL; 2245 2246 if (!dp_peer_is_primary_link_peer(peer)) 2247 return; 2248 2249 tgt_peer = dp_get_tgt_peer_from_peer(peer); 2250 if (!tgt_peer || !(tgt_peer->txrx_peer)) 2251 return; 2252 2253 txrx_peer = tgt_peer->txrx_peer; 2254 peer_stats_intf.to_stack = txrx_peer->to_stack; 2255 peer_stats_intf.tx_success = 2256 peer->monitor_peer->stats.tx.tx_ucast_success; 2257 peer_stats_intf.tx_ucast = 2258 peer->monitor_peer->stats.tx.tx_ucast_total; 2259 2260 dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf, 2261 &tgt_peer->stats); 2262 dp_peer_get_rxtid_stats_ipa(peer, dp_peer_update_tid_stats_from_reo); 2263 } 2264 #else 2265 static void 2266 dp_peer_cal_clients_stats_update(struct dp_soc *soc, 2267 struct dp_peer *peer, 2268 void *arg) 2269 { 2270 struct cdp_calibr_stats_intf peer_stats_intf = {0}; 2271 struct dp_peer *tgt_peer = NULL; 2272 struct dp_txrx_peer *txrx_peer = NULL; 2273 uint8_t inx = 0; 2274 uint8_t stats_arr_size; 2275 2276 if (!dp_peer_is_primary_link_peer(peer)) 2277 return; 2278 2279 tgt_peer = dp_get_tgt_peer_from_peer(peer); 2280 if (!tgt_peer || !(tgt_peer->txrx_peer)) 2281 return; 2282 2283 txrx_peer = tgt_peer->txrx_peer; 2284 peer_stats_intf.to_stack = txrx_peer->to_stack; 2285 stats_arr_size = txrx_peer->stats_arr_size; 2286 2287 for (inx = 0; inx < stats_arr_size; inx++) { 2288 peer_stats_intf.tx_success.num += 2289 txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.num; 2290 peer_stats_intf.tx_success.bytes += 2291 txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.bytes; 2292 peer_stats_intf.tx_ucast.num += 2293 txrx_peer->stats[inx].per_pkt_stats.tx.ucast.num; 2294 peer_stats_intf.tx_ucast.bytes += 2295 txrx_peer->stats[inx].per_pkt_stats.tx.ucast.bytes; 2296 } 2297 2298 dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf, 2299 &tgt_peer->stats); 2300 } 2301 #endif 2302 2303 /** 2304 * dp_iterate_update_peer_list() - update peer stats on cal client timer 2305 * @pdev_hdl: pdev handle 2306 */ 2307 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl) 2308 { 2309 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl; 2310 2311 
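/* Walk every peer on this pdev and refresh its calibrated client
 * stats on each cal client timer expiry.
 */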
dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL, 2312 DP_MOD_ID_CDP); 2313 } 2314 #else 2315 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl) 2316 { 2317 } 2318 #endif 2319 2320 #ifdef ATH_SUPPORT_NAC 2321 int dp_set_filter_neigh_peers(struct dp_pdev *pdev, 2322 bool val) 2323 { 2324 /* Enable/Disable smart mesh filtering. This flag will be checked 2325 * during rx processing to check if packets are from NAC clients. 2326 */ 2327 pdev->monitor_pdev->filter_neighbour_peers = val; 2328 return 0; 2329 } 2330 #endif /* ATH_SUPPORT_NAC */ 2331 2332 #ifdef WLAN_ATF_ENABLE 2333 void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value) 2334 { 2335 if (!pdev) { 2336 dp_cdp_err("pdev is NULL"); 2337 return; 2338 } 2339 2340 pdev->monitor_pdev->dp_atf_stats_enable = value; 2341 } 2342 #endif 2343 2344 #ifdef QCA_ENHANCED_STATS_SUPPORT 2345 /** 2346 * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv() - Process 2347 * htt_ppdu_stats_tx_mgmtctrl_payload_tlv 2348 * @pdev: DP PDEV handle 2349 * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv 2350 * @ppdu_id: PPDU Id 2351 * 2352 * Return: QDF_STATUS_SUCCESS if nbuf has to be freed in caller 2353 */ 2354 static QDF_STATUS 2355 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev, 2356 qdf_nbuf_t tag_buf, 2357 uint32_t ppdu_id) 2358 { 2359 uint32_t *nbuf_ptr; 2360 uint8_t trim_size; 2361 size_t head_size; 2362 struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info; 2363 uint32_t *msg_word; 2364 uint32_t tsf_hdr; 2365 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 2366 2367 if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) && 2368 (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled)) 2369 return QDF_STATUS_SUCCESS; 2370 2371 /* 2372 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t 2373 */ 2374 msg_word = (uint32_t *)qdf_nbuf_data(tag_buf); 2375 msg_word = msg_word + 2; 2376 tsf_hdr = *msg_word; 2377 2378 trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf + 2379 HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) - 2380 qdf_nbuf_data(tag_buf)); 2381 2382 if (!qdf_nbuf_pull_head(tag_buf, trim_size)) 2383 return QDF_STATUS_SUCCESS; 2384 2385 qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) - 2386 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len); 2387 2388 if (mon_pdev->tx_capture_enabled) { 2389 head_size = sizeof(struct cdp_tx_mgmt_comp_info); 2390 if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) { 2391 qdf_err("Fail to get headroom h_sz %zu h_avail %d\n", 2392 head_size, qdf_nbuf_headroom(tag_buf)); 2393 qdf_assert_always(0); 2394 return QDF_STATUS_E_NOMEM; 2395 } 2396 ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *) 2397 qdf_nbuf_push_head(tag_buf, head_size); 2398 qdf_assert_always(ptr_mgmt_comp_info); 2399 ptr_mgmt_comp_info->ppdu_id = ppdu_id; 2400 ptr_mgmt_comp_info->is_sgen_pkt = true; 2401 ptr_mgmt_comp_info->tx_tsf = tsf_hdr; 2402 } else { 2403 head_size = sizeof(ppdu_id); 2404 nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size); 2405 *nbuf_ptr = ppdu_id; 2406 } 2407 if (mon_pdev->bpr_enable) { 2408 dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc, 2409 tag_buf, HTT_INVALID_PEER, 2410 WDI_NO_VAL, pdev->pdev_id); 2411 } 2412 2413 dp_deliver_mgmt_frm(pdev, tag_buf); 2414 2415 return QDF_STATUS_E_ALREADY; 2416 } 2417 2418 int 2419 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap) 2420 { 2421 if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64)) 2422 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64; 2423 else if (bitmap == 
(HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256)) 2424 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256; 2425 2426 return 0; 2427 } 2428 2429 /** 2430 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats. 2431 * @peer: Datapath peer handle 2432 * @ppdu: User PPDU Descriptor 2433 * @cur_ppdu_id: PPDU_ID 2434 * 2435 * Return: None 2436 * 2437 * on Tx data frame, we may get delayed ba set 2438 * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we 2439 * request Block Ack Request(BAR). Successful msdu is received only after Block 2440 * Ack. To populate peer stats we need successful msdu(data frame). 2441 * So we hold the Tx data stats on delayed_ba for stats update. 2442 */ 2443 static void 2444 dp_peer_copy_delay_stats(struct dp_peer *peer, 2445 struct cdp_tx_completion_ppdu_user *ppdu, 2446 uint32_t cur_ppdu_id) 2447 { 2448 struct dp_pdev *pdev; 2449 struct dp_vdev *vdev; 2450 struct dp_mon_peer *mon_peer = peer->monitor_peer; 2451 2452 if (mon_peer->last_delayed_ba) { 2453 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2454 "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]", 2455 mon_peer->last_delayed_ba_ppduid, cur_ppdu_id); 2456 vdev = peer->vdev; 2457 if (vdev) { 2458 pdev = vdev->pdev; 2459 pdev->stats.cdp_delayed_ba_not_recev++; 2460 } 2461 } 2462 2463 mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size; 2464 mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc; 2465 mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re; 2466 mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf; 2467 mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw; 2468 mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss; 2469 mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi; 2470 mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; 2471 mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc; 2472 mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; 2473 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = 2474 ppdu->mpdu_tried_ucast; 2475 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = 2476 ppdu->mpdu_tried_mcast; 2477 mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl; 2478 mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl; 2479 mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; 2480 2481 mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start; 2482 mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones; 2483 mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast; 2484 2485 mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos; 2486 mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id; 2487 2488 mon_peer->last_delayed_ba = true; 2489 2490 ppdu->debug_copied = true; 2491 } 2492 2493 /** 2494 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats. 2495 * @peer: Datapath peer handle 2496 * @ppdu: PPDU Descriptor 2497 * 2498 * Return: None 2499 * 2500 * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info 2501 * from Tx BAR frame not required to populate peer stats. 2502 * But we need successful MPDU and MSDU to update previous 2503 * transmitted Tx data frame. Overwrite ppdu stats with the previous 2504 * stored ppdu stats. 
2505 */ 2506 static void 2507 dp_peer_copy_stats_to_bar(struct dp_peer *peer, 2508 struct cdp_tx_completion_ppdu_user *ppdu) 2509 { 2510 struct dp_mon_peer *mon_peer = peer->monitor_peer; 2511 2512 ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size; 2513 ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc; 2514 ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re; 2515 ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf; 2516 ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw; 2517 ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss; 2518 ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi; 2519 ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm; 2520 ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc; 2521 ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm; 2522 ppdu->mpdu_tried_ucast = 2523 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast; 2524 ppdu->mpdu_tried_mcast = 2525 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast; 2526 ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl; 2527 ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl; 2528 ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm; 2529 2530 ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start; 2531 ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones; 2532 ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast; 2533 2534 ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos; 2535 ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id; 2536 2537 mon_peer->last_delayed_ba = false; 2538 2539 ppdu->debug_copied = true; 2540 } 2541 2542 /** 2543 * dp_tx_rate_stats_update() - Update rate per-peer statistics 2544 * @peer: Datapath peer handle 2545 * @ppdu: PPDU Descriptor 2546 * 2547 * Return: None 2548 */ 2549 static void 2550 dp_tx_rate_stats_update(struct dp_peer *peer, 2551 struct cdp_tx_completion_ppdu_user *ppdu) 2552 { 2553 uint32_t ratekbps = 0; 2554 uint64_t ppdu_tx_rate = 0; 2555 uint32_t rix; 2556 uint16_t ratecode = 0; 2557 struct dp_mon_peer *mon_peer = NULL; 2558 2559 if (!peer || !ppdu) 2560 return; 2561 2562 if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) 2563 return; 2564 2565 mon_peer = peer->monitor_peer; 2566 if (!mon_peer) 2567 return; 2568 2569 ratekbps = dp_getrateindex(ppdu->gi, 2570 ppdu->mcs, 2571 ppdu->nss, 2572 ppdu->preamble, 2573 ppdu->bw, 2574 ppdu->punc_mode, 2575 &rix, 2576 &ratecode); 2577 2578 if (!ratekbps) 2579 return; 2580 2581 /* Calculate goodput in non-training period 2582 * In training period, don't do anything as 2583 * pending pkt is send as goodput. 2584 */ 2585 if ((!peer->bss_peer) && (!ppdu->sa_is_training)) { 2586 ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) * 2587 (CDP_PERCENT_MACRO - ppdu->current_rate_per)); 2588 } 2589 ppdu->rix = rix; 2590 ppdu->tx_ratekbps = ratekbps; 2591 ppdu->tx_ratecode = ratecode; 2592 DP_STATS_UPD(mon_peer, tx.tx_rate, ratekbps); 2593 mon_peer->stats.tx.avg_tx_rate = 2594 dp_ath_rate_lpf(mon_peer->stats.tx.avg_tx_rate, ratekbps); 2595 ppdu_tx_rate = dp_ath_rate_out(mon_peer->stats.tx.avg_tx_rate); 2596 DP_STATS_UPD(mon_peer, tx.rnd_avg_tx_rate, ppdu_tx_rate); 2597 2598 mon_peer->stats.tx.bw_info = ppdu->bw; 2599 mon_peer->stats.tx.gi_info = ppdu->gi; 2600 mon_peer->stats.tx.nss_info = ppdu->nss; 2601 mon_peer->stats.tx.mcs_info = ppdu->mcs; 2602 mon_peer->stats.tx.preamble_info = ppdu->preamble; 2603 if (peer->vdev) { 2604 /* 2605 * In STA mode: 2606 * We get ucast stats as BSS peer stats. 2607 * 2608 * In AP mode: 2609 * We get mcast stats as BSS peer stats. 2610 * We get ucast stats as assoc peer stats. 
2611 */ 2612 if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) { 2613 peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps; 2614 peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs; 2615 } else { 2616 peer->vdev->stats.tx.last_tx_rate = ratekbps; 2617 peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs; 2618 } 2619 } 2620 } 2621 2622 #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE) 2623 void dp_send_stats_event(struct dp_pdev *pdev, struct dp_peer *peer, 2624 uint16_t peer_id) 2625 { 2626 struct cdp_interface_peer_stats peer_stats_intf = {0}; 2627 struct dp_mon_peer *mon_peer = peer->monitor_peer; 2628 struct dp_txrx_peer *txrx_peer = NULL; 2629 uint8_t inx = 0; 2630 uint8_t stats_arr_size; 2631 2632 if (qdf_unlikely(!mon_peer)) 2633 return; 2634 2635 mon_peer->stats.rx.rx_snr_measured_time = qdf_system_ticks(); 2636 peer_stats_intf.rx_avg_snr = mon_peer->stats.rx.avg_snr; 2637 2638 txrx_peer = dp_get_txrx_peer(peer); 2639 if (qdf_likely(txrx_peer)) { 2640 stats_arr_size = txrx_peer->stats_arr_size; 2641 peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes; 2642 for (inx = 0; inx < stats_arr_size; inx++) 2643 peer_stats_intf.tx_byte_count += 2644 txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.bytes; 2645 } 2646 2647 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, 2648 &peer_stats_intf, peer_id, 2649 UPDATE_PEER_STATS, pdev->pdev_id); 2650 } 2651 #endif 2652 2653 #ifdef WLAN_FEATURE_11BE 2654 /** 2655 * dp_get_ru_index_frm_ru_tones() - get ru index 2656 * @ru_tones: ru tones 2657 * 2658 * Return: ru index 2659 */ 2660 static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones) 2661 { 2662 enum cdp_ru_index ru_index; 2663 2664 switch (ru_tones) { 2665 case RU_26: 2666 ru_index = RU_26_INDEX; 2667 break; 2668 case RU_52: 2669 ru_index = RU_52_INDEX; 2670 break; 2671 case RU_52_26: 2672 ru_index = RU_52_26_INDEX; 2673 break; 2674 case RU_106: 2675 ru_index = RU_106_INDEX; 2676 break; 2677 case RU_106_26: 2678 ru_index = RU_106_26_INDEX; 2679 break; 2680 case RU_242: 2681 ru_index = RU_242_INDEX; 2682 break; 2683 case RU_484: 2684 ru_index = RU_484_INDEX; 2685 break; 2686 case RU_484_242: 2687 ru_index = RU_484_242_INDEX; 2688 break; 2689 case RU_996: 2690 ru_index = RU_996_INDEX; 2691 break; 2692 case RU_996_484: 2693 ru_index = RU_996_484_INDEX; 2694 break; 2695 case RU_996_484_242: 2696 ru_index = RU_996_484_242_INDEX; 2697 break; 2698 case RU_2X996: 2699 ru_index = RU_2X996_INDEX; 2700 break; 2701 case RU_2X996_484: 2702 ru_index = RU_2X996_484_INDEX; 2703 break; 2704 case RU_3X996: 2705 ru_index = RU_3X996_INDEX; 2706 break; 2707 case RU_3X996_484: 2708 ru_index = RU_2X996_484_INDEX; 2709 break; 2710 case RU_4X996: 2711 ru_index = RU_4X996_INDEX; 2712 break; 2713 default: 2714 ru_index = RU_INDEX_MAX; 2715 break; 2716 } 2717 2718 return ru_index; 2719 } 2720 2721 /** 2722 * dp_mon_get_ru_width_from_ru_size() - get ru_width from ru_size enum 2723 * @ru_size: HTT ru_size enum 2724 * 2725 * Return: ru_width of uint32_t type 2726 */ 2727 static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size) 2728 { 2729 uint32_t width = 0; 2730 2731 switch (ru_size) { 2732 case HTT_PPDU_STATS_RU_26: 2733 width = RU_26; 2734 break; 2735 case HTT_PPDU_STATS_RU_52: 2736 width = RU_52; 2737 break; 2738 case HTT_PPDU_STATS_RU_52_26: 2739 width = RU_52_26; 2740 break; 2741 case HTT_PPDU_STATS_RU_106: 2742 width = RU_106; 2743 break; 2744 case HTT_PPDU_STATS_RU_106_26: 2745 width = RU_106_26; 2746 break; 2747 case HTT_PPDU_STATS_RU_242: 2748 
width = RU_242; 2749 break; 2750 case HTT_PPDU_STATS_RU_484: 2751 width = RU_484; 2752 break; 2753 case HTT_PPDU_STATS_RU_484_242: 2754 width = RU_484_242; 2755 break; 2756 case HTT_PPDU_STATS_RU_996: 2757 width = RU_996; 2758 break; 2759 case HTT_PPDU_STATS_RU_996_484: 2760 width = RU_996_484; 2761 break; 2762 case HTT_PPDU_STATS_RU_996_484_242: 2763 width = RU_996_484_242; 2764 break; 2765 case HTT_PPDU_STATS_RU_996x2: 2766 width = RU_2X996; 2767 break; 2768 case HTT_PPDU_STATS_RU_996x2_484: 2769 width = RU_2X996_484; 2770 break; 2771 case HTT_PPDU_STATS_RU_996x3: 2772 width = RU_3X996; 2773 break; 2774 case HTT_PPDU_STATS_RU_996x3_484: 2775 width = RU_3X996_484; 2776 break; 2777 case HTT_PPDU_STATS_RU_996x4: 2778 width = RU_4X996; 2779 break; 2780 default: 2781 dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size); 2782 } 2783 2784 return width; 2785 } 2786 #else 2787 static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones) 2788 { 2789 enum cdp_ru_index ru_index; 2790 2791 switch (ru_tones) { 2792 case RU_26: 2793 ru_index = RU_26_INDEX; 2794 break; 2795 case RU_52: 2796 ru_index = RU_52_INDEX; 2797 break; 2798 case RU_106: 2799 ru_index = RU_106_INDEX; 2800 break; 2801 case RU_242: 2802 ru_index = RU_242_INDEX; 2803 break; 2804 case RU_484: 2805 ru_index = RU_484_INDEX; 2806 break; 2807 case RU_996: 2808 ru_index = RU_996_INDEX; 2809 break; 2810 default: 2811 ru_index = RU_INDEX_MAX; 2812 break; 2813 } 2814 2815 return ru_index; 2816 } 2817 2818 static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size) 2819 { 2820 uint32_t width = 0; 2821 2822 switch (ru_size) { 2823 case HTT_PPDU_STATS_RU_26: 2824 width = RU_26; 2825 break; 2826 case HTT_PPDU_STATS_RU_52: 2827 width = RU_52; 2828 break; 2829 case HTT_PPDU_STATS_RU_106: 2830 width = RU_106; 2831 break; 2832 case HTT_PPDU_STATS_RU_242: 2833 width = RU_242; 2834 break; 2835 case HTT_PPDU_STATS_RU_484: 2836 width = RU_484; 2837 break; 2838 case HTT_PPDU_STATS_RU_996: 2839 width = RU_996; 2840 break; 2841 default: 2842 dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size); 2843 } 2844 2845 return width; 2846 } 2847 #endif 2848 2849 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 2850 /** 2851 * dp_pdev_telemetry_stats_update() - Update pdev telemetry stats 2852 * @pdev: Datapath pdev handle 2853 * @ppdu: PPDU Descriptor 2854 * 2855 * Return: None 2856 */ 2857 static void 2858 dp_pdev_telemetry_stats_update( 2859 struct dp_pdev *pdev, 2860 struct cdp_tx_completion_ppdu_user *ppdu) 2861 { 2862 uint16_t mpdu_tried; 2863 uint16_t mpdu_failed; 2864 uint16_t num_mpdu; 2865 uint8_t ac = 0; 2866 2867 num_mpdu = ppdu->mpdu_success; 2868 mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast; 2869 mpdu_failed = mpdu_tried - num_mpdu; 2870 2871 ac = TID_TO_WME_AC(ppdu->tid); 2872 2873 DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_failed[ac], 2874 mpdu_failed); 2875 2876 DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_total[ac], 2877 mpdu_tried); 2878 } 2879 2880 /* 2881 * dp_ppdu_desc_get_txmode() - Get TX mode 2882 * @ppdu: PPDU Descriptor 2883 * 2884 * Return: None 2885 */ 2886 static inline 2887 void dp_ppdu_desc_get_txmode(struct cdp_tx_completion_ppdu *ppdu) 2888 { 2889 uint16_t frame_type = ppdu->htt_frame_type; 2890 2891 ppdu->txmode_type = TX_MODE_TYPE_UNKNOWN; 2892 2893 if (ppdu->frame_type == CDP_PPDU_FTYPE_CTRL && 2894 (frame_type != HTT_STATS_FTYPE_SGEN_MU_TRIG && 2895 frame_type != HTT_STATS_FTYPE_SGEN_BE_MU_TRIG)) 2896 return; 2897 2898 if (frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR || 2899 frame_type == 
HTT_STATS_FTYPE_SGEN_BE_MU_BAR) { 2900 ppdu->txmode = TX_MODE_UL_OFDMA_MU_BAR_TRIGGER; 2901 ppdu->txmode_type = TX_MODE_TYPE_UL; 2902 2903 return; 2904 } 2905 2906 switch (ppdu->htt_seq_type) { 2907 case HTT_SEQTYPE_SU: 2908 if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) { 2909 ppdu->txmode = TX_MODE_DL_SU_DATA; 2910 ppdu->txmode_type = TX_MODE_TYPE_DL; 2911 } 2912 break; 2913 case HTT_SEQTYPE_MU_OFDMA: 2914 case HTT_SEQTYPE_BE_MU_OFDMA: 2915 if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU) { 2916 ppdu->txmode = TX_MODE_DL_OFDMA_DATA; 2917 ppdu->txmode_type = TX_MODE_TYPE_DL; 2918 } 2919 break; 2920 case HTT_SEQTYPE_AC_MU_MIMO: 2921 case HTT_SEQTYPE_AX_MU_MIMO: 2922 case HTT_SEQTYPE_BE_MU_MIMO: 2923 if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU) { 2924 ppdu->txmode = TX_MODE_DL_MUMIMO_DATA; 2925 ppdu->txmode_type = TX_MODE_TYPE_DL; 2926 } 2927 break; 2928 case HTT_SEQTYPE_UL_MU_OFDMA_TRIG: 2929 case HTT_SEQTYPE_BE_UL_MU_OFDMA_TRIG: 2930 if (frame_type == HTT_STATS_FTYPE_SGEN_MU_TRIG || 2931 frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_TRIG) { 2932 ppdu->txmode = TX_MODE_UL_OFDMA_BASIC_TRIGGER_DATA; 2933 ppdu->txmode_type = TX_MODE_TYPE_UL; 2934 } 2935 break; 2936 case HTT_SEQTYPE_UL_MU_MIMO_TRIG: 2937 case HTT_SEQTYPE_BE_UL_MU_MIMO_TRIG: 2938 if (frame_type == HTT_STATS_FTYPE_SGEN_MU_TRIG || 2939 frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_TRIG) { 2940 ppdu->txmode = TX_MODE_UL_MUMIMO_BASIC_TRIGGER_DATA; 2941 ppdu->txmode_type = TX_MODE_TYPE_UL; 2942 } 2943 break; 2944 default: 2945 ppdu->txmode_type = TX_MODE_TYPE_UNKNOWN; 2946 break; 2947 } 2948 } 2949 2950 /* 2951 * dp_pdev_update_deter_stats() - Update pdev deterministic stats 2952 * @pdev: Datapath pdev handle 2953 * @ppdu: PPDU Descriptor 2954 * 2955 * Return: None 2956 */ 2957 static inline void 2958 dp_pdev_update_deter_stats(struct dp_pdev *pdev, 2959 struct cdp_tx_completion_ppdu *ppdu) 2960 { 2961 uint32_t user_idx; 2962 2963 if (!pdev || !ppdu) 2964 return; 2965 2966 if (ppdu->txmode_type == TX_MODE_TYPE_UNKNOWN) 2967 return; 2968 2969 if (ppdu->backoff_ac_valid) { 2970 if (ppdu->backoff_ac >= WME_AC_MAX) { 2971 dp_mon_err("backoff_ac %d exceed max limit", 2972 ppdu->backoff_ac); 2973 return; 2974 } 2975 DP_STATS_UPD(pdev, 2976 deter_stats.ch_access_delay[ppdu->backoff_ac], 2977 ppdu->ch_access_delay); 2978 } 2979 2980 if (ppdu->txmode_type == TX_MODE_TYPE_DL) { 2981 DP_STATS_INC(pdev, 2982 deter_stats.dl_mode_cnt[ppdu->txmode], 2983 1); 2984 if (!ppdu->num_users) { 2985 dp_mon_err("dl users is %d", ppdu->num_users); 2986 return; 2987 } 2988 user_idx = ppdu->num_users - 1; 2989 switch (ppdu->txmode) { 2990 case TX_MODE_DL_OFDMA_DATA: 2991 DP_STATS_INC(pdev, 2992 deter_stats.dl_ofdma_usr[user_idx], 2993 1); 2994 break; 2995 case TX_MODE_DL_MUMIMO_DATA: 2996 if (user_idx >= CDP_MU_MAX_MIMO_USERS) { 2997 dp_mon_err("dl mimo users %d exceed max limit", 2998 ppdu->num_users); 2999 return; 3000 } 3001 DP_STATS_INC(pdev, 3002 deter_stats.dl_mimo_usr[user_idx], 3003 1); 3004 break; 3005 } 3006 } else { 3007 DP_STATS_INC(pdev, 3008 deter_stats.ul_mode_cnt[ppdu->txmode], 3009 1); 3010 3011 if (!ppdu->num_ul_users) { 3012 dp_mon_err("dl users is %d", ppdu->num_ul_users); 3013 return; 3014 } 3015 user_idx = ppdu->num_ul_users - 1; 3016 switch (ppdu->txmode) { 3017 case TX_MODE_UL_OFDMA_BASIC_TRIGGER_DATA: 3018 DP_STATS_INC(pdev, 3019 deter_stats.ul_ofdma_usr[user_idx], 3020 1); 3021 break; 3022 case TX_MODE_UL_MUMIMO_BASIC_TRIGGER_DATA: 3023 if (user_idx >= CDP_MU_MAX_MIMO_USERS) { 3024 dp_mon_err("ul mimo users %d exceed max limit", 3025 
ppdu->num_ul_users); 3026 return; 3027 } 3028 DP_STATS_INC(pdev, 3029 deter_stats.ul_mimo_usr[user_idx], 3030 1); 3031 break; 3032 } 3033 if (ppdu->num_ul_user_resp_valid) { 3034 if (ppdu->num_ul_user_resp) { 3035 DP_STATS_INC(pdev, 3036 deter_stats.ts[ppdu->txmode].trigger_success, 3037 1); 3038 } else { 3039 DP_STATS_INC(pdev, 3040 deter_stats.ts[ppdu->txmode].trigger_fail, 3041 1); 3042 } 3043 } 3044 } 3045 } 3046 3047 /* 3048 * dp_ppdu_desc_get_msduq() - Get msduq index from bitmap 3049 * @ppdu: PPDU Descriptor 3050 * @msduq_index: MSDUQ index 3051 * 3052 * Return: None 3053 */ 3054 static inline void 3055 dp_ppdu_desc_get_msduq(uint32_t msduq_bitmap, uint32_t *msduq_index) 3056 { 3057 if ((msduq_bitmap & BIT(HTT_MSDUQ_INDEX_NON_UDP)) || 3058 (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_UDP))) { 3059 *msduq_index = MSDUQ_INDEX_DEFAULT; 3060 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_PRIO_0)) { 3061 *msduq_index = MSDUQ_INDEX_CUSTOM_PRIO_0; 3062 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_PRIO_1)) { 3063 *msduq_index = MSDUQ_INDEX_CUSTOM_PRIO_1; 3064 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_0)) { 3065 *msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_0; 3066 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_1)) { 3067 *msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_1; 3068 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_2)) { 3069 *msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_2; 3070 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_3)) { 3071 *msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_3; 3072 } else { 3073 *msduq_index = MSDUQ_INDEX_MAX; 3074 } 3075 } 3076 3077 /* 3078 * dp_ppdu_desc_user_deter_stats_update() - Update per-peer deterministic stats 3079 * @pdev: Datapath pdev handle 3080 * @peer: Datapath peer handle 3081 * @ppdu_desc: PPDU Descriptor 3082 * @user: PPDU Descriptor per user 3083 * 3084 * Return: None 3085 */ 3086 static void 3087 dp_ppdu_desc_user_deter_stats_update(struct dp_pdev *pdev, 3088 struct dp_peer *peer, 3089 struct cdp_tx_completion_ppdu *ppdu_desc, 3090 struct cdp_tx_completion_ppdu_user *user) 3091 { 3092 struct dp_mon_peer *mon_peer = NULL; 3093 uint64_t avg_tx_rate = 0; 3094 uint32_t ratekbps = 0; 3095 uint32_t rix; 3096 uint32_t msduq; 3097 uint16_t ratecode = 0; 3098 uint8_t txmode; 3099 uint8_t tid; 3100 3101 if (!pdev || !ppdu_desc || !user || !peer) 3102 return; 3103 3104 mon_peer = peer->monitor_peer; 3105 if (qdf_unlikely(!mon_peer)) 3106 return; 3107 3108 if (ppdu_desc->txmode_type == TX_MODE_TYPE_UNKNOWN) 3109 return; 3110 3111 if (ppdu_desc->txmode_type == TX_MODE_TYPE_UL && 3112 (ppdu_desc->txmode != TX_MODE_UL_OFDMA_MU_BAR_TRIGGER)) { 3113 if (user->tid < CDP_UL_TRIG_BK_TID || 3114 user->tid > CDP_UL_TRIG_VO_TID) 3115 return; 3116 3117 user->tid = UL_TRIGGER_TID_TO_DATA_TID(user->tid); 3118 } 3119 3120 if (user->tid >= CDP_DATA_TID_MAX) 3121 return; 3122 3123 ratekbps = dp_getrateindex(user->gi, 3124 user->mcs, 3125 user->nss, 3126 user->preamble, 3127 user->bw, 3128 user->punc_mode, 3129 &rix, 3130 &ratecode); 3131 3132 if (!ratekbps) 3133 return; 3134 3135 avg_tx_rate = mon_peer->stats.deter_stats.avg_tx_rate; 3136 avg_tx_rate = dp_ath_rate_lpf(avg_tx_rate, 3137 ratekbps); 3138 DP_STATS_UPD(mon_peer, 3139 deter_stats.avg_tx_rate, 3140 avg_tx_rate); 3141 3142 txmode = ppdu_desc->txmode; 3143 tid = user->tid; 3144 3145 if (ppdu_desc->txmode_type == TX_MODE_TYPE_DL) { 3146 dp_ppdu_desc_get_msduq(user->msduq_bitmap, &msduq); 3147 if (msduq == MSDUQ_INDEX_MAX) 3148 return; 3149 3150 
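/* DL case: account this PPDU in the per-TID, per-MSDU-queue,
 * per-txmode deterministic counters along with the average rate.
 */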
DP_STATS_INC(mon_peer, 3151 deter_stats.deter[tid].dl_det[msduq][txmode].mode_cnt, 3152 1); 3153 3154 DP_STATS_UPD(mon_peer, 3155 deter_stats.deter[tid].dl_det[msduq][txmode].avg_rate, 3156 avg_tx_rate); 3157 } else { 3158 DP_STATS_INC(mon_peer, 3159 deter_stats.deter[tid].ul_det[txmode].mode_cnt, 3160 1); 3161 3162 DP_STATS_UPD(mon_peer, 3163 deter_stats.deter[tid].ul_det[txmode].avg_rate, 3164 avg_tx_rate); 3165 if (!user->completion_status) { 3166 DP_STATS_INC(mon_peer, 3167 deter_stats.deter[tid].ul_det[txmode].trigger_success, 3168 1); 3169 } else { 3170 DP_STATS_INC(mon_peer, 3171 deter_stats.deter[tid].ul_det[txmode].trigger_fail, 3172 1); 3173 } 3174 } 3175 } 3176 #else 3177 static inline 3178 void dp_ppdu_desc_get_txmode(struct cdp_tx_completion_ppdu *ppdu) 3179 { 3180 } 3181 3182 static inline void 3183 dp_ppdu_desc_get_msduq(uint32_t msduq_bitmap, uint32_t *msduq_index) 3184 { 3185 } 3186 3187 static void 3188 dp_ppdu_desc_user_deter_stats_update(struct dp_pdev *pdev, 3189 struct dp_peer *peer, 3190 struct cdp_tx_completion_ppdu *ppdu_desc, 3191 struct cdp_tx_completion_ppdu_user *user) 3192 { 3193 } 3194 3195 static inline void 3196 dp_pdev_telemetry_stats_update( 3197 struct dp_pdev *pdev, 3198 struct cdp_tx_completion_ppdu_user *ppdu) 3199 { } 3200 3201 static inline void 3202 dp_pdev_update_deter_stats(struct dp_pdev *pdev, 3203 struct cdp_tx_completion_ppdu *ppdu) 3204 { } 3205 #endif 3206 3207 /** 3208 * dp_tx_stats_update() - Update per-peer statistics 3209 * @pdev: Datapath pdev handle 3210 * @peer: Datapath peer handle 3211 * @ppdu: PPDU Descriptor per user 3212 * @ppdu_desc: PPDU Descriptor 3213 * 3214 * Return: None 3215 */ 3216 static void 3217 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer, 3218 struct cdp_tx_completion_ppdu_user *ppdu, 3219 struct cdp_tx_completion_ppdu *ppdu_desc) 3220 { 3221 uint8_t preamble, mcs, res_mcs = 0; 3222 uint16_t num_msdu; 3223 uint16_t num_mpdu; 3224 uint16_t mpdu_tried; 3225 uint16_t mpdu_failed; 3226 struct dp_mon_ops *mon_ops; 3227 enum cdp_ru_index ru_index; 3228 struct dp_mon_peer *mon_peer = NULL; 3229 uint32_t ratekbps = 0; 3230 uint64_t tx_byte_count; 3231 uint8_t idx = 0; 3232 bool is_preamble_valid = true; 3233 3234 preamble = ppdu->preamble; 3235 mcs = ppdu->mcs; 3236 num_msdu = ppdu->num_msdu; 3237 num_mpdu = ppdu->mpdu_success; 3238 mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast; 3239 mpdu_failed = mpdu_tried - num_mpdu; 3240 tx_byte_count = ppdu->success_bytes; 3241 3242 /* If the peer statistics are already processed as part of 3243 * per-MSDU completion handler, do not process these again in per-PPDU 3244 * indications 3245 */ 3246 if (pdev->soc->process_tx_status) 3247 return; 3248 3249 mon_peer = peer->monitor_peer; 3250 if (!mon_peer) 3251 return; 3252 3253 if (!ppdu->is_mcast) { 3254 DP_STATS_INC(mon_peer, tx.tx_ucast_total.num, num_msdu); 3255 DP_STATS_INC(mon_peer, tx.tx_ucast_total.bytes, 3256 tx_byte_count); 3257 } 3258 3259 if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) { 3260 /* 3261 * All failed mpdu will be retried, so incrementing 3262 * retries mpdu based on mpdu failed. Even for 3263 * ack failure i.e for long retries we get 3264 * mpdu failed equal mpdu tried. 
3265 */ 3266 DP_STATS_INC(mon_peer, tx.retries, mpdu_failed); 3267 dp_pdev_telemetry_stats_update(pdev, ppdu); 3268 return; 3269 } 3270 3271 if (ppdu->is_ppdu_cookie_valid) 3272 DP_STATS_INC(mon_peer, tx.num_ppdu_cookie_valid, 1); 3273 3274 if (ppdu->mu_group_id <= MAX_MU_GROUP_ID && 3275 ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) { 3276 if (qdf_unlikely(ppdu->mu_group_id && 3277 !(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1)))) 3278 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3279 "mu_group_id out of bound!!\n"); 3280 else 3281 DP_STATS_UPD(mon_peer, tx.mu_group_id[ppdu->mu_group_id], 3282 (ppdu->user_pos + 1)); 3283 } 3284 3285 if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA || 3286 ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) { 3287 DP_STATS_UPD(mon_peer, tx.ru_tones, ppdu->ru_tones); 3288 DP_STATS_UPD(mon_peer, tx.ru_start, ppdu->ru_start); 3289 ru_index = dp_get_ru_index_frm_ru_tones(ppdu->ru_tones); 3290 if (ru_index != RU_INDEX_MAX) { 3291 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_msdu, 3292 num_msdu); 3293 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_mpdu, 3294 num_mpdu); 3295 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].mpdu_tried, 3296 mpdu_tried); 3297 } 3298 } 3299 3300 /* 3301 * All failed mpdu will be retried, so incrementing 3302 * retries mpdu based on mpdu failed. Even for 3303 * ack failure i.e for long retries we get 3304 * mpdu failed equal mpdu tried. 3305 */ 3306 DP_STATS_INC(mon_peer, tx.retries, mpdu_failed); 3307 3308 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_msdu, 3309 num_msdu); 3310 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu, 3311 num_mpdu); 3312 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried, 3313 mpdu_tried); 3314 3315 DP_STATS_INC(mon_peer, tx.sgi_count[ppdu->gi], num_msdu); 3316 DP_STATS_INC(mon_peer, tx.bw[ppdu->bw], num_msdu); 3317 DP_STATS_INC(mon_peer, tx.nss[ppdu->nss], num_msdu); 3318 if (ppdu->tid < CDP_DATA_TID_MAX) { 3319 DP_STATS_INC(mon_peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], 3320 num_msdu); 3321 DP_STATS_INC(mon_peer, 3322 tx.wme_ac_type_bytes[TID_TO_WME_AC(ppdu->tid)], 3323 tx_byte_count); 3324 } 3325 3326 DP_STATS_INCC(mon_peer, tx.stbc, num_msdu, ppdu->stbc); 3327 DP_STATS_INCC(mon_peer, tx.ldpc, num_msdu, ppdu->ldpc); 3328 if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid) 3329 DP_STATS_UPD(mon_peer, tx.last_ack_rssi, ppdu_desc->ack_rssi); 3330 3331 if (!ppdu->is_mcast) { 3332 DP_STATS_INC(mon_peer, tx.tx_ucast_success.num, num_msdu); 3333 DP_STATS_INC(mon_peer, tx.tx_ucast_success.bytes, 3334 tx_byte_count); 3335 } 3336 3337 switch (preamble) { 3338 case DOT11_A: 3339 res_mcs = (mcs < MAX_MCS_11A) ? mcs : (MAX_MCS - 1); 3340 break; 3341 case DOT11_B: 3342 res_mcs = (mcs < MAX_MCS_11B) ? mcs : (MAX_MCS - 1); 3343 break; 3344 case DOT11_N: 3345 res_mcs = (mcs < MAX_MCS_11N) ? mcs : (MAX_MCS - 1); 3346 break; 3347 case DOT11_AC: 3348 res_mcs = (mcs < MAX_MCS_11AC) ? mcs : (MAX_MCS - 1); 3349 break; 3350 case DOT11_AX: 3351 res_mcs = (mcs < MAX_MCS_11AX) ? 
mcs : (MAX_MCS - 1); 3352 break; 3353 default: 3354 is_preamble_valid = false; 3355 } 3356 3357 DP_STATS_INCC(mon_peer, 3358 tx.pkt_type[preamble].mcs_count[res_mcs], num_msdu, 3359 is_preamble_valid); 3360 DP_STATS_INCC(mon_peer, tx.ampdu_cnt, num_mpdu, ppdu->is_ampdu); 3361 DP_STATS_INCC(mon_peer, tx.non_ampdu_cnt, num_mpdu, !(ppdu->is_ampdu)); 3362 DP_STATS_INCC(mon_peer, tx.pream_punct_cnt, 1, ppdu->pream_punct); 3363 DP_STATS_INC(mon_peer, tx.tx_ppdus, 1); 3364 DP_STATS_INC(mon_peer, tx.tx_mpdus_success, num_mpdu); 3365 DP_STATS_INC(mon_peer, tx.tx_mpdus_tried, mpdu_tried); 3366 3367 for (idx = 0; idx < CDP_RSSI_CHAIN_LEN; idx++) 3368 DP_STATS_UPD(mon_peer, tx.rssi_chain[idx], ppdu->rssi_chain[idx]); 3369 3370 mon_ops = dp_mon_ops_get(pdev->soc); 3371 if (mon_ops && mon_ops->mon_tx_stats_update) 3372 mon_ops->mon_tx_stats_update(mon_peer, ppdu); 3373 3374 if (!ppdu->fixed_rate_used) 3375 dp_tx_rate_stats_update(peer, ppdu); 3376 3377 dp_pdev_telemetry_stats_update(pdev, ppdu); 3378 3379 dp_peer_stats_notify(pdev, peer); 3380 3381 ratekbps = mon_peer->stats.tx.tx_rate; 3382 DP_STATS_UPD(mon_peer, tx.last_tx_rate, ratekbps); 3383 3384 dp_send_stats_event(pdev, peer, ppdu->peer_id); 3385 } 3386 3387 /** 3388 * dp_get_ppdu_info_user_index() - Find and allocate a per-user 3389 * descriptor for a PPDU, if a new peer id arrives in a PPDU 3390 * @pdev: DP pdev handle 3391 * @peer_id: peer unique identifier 3392 * @ppdu_info: per ppdu tlv structure 3393 * 3394 * Return: user index to be populated 3395 */ 3396 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev, 3397 uint16_t peer_id, 3398 struct ppdu_info *ppdu_info) 3399 { 3400 uint8_t user_index = 0; 3401 struct cdp_tx_completion_ppdu *ppdu_desc; 3402 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3403 3404 ppdu_desc = 3405 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3406 3407 while ((user_index + 1) <= ppdu_info->last_user) { 3408 ppdu_user_desc = &ppdu_desc->user[user_index]; 3409 if (ppdu_user_desc->peer_id != peer_id) { 3410 user_index++; 3411 continue; 3412 } else { 3413 /* Max users possible is 8 so user array index should 3414 * not exceed 7 3415 */ 3416 qdf_assert_always(user_index <= (ppdu_desc->max_users - 1)); 3417 return user_index; 3418 } 3419 } 3420 3421 ppdu_info->last_user++; 3422 /* Max users possible is 8 so last user should not exceed 8 */ 3423 qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users); 3424 return ppdu_info->last_user - 1; 3425 } 3426 3427 /** 3428 * dp_process_ppdu_stats_common_tlv() - Process htt_ppdu_stats_common_tlv 3429 * @pdev: DP pdev handle 3430 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv 3431 * @ppdu_info: per ppdu tlv structure 3432 * 3433 * Return: void 3434 */ 3435 static void 3436 dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev, 3437 uint32_t *tag_buf, 3438 struct ppdu_info *ppdu_info) 3439 { 3440 uint16_t frame_type; 3441 uint16_t frame_ctrl; 3442 uint16_t freq; 3443 struct dp_soc *soc = NULL; 3444 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 3445 uint64_t ppdu_start_timestamp; 3446 uint32_t eval_start_timestamp; 3447 uint32_t *start_tag_buf; 3448 uint32_t *ts_tag_buf; 3449 3450 start_tag_buf = tag_buf; 3451 ppdu_desc = 3452 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3453 3454 ppdu_desc->ppdu_id = ppdu_info->ppdu_id; 3455 3456 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID); 3457 ppdu_info->sched_cmdid = 3458 HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf); 3459 
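/* Number of users reported by the common TLV; must not exceed the
 * user array size allocated for this descriptor (asserted below).
 */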
ppdu_desc->num_users = 3460 HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf); 3461 3462 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); 3463 3464 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE); 3465 frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf); 3466 ppdu_desc->htt_frame_type = frame_type; 3467 3468 ppdu_desc->htt_seq_type = 3469 HTT_PPDU_STATS_COMMON_TLV_PPDU_SEQ_TYPE_GET(*tag_buf); 3470 3471 frame_ctrl = ppdu_desc->frame_ctrl; 3472 3473 ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id; 3474 3475 switch (frame_type) { 3476 case HTT_STATS_FTYPE_TIDQ_DATA_SU: 3477 case HTT_STATS_FTYPE_TIDQ_DATA_MU: 3478 case HTT_STATS_FTYPE_SGEN_QOS_NULL: 3479 /* 3480 * for management packet, frame type come as DATA_SU 3481 * need to check frame_ctrl before setting frame_type 3482 */ 3483 if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL) 3484 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; 3485 else 3486 ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA; 3487 break; 3488 case HTT_STATS_FTYPE_SGEN_MU_BAR: 3489 case HTT_STATS_FTYPE_SGEN_BAR: 3490 case HTT_STATS_FTYPE_SGEN_BE_MU_BAR: 3491 ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR; 3492 break; 3493 default: 3494 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; 3495 break; 3496 } 3497 3498 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US); 3499 ppdu_desc->tx_duration = *tag_buf; 3500 3501 tag_buf = start_tag_buf + 3502 HTT_GET_STATS_CMN_INDEX(SCH_EVAL_START_TSTMP_L32_US); 3503 eval_start_timestamp = *tag_buf; 3504 3505 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US); 3506 ppdu_desc->ppdu_start_timestamp = *tag_buf; 3507 3508 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE); 3509 freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf); 3510 if (freq != ppdu_desc->channel) { 3511 soc = pdev->soc; 3512 ppdu_desc->channel = freq; 3513 pdev->operating_channel.freq = freq; 3514 if (soc && soc->cdp_soc.ol_ops->freq_to_channel) 3515 pdev->operating_channel.num = 3516 soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc, 3517 pdev->pdev_id, 3518 freq); 3519 3520 if (soc && soc->cdp_soc.ol_ops->freq_to_band) 3521 pdev->operating_channel.band = 3522 soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc, 3523 pdev->pdev_id, 3524 freq); 3525 } 3526 3527 ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf); 3528 3529 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM); 3530 ppdu_desc->phy_ppdu_tx_time_us = 3531 HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf); 3532 ppdu_desc->beam_change = 3533 HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf); 3534 ppdu_desc->doppler = 3535 HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf); 3536 ppdu_desc->spatial_reuse = 3537 HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf); 3538 ppdu_desc->num_ul_users = 3539 HTT_PPDU_STATS_COMMON_TLV_NUM_UL_EXPECTED_USERS_GET(*tag_buf); 3540 3541 dp_tx_capture_htt_frame_counter(pdev, frame_type); 3542 3543 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US); 3544 ppdu_start_timestamp = *tag_buf; 3545 ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp << 3546 HTT_SHIFT_UPPER_TIMESTAMP) & 3547 HTT_MASK_UPPER_TIMESTAMP); 3548 3549 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + 3550 ppdu_desc->tx_duration; 3551 /* Ack time stamp is same as end time stamp*/ 3552 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; 3553 3554 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + 3555 
ppdu_desc->tx_duration; 3556 3557 ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp; 3558 ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp; 3559 ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration; 3560 3561 /* Ack time stamp is same as end time stamp*/ 3562 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; 3563 3564 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR); 3565 ppdu_desc->bss_color = 3566 HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf); 3567 3568 ppdu_desc->backoff_ac_valid = 3569 HTT_PPDU_STATS_COMMON_TLV_BACKOFF_AC_VALID_GET(*tag_buf); 3570 if (ppdu_desc->backoff_ac_valid) { 3571 ppdu_desc->backoff_ac = 3572 HTT_PPDU_STATS_COMMON_TLV_BACKOFF_AC_GET(*tag_buf); 3573 ts_tag_buf = start_tag_buf + 3574 HTT_GET_STATS_CMN_INDEX(SCH_EVAL_START_TSTMP_L32_US); 3575 eval_start_timestamp = *ts_tag_buf; 3576 3577 ts_tag_buf = start_tag_buf + 3578 HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US); 3579 ppdu_desc->ch_access_delay = 3580 *ts_tag_buf - eval_start_timestamp; 3581 } 3582 ppdu_desc->num_ul_user_resp_valid = 3583 HTT_PPDU_STATS_COMMON_TLV_NUM_UL_USER_RESPONSES_VALID_GET(*tag_buf); 3584 if (ppdu_desc->num_ul_user_resp_valid) 3585 ppdu_desc->num_ul_user_resp = 3586 HTT_PPDU_STATS_COMMON_TLV_NUM_UL_USER_RESPONSES_GET(*tag_buf); 3587 } 3588 3589 /** 3590 * dp_process_ppdu_stats_user_common_tlv() - Process ppdu_stats_user_common 3591 * @pdev: DP PDEV handle 3592 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv 3593 * @ppdu_info: per ppdu tlv structure 3594 * 3595 * Return: void 3596 */ 3597 static void dp_process_ppdu_stats_user_common_tlv( 3598 struct dp_pdev *pdev, uint32_t *tag_buf, 3599 struct ppdu_info *ppdu_info) 3600 { 3601 uint16_t peer_id; 3602 struct cdp_tx_completion_ppdu *ppdu_desc; 3603 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3604 uint8_t curr_user_index = 0; 3605 struct dp_peer *peer; 3606 struct dp_vdev *vdev; 3607 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3608 3609 ppdu_desc = 3610 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3611 3612 tag_buf++; 3613 peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); 3614 3615 curr_user_index = 3616 dp_get_ppdu_info_user_index(pdev, 3617 peer_id, ppdu_info); 3618 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3619 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3620 3621 ppdu_desc->vdev_id = 3622 HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf); 3623 3624 ppdu_user_desc->peer_id = peer_id; 3625 3626 tag_buf++; 3627 3628 if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) { 3629 ppdu_user_desc->delayed_ba = 1; 3630 ppdu_desc->delayed_ba = 1; 3631 } 3632 3633 if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) { 3634 ppdu_user_desc->is_mcast = true; 3635 ppdu_user_desc->mpdu_tried_mcast = 3636 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); 3637 ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast; 3638 } else { 3639 ppdu_user_desc->mpdu_tried_ucast = 3640 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); 3641 } 3642 3643 ppdu_user_desc->is_seq_num_valid = 3644 HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf); 3645 tag_buf++; 3646 3647 ppdu_user_desc->qos_ctrl = 3648 HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf); 3649 ppdu_user_desc->frame_ctrl = 3650 HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf); 3651 ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl; 3652 3653 if (ppdu_user_desc->delayed_ba) 3654 
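/* Hold the success count until the delayed BA for this user arrives */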
ppdu_user_desc->mpdu_success = 0; 3655 3656 tag_buf += 3; 3657 3658 if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) { 3659 ppdu_user_desc->ppdu_cookie = 3660 HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf); 3661 ppdu_user_desc->is_ppdu_cookie_valid = 1; 3662 } 3663 3664 /* returning earlier causes other feilds unpopulated */ 3665 if (peer_id == DP_SCAN_PEER_ID) { 3666 vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id, 3667 DP_MOD_ID_TX_PPDU_STATS); 3668 if (!vdev) 3669 return; 3670 qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw, 3671 QDF_MAC_ADDR_SIZE); 3672 dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS); 3673 } else { 3674 peer = dp_peer_get_ref_by_id(pdev->soc, peer_id, 3675 DP_MOD_ID_TX_PPDU_STATS); 3676 if (!peer) { 3677 /* 3678 * fw sends peer_id which is about to removed but 3679 * it was already removed in host. 3680 * eg: for disassoc, fw send ppdu stats 3681 * with peer id equal to previously associated 3682 * peer's peer_id but it was removed 3683 */ 3684 vdev = dp_vdev_get_ref_by_id(pdev->soc, 3685 ppdu_desc->vdev_id, 3686 DP_MOD_ID_TX_PPDU_STATS); 3687 if (!vdev) 3688 return; 3689 qdf_mem_copy(ppdu_user_desc->mac_addr, 3690 vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE); 3691 dp_vdev_unref_delete(pdev->soc, vdev, 3692 DP_MOD_ID_TX_PPDU_STATS); 3693 return; 3694 } 3695 qdf_mem_copy(ppdu_user_desc->mac_addr, 3696 peer->mac_addr.raw, QDF_MAC_ADDR_SIZE); 3697 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 3698 } 3699 3700 tag_buf += 10; 3701 ppdu_user_desc->msduq_bitmap = *tag_buf; 3702 } 3703 3704 /** 3705 * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv 3706 * @pdev: DP pdev handle 3707 * @tag_buf: T2H message buffer carrying the user rate TLV 3708 * @ppdu_info: per ppdu tlv structure 3709 * 3710 * Return: void 3711 */ 3712 static void 3713 dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev, 3714 uint32_t *tag_buf, 3715 struct ppdu_info *ppdu_info) 3716 { 3717 uint16_t peer_id; 3718 struct cdp_tx_completion_ppdu *ppdu_desc; 3719 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3720 uint8_t curr_user_index = 0; 3721 struct dp_vdev *vdev; 3722 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3723 uint8_t bw, ru_format; 3724 uint16_t ru_size; 3725 htt_ppdu_stats_user_rate_tlv *stats_buf = 3726 (htt_ppdu_stats_user_rate_tlv *)tag_buf; 3727 3728 ppdu_desc = 3729 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3730 3731 tag_buf++; 3732 peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); 3733 3734 curr_user_index = 3735 dp_get_ppdu_info_user_index(pdev, 3736 peer_id, ppdu_info); 3737 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3738 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3739 if (peer_id == DP_SCAN_PEER_ID) { 3740 vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id, 3741 DP_MOD_ID_TX_PPDU_STATS); 3742 if (!vdev) 3743 return; 3744 dp_vdev_unref_delete(pdev->soc, vdev, 3745 DP_MOD_ID_TX_PPDU_STATS); 3746 } 3747 ppdu_user_desc->peer_id = peer_id; 3748 3749 ppdu_user_desc->tid = 3750 HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf); 3751 3752 tag_buf += 1; 3753 3754 ppdu_user_desc->user_pos = 3755 HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf); 3756 ppdu_user_desc->mu_group_id = 3757 HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf); 3758 3759 ru_format = HTT_PPDU_STATS_USER_RATE_TLV_RU_FORMAT_GET(*tag_buf); 3760 3761 tag_buf += 1; 3762 3763 if (!ru_format) { 3764 /* ru_format = 0: ru_end, ru_start */ 3765 ppdu_user_desc->ru_start = 3766 
HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf); 3767 ppdu_user_desc->ru_tones = 3768 (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) - 3769 HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1; 3770 } else if (ru_format == 1) { 3771 /* ru_format = 1: ru_index, ru_size */ 3772 ru_size = HTT_PPDU_STATS_USER_RATE_TLV_RU_SIZE_GET(*tag_buf); 3773 ppdu_user_desc->ru_tones = 3774 dp_mon_get_ru_width_from_ru_size(ru_size); 3775 } else { 3776 dp_mon_debug("Unsupported ru_format: %d rcvd", ru_format); 3777 } 3778 ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones; 3779 3780 tag_buf += 2; 3781 3782 ppdu_user_desc->ppdu_type = 3783 HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf); 3784 3785 tag_buf++; 3786 ppdu_user_desc->tx_rate = *tag_buf; 3787 3788 ppdu_user_desc->ltf_size = 3789 HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf); 3790 ppdu_user_desc->stbc = 3791 HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf); 3792 ppdu_user_desc->he_re = 3793 HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf); 3794 ppdu_user_desc->txbf = 3795 HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf); 3796 bw = HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf); 3797 /* Align bw value as per host data structures */ 3798 if (bw == HTT_PPDU_STATS_BANDWIDTH_320MHZ) 3799 ppdu_user_desc->bw = bw - 3; 3800 else 3801 ppdu_user_desc->bw = bw - 2; 3802 ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf); 3803 ppdu_desc->usr_nss_sum += ppdu_user_desc->nss; 3804 ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf); 3805 ppdu_user_desc->preamble = 3806 HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf); 3807 ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf); 3808 ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf); 3809 ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf); 3810 3811 tag_buf += 2; 3812 ppdu_user_desc->punc_pattern_bitmap = 3813 HTT_PPDU_STATS_USER_RATE_TLV_PUNC_PATTERN_BITMAP_GET(*tag_buf); 3814 ppdu_user_desc->fixed_rate_used = stats_buf->is_min_rate; 3815 } 3816 3817 /** 3818 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv() - Process 3819 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv 3820 * @pdev: DP PDEV handle 3821 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv 3822 * @ppdu_info: per ppdu tlv structure 3823 * 3824 * Return: void 3825 */ 3826 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( 3827 struct dp_pdev *pdev, uint32_t *tag_buf, 3828 struct ppdu_info *ppdu_info) 3829 { 3830 htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf = 3831 (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf; 3832 3833 struct cdp_tx_completion_ppdu *ppdu_desc; 3834 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3835 uint8_t curr_user_index = 0; 3836 uint16_t peer_id; 3837 uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS; 3838 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3839 3840 ppdu_desc = 3841 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3842 3843 tag_buf++; 3844 3845 peer_id = 3846 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 3847 3848 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 3849 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3850 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3851 ppdu_user_desc->peer_id = peer_id; 3852 3853 ppdu_user_desc->start_seq = dp_stats_buf->start_seq; 3854 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, 3855 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); 3856 3857 
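/* Derive the failed-MPDU bitmap for this user from the 64-entry
 * enqueue bitmap copied above.
 */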
dp_process_ppdu_stats_update_failed_bitmap(pdev, 3858 (void *)ppdu_user_desc, 3859 ppdu_info->ppdu_id, 3860 size); 3861 } 3862 3863 /** 3864 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv() - Process 3865 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv 3866 * @pdev: DP PDEV handle 3867 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv 3868 * @ppdu_info: per ppdu tlv structure 3869 * 3870 * Return: void 3871 */ 3872 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( 3873 struct dp_pdev *pdev, uint32_t *tag_buf, 3874 struct ppdu_info *ppdu_info) 3875 { 3876 htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf = 3877 (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf; 3878 3879 struct cdp_tx_completion_ppdu *ppdu_desc; 3880 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3881 uint8_t curr_user_index = 0; 3882 uint16_t peer_id; 3883 uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS; 3884 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3885 3886 ppdu_desc = 3887 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3888 3889 tag_buf++; 3890 3891 peer_id = 3892 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 3893 3894 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 3895 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3896 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3897 ppdu_user_desc->peer_id = peer_id; 3898 3899 ppdu_user_desc->start_seq = dp_stats_buf->start_seq; 3900 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, 3901 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); 3902 3903 dp_process_ppdu_stats_update_failed_bitmap(pdev, 3904 (void *)ppdu_user_desc, 3905 ppdu_info->ppdu_id, 3906 size); 3907 } 3908 3909 /** 3910 * dp_process_ppdu_stats_user_cmpltn_common_tlv() - Process 3911 * htt_ppdu_stats_user_cmpltn_common_tlv 3912 * @pdev: DP PDEV handle 3913 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv 3914 * @ppdu_info: per ppdu tlv structure 3915 * 3916 * Return: void 3917 */ 3918 static void dp_process_ppdu_stats_user_cmpltn_common_tlv( 3919 struct dp_pdev *pdev, uint32_t *tag_buf, 3920 struct ppdu_info *ppdu_info) 3921 { 3922 uint16_t peer_id; 3923 struct cdp_tx_completion_ppdu *ppdu_desc; 3924 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3925 uint8_t curr_user_index = 0; 3926 uint8_t bw_iter; 3927 htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf = 3928 (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf; 3929 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3930 3931 ppdu_desc = 3932 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3933 3934 tag_buf++; 3935 peer_id = 3936 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf); 3937 3938 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 3939 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3940 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3941 ppdu_user_desc->peer_id = peer_id; 3942 3943 ppdu_user_desc->completion_status = 3944 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET( 3945 *tag_buf); 3946 3947 ppdu_user_desc->tid = 3948 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf); 3949 3950 tag_buf++; 3951 if (qdf_likely(ppdu_user_desc->completion_status == 3952 HTT_PPDU_STATS_USER_STATUS_OK)) { 3953 ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi; 3954 ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi; 3955 ppdu_user_desc->ack_rssi_valid = 1; 3956 } else { 3957 ppdu_user_desc->ack_rssi_valid = 0; 3958 } 3959 3960 
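/* The next TLV word carries the per-user MPDU tried/success counts; mpdu_failed below is derived as tried minus success. */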
tag_buf++; 3961 3962 ppdu_user_desc->mpdu_success = 3963 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf); 3964 3965 ppdu_user_desc->mpdu_failed = 3966 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) - 3967 ppdu_user_desc->mpdu_success; 3968 3969 tag_buf++; 3970 3971 ppdu_user_desc->long_retries = 3972 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf); 3973 3974 ppdu_user_desc->short_retries = 3975 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf); 3976 ppdu_user_desc->retry_mpdus = 3977 ppdu_user_desc->long_retries + ppdu_user_desc->short_retries; 3978 3979 ppdu_user_desc->is_ampdu = 3980 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf); 3981 ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu; 3982 3983 ppdu_desc->resp_type = 3984 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf); 3985 ppdu_desc->mprot_type = 3986 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf); 3987 ppdu_desc->rts_success = 3988 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf); 3989 ppdu_desc->rts_failure = 3990 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf); 3991 3992 ppdu_user_desc->mprot_type = ppdu_desc->mprot_type; 3993 ppdu_user_desc->rts_success = ppdu_desc->rts_success; 3994 ppdu_user_desc->rts_failure = ppdu_desc->rts_failure; 3995 3996 ppdu_user_desc->pream_punct = 3997 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf); 3998 3999 ppdu_info->compltn_common_tlv++; 4000 4001 /* 4002 * MU BAR may send request to n users but we may received ack only from 4003 * m users. To have count of number of users respond back, we have a 4004 * separate counter bar_num_users per PPDU that get increment for every 4005 * htt_ppdu_stats_user_cmpltn_common_tlv 4006 */ 4007 ppdu_desc->bar_num_users++; 4008 4009 tag_buf++; 4010 for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) { 4011 ppdu_user_desc->rssi_chain[bw_iter] = 4012 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf); 4013 tag_buf++; 4014 } 4015 4016 ppdu_user_desc->sa_tx_antenna = 4017 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf); 4018 4019 tag_buf++; 4020 ppdu_user_desc->sa_is_training = 4021 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf); 4022 if (ppdu_user_desc->sa_is_training) { 4023 ppdu_user_desc->sa_goodput = 4024 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf); 4025 } 4026 4027 tag_buf++; 4028 for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) { 4029 ppdu_user_desc->sa_max_rates[bw_iter] = 4030 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]); 4031 } 4032 4033 tag_buf += CDP_NUM_SA_BW; 4034 ppdu_user_desc->current_rate_per = 4035 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf); 4036 4037 tag_buf++; 4038 /* Skip SW RTS */ 4039 4040 tag_buf++; 4041 /* Extract 320MHz MAX PHY ratecode */ 4042 ppdu_user_desc->sa_max_rates[CDP_SA_BW320_INX] = 4043 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(*tag_buf); 4044 } 4045 4046 /** 4047 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv() - Process 4048 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv 4049 * @pdev: DP PDEV handle 4050 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv 4051 * @ppdu_info: per ppdu tlv structure 4052 * 4053 * Return: void 4054 */ 4055 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( 4056 struct dp_pdev *pdev, uint32_t *tag_buf, 4057 struct ppdu_info *ppdu_info) 4058 { 4059 
htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf = 4060 (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf; 4061 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 4062 struct cdp_tx_completion_ppdu *ppdu_desc; 4063 uint8_t curr_user_index = 0; 4064 uint16_t peer_id; 4065 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 4066 4067 ppdu_desc = 4068 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 4069 4070 tag_buf++; 4071 4072 peer_id = 4073 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 4074 4075 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 4076 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 4077 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 4078 ppdu_user_desc->peer_id = peer_id; 4079 4080 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; 4081 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, 4082 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); 4083 ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32; 4084 } 4085 4086 /** 4087 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv() - Process 4088 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv 4089 * @pdev: DP PDEV handle 4090 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv 4091 * @ppdu_info: per ppdu tlv structure 4092 * 4093 * Return: void 4094 */ 4095 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( 4096 struct dp_pdev *pdev, uint32_t *tag_buf, 4097 struct ppdu_info *ppdu_info) 4098 { 4099 htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf = 4100 (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf; 4101 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 4102 struct cdp_tx_completion_ppdu *ppdu_desc; 4103 uint8_t curr_user_index = 0; 4104 uint16_t peer_id; 4105 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 4106 4107 ppdu_desc = 4108 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 4109 4110 tag_buf++; 4111 4112 peer_id = 4113 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 4114 4115 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 4116 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 4117 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 4118 ppdu_user_desc->peer_id = peer_id; 4119 4120 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; 4121 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, 4122 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); 4123 ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32; 4124 } 4125 4126 /** 4127 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv() - Process 4128 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv 4129 * @pdev: DP PDEV handle 4130 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv 4131 * @ppdu_info: per ppdu tlv structure 4132 * 4133 * Return: void 4134 */ 4135 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( 4136 struct dp_pdev *pdev, uint32_t *tag_buf, 4137 struct ppdu_info *ppdu_info) 4138 { 4139 uint16_t peer_id; 4140 struct cdp_tx_completion_ppdu *ppdu_desc; 4141 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 4142 uint8_t curr_user_index = 0; 4143 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 4144 4145 ppdu_desc = 4146 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 4147 4148 tag_buf += 2; 4149 peer_id = 4150 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf); 4151 4152 curr_user_index = 
dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 4153 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 4154 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 4155 if (!ppdu_user_desc->ack_ba_tlv) { 4156 ppdu_user_desc->ack_ba_tlv = 1; 4157 } else { 4158 pdev->stats.ack_ba_comes_twice++; 4159 return; 4160 } 4161 4162 ppdu_user_desc->peer_id = peer_id; 4163 4164 tag_buf++; 4165 /* do not update ppdu_desc->tid from this TLV */ 4166 ppdu_user_desc->num_mpdu = 4167 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf); 4168 4169 ppdu_user_desc->num_msdu = 4170 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf); 4171 4172 ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu; 4173 4174 tag_buf++; 4175 ppdu_user_desc->start_seq = 4176 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET( 4177 *tag_buf); 4178 4179 tag_buf++; 4180 ppdu_user_desc->success_bytes = *tag_buf; 4181 4182 /* increase ack ba tlv counter on successful mpdu */ 4183 if (ppdu_user_desc->num_mpdu) 4184 ppdu_info->ack_ba_tlv++; 4185 4186 if (ppdu_user_desc->ba_size == 0) { 4187 ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq; 4188 ppdu_user_desc->ba_bitmap[0] = 1; 4189 ppdu_user_desc->ba_size = 1; 4190 } 4191 } 4192 4193 /** 4194 * dp_process_ppdu_stats_user_common_array_tlv() - Process 4195 * htt_ppdu_stats_user_common_array_tlv 4196 * @pdev: DP PDEV handle 4197 * @tag_buf: buffer containing the htt_ppdu_stats_user_common_array_tlv 4198 * @ppdu_info: per ppdu tlv structure 4199 * 4200 * Return: void 4201 */ 4202 static void dp_process_ppdu_stats_user_common_array_tlv( 4203 struct dp_pdev *pdev, uint32_t *tag_buf, 4204 struct ppdu_info *ppdu_info) 4205 { 4206 uint32_t peer_id; 4207 struct cdp_tx_completion_ppdu *ppdu_desc; 4208 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 4209 uint8_t curr_user_index = 0; 4210 struct htt_tx_ppdu_stats_info *dp_stats_buf; 4211 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 4212 4213 ppdu_desc = 4214 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 4215 4216 tag_buf++; 4217 dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf; 4218 tag_buf += 3; 4219 peer_id = 4220 HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf); 4221 4222 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) { 4223 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 4224 "Peer with peer_id: %u not found", peer_id); 4225 return; 4226 } 4227 4228 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 4229 4230 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 4231 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 4232 4233 ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes; 4234 ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes; 4235 4236 tag_buf++; 4237 4238 ppdu_user_desc->success_msdus = 4239 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf); 4240 ppdu_user_desc->retry_msdus = 4241 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf); 4242 tag_buf++; 4243 ppdu_user_desc->failed_msdus = 4244 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf); 4245 } 4246 4247 /** 4248 * dp_process_ppdu_stats_user_compltn_flush_tlv() - Process 4249 * htt_ppdu_stats_flush_tlv 4250 * @pdev: DP PDEV handle 4251 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv 4252 * @ppdu_info: per ppdu tlv structure 4253 * 4254 * Return: void 4255 */ 4256 static void 4257 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev, 4258 uint32_t *tag_buf, 4259 struct ppdu_info *ppdu_info) 4260 { 4261
struct cdp_tx_completion_ppdu *ppdu_desc; 4262 uint32_t peer_id; 4263 uint8_t tid; 4264 struct dp_peer *peer; 4265 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4266 struct dp_mon_peer *mon_peer = NULL; 4267 4268 ppdu_desc = (struct cdp_tx_completion_ppdu *) 4269 qdf_nbuf_data(ppdu_info->nbuf); 4270 ppdu_desc->is_flush = 1; 4271 4272 tag_buf++; 4273 ppdu_desc->drop_reason = *tag_buf; 4274 4275 tag_buf++; 4276 ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf); 4277 ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf); 4278 ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf); 4279 4280 tag_buf++; 4281 peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf); 4282 tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf); 4283 4284 ppdu_desc->num_users = 1; 4285 ppdu_desc->user[0].peer_id = peer_id; 4286 ppdu_desc->user[0].tid = tid; 4287 4288 ppdu_desc->queue_type = 4289 HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf); 4290 4291 peer = dp_peer_get_ref_by_id(pdev->soc, peer_id, 4292 DP_MOD_ID_TX_PPDU_STATS); 4293 if (!peer) 4294 goto add_ppdu_to_sched_list; 4295 4296 if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) { 4297 mon_peer = peer->monitor_peer; 4298 DP_STATS_INC(mon_peer, 4299 tx.excess_retries_per_ac[TID_TO_WME_AC(tid)], 4300 ppdu_desc->num_msdu); 4301 } 4302 4303 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 4304 4305 add_ppdu_to_sched_list: 4306 ppdu_info->done = 1; 4307 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem); 4308 mon_pdev->list_depth--; 4309 TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info, 4310 ppdu_info_list_elem); 4311 mon_pdev->sched_comp_list_depth++; 4312 } 4313 4314 /** 4315 * dp_process_ppdu_stats_sch_cmd_status_tlv() - Process schedule command status tlv 4316 * Here we are not going to process the buffer. 4317 * @pdev: DP PDEV handle 4318 * @ppdu_info: per ppdu tlv structure 4319 * 4320 * Return: void 4321 */ 4322 static void 4323 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev, 4324 struct ppdu_info *ppdu_info) 4325 { 4326 struct cdp_tx_completion_ppdu *ppdu_desc; 4327 struct dp_peer *peer; 4328 uint8_t num_users; 4329 uint8_t i; 4330 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4331 4332 ppdu_desc = (struct cdp_tx_completion_ppdu *) 4333 qdf_nbuf_data(ppdu_info->nbuf); 4334 4335 num_users = ppdu_desc->bar_num_users; 4336 4337 for (i = 0; i < num_users; i++) { 4338 if (ppdu_desc->user[i].user_pos == 0) { 4339 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { 4340 /* update phy mode for bar frame */ 4341 ppdu_desc->phy_mode = 4342 ppdu_desc->user[i].preamble; 4343 ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs; 4344 break; 4345 } 4346 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) { 4347 ppdu_desc->frame_ctrl = 4348 ppdu_desc->user[i].frame_ctrl; 4349 break; 4350 } 4351 } 4352 } 4353 4354 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && 4355 ppdu_desc->delayed_ba) { 4356 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); 4357 4358 for (i = 0; i < ppdu_desc->num_users; i++) { 4359 struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu; 4360 uint64_t start_tsf; 4361 uint64_t end_tsf; 4362 uint32_t ppdu_id; 4363 struct dp_mon_peer *mon_peer; 4364 4365 ppdu_id = ppdu_desc->ppdu_id; 4366 peer = dp_peer_get_ref_by_id 4367 (pdev->soc, ppdu_desc->user[i].peer_id, 4368 DP_MOD_ID_TX_PPDU_STATS); 4369 /* 4370 * This check is to make sure peer is not deleted 4371 * after processing the TLVs. 
4372 */ 4373 if (!peer) 4374 continue; 4375 4376 if (!peer->monitor_peer) { 4377 dp_peer_unref_delete(peer, 4378 DP_MOD_ID_TX_PPDU_STATS); 4379 continue; 4380 } 4381 4382 mon_peer = peer->monitor_peer; 4383 delay_ppdu = &mon_peer->delayed_ba_ppdu_stats; 4384 start_tsf = ppdu_desc->ppdu_start_timestamp; 4385 end_tsf = ppdu_desc->ppdu_end_timestamp; 4386 /* 4387 * save delayed ba user info 4388 */ 4389 if (ppdu_desc->user[i].delayed_ba) { 4390 dp_peer_copy_delay_stats(peer, 4391 &ppdu_desc->user[i], 4392 ppdu_id); 4393 mon_peer->last_delayed_ba_ppduid = ppdu_id; 4394 delay_ppdu->ppdu_start_timestamp = start_tsf; 4395 delay_ppdu->ppdu_end_timestamp = end_tsf; 4396 } 4397 ppdu_desc->user[i].peer_last_delayed_ba = 4398 mon_peer->last_delayed_ba; 4399 4400 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 4401 4402 if (ppdu_desc->user[i].delayed_ba && 4403 !ppdu_desc->user[i].debug_copied) { 4404 QDF_TRACE(QDF_MODULE_ID_TXRX, 4405 QDF_TRACE_LEVEL_INFO_MED, 4406 "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n", 4407 __func__, __LINE__, 4408 ppdu_desc->ppdu_id, 4409 ppdu_desc->bar_ppdu_id, 4410 ppdu_desc->num_users, 4411 i, 4412 ppdu_desc->htt_frame_type); 4413 } 4414 } 4415 } 4416 4417 /* 4418 * When the frame type is BAR and STATS_COMMON_TLV is set, 4419 * copy the stored peer delayed BA info to the BAR status 4420 */ 4421 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { 4422 for (i = 0; i < ppdu_desc->bar_num_users; i++) { 4423 struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu; 4424 uint64_t start_tsf; 4425 uint64_t end_tsf; 4426 struct dp_mon_peer *mon_peer; 4427 4428 peer = dp_peer_get_ref_by_id 4429 (pdev->soc, 4430 ppdu_desc->user[i].peer_id, 4431 DP_MOD_ID_TX_PPDU_STATS); 4432 /* 4433 * This check is to make sure the peer is not deleted 4434 * after processing the TLVs.
4435 */ 4436 if (!peer) 4437 continue; 4438 4439 if (!peer->monitor_peer) { 4440 dp_peer_unref_delete(peer, 4441 DP_MOD_ID_TX_PPDU_STATS); 4442 continue; 4443 } 4444 4445 mon_peer = peer->monitor_peer; 4446 if (ppdu_desc->user[i].completion_status != 4447 HTT_PPDU_STATS_USER_STATUS_OK) { 4448 dp_peer_unref_delete(peer, 4449 DP_MOD_ID_TX_PPDU_STATS); 4450 continue; 4451 } 4452 4453 delay_ppdu = &mon_peer->delayed_ba_ppdu_stats; 4454 start_tsf = delay_ppdu->ppdu_start_timestamp; 4455 end_tsf = delay_ppdu->ppdu_end_timestamp; 4456 4457 if (mon_peer->last_delayed_ba) { 4458 dp_peer_copy_stats_to_bar(peer, 4459 &ppdu_desc->user[i]); 4460 ppdu_desc->ppdu_id = 4461 mon_peer->last_delayed_ba_ppduid; 4462 ppdu_desc->ppdu_start_timestamp = start_tsf; 4463 ppdu_desc->ppdu_end_timestamp = end_tsf; 4464 } 4465 ppdu_desc->user[i].peer_last_delayed_ba = 4466 mon_peer->last_delayed_ba; 4467 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 4468 } 4469 } 4470 4471 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem); 4472 mon_pdev->list_depth--; 4473 TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info, 4474 ppdu_info_list_elem); 4475 mon_pdev->sched_comp_list_depth++; 4476 } 4477 4478 /** 4479 * dp_validate_fix_ppdu_tlv() - Function to validate the length of PPDU 4480 * @pdev: DP pdev handle 4481 * @tag_buf: TLV buffer 4482 * @tlv_expected_size: Expected size of Tag 4483 * @tlv_len: TLV length received from FW 4484 * 4485 * If the TLV length sent as part of PPDU TLV is less that expected size i.e 4486 * size of corresponding data structure, pad the remaining bytes with zeros 4487 * and continue processing the TLVs 4488 * 4489 * Return: Pointer to updated TLV 4490 */ 4491 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev, 4492 uint32_t *tag_buf, 4493 uint16_t tlv_expected_size, 4494 uint16_t tlv_len) 4495 { 4496 uint32_t *tlv_desc = tag_buf; 4497 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4498 4499 qdf_assert_always(tlv_len != 0); 4500 4501 if (tlv_len < tlv_expected_size) { 4502 qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size); 4503 qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len); 4504 tlv_desc = mon_pdev->ppdu_tlv_buf; 4505 } 4506 4507 return tlv_desc; 4508 } 4509 4510 /** 4511 * dp_process_ppdu_tag() - Function to process the PPDU TLVs 4512 * @pdev: DP pdev handle 4513 * @tag_buf: TLV buffer 4514 * @tlv_len: length of tlv 4515 * @ppdu_info: per ppdu tlv structure 4516 * 4517 * Return: void 4518 */ 4519 static void dp_process_ppdu_tag(struct dp_pdev *pdev, 4520 uint32_t *tag_buf, 4521 uint32_t tlv_len, 4522 struct ppdu_info *ppdu_info) 4523 { 4524 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 4525 uint16_t tlv_expected_size; 4526 uint32_t *tlv_desc; 4527 4528 switch (tlv_type) { 4529 case HTT_PPDU_STATS_COMMON_TLV: 4530 tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv); 4531 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4532 tlv_expected_size, tlv_len); 4533 dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info); 4534 break; 4535 case HTT_PPDU_STATS_USR_COMMON_TLV: 4536 tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv); 4537 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4538 tlv_expected_size, tlv_len); 4539 dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc, 4540 ppdu_info); 4541 break; 4542 case HTT_PPDU_STATS_USR_RATE_TLV: 4543 tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv); 4544 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4545 tlv_expected_size, tlv_len); 4546 
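/* tlv_desc points either at the original TLV or, if the TLV was shorter than expected, at the zero-padded copy in mon_pdev->ppdu_tlv_buf. */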
dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc, 4547 ppdu_info); 4548 break; 4549 case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV: 4550 tlv_expected_size = 4551 sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv); 4552 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4553 tlv_expected_size, tlv_len); 4554 dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( 4555 pdev, tlv_desc, ppdu_info); 4556 break; 4557 case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV: 4558 tlv_expected_size = 4559 sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv); 4560 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4561 tlv_expected_size, tlv_len); 4562 dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( 4563 pdev, tlv_desc, ppdu_info); 4564 break; 4565 case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV: 4566 tlv_expected_size = 4567 sizeof(htt_ppdu_stats_user_cmpltn_common_tlv); 4568 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4569 tlv_expected_size, tlv_len); 4570 dp_process_ppdu_stats_user_cmpltn_common_tlv( 4571 pdev, tlv_desc, ppdu_info); 4572 break; 4573 case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV: 4574 tlv_expected_size = 4575 sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv); 4576 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4577 tlv_expected_size, tlv_len); 4578 dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( 4579 pdev, tlv_desc, ppdu_info); 4580 break; 4581 case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV: 4582 tlv_expected_size = 4583 sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv); 4584 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4585 tlv_expected_size, tlv_len); 4586 dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( 4587 pdev, tlv_desc, ppdu_info); 4588 break; 4589 case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV: 4590 tlv_expected_size = 4591 sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv); 4592 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4593 tlv_expected_size, tlv_len); 4594 dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( 4595 pdev, tlv_desc, ppdu_info); 4596 break; 4597 case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV: 4598 tlv_expected_size = 4599 sizeof(htt_ppdu_stats_usr_common_array_tlv_v); 4600 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4601 tlv_expected_size, tlv_len); 4602 dp_process_ppdu_stats_user_common_array_tlv( 4603 pdev, tlv_desc, ppdu_info); 4604 break; 4605 case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV: 4606 tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv); 4607 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 4608 tlv_expected_size, tlv_len); 4609 dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc, 4610 ppdu_info); 4611 break; 4612 case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV: 4613 dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info); 4614 break; 4615 default: 4616 break; 4617 } 4618 } 4619 4620 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 4621 static inline 4622 void dp_ppdu_desc_user_airtime_consumption_update( 4623 struct dp_peer *peer, 4624 struct cdp_tx_completion_ppdu_user *user) 4625 { 4626 struct dp_mon_peer *mon_peer = NULL; 4627 uint8_t ac = 0; 4628 4629 mon_peer = peer->monitor_peer; 4630 if (qdf_unlikely(!mon_peer)) 4631 return; 4632 4633 ac = TID_TO_WME_AC(user->tid); 4634 DP_STATS_INC(mon_peer, airtime_stats.tx_airtime_consumption[ac].consumption, 4635 user->phy_tx_time_us); 4636 } 4637 #else 4638 static inline 4639 void dp_ppdu_desc_user_airtime_consumption_update( 4640 struct dp_peer *peer, 4641 struct cdp_tx_completion_ppdu_user *user) 4642 { } 4643 #endif 4644 4645 #if defined(WLAN_ATF_ENABLE) || 
defined(WLAN_CONFIG_TELEMETRY_AGENT) 4646 static void 4647 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev, 4648 struct dp_peer *peer, 4649 struct cdp_tx_completion_ppdu *ppdu_desc, 4650 struct cdp_tx_completion_ppdu_user *user) 4651 { 4652 uint32_t nss_ru_width_sum = 0; 4653 struct dp_mon_peer *mon_peer = NULL; 4654 4655 if (!pdev || !ppdu_desc || !user || !peer) 4656 return; 4657 4658 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA) 4659 return; 4660 4661 mon_peer = peer->monitor_peer; 4662 if (qdf_unlikely(!mon_peer)) 4663 return; 4664 4665 nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum; 4666 if (!nss_ru_width_sum) 4667 nss_ru_width_sum = 1; 4668 4669 /* 4670 * For SU-MIMO PPDU phy Tx time is same for the single user. 4671 * For MU-MIMO phy Tx time is calculated per user as below 4672 * user phy tx time = 4673 * Entire PPDU duration * MU Ratio * OFDMA Ratio 4674 * MU Ratio = usr_nss / Sum_of_nss_of_all_users 4675 * OFDMA_ratio = usr_ru_width / Sum_of_ru_width_of_all_users 4676 * usr_ru_widt = ru_end – ru_start + 1 4677 */ 4678 if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) { 4679 user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us; 4680 } else { 4681 user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us * 4682 user->nss * user->ru_tones) / nss_ru_width_sum; 4683 } 4684 4685 dp_ppdu_desc_user_airtime_consumption_update(peer, user); 4686 } 4687 #else 4688 static void 4689 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev, 4690 struct dp_peer *peer, 4691 struct cdp_tx_completion_ppdu *ppdu_desc, 4692 struct cdp_tx_completion_ppdu_user *user) 4693 { 4694 } 4695 #endif 4696 4697 #ifdef WLAN_SUPPORT_CTRL_FRAME_STATS 4698 static void 4699 dp_tx_ctrl_stats_update(struct dp_pdev *pdev, struct dp_peer *peer, 4700 struct cdp_tx_completion_ppdu_user *user) 4701 { 4702 struct dp_mon_peer *mon_peer = NULL; 4703 uint16_t fc = 0; 4704 4705 if (!pdev || !peer || !user) 4706 return; 4707 4708 mon_peer = peer->monitor_peer; 4709 if (qdf_unlikely(!mon_peer)) 4710 return; 4711 4712 if (user->mprot_type) { 4713 DP_STATS_INCC(mon_peer, 4714 tx.rts_success, 1, user->rts_success); 4715 DP_STATS_INCC(mon_peer, 4716 tx.rts_failure, 1, user->rts_failure); 4717 } 4718 fc = user->frame_ctrl; 4719 if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_TYPE_MASK) == 4720 QDF_IEEE80211_FC0_TYPE_CTL) { 4721 if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 4722 QDF_IEEE80211_FC0_SUBTYPE_VHT_NDP_AN) 4723 DP_STATS_INC(mon_peer, tx.ndpa_cnt, 1); 4724 if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 4725 QDF_IEEE80211_FC0_SUBTYPE_BAR) 4726 DP_STATS_INC(mon_peer, tx.bar_cnt, 1); 4727 } 4728 } 4729 #else 4730 static void 4731 dp_tx_ctrl_stats_update(struct dp_pdev *pdev, struct dp_peer *peer, 4732 struct cdp_tx_completion_ppdu_user *user) 4733 { 4734 } 4735 #endif /* WLAN_SUPPORT_CTRL_FRAME_STATS */ 4736 4737 void 4738 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev, 4739 struct ppdu_info *ppdu_info) 4740 { 4741 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4742 struct dp_peer *peer = NULL; 4743 uint32_t tlv_bitmap_expected; 4744 uint32_t tlv_bitmap_default; 4745 uint16_t i; 4746 uint32_t num_users; 4747 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4748 4749 ppdu_desc = (struct cdp_tx_completion_ppdu *) 4750 qdf_nbuf_data(ppdu_info->nbuf); 4751 4752 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR) 4753 ppdu_desc->ppdu_id = ppdu_info->ppdu_id; 4754 4755 tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; 4756 if 
(mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode || 4757 mon_pdev->tx_capture_enabled) { 4758 if (ppdu_info->is_ampdu) 4759 tlv_bitmap_expected = 4760 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( 4761 ppdu_info->tlv_bitmap); 4762 } 4763 4764 tlv_bitmap_default = tlv_bitmap_expected; 4765 4766 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { 4767 num_users = ppdu_desc->bar_num_users; 4768 ppdu_desc->num_users = ppdu_desc->bar_num_users; 4769 } else { 4770 num_users = ppdu_desc->num_users; 4771 } 4772 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); 4773 4774 if (wlan_cfg_get_sawf_stats_config(pdev->soc->wlan_cfg_ctx)) { 4775 dp_ppdu_desc_get_txmode(ppdu_desc); 4776 dp_pdev_update_deter_stats(pdev, ppdu_desc); 4777 } 4778 4779 for (i = 0; i < num_users; i++) { 4780 ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu; 4781 ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu; 4782 4783 peer = dp_peer_get_ref_by_id(pdev->soc, 4784 ppdu_desc->user[i].peer_id, 4785 DP_MOD_ID_TX_PPDU_STATS); 4786 /* 4787 * This check is to make sure peer is not deleted 4788 * after processing the TLVs. 4789 */ 4790 if (!peer) 4791 continue; 4792 4793 ppdu_desc->user[i].is_bss_peer = peer->bss_peer; 4794 4795 dp_ppdu_desc_user_phy_tx_time_update(pdev, peer, ppdu_desc, 4796 &ppdu_desc->user[i]); 4797 4798 dp_tx_ctrl_stats_update(pdev, peer, &ppdu_desc->user[i]); 4799 4800 if (wlan_cfg_get_sawf_stats_config(pdev->soc->wlan_cfg_ctx)) { 4801 dp_ppdu_desc_user_deter_stats_update(pdev, 4802 peer, 4803 ppdu_desc, 4804 &ppdu_desc->user[i]); 4805 } 4806 4807 /* 4808 * different frame like DATA, BAR or CTRL has different 4809 * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we 4810 * receive other tlv in-order/sequential from fw. 4811 * Since ACK_BA_STATUS TLV come from Hardware it is 4812 * asynchronous So we need to depend on some tlv to confirm 4813 * all tlv is received for a ppdu. 4814 * So we depend on both SCHED_CMD_STATUS_TLV and 4815 * ACK_BA_STATUS_TLV. for failure packet we won't get 4816 * ACK_BA_STATUS_TLV. 
4817 */ 4818 if (!(ppdu_info->tlv_bitmap & 4819 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) || 4820 (!(ppdu_info->tlv_bitmap & 4821 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) && 4822 (ppdu_desc->user[i].completion_status == 4823 HTT_PPDU_STATS_USER_STATUS_OK))) { 4824 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 4825 continue; 4826 } 4827 4828 /* 4829 * Update tx stats for data frames having Qos as well as 4830 * non-Qos data tid 4831 */ 4832 4833 if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX || 4834 (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) || 4835 (ppdu_desc->htt_frame_type == 4836 HTT_STATS_FTYPE_SGEN_QOS_NULL) || 4837 ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) && 4838 (ppdu_desc->num_mpdu > 1))) && 4839 (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) { 4840 dp_tx_stats_update(pdev, peer, 4841 &ppdu_desc->user[i], 4842 ppdu_desc); 4843 } 4844 4845 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 4846 tlv_bitmap_expected = tlv_bitmap_default; 4847 } 4848 } 4849 4850 #if !defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_PKT_CAPTURE_TX_2_0) || \ 4851 defined(WLAN_PKT_CAPTURE_RX_2_0) 4852 /** 4853 * dp_tx_ppdu_desc_notify() - Notify to upper layer about PPDU via WDI 4854 * 4855 * @pdev: Datapath pdev handle 4856 * @nbuf: Buffer to be delivered to upper layer 4857 * 4858 * Return: void 4859 */ 4860 static void dp_tx_ppdu_desc_notify(struct dp_pdev *pdev, qdf_nbuf_t nbuf) 4861 { 4862 struct dp_soc *soc = pdev->soc; 4863 struct dp_mon_ops *mon_ops = NULL; 4864 4865 mon_ops = dp_mon_ops_get(soc); 4866 if (mon_ops && mon_ops->mon_ppdu_desc_notify) 4867 mon_ops->mon_ppdu_desc_notify(pdev, nbuf); 4868 else 4869 qdf_nbuf_free(nbuf); 4870 } 4871 4872 void dp_ppdu_desc_deliver(struct dp_pdev *pdev, 4873 struct ppdu_info *ppdu_info) 4874 { 4875 struct ppdu_info *s_ppdu_info = NULL; 4876 struct ppdu_info *ppdu_info_next = NULL; 4877 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4878 qdf_nbuf_t nbuf; 4879 uint32_t time_delta = 0; 4880 bool starved = 0; 4881 bool matched = 0; 4882 bool recv_ack_ba_done = 0; 4883 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4884 4885 if (ppdu_info->tlv_bitmap & 4886 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && 4887 ppdu_info->done) 4888 recv_ack_ba_done = 1; 4889 4890 mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid; 4891 4892 s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list); 4893 4894 TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list, 4895 ppdu_info_list_elem, ppdu_info_next) { 4896 if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32) 4897 time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) + 4898 ppdu_info->tsf_l32; 4899 else 4900 time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32; 4901 4902 if (!s_ppdu_info->done && !recv_ack_ba_done) { 4903 if (time_delta < MAX_SCHED_STARVE) { 4904 dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]", 4905 pdev->pdev_id, 4906 s_ppdu_info->ppdu_id, 4907 s_ppdu_info->sched_cmdid, 4908 s_ppdu_info->tlv_bitmap, 4909 s_ppdu_info->tsf_l32, 4910 s_ppdu_info->done); 4911 break; 4912 } 4913 starved = 1; 4914 } 4915 4916 mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid; 4917 TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info, 4918 ppdu_info_list_elem); 4919 mon_pdev->sched_comp_list_depth--; 4920 4921 nbuf = s_ppdu_info->nbuf; 4922 qdf_assert_always(nbuf); 4923 ppdu_desc = (struct cdp_tx_completion_ppdu *) 4924 qdf_nbuf_data(nbuf); 4925 ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap; 4926 4927 if (starved) { 4928 dp_mon_info("ppdu 
starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n", 4929 ppdu_desc->frame_ctrl, 4930 ppdu_desc->htt_frame_type, 4931 ppdu_desc->tlv_bitmap, 4932 ppdu_desc->user[0].completion_status); 4933 starved = 0; 4934 } 4935 4936 if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id && 4937 ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid) 4938 matched = 1; 4939 4940 dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info); 4941 4942 qdf_mem_free(s_ppdu_info); 4943 4944 dp_tx_ppdu_desc_notify(pdev, nbuf); 4945 4946 if (matched) 4947 break; 4948 } 4949 } 4950 #endif 4951 4952 /** 4953 * dp_tx_ppdu_desc_deliver() - Deliver PPDU desc to upper layer 4954 * @pdev: Datapath pdev handle 4955 * @ppdu_info: per PPDU TLV descriptor 4956 * 4957 * Return: void 4958 */ 4959 static void dp_tx_ppdu_desc_deliver(struct dp_pdev *pdev, 4960 struct ppdu_info *ppdu_info) 4961 { 4962 struct dp_soc *soc = pdev->soc; 4963 struct dp_mon_ops *mon_ops = NULL; 4964 4965 mon_ops = dp_mon_ops_get(soc); 4966 4967 if (mon_ops && mon_ops->mon_ppdu_desc_deliver) { 4968 mon_ops->mon_ppdu_desc_deliver(pdev, ppdu_info); 4969 } else { 4970 qdf_nbuf_free(ppdu_info->nbuf); 4971 ppdu_info->nbuf = NULL; 4972 qdf_mem_free(ppdu_info); 4973 } 4974 } 4975 4976 /** 4977 * dp_get_ppdu_desc() - Function to allocate new PPDU status 4978 * desc for new ppdu id 4979 * @pdev: DP pdev handle 4980 * @ppdu_id: PPDU unique identifier 4981 * @tlv_type: TLV type received 4982 * @tsf_l32: timestamp received along with ppdu stats indication header 4983 * @max_users: Maximum user for that particular ppdu 4984 * 4985 * Return: ppdu_info per ppdu tlv structure 4986 */ 4987 static 4988 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id, 4989 uint8_t tlv_type, uint32_t tsf_l32, 4990 uint8_t max_users) 4991 { 4992 struct ppdu_info *ppdu_info = NULL; 4993 struct ppdu_info *s_ppdu_info = NULL; 4994 struct ppdu_info *ppdu_info_next = NULL; 4995 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4996 uint32_t size = 0; 4997 struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL; 4998 struct cdp_tx_completion_ppdu_user *tmp_user; 4999 uint32_t time_delta; 5000 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 5001 5002 /* 5003 * Find ppdu_id node exists or not 5004 */ 5005 TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list, 5006 ppdu_info_list_elem, ppdu_info_next) { 5007 if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) { 5008 if (ppdu_info->tsf_l32 > tsf_l32) 5009 time_delta = (MAX_TSF_32 - 5010 ppdu_info->tsf_l32) + tsf_l32; 5011 else 5012 time_delta = tsf_l32 - ppdu_info->tsf_l32; 5013 5014 if (time_delta > WRAP_DROP_TSF_DELTA) { 5015 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, 5016 ppdu_info, ppdu_info_list_elem); 5017 mon_pdev->list_depth--; 5018 pdev->stats.ppdu_wrap_drop++; 5019 tmp_ppdu_desc = 5020 (struct cdp_tx_completion_ppdu *) 5021 qdf_nbuf_data(ppdu_info->nbuf); 5022 tmp_user = &tmp_ppdu_desc->user[0]; 5023 dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n", 5024 ppdu_info->ppdu_id, 5025 ppdu_info->tsf_l32, 5026 ppdu_info->tlv_bitmap, 5027 tmp_user->completion_status, 5028 ppdu_info->compltn_common_tlv, 5029 ppdu_info->ack_ba_tlv, 5030 ppdu_id, tsf_l32, 5031 tlv_type); 5032 qdf_nbuf_free(ppdu_info->nbuf); 5033 ppdu_info->nbuf = NULL; 5034 qdf_mem_free(ppdu_info); 5035 } else { 5036 break; 5037 } 5038 } 5039 } 5040 5041 /* 5042 * check if it is ack ba tlv and if it is not there in ppdu info 5043 * list then check it in sched completion ppdu list 5044 */ 5045 if 
(!ppdu_info && 5046 tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) { 5047 TAILQ_FOREACH(s_ppdu_info, 5048 &mon_pdev->sched_comp_ppdu_list, 5049 ppdu_info_list_elem) { 5050 if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) { 5051 if (s_ppdu_info->tsf_l32 > tsf_l32) 5052 time_delta = (MAX_TSF_32 - 5053 s_ppdu_info->tsf_l32) + 5054 tsf_l32; 5055 else 5056 time_delta = tsf_l32 - 5057 s_ppdu_info->tsf_l32; 5058 if (time_delta < WRAP_DROP_TSF_DELTA) { 5059 ppdu_info = s_ppdu_info; 5060 break; 5061 } 5062 } else { 5063 /* 5064 * ACK BA STATUS TLV comes sequential order 5065 * if we received ack ba status tlv for second 5066 * ppdu and first ppdu is still waiting for 5067 * ACK BA STATUS TLV. Based on fw comment 5068 * we won't receive it tlv later. So we can 5069 * set ppdu info done. 5070 */ 5071 if (s_ppdu_info) 5072 s_ppdu_info->done = 1; 5073 } 5074 } 5075 } 5076 5077 if (ppdu_info) { 5078 if (ppdu_info->tlv_bitmap & (1 << tlv_type)) { 5079 /* 5080 * if we get tlv_type that is already been processed 5081 * for ppdu, that means we got a new ppdu with same 5082 * ppdu id. Hence Flush the older ppdu 5083 * for MUMIMO and OFDMA, In a PPDU we have 5084 * multiple user with same tlv types. tlv bitmap is 5085 * used to check whether SU or MU_MIMO/OFDMA 5086 */ 5087 if (!(ppdu_info->tlv_bitmap & 5088 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) 5089 return ppdu_info; 5090 5091 ppdu_desc = (struct cdp_tx_completion_ppdu *) 5092 qdf_nbuf_data(ppdu_info->nbuf); 5093 5094 /* 5095 * apart from ACK BA STATUS TLV rest all comes in order 5096 * so if tlv type not ACK BA STATUS TLV we can deliver 5097 * ppdu_info 5098 */ 5099 if ((tlv_type == 5100 HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && 5101 ((ppdu_desc->htt_frame_type == 5102 HTT_STATS_FTYPE_SGEN_MU_BAR) || 5103 (ppdu_desc->htt_frame_type == 5104 HTT_STATS_FTYPE_SGEN_BE_MU_BAR))) 5105 return ppdu_info; 5106 5107 dp_tx_ppdu_desc_deliver(pdev, ppdu_info); 5108 } else { 5109 return ppdu_info; 5110 } 5111 } 5112 5113 /* 5114 * Flush the head ppdu descriptor if ppdu desc list reaches max 5115 * threshold 5116 */ 5117 if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 5118 ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list); 5119 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, 5120 ppdu_info, ppdu_info_list_elem); 5121 mon_pdev->list_depth--; 5122 pdev->stats.ppdu_drop++; 5123 qdf_nbuf_free(ppdu_info->nbuf); 5124 ppdu_info->nbuf = NULL; 5125 qdf_mem_free(ppdu_info); 5126 } 5127 5128 size = sizeof(struct cdp_tx_completion_ppdu) + 5129 (max_users * sizeof(struct cdp_tx_completion_ppdu_user)); 5130 5131 /* 5132 * Allocate new ppdu_info node 5133 */ 5134 ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info)); 5135 if (!ppdu_info) 5136 return NULL; 5137 5138 ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size, 5139 0, 4, TRUE); 5140 if (!ppdu_info->nbuf) { 5141 qdf_mem_free(ppdu_info); 5142 return NULL; 5143 } 5144 5145 ppdu_info->ppdu_desc = 5146 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 5147 qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size); 5148 5149 if (!qdf_nbuf_put_tail(ppdu_info->nbuf, size)) { 5150 dp_mon_err("No tailroom for HTT PPDU"); 5151 qdf_nbuf_free(ppdu_info->nbuf); 5152 ppdu_info->nbuf = NULL; 5153 ppdu_info->last_user = 0; 5154 qdf_mem_free(ppdu_info); 5155 return NULL; 5156 } 5157 5158 ppdu_info->ppdu_desc->max_users = max_users; 5159 ppdu_info->tsf_l32 = tsf_l32; 5160 /* 5161 * No lock is needed because all PPDU TLVs are processed in 5162 * same context and this list is updated in same context 5163 */ 5164 
TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info, 5165 ppdu_info_list_elem); 5166 mon_pdev->list_depth++; 5167 return ppdu_info; 5168 } 5169 5170 #define DP_HTT_PPDU_ID_MASK 0x00FFFFFF 5171 /** 5172 * dp_htt_mask_ppdu_id() - Function to mask ppdu_id 5173 * @ppdu_id: PPDU ID 5174 * 5175 * Return: Masked ppdu_id 5176 */ 5177 static inline uint32_t dp_htt_mask_ppdu_id(uint32_t ppdu_id) 5178 { 5179 return (ppdu_id & DP_HTT_PPDU_ID_MASK); 5180 } 5181 5182 /** 5183 * dp_htt_process_tlv() - Function to process each PPDU TLVs 5184 * @pdev: DP pdev handle 5185 * @htt_t2h_msg: HTT target to host message 5186 * 5187 * Return: ppdu_info per ppdu tlv structure 5188 */ 5189 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev, 5190 qdf_nbuf_t htt_t2h_msg) 5191 { 5192 uint32_t length; 5193 uint32_t ppdu_id; 5194 uint8_t tlv_type; 5195 uint32_t tlv_length, tlv_bitmap_expected; 5196 uint8_t *tlv_buf; 5197 struct ppdu_info *ppdu_info = NULL; 5198 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 5199 uint8_t max_users = CDP_MU_MAX_USERS; 5200 uint32_t tsf_l32; 5201 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 5202 5203 uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg); 5204 5205 length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word); 5206 5207 msg_word = msg_word + 1; 5208 ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word); 5209 ppdu_id = dp_htt_mask_ppdu_id(ppdu_id); 5210 5211 msg_word = msg_word + 1; 5212 tsf_l32 = (uint32_t)(*msg_word); 5213 5214 msg_word = msg_word + 2; 5215 while (length > 0) { 5216 tlv_buf = (uint8_t *)msg_word; 5217 tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word); 5218 tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word); 5219 if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG)) 5220 pdev->stats.ppdu_stats_counter[tlv_type]++; 5221 5222 if (tlv_length == 0) 5223 break; 5224 5225 tlv_length += HTT_TLV_HDR_LEN; 5226 5227 /* 5228 * Not allocating separate ppdu descriptor for MGMT Payload 5229 * TLV as this is sent as separate WDI indication and it 5230 * doesn't contain any ppdu information 5231 */ 5232 if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) { 5233 mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf; 5234 mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id; 5235 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 5236 HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET 5237 (*(msg_word + 1)); 5238 msg_word = 5239 (uint32_t *)((uint8_t *)tlv_buf + tlv_length); 5240 length -= (tlv_length); 5241 continue; 5242 } 5243 5244 /* 5245 * retrieve max_users if it's USERS_INFO, 5246 * else, it's 1 for COMPLTN_FLUSH, 5247 * else, use CDP_MU_MAX_USERS 5248 */ 5249 if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) { 5250 max_users = 5251 HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1)); 5252 } else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) { 5253 max_users = 1; 5254 } 5255 5256 ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type, 5257 tsf_l32, max_users); 5258 if (!ppdu_info) 5259 return NULL; 5260 5261 ppdu_info->ppdu_id = ppdu_id; 5262 ppdu_info->tlv_bitmap |= (1 << tlv_type); 5263 5264 dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info); 5265 5266 /* 5267 * Increment pdev level tlv count to monitor 5268 * missing TLVs 5269 */ 5270 mon_pdev->tlv_count++; 5271 ppdu_info->last_tlv_cnt = mon_pdev->tlv_count; 5272 msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length); 5273 length -= (tlv_length); 5274 } 5275 5276 if (!ppdu_info) 5277 return NULL; 5278 5279 mon_pdev->last_ppdu_id = ppdu_id; 5280 5281 tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; 
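/* For sniffer/M-copy/Tx-capture, the expected TLV bitmap for an A-MPDU PPDU is adjusted via dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() before the completeness checks below. */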
5282 5283 if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode || 5284 mon_pdev->tx_capture_enabled) { 5285 if (ppdu_info->is_ampdu) 5286 tlv_bitmap_expected = 5287 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( 5288 ppdu_info->tlv_bitmap); 5289 } 5290 5291 ppdu_desc = ppdu_info->ppdu_desc; 5292 5293 if (!ppdu_desc) 5294 return NULL; 5295 5296 if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status != 5297 HTT_PPDU_STATS_USER_STATUS_OK) { 5298 tlv_bitmap_expected = tlv_bitmap_expected & 0xFF; 5299 } 5300 5301 /* 5302 * For frame types DATA and BAR, we update stats based on MSDUs; 5303 * the successful msdu and mpdu counts are populated from the ACK BA STATUS TLV, 5304 * which comes out of order. The successful mpdu count is also populated from 5305 * the COMPLTN COMMON TLV, which comes in order. For every ppdu_info 5306 * we store the successful mpdu count from both TLVs and compare before delivering 5307 * to make sure we received the ACK BA STATUS TLV. For some self generated 5308 * frames we won't get an ack ba status tlv, so there is no need to wait for 5309 * the ack ba status tlv. 5310 */ 5311 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL && 5312 ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) { 5313 /* 5314 * most of the time a bar frame will have a duplicate ack ba 5315 * status tlv 5316 */ 5317 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR && 5318 (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)) 5319 return NULL; 5320 /* 5321 * For data frames, the compltn common tlv count should match the ack ba 5322 * status tlv count and the completion status. We check only the first user 5323 * because, for ofdma, completion is seen at the next MU BAR frame; for mimo, 5324 * completion is immediate only for the first user. 5325 */ 5326 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && 5327 (ppdu_desc->user[0].completion_status == 0 && 5328 (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))) 5329 return NULL; 5330 } 5331 5332 /* 5333 * Once all the TLVs for a given PPDU have been processed, 5334 * return the PPDU status to be delivered to the higher layer. 5335 * tlv_bitmap_expected is not available for every frame type, 5336 * but the SCHED CMD STATS TLV is the last TLV from the FW for a ppdu. 5337 * Apart from the ACK BA TLV, FW sends the other TLVs in sequential order. 5338 * The flush tlv comes separately.
5339 */ 5340 if ((ppdu_info->tlv_bitmap != 0 && 5341 (ppdu_info->tlv_bitmap & 5342 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) || 5343 (ppdu_info->tlv_bitmap & 5344 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) { 5345 ppdu_info->done = 1; 5346 return ppdu_info; 5347 } 5348 5349 return NULL; 5350 } 5351 #endif /* QCA_ENHANCED_STATS_SUPPORT */ 5352 5353 #ifdef QCA_ENHANCED_STATS_SUPPORT 5354 /** 5355 * dp_tx_ppdu_stats_feat_enable_check() - Check if feature(s) is enabled to 5356 * consume stats received from FW via HTT 5357 * @pdev: Datapath pdev handle 5358 * 5359 * Return: true if at least one feature is enabled to consume the stats, else false 5360 */ 5361 static bool dp_tx_ppdu_stats_feat_enable_check(struct dp_pdev *pdev) 5362 { 5363 struct dp_soc *soc = pdev->soc; 5364 struct dp_mon_ops *mon_ops = NULL; 5365 5366 mon_ops = dp_mon_ops_get(soc); 5367 if (mon_ops && mon_ops->mon_ppdu_stats_feat_enable_check) 5368 return mon_ops->mon_ppdu_stats_feat_enable_check(pdev); 5369 else 5370 return false; 5371 } 5372 #endif 5373 5374 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 5375 static void dp_htt_process_smu_ppdu_stats_tlv(struct dp_soc *soc, 5376 qdf_nbuf_t htt_t2h_msg) 5377 { 5378 uint32_t length; 5379 uint8_t tlv_type; 5380 uint32_t tlv_length, tlv_expected_size; 5381 uint8_t *tlv_buf; 5382 5383 uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg); 5384 5385 length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word); 5386 5387 msg_word = msg_word + 4; 5388 5389 while (length > 0) { 5390 tlv_buf = (uint8_t *)msg_word; 5391 tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word); 5392 tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word); 5393 5394 if (tlv_length == 0) 5395 break; 5396 5397 tlv_length += HTT_TLV_HDR_LEN; 5398 5399 if (tlv_type == HTT_PPDU_STATS_FOR_SMU_TLV) { 5400 tlv_expected_size = sizeof(htt_ppdu_stats_for_smu_tlv); 5401 5402 if (tlv_length >= tlv_expected_size) 5403 dp_wdi_event_handler( 5404 WDI_EVENT_PKT_CAPTURE_PPDU_STATS, 5405 soc, msg_word, HTT_INVALID_VDEV, 5406 WDI_NO_VAL, 0); 5407 } 5408 msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length); 5409 length -= (tlv_length); 5410 } 5411 } 5412 #endif 5413 5414 #if defined(WDI_EVENT_ENABLE) 5415 #ifdef QCA_ENHANCED_STATS_SUPPORT 5416 /** 5417 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW 5418 * @soc: DP SOC handle 5419 * @pdev_id: pdev id 5420 * @htt_t2h_msg: HTT message nbuf 5421 * 5422 * Return: true if the caller should free the HTT message buffer, else false 5423 */ 5424 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, 5425 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) 5426 { 5427 struct dp_pdev *pdev; 5428 struct ppdu_info *ppdu_info = NULL; 5429 bool free_buf = true; 5430 struct dp_mon_pdev *mon_pdev; 5431 5432 if (pdev_id >= MAX_PDEV_CNT) 5433 return true; 5434 5435 pdev = soc->pdev_list[pdev_id]; 5436 if (!pdev) 5437 return true; 5438 5439 mon_pdev = pdev->monitor_pdev; 5440 if (!mon_pdev) 5441 return true; 5442 5443 if (!dp_tx_ppdu_stats_feat_enable_check(pdev)) 5444 return free_buf; 5445 5446 qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock); 5447 ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg); 5448 5449 if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) { 5450 if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv 5451 (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) != 5452 QDF_STATUS_SUCCESS) 5453 free_buf = false; 5454 } 5455 5456 if (ppdu_info) 5457 dp_tx_ppdu_desc_deliver(pdev, ppdu_info); 5458 5459 mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL; 5460 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0; 5461 mon_pdev->mgmtctrl_frm_info.ppdu_id = 0; 5462 5463 qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock); 5464 5465 return free_buf;
5466 } 5467 #elif defined(WLAN_FEATURE_PKT_CAPTURE_V2) 5468 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, 5469 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) 5470 { 5471 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx)) 5472 dp_htt_process_smu_ppdu_stats_tlv(soc, htt_t2h_msg); 5473 5474 return true; 5475 } 5476 #elif (!defined(REMOVE_PKT_LOG)) 5477 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, 5478 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) 5479 { 5480 return true; 5481 } 5482 #endif/* QCA_ENHANCED_STATS_SUPPORT */ 5483 #endif 5484 5485 #if defined(WDI_EVENT_ENABLE) &&\ 5486 (defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG) || \ 5487 defined(WLAN_FEATURE_PKT_CAPTURE_V2)) 5488 bool 5489 dp_ppdu_stats_ind_handler(struct htt_soc *soc, 5490 uint32_t *msg_word, 5491 qdf_nbuf_t htt_t2h_msg) 5492 { 5493 u_int8_t pdev_id; 5494 u_int8_t target_pdev_id; 5495 bool free_buf; 5496 5497 target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word); 5498 pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, 5499 target_pdev_id); 5500 dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc, 5501 htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL, 5502 pdev_id); 5503 5504 free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id, 5505 htt_t2h_msg); 5506 5507 return free_buf; 5508 } 5509 #endif 5510 5511 void 5512 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) 5513 { 5514 pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor; 5515 } 5516 5517 bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) 5518 { 5519 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 5520 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 5521 5522 if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) || 5523 (mon_pdev->mo_data_filter & FILTER_DATA_UCAST)) 5524 return true; 5525 5526 return false; 5527 } 5528 5529 bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) 5530 { 5531 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 5532 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 5533 5534 if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) || 5535 (mon_pdev->mo_data_filter & FILTER_DATA_MCAST)) 5536 return true; 5537 5538 return false; 5539 } 5540 5541 bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) 5542 { 5543 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 5544 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 5545 5546 if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) || 5547 (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) { 5548 if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) || 5549 (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) { 5550 return true; 5551 } 5552 } 5553 5554 return false; 5555 } 5556 5557 QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc) 5558 { 5559 int target_type; 5560 struct dp_mon_soc *mon_soc = soc->monitor_soc; 5561 struct cdp_mon_ops *cdp_ops; 5562 5563 cdp_ops = dp_mon_cdp_ops_get(soc); 5564 target_type = hal_get_target_type(soc->hal_soc); 5565 switch (target_type) { 5566 case TARGET_TYPE_QCA6290: 5567 case TARGET_TYPE_QCA6390: 5568 case TARGET_TYPE_QCA6490: 5569 case TARGET_TYPE_QCA6750: 5570 case TARGET_TYPE_KIWI: 5571 case TARGET_TYPE_MANGO: 5572 case TARGET_TYPE_PEACH: 5573 case TARGET_TYPE_WCN6450: 5574 /* do nothing */ 5575 break; 5576 case TARGET_TYPE_QCA8074: 5577 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, 5578 MON_BUF_MIN_ENTRIES); 5579 break; 5580 case TARGET_TYPE_QCA8074V2: 5581 case TARGET_TYPE_QCA6018: 5582 case TARGET_TYPE_QCA9574: 5583 
wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, 5584 MON_BUF_MIN_ENTRIES); 5585 mon_soc->hw_nac_monitor_support = 1; 5586 break; 5587 case TARGET_TYPE_QCN9000: 5588 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, 5589 MON_BUF_MIN_ENTRIES); 5590 mon_soc->hw_nac_monitor_support = 1; 5591 if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) { 5592 if (cdp_ops && cdp_ops->config_full_mon_mode) 5593 cdp_ops->config_full_mon_mode((struct cdp_soc_t *)soc, 1); 5594 } 5595 break; 5596 case TARGET_TYPE_QCA5018: 5597 case TARGET_TYPE_QCN6122: 5598 case TARGET_TYPE_QCN9160: 5599 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, 5600 MON_BUF_MIN_ENTRIES); 5601 mon_soc->hw_nac_monitor_support = 1; 5602 break; 5603 case TARGET_TYPE_QCN9224: 5604 case TARGET_TYPE_QCA5332: 5605 case TARGET_TYPE_QCN6432: 5606 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, 5607 MON_BUF_MIN_ENTRIES); 5608 mon_soc->hw_nac_monitor_support = 1; 5609 mon_soc->monitor_mode_v2 = 1; 5610 break; 5611 default: 5612 dp_mon_info("%s: Unknown tgt type %d\n", __func__, target_type); 5613 qdf_assert_always(0); 5614 break; 5615 } 5616 5617 dp_mon_info("hw_nac_monitor_support = %d", 5618 mon_soc->hw_nac_monitor_support); 5619 5620 return QDF_STATUS_SUCCESS; 5621 } 5622 5623 /** 5624 * dp_mon_pdev_per_target_config() - Target specific monitor pdev configuration 5625 * @pdev: PDEV handle [Should be valid] 5626 * 5627 * Return: None 5628 */ 5629 static void dp_mon_pdev_per_target_config(struct dp_pdev *pdev) 5630 { 5631 struct dp_soc *soc = pdev->soc; 5632 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 5633 int target_type; 5634 5635 target_type = hal_get_target_type(soc->hal_soc); 5636 switch (target_type) { 5637 case TARGET_TYPE_KIWI: 5638 case TARGET_TYPE_QCN9224: 5639 case TARGET_TYPE_QCA5332: 5640 case TARGET_TYPE_QCN6432: 5641 case TARGET_TYPE_MANGO: 5642 mon_pdev->is_tlv_hdr_64_bit = true; 5643 mon_pdev->tlv_hdr_size = HAL_RX_TLV64_HDR_SIZE; 5644 break; 5645 case TARGET_TYPE_PEACH: 5646 default: 5647 mon_pdev->is_tlv_hdr_64_bit = false; 5648 mon_pdev->tlv_hdr_size = HAL_RX_TLV32_HDR_SIZE; 5649 break; 5650 } 5651 } 5652 5653 static 5654 QDF_STATUS dp_mon_rings_alloc(struct dp_pdev *pdev) 5655 { 5656 QDF_STATUS status = QDF_STATUS_SUCCESS; 5657 struct dp_mon_ops *mon_ops; 5658 5659 mon_ops = dp_mon_ops_get(pdev->soc); 5660 if (!mon_ops) { 5661 dp_mon_err("mon_ops is NULL"); 5662 return QDF_STATUS_E_FAILURE; 5663 } 5664 5665 if (mon_ops->mon_rings_alloc[0]) { 5666 status = mon_ops->mon_rings_alloc[0](pdev); 5667 if (QDF_IS_STATUS_ERROR(status)) { 5668 dp_mon_err("error: %d", status); 5669 goto error; 5670 } 5671 } 5672 5673 if (mon_ops->mon_rings_alloc[1]) { 5674 status = mon_ops->mon_rings_alloc[1](pdev); 5675 if (QDF_IS_STATUS_ERROR(status)) { 5676 dp_mon_err("error: %d", status); 5677 goto error; 5678 } 5679 } 5680 5681 error: 5682 return status; 5683 } 5684 5685 static 5686 void dp_mon_rings_free(struct dp_pdev *pdev) 5687 { 5688 struct dp_mon_ops *mon_ops; 5689 5690 mon_ops = dp_mon_ops_get(pdev->soc); 5691 if (!mon_ops) { 5692 dp_mon_err("mon_ops is NULL"); 5693 return; 5694 } 5695 5696 if (mon_ops->mon_rings_free[0]) 5697 mon_ops->mon_rings_free[0](pdev); 5698 5699 if (mon_ops->mon_rings_free[1]) 5700 mon_ops->mon_rings_free[1](pdev); 5701 } 5702 5703 static 5704 QDF_STATUS dp_mon_rings_init(struct dp_pdev *pdev) 5705 { 5706 QDF_STATUS status = QDF_STATUS_SUCCESS; 5707 struct dp_mon_ops *mon_ops; 5708 5709 mon_ops = dp_mon_ops_get(pdev->soc); 5710 if (!mon_ops) { 5711 
dp_mon_err("mon_ops is NULL"); 5712 return QDF_STATUS_E_FAILURE; 5713 } 5714 5715 if (mon_ops->mon_rings_init[0]) { 5716 status = mon_ops->mon_rings_init[0](pdev); 5717 if (QDF_IS_STATUS_ERROR(status)) { 5718 dp_mon_err("error: %d", status); 5719 goto error; 5720 } 5721 } 5722 5723 if (mon_ops->mon_rings_init[1]) { 5724 status = mon_ops->mon_rings_init[1](pdev); 5725 if (QDF_IS_STATUS_ERROR(status)) { 5726 dp_mon_err("error: %d", status); 5727 goto error; 5728 } 5729 } 5730 5731 error: 5732 return status; 5733 } 5734 5735 static 5736 void dp_mon_rings_deinit(struct dp_pdev *pdev) 5737 { 5738 struct dp_mon_ops *mon_ops; 5739 5740 mon_ops = dp_mon_ops_get(pdev->soc); 5741 if (!mon_ops) { 5742 dp_mon_err("mon_ops is NULL"); 5743 return; 5744 } 5745 5746 if (mon_ops->mon_rings_deinit[0]) 5747 mon_ops->mon_rings_deinit[0](pdev); 5748 5749 if (mon_ops->mon_rings_deinit[1]) 5750 mon_ops->mon_rings_deinit[1](pdev); 5751 } 5752 5753 QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev) 5754 { 5755 struct dp_soc *soc; 5756 struct dp_mon_pdev *mon_pdev; 5757 struct dp_mon_ops *mon_ops; 5758 qdf_size_t mon_pdev_context_size; 5759 5760 if (!pdev) { 5761 dp_mon_err("pdev is NULL"); 5762 goto fail0; 5763 } 5764 5765 soc = pdev->soc; 5766 5767 mon_pdev_context_size = soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_PDEV); 5768 mon_pdev = dp_context_alloc_mem(soc, DP_MON_PDEV_TYPE, mon_pdev_context_size); 5769 if (!mon_pdev) { 5770 dp_mon_err("%pK: MONITOR pdev allocation failed", pdev); 5771 goto fail0; 5772 } 5773 5774 pdev->monitor_pdev = mon_pdev; 5775 mon_ops = dp_mon_ops_get(pdev->soc); 5776 if (!mon_ops) { 5777 dp_mon_err("%pK: Invalid monitor ops", pdev); 5778 goto fail1; 5779 } 5780 5781 if (mon_ops->mon_pdev_alloc) { 5782 if (mon_ops->mon_pdev_alloc(pdev)) { 5783 dp_mon_err("%pK: MONITOR pdev alloc failed", pdev); 5784 goto fail1; 5785 } 5786 } 5787 5788 if (dp_mon_rings_alloc(pdev)) { 5789 dp_mon_err("%pK: MONITOR rings setup failed", pdev); 5790 goto fail2; 5791 } 5792 5793 /* Rx monitor mode specific init */ 5794 if (mon_ops->rx_mon_desc_pool_alloc) { 5795 if (mon_ops->rx_mon_desc_pool_alloc(pdev)) { 5796 dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev); 5797 goto fail3; 5798 } 5799 } 5800 5801 if (mon_ops->mon_rx_ppdu_info_cache_create) { 5802 if (mon_ops->mon_rx_ppdu_info_cache_create(pdev)) { 5803 dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev); 5804 goto fail4; 5805 } 5806 } 5807 pdev->monitor_pdev = mon_pdev; 5808 dp_mon_pdev_per_target_config(pdev); 5809 5810 return QDF_STATUS_SUCCESS; 5811 fail4: 5812 if (mon_ops->rx_mon_desc_pool_free) 5813 mon_ops->rx_mon_desc_pool_free(pdev); 5814 fail3: 5815 dp_mon_rings_free(pdev); 5816 fail2: 5817 if (mon_ops->mon_pdev_free) 5818 mon_ops->mon_pdev_free(pdev); 5819 fail1: 5820 pdev->monitor_pdev = NULL; 5821 dp_context_free_mem(soc, DP_MON_PDEV_TYPE, mon_pdev); 5822 fail0: 5823 return QDF_STATUS_E_NOMEM; 5824 } 5825 5826 QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev) 5827 { 5828 struct dp_mon_pdev *mon_pdev; 5829 struct dp_mon_ops *mon_ops = NULL; 5830 5831 if (!pdev) { 5832 dp_mon_err("pdev is NULL"); 5833 return QDF_STATUS_E_FAILURE; 5834 } 5835 5836 mon_pdev = pdev->monitor_pdev; 5837 if (!mon_pdev) { 5838 dp_mon_err("Monitor pdev is NULL"); 5839 return QDF_STATUS_E_FAILURE; 5840 } 5841 5842 mon_ops = dp_mon_ops_get(pdev->soc); 5843 if (!mon_ops) { 5844 dp_mon_err("Monitor ops is NULL"); 5845 return QDF_STATUS_E_FAILURE; 5846 } 5847 5848 if (mon_ops->mon_rx_ppdu_info_cache_destroy) 5849 
mon_ops->mon_rx_ppdu_info_cache_destroy(pdev); 5850 if (mon_ops->rx_mon_desc_pool_free) 5851 mon_ops->rx_mon_desc_pool_free(pdev); 5852 dp_mon_rings_free(pdev); 5853 if (mon_ops->mon_pdev_free) 5854 mon_ops->mon_pdev_free(pdev); 5855 5856 dp_context_free_mem(pdev->soc, DP_MON_PDEV_TYPE, mon_pdev); 5857 pdev->monitor_pdev = NULL; 5858 return QDF_STATUS_SUCCESS; 5859 } 5860 5861 static void dp_mon_pdev_filter_init(struct dp_mon_pdev *mon_pdev) 5862 { 5863 if (!mon_pdev) 5864 return; 5865 5866 mon_pdev->mon_filter_mode = MON_FILTER_ALL; 5867 mon_pdev->fp_mgmt_filter = FILTER_MGMT_ALL; 5868 mon_pdev->fp_ctrl_filter = FILTER_CTRL_ALL; 5869 mon_pdev->fp_data_filter = FILTER_DATA_ALL; 5870 mon_pdev->mo_mgmt_filter = FILTER_MGMT_ALL; 5871 mon_pdev->mo_ctrl_filter = FILTER_CTRL_ALL; 5872 mon_pdev->mo_data_filter = FILTER_DATA_ALL; 5873 } 5874 5875 #ifdef WLAN_TX_PKT_CAPTURE_ENH 5876 void dp_mon_register_tx_pkt_enh_ops_1_0(struct dp_mon_ops *mon_ops) 5877 { 5878 mon_ops->mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_1_0; 5879 mon_ops->mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_1_0; 5880 mon_ops->mon_peer_tx_capture_filter_check = 5881 dp_peer_tx_capture_filter_check_1_0; 5882 } 5883 #elif defined(WLAN_TX_PKT_CAPTURE_ENH_BE) && defined(WLAN_FEATURE_LOCAL_PKT_CAPTURE) 5884 void dp_mon_register_tx_pkt_enh_ops_1_0(struct dp_mon_ops *mon_ops) 5885 { 5886 mon_ops->mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_2_0; 5887 mon_ops->mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_2_0; 5888 mon_ops->mon_peer_tx_capture_filter_check = NULL; 5889 } 5890 #elif (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH)) 5891 void dp_mon_register_tx_pkt_enh_ops_1_0(struct dp_mon_ops *mon_ops) 5892 { 5893 mon_ops->mon_tx_ppdu_stats_attach = NULL; 5894 mon_ops->mon_tx_ppdu_stats_detach = NULL; 5895 mon_ops->mon_peer_tx_capture_filter_check = NULL; 5896 } 5897 #endif 5898 5899 #ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE 5900 #if !defined(DISABLE_MON_CONFIG) 5901 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops) 5902 { 5903 mon_ops->mon_pdev_htt_srng_setup[0] = dp_mon_htt_srng_setup_1_0; 5904 mon_ops->mon_pdev_htt_srng_setup[1] = dp_mon_pdev_htt_srng_setup_2_0; 5905 mon_ops->mon_soc_htt_srng_setup = dp_mon_soc_htt_srng_setup_2_0; 5906 } 5907 #else 5908 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops) 5909 { 5910 } 5911 #endif 5912 5913 void dp_mon_register_lpc_ops_1_0(struct dp_mon_ops *mon_ops) 5914 { 5915 mon_ops->mon_soc_attach[0] = NULL; 5916 mon_ops->mon_soc_detach[0] = NULL; 5917 mon_ops->mon_soc_init[0] = NULL; 5918 mon_ops->mon_soc_deinit[0] = NULL; 5919 mon_ops->mon_soc_attach[1] = dp_mon_soc_attach_2_0; 5920 mon_ops->mon_soc_detach[1] = dp_mon_soc_detach_2_0; 5921 mon_ops->mon_soc_init[1] = dp_mon_soc_init_2_0; 5922 mon_ops->mon_soc_deinit[1] = dp_mon_soc_deinit_2_0; 5923 5924 dp_mon_config_register_ops(mon_ops); 5925 5926 mon_ops->mon_rings_alloc[0] = dp_mon_rings_alloc_1_0; 5927 mon_ops->mon_rings_free[0] = dp_mon_rings_free_1_0; 5928 mon_ops->mon_rings_init[0] = dp_mon_rings_init_1_0; 5929 mon_ops->mon_rings_deinit[0] = dp_mon_rings_deinit_1_0; 5930 mon_ops->mon_rings_alloc[1] = dp_pdev_mon_rings_alloc_2_0; 5931 mon_ops->mon_rings_free[1] = dp_pdev_mon_rings_free_2_0; 5932 mon_ops->mon_rings_init[1] = dp_pdev_mon_rings_init_2_0; 5933 mon_ops->mon_rings_deinit[1] = dp_pdev_mon_rings_deinit_2_0; 5934 5935 mon_ops->mon_filter_setup_tx_mon_mode = 5936 dp_mon_filter_setup_local_pkt_capture_tx; 5937 mon_ops->mon_filter_reset_tx_mon_mode = 
5938 dp_mon_filter_reset_local_pkt_capture_tx; 5939 mon_ops->tx_mon_filter_update = dp_tx_mon_filter_update_2_0; 5940 5941 mon_ops->rx_hdr_length_set = dp_rx_mon_hdr_length_set; 5942 dp_mon_register_tx_pkt_enh_ops_1_0(mon_ops); 5943 } 5944 #else 5945 #if !defined(DISABLE_MON_CONFIG) 5946 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops) 5947 { 5948 mon_ops->mon_pdev_htt_srng_setup[0] = dp_mon_htt_srng_setup_1_0; 5949 mon_ops->mon_pdev_htt_srng_setup[1] = NULL; 5950 mon_ops->mon_soc_htt_srng_setup = NULL; 5951 } 5952 #else 5953 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops) 5954 { 5955 } 5956 #endif 5957 5958 void dp_mon_register_lpc_ops_1_0(struct dp_mon_ops *mon_ops) 5959 { 5960 mon_ops->mon_soc_attach[0] = NULL; 5961 mon_ops->mon_soc_detach[0] = NULL; 5962 mon_ops->mon_soc_init[0] = NULL; 5963 mon_ops->mon_soc_deinit[0] = NULL; 5964 mon_ops->mon_soc_attach[1] = NULL; 5965 mon_ops->mon_soc_detach[1] = NULL; 5966 mon_ops->mon_soc_init[1] = NULL; 5967 mon_ops->mon_soc_deinit[1] = NULL; 5968 5969 dp_mon_config_register_ops(mon_ops); 5970 5971 mon_ops->mon_rings_alloc[0] = dp_mon_rings_alloc_1_0; 5972 mon_ops->mon_rings_free[0] = dp_mon_rings_free_1_0; 5973 mon_ops->mon_rings_init[0] = dp_mon_rings_init_1_0; 5974 mon_ops->mon_rings_deinit[0] = dp_mon_rings_deinit_1_0; 5975 mon_ops->mon_rings_alloc[1] = NULL; 5976 mon_ops->mon_rings_free[1] = NULL; 5977 mon_ops->mon_rings_init[1] = NULL; 5978 mon_ops->mon_rings_deinit[1] = NULL; 5979 5980 mon_ops->mon_filter_setup_tx_mon_mode = NULL; 5981 mon_ops->mon_filter_reset_tx_mon_mode = NULL; 5982 mon_ops->tx_mon_filter_update = NULL; 5983 5984 mon_ops->rx_hdr_length_set = NULL; 5985 dp_mon_register_tx_pkt_enh_ops_1_0(mon_ops); 5986 } 5987 #endif 5988 5989 QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev) 5990 { 5991 struct dp_mon_pdev *mon_pdev; 5992 struct dp_mon_ops *mon_ops = NULL; 5993 5994 if (!pdev) { 5995 dp_mon_err("pdev is NULL"); 5996 return QDF_STATUS_E_FAILURE; 5997 } 5998 5999 mon_pdev = pdev->monitor_pdev; 6000 6001 mon_pdev->invalid_mon_peer = qdf_mem_malloc(sizeof(struct dp_mon_peer)); 6002 if (!mon_pdev->invalid_mon_peer) { 6003 dp_mon_err("%pK: Memory allocation failed for invalid " 6004 "monitor peer", pdev); 6005 return QDF_STATUS_E_NOMEM; 6006 } 6007 6008 mon_ops = dp_mon_ops_get(pdev->soc); 6009 if (!mon_ops) { 6010 dp_mon_err("Monitor ops is NULL"); 6011 goto fail0; 6012 } 6013 6014 mon_pdev->filter = dp_mon_filter_alloc(mon_pdev); 6015 if (!mon_pdev->filter) { 6016 dp_mon_err("%pK: Memory allocation failed for monitor filter", 6017 pdev); 6018 goto fail0; 6019 } 6020 6021 if (mon_ops->tx_mon_filter_alloc) { 6022 if (mon_ops->tx_mon_filter_alloc(pdev)) { 6023 dp_mon_err("%pK: Memory allocation failed for tx monitor " 6024 "filter", pdev); 6025 goto fail1; 6026 } 6027 } 6028 6029 qdf_spinlock_create(&mon_pdev->ppdu_stats_lock); 6030 qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex); 6031 mon_pdev->monitor_configured = false; 6032 mon_pdev->mon_chan_band = REG_BAND_UNKNOWN; 6033 6034 TAILQ_INIT(&mon_pdev->neighbour_peers_list); 6035 mon_pdev->neighbour_peers_added = false; 6036 mon_pdev->monitor_configured = false; 6037 6038 dp_mon_pdev_filter_init(mon_pdev); 6039 /* 6040 * initialize ppdu tlv list 6041 */ 6042 TAILQ_INIT(&mon_pdev->ppdu_info_list); 6043 TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list); 6044 6045 mon_pdev->list_depth = 0; 6046 mon_pdev->tlv_count = 0; 6047 /* initlialize cal client timer */ 6048 dp_cal_client_attach(&mon_pdev->cal_client_ctx, 6049 
dp_pdev_to_cdp_pdev(pdev), 6050 pdev->soc->osdev, 6051 &dp_iterate_update_peer_list); 6052 if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS) 6053 goto fail2; 6054 6055 if (mon_ops->mon_lite_mon_alloc) { 6056 if (mon_ops->mon_lite_mon_alloc(pdev) != QDF_STATUS_SUCCESS) { 6057 dp_mon_err("%pK: lite mon alloc failed", pdev); 6058 goto fail3; 6059 } 6060 } 6061 6062 if (dp_mon_rings_init(pdev)) { 6063 dp_mon_err("%pK: MONITOR rings setup failed", pdev); 6064 goto fail4; 6065 } 6066 6067 /* initialize sw monitor rx descriptors */ 6068 if (mon_ops->rx_mon_desc_pool_init) 6069 mon_ops->rx_mon_desc_pool_init(pdev); 6070 6071 /* allocate buffers and replenish the monitor RxDMA ring */ 6072 if (mon_ops->rx_mon_buffers_alloc) { 6073 if (mon_ops->rx_mon_buffers_alloc(pdev)) { 6074 dp_mon_err("%pK: rx mon buffers alloc failed", pdev); 6075 goto fail5; 6076 } 6077 } 6078 6079 /* attach monitor function */ 6080 dp_monitor_tx_ppdu_stats_attach(pdev); 6081 6082 /* mon pdev extended init */ 6083 if (mon_ops->mon_pdev_ext_init) 6084 mon_ops->mon_pdev_ext_init(pdev); 6085 6086 if (mon_ops->mon_rx_pdev_tlv_logger_init) 6087 mon_ops->mon_rx_pdev_tlv_logger_init(pdev); 6088 6089 mon_pdev->is_dp_mon_pdev_initialized = true; 6090 dp_mon_set_local_pkt_capture_running(mon_pdev, false); 6091 6092 return QDF_STATUS_SUCCESS; 6093 6094 fail5: 6095 if (mon_ops->rx_mon_desc_pool_deinit) 6096 mon_ops->rx_mon_desc_pool_deinit(pdev); 6097 6098 dp_mon_rings_deinit(pdev); 6099 fail4: 6100 if (mon_ops->mon_lite_mon_dealloc) 6101 mon_ops->mon_lite_mon_dealloc(pdev); 6102 fail3: 6103 dp_htt_ppdu_stats_detach(pdev); 6104 fail2: 6105 qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex); 6106 qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock); 6107 if (mon_ops->tx_mon_filter_dealloc) 6108 mon_ops->tx_mon_filter_dealloc(pdev); 6109 fail1: 6110 dp_mon_filter_dealloc(mon_pdev); 6111 fail0: 6112 qdf_mem_free(mon_pdev->invalid_mon_peer); 6113 return QDF_STATUS_E_FAILURE; 6114 } 6115 6116 QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev) 6117 { 6118 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 6119 struct dp_mon_ops *mon_ops = NULL; 6120 6121 mon_ops = dp_mon_ops_get(pdev->soc); 6122 if (!mon_ops) { 6123 dp_mon_err("Monitor ops is NULL"); 6124 return QDF_STATUS_E_FAILURE; 6125 } 6126 6127 if (!mon_pdev->is_dp_mon_pdev_initialized) 6128 return QDF_STATUS_SUCCESS; 6129 6130 dp_mon_filters_reset(pdev); 6131 6132 /* mon pdev extended deinit */ 6133 if (mon_ops->mon_pdev_ext_deinit) 6134 mon_ops->mon_pdev_ext_deinit(pdev); 6135 6136 if (mon_ops->mon_rx_pdev_tlv_logger_deinit) 6137 mon_ops->mon_rx_pdev_tlv_logger_deinit(pdev); 6138 6139 /* detach monitor function */ 6140 dp_monitor_tx_ppdu_stats_detach(pdev); 6141 6142 if (mon_ops->mon_lite_mon_dealloc) 6143 mon_ops->mon_lite_mon_dealloc(pdev); 6144 6145 if (mon_ops->rx_mon_buffers_free) 6146 mon_ops->rx_mon_buffers_free(pdev); 6147 if (mon_ops->rx_mon_desc_pool_deinit) 6148 mon_ops->rx_mon_desc_pool_deinit(pdev); 6149 dp_mon_rings_deinit(pdev); 6150 dp_cal_client_detach(&mon_pdev->cal_client_ctx); 6151 dp_htt_ppdu_stats_detach(pdev); 6152 qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock); 6153 dp_neighbour_peers_detach(pdev); 6154 dp_pktlogmod_exit(pdev); 6155 if (mon_ops->tx_mon_filter_dealloc) 6156 mon_ops->tx_mon_filter_dealloc(pdev); 6157 if (mon_pdev->filter) 6158 dp_mon_filter_dealloc(mon_pdev); 6159 if (mon_pdev->invalid_mon_peer) 6160 qdf_mem_free(mon_pdev->invalid_mon_peer); 6161 mon_pdev->is_dp_mon_pdev_initialized = false; 6162 
dp_mon_set_local_pkt_capture_running(mon_pdev, false); 6163 6164 return QDF_STATUS_SUCCESS; 6165 } 6166 6167 QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev) 6168 { 6169 struct dp_mon_vdev *mon_vdev; 6170 struct dp_pdev *pdev = vdev->pdev; 6171 6172 mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev)); 6173 if (!mon_vdev) { 6174 dp_mon_err("%pK: Monitor vdev allocation failed", vdev); 6175 return QDF_STATUS_E_NOMEM; 6176 } 6177 6178 if (pdev && pdev->monitor_pdev && 6179 pdev->monitor_pdev->scan_spcl_vap_configured) 6180 dp_scan_spcl_vap_stats_attach(mon_vdev); 6181 6182 vdev->monitor_vdev = mon_vdev; 6183 6184 return QDF_STATUS_SUCCESS; 6185 } 6186 6187 QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev) 6188 { 6189 struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev; 6190 struct dp_pdev *pdev = vdev->pdev; 6191 struct dp_mon_ops *mon_ops = dp_mon_ops_get(pdev->soc); 6192 6193 if (!mon_ops) 6194 return QDF_STATUS_E_FAILURE; 6195 6196 if (!mon_vdev) 6197 return QDF_STATUS_E_FAILURE; 6198 6199 if (pdev->monitor_pdev->scan_spcl_vap_configured) 6200 dp_scan_spcl_vap_stats_detach(mon_vdev); 6201 6202 qdf_mem_free(mon_vdev); 6203 vdev->monitor_vdev = NULL; 6204 /* set mvdev to NULL only if detach is called for monitor/special vap 6205 */ 6206 if (pdev->monitor_pdev->mvdev == vdev) 6207 pdev->monitor_pdev->mvdev = NULL; 6208 6209 if (mon_ops->mon_lite_mon_vdev_delete) 6210 mon_ops->mon_lite_mon_vdev_delete(pdev, vdev); 6211 6212 return QDF_STATUS_SUCCESS; 6213 } 6214 6215 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE 6216 /** 6217 * dp_mon_peer_attach_notify() - Raise WDI event for peer create 6218 * @peer: DP Peer handle 6219 * 6220 * Return: none 6221 */ 6222 static inline 6223 void dp_mon_peer_attach_notify(struct dp_peer *peer) 6224 { 6225 struct dp_mon_peer *mon_peer = peer->monitor_peer; 6226 struct dp_pdev *pdev; 6227 struct dp_soc *soc; 6228 struct cdp_peer_cookie peer_cookie; 6229 6230 pdev = peer->vdev->pdev; 6231 soc = pdev->soc; 6232 6233 qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw, 6234 QDF_MAC_ADDR_SIZE); 6235 6236 peer_cookie.ctx = NULL; 6237 peer_cookie.pdev_id = pdev->pdev_id; 6238 peer_cookie.cookie = pdev->next_peer_cookie++; 6239 6240 dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, soc, 6241 (void *)&peer_cookie, 6242 peer->peer_id, WDI_NO_VAL, pdev->pdev_id); 6243 6244 if (soc->peerstats_enabled) { 6245 if (!peer_cookie.ctx) { 6246 pdev->next_peer_cookie--; 6247 qdf_err("Failed to initialize peer rate stats"); 6248 mon_peer->peerstats_ctx = NULL; 6249 } else { 6250 mon_peer->peerstats_ctx = 6251 (struct cdp_peer_rate_stats_ctx *) 6252 peer_cookie.ctx; 6253 } 6254 } 6255 } 6256 6257 /** 6258 * dp_mon_peer_detach_notify() - Raise WDI event for peer destroy 6259 * @peer: DP Peer handle 6260 * 6261 * Return: none 6262 */ 6263 static inline 6264 void dp_mon_peer_detach_notify(struct dp_peer *peer) 6265 { 6266 struct dp_mon_peer *mon_peer = peer->monitor_peer; 6267 struct dp_pdev *pdev; 6268 struct dp_soc *soc; 6269 struct cdp_peer_cookie peer_cookie; 6270 6271 pdev = peer->vdev->pdev; 6272 soc = pdev->soc; 6273 /* send peer destroy event to upper layer */ 6274 qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw, 6275 QDF_MAC_ADDR_SIZE); 6276 peer_cookie.ctx = NULL; 6277 peer_cookie.ctx = (struct cdp_stats_cookie *)mon_peer->peerstats_ctx; 6278 6279 dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY, 6280 soc, 6281 (void *)&peer_cookie, 6282 peer->peer_id, 6283 WDI_NO_VAL, 6284 pdev->pdev_id); 6285 6286 mon_peer->peerstats_ctx = NULL; 6287 } 6288 #else 6289 
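/* FEATURE_PERPKT_INFO / WDI peer events are compiled out: the stub notifiers below only clear the peer stats context. */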
static inline 6290 void dp_mon_peer_attach_notify(struct dp_peer *peer) 6291 { 6292 peer->monitor_peer->peerstats_ctx = NULL; 6293 } 6294 6295 static inline 6296 void dp_mon_peer_detach_notify(struct dp_peer *peer) 6297 { 6298 peer->monitor_peer->peerstats_ctx = NULL; 6299 } 6300 #endif 6301 6302 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO) 6303 QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer) 6304 { 6305 struct dp_mon_peer *mon_peer; 6306 struct dp_pdev *pdev; 6307 6308 mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer)); 6309 if (!mon_peer) { 6310 dp_mon_err("%pK: MONITOR peer allocation failed", peer); 6311 return QDF_STATUS_E_NOMEM; 6312 } 6313 6314 peer->monitor_peer = mon_peer; 6315 pdev = peer->vdev->pdev; 6316 /* 6317 * In tx_monitor mode, filter may be set for unassociated peer 6318 * when unassociated peer get associated peer need to 6319 * update tx_cap_enabled flag to support peer filter. 6320 */ 6321 dp_monitor_peer_tx_capture_filter_check(pdev, peer); 6322 6323 DP_STATS_INIT(mon_peer); 6324 DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR); 6325 6326 dp_mon_peer_attach_notify(peer); 6327 6328 return QDF_STATUS_SUCCESS; 6329 } 6330 #endif 6331 6332 QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer) 6333 { 6334 struct dp_mon_peer *mon_peer = peer->monitor_peer; 6335 6336 if (!mon_peer) 6337 return QDF_STATUS_SUCCESS; 6338 6339 dp_mon_peer_detach_notify(peer); 6340 6341 qdf_mem_free(mon_peer); 6342 peer->monitor_peer = NULL; 6343 6344 return QDF_STATUS_SUCCESS; 6345 } 6346 6347 #ifndef DISABLE_MON_CONFIG 6348 void dp_mon_register_intr_ops(struct dp_soc *soc) 6349 { 6350 struct dp_mon_ops *mon_ops = NULL; 6351 6352 mon_ops = dp_mon_ops_get(soc); 6353 if (!mon_ops) { 6354 dp_mon_err("Monitor ops is NULL"); 6355 return; 6356 } 6357 if (mon_ops->mon_register_intr_ops) 6358 mon_ops->mon_register_intr_ops(soc); 6359 } 6360 #endif 6361 6362 struct cdp_peer_rate_stats_ctx *dp_mon_peer_get_peerstats_ctx(struct 6363 dp_peer *peer) 6364 { 6365 struct dp_mon_peer *mon_peer = peer->monitor_peer; 6366 6367 if (mon_peer) 6368 return mon_peer->peerstats_ctx; 6369 else 6370 return NULL; 6371 } 6372 6373 #ifdef QCA_ENHANCED_STATS_SUPPORT 6374 void dp_mon_peer_reset_stats(struct dp_peer *peer) 6375 { 6376 struct dp_mon_peer *mon_peer = NULL; 6377 6378 mon_peer = peer->monitor_peer; 6379 if (!mon_peer) 6380 return; 6381 6382 DP_STATS_CLR(mon_peer); 6383 DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR); 6384 } 6385 6386 void dp_mon_peer_get_stats(struct dp_peer *peer, void *arg, 6387 enum cdp_stat_update_type type) 6388 { 6389 struct dp_mon_peer *mon_peer = peer->monitor_peer; 6390 struct dp_mon_peer_stats *mon_peer_stats; 6391 6392 if (!mon_peer || !arg) 6393 return; 6394 6395 mon_peer_stats = &mon_peer->stats; 6396 6397 switch (type) { 6398 case UPDATE_PEER_STATS: 6399 { 6400 struct cdp_peer_stats *peer_stats = 6401 (struct cdp_peer_stats *)arg; 6402 DP_UPDATE_MON_STATS(peer_stats, mon_peer_stats); 6403 break; 6404 } 6405 case UPDATE_VDEV_STATS_MLD: 6406 { 6407 struct cdp_vdev_stats *vdev_stats = 6408 (struct cdp_vdev_stats *)arg; 6409 DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats); 6410 break; 6411 } 6412 case UPDATE_VDEV_STATS: 6413 { 6414 struct dp_vdev_stats *vdev_stats = 6415 (struct dp_vdev_stats *)arg; 6416 DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats); 6417 break; 6418 } 6419 default: 6420 dp_mon_err("Invalid stats_update_type: %u", type); 6421 } 6422 } 6423 6424 void dp_mon_invalid_peer_update_pdev_stats(struct dp_pdev *pdev) 6425 { 6426 
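/* Fold the stats accumulated on the pdev's invalid (unmapped) monitor peer into the pdev-level stats */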
struct dp_mon_peer *mon_peer; 6427 struct dp_mon_peer_stats *mon_peer_stats; 6428 struct cdp_pdev_stats *pdev_stats; 6429 6430 if (!pdev || !pdev->monitor_pdev) 6431 return; 6432 6433 mon_peer = pdev->monitor_pdev->invalid_mon_peer; 6434 if (!mon_peer) 6435 return; 6436 6437 mon_peer_stats = &mon_peer->stats; 6438 pdev_stats = &pdev->stats; 6439 DP_UPDATE_MON_STATS(pdev_stats, mon_peer_stats); 6440 } 6441 6442 QDF_STATUS 6443 dp_mon_peer_get_stats_param(struct dp_peer *peer, enum cdp_peer_stats_type type, 6444 cdp_peer_stats_param_t *buf) 6445 { 6446 QDF_STATUS ret = QDF_STATUS_SUCCESS; 6447 struct dp_mon_peer *mon_peer; 6448 6449 mon_peer = peer->monitor_peer; 6450 if (!mon_peer) 6451 return QDF_STATUS_E_FAILURE; 6452 6453 switch (type) { 6454 case cdp_peer_tx_rate: 6455 buf->tx_rate = mon_peer->stats.tx.tx_rate; 6456 break; 6457 case cdp_peer_tx_last_tx_rate: 6458 buf->last_tx_rate = mon_peer->stats.tx.last_tx_rate; 6459 break; 6460 case cdp_peer_tx_ratecode: 6461 buf->tx_ratecode = mon_peer->stats.tx.tx_ratecode; 6462 break; 6463 case cdp_peer_rx_rate: 6464 buf->rx_rate = mon_peer->stats.rx.rx_rate; 6465 break; 6466 case cdp_peer_rx_last_rx_rate: 6467 buf->last_rx_rate = mon_peer->stats.rx.last_rx_rate; 6468 break; 6469 case cdp_peer_rx_ratecode: 6470 buf->rx_ratecode = mon_peer->stats.rx.rx_ratecode; 6471 break; 6472 case cdp_peer_rx_avg_snr: 6473 buf->rx_avg_snr = mon_peer->stats.rx.avg_snr; 6474 break; 6475 case cdp_peer_rx_snr: 6476 buf->rx_snr = mon_peer->stats.rx.snr; 6477 break; 6478 default: 6479 dp_err("Invalid stats type: %u requested", type); 6480 ret = QDF_STATUS_E_FAILURE; 6481 } 6482 6483 return ret; 6484 } 6485 #endif 6486 6487 void dp_mon_ops_register(struct dp_soc *soc) 6488 { 6489 struct dp_mon_soc *mon_soc = soc->monitor_soc; 6490 uint32_t target_type; 6491 6492 target_type = hal_get_target_type(soc->hal_soc); 6493 switch (target_type) { 6494 case TARGET_TYPE_QCA6290: 6495 case TARGET_TYPE_QCA6390: 6496 case TARGET_TYPE_QCA6490: 6497 case TARGET_TYPE_QCA6750: 6498 case TARGET_TYPE_KIWI: 6499 case TARGET_TYPE_MANGO: 6500 case TARGET_TYPE_PEACH: 6501 case TARGET_TYPE_QCA8074: 6502 case TARGET_TYPE_QCA8074V2: 6503 case TARGET_TYPE_QCA6018: 6504 case TARGET_TYPE_QCA9574: 6505 case TARGET_TYPE_QCN9160: 6506 case TARGET_TYPE_QCN9000: 6507 case TARGET_TYPE_QCA5018: 6508 case TARGET_TYPE_QCN6122: 6509 case TARGET_TYPE_WCN6450: 6510 dp_mon_ops_register_1_0(mon_soc); 6511 dp_mon_ops_register_cmn_2_0(mon_soc); 6512 dp_mon_ops_register_tx_2_0(mon_soc); 6513 break; 6514 case TARGET_TYPE_QCN9224: 6515 case TARGET_TYPE_QCA5332: 6516 case TARGET_TYPE_QCN6432: 6517 #if defined(WLAN_PKT_CAPTURE_TX_2_0) || defined(WLAN_PKT_CAPTURE_RX_2_0) 6518 dp_mon_ops_register_2_0(mon_soc); 6519 #endif 6520 break; 6521 default: 6522 dp_mon_err("%s: Unknown tgt type %d", __func__, target_type); 6523 qdf_assert_always(0); 6524 break; 6525 } 6526 } 6527 6528 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT 6529 void dp_mon_ops_free(struct dp_soc *soc) 6530 { 6531 struct cdp_ops *ops = soc->cdp_soc.ops; 6532 struct cdp_mon_ops *cdp_mon_ops = ops->mon_ops; 6533 struct dp_mon_soc *mon_soc = soc->monitor_soc; 6534 struct dp_mon_ops *mon_ops = mon_soc->mon_ops; 6535 6536 if (cdp_mon_ops) 6537 qdf_mem_free(cdp_mon_ops); 6538 6539 if (mon_ops) 6540 qdf_mem_free(mon_ops); 6541 } 6542 #else 6543 void dp_mon_ops_free(struct dp_soc *soc) 6544 { 6545 } 6546 #endif 6547 6548 void dp_mon_cdp_ops_register(struct dp_soc *soc) 6549 { 6550 struct cdp_ops *ops = soc->cdp_soc.ops; 6551 uint32_t target_type; 6552 6553 if (!ops) { 
6554 dp_mon_err("cdp_ops is NULL"); 6555 return; 6556 } 6557 6558 target_type = hal_get_target_type(soc->hal_soc); 6559 switch (target_type) { 6560 case TARGET_TYPE_QCA6290: 6561 case TARGET_TYPE_QCA6390: 6562 case TARGET_TYPE_QCA6490: 6563 case TARGET_TYPE_QCA6750: 6564 case TARGET_TYPE_KIWI: 6565 case TARGET_TYPE_MANGO: 6566 case TARGET_TYPE_PEACH: 6567 case TARGET_TYPE_QCA8074: 6568 case TARGET_TYPE_QCA8074V2: 6569 case TARGET_TYPE_QCA6018: 6570 case TARGET_TYPE_QCA9574: 6571 case TARGET_TYPE_QCN9160: 6572 case TARGET_TYPE_QCN9000: 6573 case TARGET_TYPE_QCA5018: 6574 case TARGET_TYPE_QCN6122: 6575 case TARGET_TYPE_WCN6450: 6576 dp_mon_cdp_ops_register_1_0(ops); 6577 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) 6578 dp_cfr_filter_register_1_0(ops); 6579 #endif 6580 if (target_type == TARGET_TYPE_QCN9000 || 6581 target_type == TARGET_TYPE_QCN9160) 6582 ops->mon_ops->txrx_update_mon_mac_filter = 6583 dp_update_mon_mac_filter; 6584 break; 6585 case TARGET_TYPE_QCN9224: 6586 case TARGET_TYPE_QCA5332: 6587 case TARGET_TYPE_QCN6432: 6588 #if defined(WLAN_PKT_CAPTURE_TX_2_0) || defined(WLAN_PKT_CAPTURE_RX_2_0) 6589 dp_mon_cdp_ops_register_2_0(ops); 6590 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) 6591 dp_cfr_filter_register_2_0(ops); 6592 #endif 6593 #endif /* WLAN_PKT_CAPTURE_TX_2_0 && WLAN_PKT_CAPTURE_RX_2_0 */ 6594 break; 6595 default: 6596 dp_mon_err("%s: Unknown tgt type %d", __func__, target_type); 6597 qdf_assert_always(0); 6598 break; 6599 } 6600 6601 ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode; 6602 ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = 6603 dp_get_mon_vdev_from_pdev_wifi3; 6604 #ifdef DP_PEER_EXTENDED_API 6605 ops->misc_ops->pkt_log_init = dp_pkt_log_init; 6606 ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service; 6607 ops->misc_ops->pkt_log_exit = dp_pkt_log_exit; 6608 #endif 6609 ops->ctrl_ops->enable_peer_based_pktlog = 6610 dp_enable_peer_based_pktlog; 6611 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) 6612 ops->ctrl_ops->txrx_update_peer_pkt_capture_params = 6613 dp_peer_update_pkt_capture_params; 6614 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */ 6615 #ifdef WDI_EVENT_ENABLE 6616 ops->ctrl_ops->txrx_get_pldev = dp_get_pldev; 6617 #endif 6618 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS 6619 ops->host_stats_ops->txrx_get_scan_spcl_vap_stats = 6620 dp_get_scan_spcl_vap_stats; 6621 #endif 6622 return; 6623 } 6624 6625 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT 6626 static inline void 6627 dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops) 6628 { 6629 if (ops->mon_ops) { 6630 qdf_mem_free(ops->mon_ops); 6631 ops->mon_ops = NULL; 6632 } 6633 } 6634 #else 6635 static inline void 6636 dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops) 6637 { 6638 ops->mon_ops = NULL; 6639 } 6640 #endif 6641 6642 void dp_mon_cdp_ops_deregister(struct dp_soc *soc) 6643 { 6644 struct cdp_ops *ops = soc->cdp_soc.ops; 6645 6646 if (!ops) { 6647 dp_mon_err("cdp_ops is NULL"); 6648 return; 6649 } 6650 6651 dp_mon_cdp_mon_ops_deregister(ops); 6652 6653 ops->cmn_drv_ops->txrx_set_monitor_mode = NULL; 6654 ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL; 6655 #ifdef DP_PEER_EXTENDED_API 6656 ops->misc_ops->pkt_log_init = NULL; 6657 ops->misc_ops->pkt_log_con_service = NULL; 6658 ops->misc_ops->pkt_log_exit = NULL; 6659 #endif 6660 ops->ctrl_ops->enable_peer_based_pktlog = NULL; 6661 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) 6662 
ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL; 6663 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */ 6664 #ifdef WDI_EVENT_ENABLE 6665 ops->ctrl_ops->txrx_get_pldev = NULL; 6666 #endif 6667 return; 6668 } 6669 6670 #if defined(WDI_EVENT_ENABLE) &&\ 6671 (defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG)) 6672 static inline 6673 void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc) 6674 { 6675 mon_soc->mon_ops->mon_ppdu_stats_ind_handler = NULL; 6676 } 6677 #else 6678 static inline 6679 void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc) 6680 { 6681 } 6682 #endif 6683 6684 #ifdef QCA_RSSI_DB2DBM 6685 /** 6686 * dp_mon_compute_min_nf() - calculate the min nf value in the 6687 * active chains 20 MHz subbands. 6688 * @conv_params: cdp_rssi_dbm_conv_param_dp structure value 6689 * @min_nf: location to store min NF value 6690 * @chain_idx: active chain index in nfHwdbm array 6691 * 6692 * computation: Need to calculate nfInDbm[][] to A_MIN(nfHwDbm[][]) 6693 * considering row index as active chains and column 6694 * index as 20MHZ subbands per chain. 6695 * example: chain_mask = 0x07 (consider 3 active chains 0,1,2 index) 6696 * BandWidth = 40MHZ (40MHZ includes two 20MHZ subbands so need to 6697 * consider 0,1 index calculate min_nf value) 6698 * 6699 * Return: QDF_STATUS_SUCCESS if value set successfully 6700 * QDF_STATUS_E_INVAL false if error 6701 */ 6702 static QDF_STATUS 6703 dp_mon_compute_min_nf(struct cdp_rssi_dbm_conv_param_dp *conv_params, 6704 int8_t *min_nf, int chain_idx) 6705 { 6706 int j; 6707 *min_nf = conv_params->nf_hw_dbm[chain_idx][0]; 6708 6709 switch (conv_params->curr_bw) { 6710 case CHAN_WIDTH_20: 6711 case CHAN_WIDTH_5: 6712 case CHAN_WIDTH_10: 6713 break; 6714 case CHAN_WIDTH_40: 6715 for (j = 1; j < SUB40BW; j++) { 6716 if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf) 6717 *min_nf = conv_params->nf_hw_dbm[chain_idx][j]; 6718 } 6719 break; 6720 case CHAN_WIDTH_80: 6721 for (j = 1; j < SUB80BW; j++) { 6722 if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf) 6723 *min_nf = conv_params->nf_hw_dbm[chain_idx][j]; 6724 } 6725 break; 6726 case CHAN_WIDTH_160: 6727 case CHAN_WIDTH_80P80: 6728 case CHAN_WIDTH_165: 6729 for (j = 1; j < SUB160BW; j++) { 6730 if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf) 6731 *min_nf = conv_params->nf_hw_dbm[chain_idx][j]; 6732 } 6733 break; 6734 case CHAN_WIDTH_160P160: 6735 case CHAN_WIDTH_320: 6736 for (j = 1; j < SUB320BW; j++) { 6737 if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf) 6738 *min_nf = conv_params->nf_hw_dbm[chain_idx][j]; 6739 } 6740 break; 6741 default: 6742 dp_cdp_err("Invalid bandwidth %u", conv_params->curr_bw); 6743 return QDF_STATUS_E_INVAL; 6744 } 6745 return QDF_STATUS_SUCCESS; 6746 } 6747 6748 /** 6749 * dp_mon_pdev_params_rssi_dbm_conv() - to set rssi in dbm conversion 6750 * params into monitor pdev. 6751 * @cdp_soc: dp soc handle. 6752 * @params: cdp_rssi_db2dbm_param_dp structure value. 
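 *
 * When rssi_dbm_info_present is set, the resulting per-pdev rssi_offset is
 * min_nf_dbm (the minimum noise floor across the active chains' 20 MHz
 * sub-bands) plus rssi_temp_offset.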
6753 * 6754 * Return: QDF_STATUS_SUCCESS if value set successfully 6755 * QDF_STATUS_E_INVAL false if error 6756 */ 6757 QDF_STATUS 6758 dp_mon_pdev_params_rssi_dbm_conv(struct cdp_soc_t *cdp_soc, 6759 struct cdp_rssi_db2dbm_param_dp *params) 6760 { 6761 struct cdp_rssi_db2dbm_param_dp *dp_rssi_params = params; 6762 uint8_t pdev_id = params->pdev_id; 6763 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 6764 struct dp_pdev *pdev = 6765 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 6766 struct dp_mon_pdev *mon_pdev; 6767 struct cdp_rssi_temp_off_param_dp temp_off_param; 6768 struct cdp_rssi_dbm_conv_param_dp conv_params; 6769 int8_t min_nf = 0; 6770 int i; 6771 6772 if (!soc->features.rssi_dbm_conv_support) { 6773 dp_cdp_err("rssi dbm conversion support is false"); 6774 return QDF_STATUS_E_INVAL; 6775 } 6776 if (!pdev || !pdev->monitor_pdev) { 6777 dp_cdp_err("Invalid pdev_id %u", pdev_id); 6778 return QDF_STATUS_E_FAILURE; 6779 } 6780 6781 mon_pdev = pdev->monitor_pdev; 6782 mon_pdev->rssi_dbm_conv_support = 6783 soc->features.rssi_dbm_conv_support; 6784 6785 if (dp_rssi_params->rssi_temp_off_present) { 6786 temp_off_param = dp_rssi_params->temp_off_param; 6787 mon_pdev->rssi_offsets.rssi_temp_offset = 6788 temp_off_param.rssi_temp_offset; 6789 } 6790 if (dp_rssi_params->rssi_dbm_info_present) { 6791 conv_params = dp_rssi_params->rssi_dbm_param; 6792 for (i = 0; i < CDP_MAX_NUM_ANTENNA; i++) { 6793 if (conv_params.curr_rx_chainmask & (0x01 << i)) { 6794 if (QDF_STATUS_E_INVAL == dp_mon_compute_min_nf 6795 (&conv_params, &min_nf, i)) 6796 return QDF_STATUS_E_INVAL; 6797 } else { 6798 continue; 6799 } 6800 } 6801 mon_pdev->rssi_offsets.xlna_bypass_offset = 6802 conv_params.xlna_bypass_offset; 6803 mon_pdev->rssi_offsets.xlna_bypass_threshold = 6804 conv_params.xlna_bypass_threshold; 6805 mon_pdev->rssi_offsets.xbar_config = conv_params.xbar_config; 6806 mon_pdev->rssi_offsets.min_nf_dbm = min_nf; 6807 mon_pdev->rssi_offsets.rssi_offset = 6808 mon_pdev->rssi_offsets.min_nf_dbm + 6809 mon_pdev->rssi_offsets.rssi_temp_offset; 6810 } 6811 return QDF_STATUS_SUCCESS; 6812 } 6813 #endif 6814 6815 void dp_mon_intr_ops_deregister(struct dp_soc *soc) 6816 { 6817 struct dp_mon_soc *mon_soc = soc->monitor_soc; 6818 6819 mon_soc->mon_rx_process = NULL; 6820 dp_mon_ppdu_stats_handler_deregister(mon_soc); 6821 } 6822 6823 void dp_mon_feature_ops_deregister(struct dp_soc *soc) 6824 { 6825 struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc); 6826 6827 if (!mon_ops) { 6828 dp_err("mon_ops is NULL"); 6829 return; 6830 } 6831 6832 mon_ops->mon_config_debug_sniffer = NULL; 6833 mon_ops->mon_peer_tx_init = NULL; 6834 mon_ops->mon_peer_tx_cleanup = NULL; 6835 mon_ops->mon_htt_ppdu_stats_attach = NULL; 6836 mon_ops->mon_htt_ppdu_stats_detach = NULL; 6837 mon_ops->mon_print_pdev_rx_mon_stats = NULL; 6838 mon_ops->mon_set_bsscolor = NULL; 6839 mon_ops->mon_pdev_get_filter_ucast_data = NULL; 6840 mon_ops->mon_pdev_get_filter_mcast_data = NULL; 6841 mon_ops->mon_pdev_get_filter_non_data = NULL; 6842 mon_ops->mon_neighbour_peer_add_ast = NULL; 6843 #ifdef WLAN_TX_PKT_CAPTURE_ENH 6844 mon_ops->mon_peer_tid_peer_id_update = NULL; 6845 mon_ops->mon_tx_ppdu_stats_attach = NULL; 6846 mon_ops->mon_tx_ppdu_stats_detach = NULL; 6847 mon_ops->mon_tx_capture_debugfs_init = NULL; 6848 mon_ops->mon_tx_add_to_comp_queue = NULL; 6849 mon_ops->mon_peer_tx_capture_filter_check = NULL; 6850 mon_ops->mon_print_pdev_tx_capture_stats = NULL; 6851 mon_ops->mon_config_enh_tx_capture = NULL; 6852 #endif 6853 #ifdef WLAN_RX_PKT_CAPTURE_ENH 6854 
mon_ops->mon_config_enh_rx_capture = NULL; 6855 #endif 6856 #ifdef QCA_SUPPORT_BPR 6857 mon_ops->mon_set_bpr_enable = NULL; 6858 #endif 6859 #ifdef ATH_SUPPORT_NAC 6860 mon_ops->mon_set_filter_neigh_peers = NULL; 6861 #endif 6862 #ifdef WLAN_ATF_ENABLE 6863 mon_ops->mon_set_atf_stats_enable = NULL; 6864 #endif 6865 #ifdef FEATURE_NAC_RSSI 6866 mon_ops->mon_filter_neighbour_peer = NULL; 6867 #endif 6868 #ifdef QCA_MCOPY_SUPPORT 6869 mon_ops->mon_filter_setup_mcopy_mode = NULL; 6870 mon_ops->mon_filter_reset_mcopy_mode = NULL; 6871 mon_ops->mon_mcopy_check_deliver = NULL; 6872 #endif 6873 #ifdef QCA_ENHANCED_STATS_SUPPORT 6874 mon_ops->mon_filter_setup_enhanced_stats = NULL; 6875 mon_ops->mon_tx_enable_enhanced_stats = NULL; 6876 mon_ops->mon_tx_disable_enhanced_stats = NULL; 6877 mon_ops->mon_ppdu_desc_deliver = NULL; 6878 mon_ops->mon_ppdu_desc_notify = NULL; 6879 mon_ops->mon_ppdu_stats_feat_enable_check = NULL; 6880 #ifdef WLAN_FEATURE_11BE 6881 mon_ops->mon_tx_stats_update = NULL; 6882 #endif 6883 #endif 6884 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) 6885 mon_ops->mon_filter_setup_smart_monitor = NULL; 6886 #endif 6887 mon_ops->mon_filter_set_reset_mon_mac_filter = NULL; 6888 #ifdef WLAN_RX_PKT_CAPTURE_ENH 6889 mon_ops->mon_filter_setup_rx_enh_capture = NULL; 6890 #endif 6891 #ifdef WDI_EVENT_ENABLE 6892 mon_ops->mon_set_pktlog_wifi3 = NULL; 6893 mon_ops->mon_filter_setup_rx_pkt_log_full = NULL; 6894 mon_ops->mon_filter_reset_rx_pkt_log_full = NULL; 6895 mon_ops->mon_filter_setup_rx_pkt_log_lite = NULL; 6896 mon_ops->mon_filter_reset_rx_pkt_log_lite = NULL; 6897 mon_ops->mon_filter_setup_rx_pkt_log_cbf = NULL; 6898 mon_ops->mon_filter_reset_rx_pkt_log_cbf = NULL; 6899 #ifdef BE_PKTLOG_SUPPORT 6900 mon_ops->mon_filter_setup_pktlog_hybrid = NULL; 6901 mon_ops->mon_filter_reset_pktlog_hybrid = NULL; 6902 #endif 6903 #endif 6904 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG) 6905 mon_ops->mon_pktlogmod_exit = NULL; 6906 #endif 6907 mon_ops->rx_hdr_length_set = NULL; 6908 mon_ops->rx_packet_length_set = NULL; 6909 mon_ops->rx_wmask_subscribe = NULL; 6910 mon_ops->rx_pkt_tlv_offset = NULL; 6911 mon_ops->rx_enable_mpdu_logging = NULL; 6912 mon_ops->rx_enable_fpmo = NULL; 6913 mon_ops->mon_neighbour_peers_detach = NULL; 6914 mon_ops->mon_vdev_set_monitor_mode_buf_rings = NULL; 6915 mon_ops->mon_vdev_set_monitor_mode_rings = NULL; 6916 #ifdef QCA_ENHANCED_STATS_SUPPORT 6917 mon_ops->mon_rx_stats_update = NULL; 6918 mon_ops->mon_rx_populate_ppdu_usr_info = NULL; 6919 mon_ops->mon_rx_populate_ppdu_info = NULL; 6920 #endif 6921 } 6922 6923 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc) 6924 { 6925 struct dp_mon_soc *mon_soc; 6926 qdf_size_t soc_context_size; 6927 6928 if (!soc) { 6929 dp_mon_err("dp_soc is NULL"); 6930 return QDF_STATUS_E_FAILURE; 6931 } 6932 6933 if (soc->arch_ops.txrx_get_mon_context_size) { 6934 soc_context_size = soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_SOC); 6935 mon_soc = dp_context_alloc_mem(soc, DP_MON_SOC_TYPE, 6936 soc_context_size); 6937 } else { 6938 mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc)); 6939 } 6940 if (!mon_soc) { 6941 dp_mon_err("%pK: mem allocation failed", soc); 6942 return QDF_STATUS_E_NOMEM; 6943 } 6944 /* register monitor ops */ 6945 soc->monitor_soc = mon_soc; 6946 dp_mon_ops_register(soc); 6947 dp_mon_register_intr_ops(soc); 6948 6949 dp_mon_cdp_ops_register(soc); 6950 dp_monitor_soc_attach(soc); 6951 dp_mon_register_feature_ops(soc); 6952 return QDF_STATUS_SUCCESS; 6953 } 6954 6955 
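/* Counterpart of dp_mon_soc_attach(): deinit the monitor vdev timer, deregister the monitor cdp ops, detach the arch-specific monitor soc and free the mon_soc context. */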
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc) 6956 { 6957 struct dp_mon_soc *mon_soc; 6958 6959 if (!soc) { 6960 dp_mon_err("dp_soc is NULL"); 6961 return QDF_STATUS_E_FAILURE; 6962 } 6963 6964 mon_soc = soc->monitor_soc; 6965 dp_monitor_vdev_timer_deinit(soc); 6966 dp_mon_cdp_ops_deregister(soc); 6967 dp_monitor_soc_detach(soc); 6968 soc->monitor_soc = NULL; 6969 qdf_mem_free(mon_soc); 6970 return QDF_STATUS_SUCCESS; 6971 } 6972 6973 #ifdef QCA_SUPPORT_FULL_MON 6974 static void print_ring_tracker_stats(struct dp_mon_pdev *mon_pdev, 6975 uint8_t target) 6976 { 6977 struct dp_ring_ppdu_id_tracker *tracker; 6978 uint8_t i; 6979 6980 if (target) 6981 tracker = mon_pdev->hist_ppdu_id_mon_s; 6982 else 6983 tracker = mon_pdev->hist_ppdu_id_mon_d; 6984 6985 for (i = 0; i < DP_HIST_TRACK_SIZE; i++) { 6986 qdf_print("idx: %d dest_ppdu_id: %d dest_time: %lld d_hp: %d ", 6987 i, tracker[i].ppdu_id_mon_dest, 6988 tracker[i].time_ppdu_id_mon_dest, 6989 tracker[i].dest_hp); 6990 qdf_print("d_tp: %d d_hw_hp: %d d_hw_tp: %d status_ppdu_id: %d", 6991 tracker[i].dest_tp, 6992 tracker[i].dest_hw_hp, 6993 tracker[i].dest_hw_tp, 6994 tracker[i].ppdu_id_mon_status); 6995 qdf_print(" status_time: %lld s_hp: %d s_tp: %d s_hw_hp: %d ", 6996 tracker[i].time_ppdu_id_mon_status, 6997 tracker[i].status_hp, 6998 tracker[i].status_tp, 6999 tracker[i].status_hw_hp); 7000 qdf_print("s_hw_tp: %d\n", 7001 tracker[i].status_hw_tp); 7002 } 7003 } 7004 #else 7005 static void print_ring_tracker_stats(struct dp_mon_pdev *mon_pdev, 7006 uint8_t target) 7007 { 7008 } 7009 #endif 7010 7011 void 7012 dp_check_and_dump_full_mon_info(struct dp_soc *soc, struct dp_pdev *pdev, 7013 int mac_id, int war) 7014 { 7015 struct dp_mon_soc *mon_soc = soc->monitor_soc; 7016 struct dp_mon_pdev *mon_pdev; 7017 hal_soc_handle_t hal_soc; 7018 uint64_t buf_addr; 7019 void *mon_status_srng; 7020 void *rxdma_mon_status_ring_entry; 7021 struct hal_buf_info hbi; 7022 hal_ring_handle_t mon_dest_srng; 7023 void *ring_desc; 7024 struct hal_rx_mon_desc_info desc_info = {0}; 7025 struct dp_rx_desc *rx_desc; 7026 uint64_t ppdu_id = 0; 7027 7028 if (!mon_soc) { 7029 dp_err("Monitor soc is NULL\n"); 7030 return; 7031 } 7032 7033 if (!mon_soc->full_mon_mode) { 7034 dp_err("Full monitor mode is disabled\n"); 7035 return; 7036 } 7037 7038 /** 7039 * If rx_mon_ring_mask is set but work done is 0, 7040 * there is a higher chance of backpressure building up. 7041 * Dump the contents of the Rx monitor status and destination rings 7042 * and move to the next pointers.
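 * Note: when the war flag is set, mon_lock is neither taken nor released here.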
7043 */ 7044 mon_pdev = pdev->monitor_pdev; 7045 if (!mon_pdev) { 7046 dp_err("mon_pdev is NULL\n"); 7047 return; 7048 } 7049 7050 hal_soc = soc->hal_soc; 7051 7052 if (!war) 7053 qdf_spin_lock_bh(&mon_pdev->mon_lock); 7054 7055 mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng; 7056 if (!mon_status_srng) 7057 goto unlock_monitor; 7058 7059 dp_print_ring_stat_from_hal(soc, &soc->rxdma_mon_status_ring[mac_id], 7060 RXDMA_MONITOR_STATUS); 7061 rxdma_mon_status_ring_entry = 7062 hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng); 7063 7064 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7065 "hold_mon_dest_ring: %d\n", mon_pdev->hold_mon_dest_ring); 7066 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7067 "mon_pdev last_ppdu_id: %d\n", mon_pdev->last_ppdu_id); 7068 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7069 "soc: %d\n", hal_get_target_type(hal_soc)); 7070 7071 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7072 "reap_status:\n"); 7073 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7074 "\t DP_MON_STATUS_NO_DMA : %lld\n", 7075 mon_pdev->reap_status[DP_MON_STATUS_NO_DMA]); 7076 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7077 "\t DP_MON_STATUS_MATCH : %lld\n", 7078 mon_pdev->reap_status[DP_MON_STATUS_MATCH]); 7079 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7080 "\t DP_MON_STATUS_LAG : %lld\n", 7081 mon_pdev->reap_status[DP_MON_STATUS_LAG]); 7082 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7083 "\t DP_MON_STATUS_LEAD : %lld\n", 7084 mon_pdev->reap_status[DP_MON_STATUS_LEAD]); 7085 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7086 "\t DP_MON_STATUS_REPLENISH : %lld\n", 7087 mon_pdev->reap_status[DP_MON_STATUS_REPLENISH]); 7088 7089 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7090 "prev_status:\n"); 7091 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7092 "\t DP_MON_STATUS_NO_DMA : %lld\n", 7093 mon_pdev->prev_status[DP_MON_STATUS_NO_DMA]); 7094 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7095 "\t DP_MON_STATUS_MATCH : %lld\n", 7096 mon_pdev->prev_status[DP_MON_STATUS_MATCH]); 7097 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7098 "\t DP_MON_STATUS_LAG : %lld\n", 7099 mon_pdev->prev_status[DP_MON_STATUS_LAG]); 7100 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7101 "\t DP_MON_STATUS_LEAD : %lld\n", 7102 mon_pdev->prev_status[DP_MON_STATUS_LEAD]); 7103 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7104 "\t DP_MON_STATUS_REPLENISH : %lld\n", 7105 mon_pdev->prev_status[DP_MON_STATUS_REPLENISH]); 7106 7107 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7108 "match_stats:\n"); 7109 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7110 "\t DP_MON_STATUS_LAG : %lld\n", 7111 mon_pdev->status_match[DP_MON_STATUS_LAG]); 7112 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7113 "\t DP_MON_STATUS_LEAD : %lld\n", 7114 mon_pdev->status_match[DP_MON_STATUS_LEAD]); 7115 7116 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7117 "mismatch: %d\n", 7118 mon_pdev->rx_mon_stats.ppdu_id_mismatch); 7119 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7120 "status_ppdu_drop: %d\n", 7121 mon_pdev->rx_mon_stats.status_ppdu_drop); 7122 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7123 "dest_ppdu_drop: %d\n", 7124 mon_pdev->rx_mon_stats.dest_ppdu_drop); 7125 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7126 "tlv_tag_status_err: %d\n", 7127 mon_pdev->rx_mon_stats.tlv_tag_status_err); 7128 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7129 
"status_buf_done_war: %d\n", 7130 mon_pdev->rx_mon_stats.status_buf_done_war); 7131 7132 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7133 "soc[%pK] pdev[%pK] mac_id[%d]\n", 7134 soc, pdev, mac_id); 7135 7136 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7137 "MON DEST TRACKER STATS:\n"); 7138 print_ring_tracker_stats(mon_pdev, 0); 7139 7140 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7141 "MON STA TRACKER STATS:\n"); 7142 print_ring_tracker_stats(mon_pdev, 1); 7143 7144 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7145 "rxdma_mon_status_ring:\n"); 7146 if (!rxdma_mon_status_ring_entry) { 7147 dp_err("rxdma_mon_status_ring_entry NULL\n"); 7148 goto dump_mon_destination_ring; 7149 } 7150 7151 buf_addr = 7152 (HAL_RX_BUFFER_ADDR_31_0_GET(rxdma_mon_status_ring_entry) | 7153 ((uint64_t) 7154 (HAL_RX_BUFFER_ADDR_39_32_GET(rxdma_mon_status_ring_entry)) 7155 << 32)); 7156 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7157 "Buffer address : %llx\n", buf_addr); 7158 if (!buf_addr) 7159 goto dump_mon_destination_ring; 7160 7161 hal_rx_buf_cookie_rbm_get(soc->hal_soc, 7162 (uint32_t *)rxdma_mon_status_ring_entry, 7163 &hbi); 7164 7165 print_hex_dump(KERN_ERR, "\tHAL_BUF_INFO: ", DUMP_PREFIX_NONE, 32, 4, 7166 &hbi, sizeof(struct hal_buf_info), false); 7167 7168 rx_desc = dp_rx_cookie_2_va_mon_status(soc, hbi.sw_cookie); 7169 if (!rx_desc) { 7170 dp_err("rx_desc is NULL\n"); 7171 goto dump_mon_destination_ring; 7172 } 7173 7174 print_hex_dump(KERN_ERR, "\tRX_DESC: ", DUMP_PREFIX_NONE, 32, 4, 7175 rx_desc, sizeof(struct dp_rx_desc), false); 7176 7177 dump_mon_destination_ring: 7178 7179 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7180 "rxdma_mon_destination_ring:\n"); 7181 mon_dest_srng = pdev->soc->rxdma_mon_dst_ring[mac_id].hal_srng; 7182 7183 if (!mon_dest_srng) { 7184 dp_err("rxdma_mon_dst_ring hal_srng is NULL\n"); 7185 goto unlock_monitor; 7186 } 7187 7188 dp_print_ring_stat_from_hal(soc, &soc->rxdma_mon_dst_ring[mac_id], 7189 RXDMA_MONITOR_DST); 7190 7191 ring_desc = hal_srng_dst_peek(hal_soc, mon_dest_srng); 7192 if (!ring_desc) 7193 goto unlock_monitor; 7194 7195 ppdu_id = hal_rx_hw_desc_get_ppduid_get(hal_soc, NULL, ring_desc); 7196 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7197 "Next dest ring ppdu id: %lld\n", ppdu_id); 7198 hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc, 7199 ring_desc, &desc_info); 7200 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 7201 "Next desc_info ppdu_id: %d\n", desc_info.ppdu_id); 7202 7203 print_hex_dump(KERN_ERR, "\tDESC_INFO: ", DUMP_PREFIX_NONE, 32, 4, 7204 &desc_info, sizeof(struct hal_rx_mon_desc_info), false); 7205 7206 unlock_monitor: 7207 if (!war) 7208 qdf_spin_unlock_bh(&mon_pdev->mon_lock); 7209 } 7210