1 /* 2 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 #include <dp_types.h> 18 #include "dp_rx.h" 19 #include "dp_peer.h" 20 #include <dp_htt.h> 21 #include <dp_mon_filter.h> 22 #include <dp_htt.h> 23 #include <dp_mon.h> 24 #include <dp_rx_mon.h> 25 #include <dp_internal.h> 26 #include "htt_ppdu_stats.h" 27 #include "dp_cal_client_api.h" 28 #if defined(DP_CON_MON) 29 #ifndef REMOVE_PKT_LOG 30 #include <pktlog_ac_api.h> 31 #include <pktlog_ac.h> 32 #endif 33 #endif 34 #ifdef FEATURE_PERPKT_INFO 35 #include "dp_ratetable.h" 36 #endif 37 #ifdef QCA_SUPPORT_LITE_MONITOR 38 #include "dp_lite_mon.h" 39 #endif 40 41 #define DP_INTR_POLL_TIMER_MS 5 42 #define INVALID_FREE_BUFF 0xffffffff 43 44 #ifdef WLAN_RX_PKT_CAPTURE_ENH 45 #include "dp_rx_mon_feature.h" 46 #endif /* WLAN_RX_PKT_CAPTURE_ENH */ 47 48 #ifdef QCA_UNDECODED_METADATA_SUPPORT 49 #define MAX_STRING_LEN_PER_FIELD 6 50 #define DP_UNDECODED_ERR_LENGTH (MAX_STRING_LEN_PER_FIELD * CDP_PHYRX_ERR_MAX) 51 #endif 52 53 #ifdef QCA_MCOPY_SUPPORT 54 static inline void 55 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev) 56 { 57 struct dp_mon_pdev *mon_pdev = 
pdev->monitor_pdev; 58 59 mon_pdev->mcopy_mode = M_COPY_DISABLED; 60 mon_pdev->mvdev = NULL; 61 } 62 63 static inline void 64 dp_reset_mcopy_mode(struct dp_pdev *pdev) 65 { 66 QDF_STATUS status = QDF_STATUS_SUCCESS; 67 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 68 struct cdp_mon_ops *cdp_ops; 69 70 if (mon_pdev->mcopy_mode) { 71 cdp_ops = dp_mon_cdp_ops_get(pdev->soc); 72 if (cdp_ops && cdp_ops->config_full_mon_mode) 73 cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev, 74 DP_FULL_MON_ENABLE); 75 dp_pdev_disable_mcopy_code(pdev); 76 dp_mon_filter_reset_mcopy_mode(pdev); 77 status = dp_mon_filter_update(pdev); 78 if (status != QDF_STATUS_SUCCESS) { 79 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 80 FL("Failed to reset AM copy mode filters")); 81 } 82 mon_pdev->monitor_configured = false; 83 } 84 } 85 86 static QDF_STATUS 87 dp_config_mcopy_mode(struct dp_pdev *pdev, int val) 88 { 89 QDF_STATUS status = QDF_STATUS_SUCCESS; 90 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 91 struct dp_mon_ops *mon_ops; 92 struct cdp_mon_ops *cdp_ops; 93 94 if (mon_pdev->mvdev) 95 return QDF_STATUS_E_RESOURCES; 96 97 mon_pdev->mcopy_mode = val; 98 mon_pdev->tx_sniffer_enable = 0; 99 mon_pdev->monitor_configured = true; 100 101 mon_ops = dp_mon_ops_get(pdev->soc); 102 if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx)) { 103 if (mon_ops && mon_ops->mon_vdev_set_monitor_mode_rings) 104 mon_ops->mon_vdev_set_monitor_mode_rings(pdev, true); 105 } 106 107 /* 108 * Setup the M copy mode filter. 
109 */ 110 cdp_ops = dp_mon_cdp_ops_get(pdev->soc); 111 if (cdp_ops && cdp_ops->config_full_mon_mode) 112 cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev, 113 DP_FULL_MON_ENABLE); 114 dp_mon_filter_setup_mcopy_mode(pdev); 115 status = dp_mon_filter_update(pdev); 116 if (status != QDF_STATUS_SUCCESS) { 117 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 118 FL("Failed to set M_copy mode filters")); 119 dp_mon_filter_reset_mcopy_mode(pdev); 120 dp_pdev_disable_mcopy_code(pdev); 121 return status; 122 } 123 124 if (!mon_pdev->pktlog_ppdu_stats) 125 dp_h2t_cfg_stats_msg_send(pdev, 126 DP_PPDU_STATS_CFG_SNIFFER, 127 pdev->pdev_id); 128 129 return status; 130 } 131 #else 132 static inline void 133 dp_reset_mcopy_mode(struct dp_pdev *pdev) 134 { 135 } 136 137 static inline QDF_STATUS 138 dp_config_mcopy_mode(struct dp_pdev *pdev, int val) 139 { 140 return QDF_STATUS_E_INVAL; 141 } 142 #endif /* QCA_MCOPY_SUPPORT */ 143 144 #ifdef QCA_UNDECODED_METADATA_SUPPORT 145 static QDF_STATUS 146 dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev) 147 { 148 QDF_STATUS status = QDF_STATUS_SUCCESS; 149 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 150 151 if (mon_pdev->undecoded_metadata_capture) { 152 dp_mon_filter_reset_undecoded_metadata_mode(pdev); 153 status = dp_mon_filter_update(pdev); 154 if (status != QDF_STATUS_SUCCESS) { 155 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 156 FL("Undecoded capture filter reset failed")); 157 } 158 } 159 mon_pdev->undecoded_metadata_capture = 0; 160 return status; 161 } 162 163 static QDF_STATUS 164 dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val) 165 { 166 QDF_STATUS status = QDF_STATUS_SUCCESS; 167 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 168 struct dp_mon_ops *mon_ops; 169 170 if (!mon_pdev->mvdev) { 171 qdf_err("monitor_pdev is NULL"); 172 return QDF_STATUS_E_RESOURCES; 173 } 174 175 mon_pdev->undecoded_metadata_capture = val; 176 mon_pdev->monitor_configured = true; 177 178 mon_ops 
= dp_mon_ops_get(pdev->soc); 179 180 /* Setup the undecoded metadata capture mode filter. */ 181 dp_mon_filter_setup_undecoded_metadata_mode(pdev); 182 status = dp_mon_filter_update(pdev); 183 if (status != QDF_STATUS_SUCCESS) { 184 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 185 FL("Failed to set Undecoded capture filters")); 186 dp_mon_filter_reset_undecoded_metadata_mode(pdev); 187 return status; 188 } 189 190 return status; 191 } 192 #else 193 static inline QDF_STATUS 194 dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev) 195 { 196 return QDF_STATUS_E_INVAL; 197 } 198 199 static inline QDF_STATUS 200 dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val) 201 { 202 return QDF_STATUS_E_INVAL; 203 } 204 #endif /* QCA_UNDECODED_METADATA_SUPPORT */ 205 206 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl, 207 uint8_t pdev_id, 208 uint8_t special_monitor) 209 { 210 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 211 struct dp_pdev *pdev = 212 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 213 pdev_id); 214 QDF_STATUS status = QDF_STATUS_SUCCESS; 215 struct dp_mon_pdev *mon_pdev; 216 struct cdp_mon_ops *cdp_ops; 217 218 if (!pdev) 219 return QDF_STATUS_E_FAILURE; 220 221 mon_pdev = pdev->monitor_pdev; 222 223 qdf_spin_lock_bh(&mon_pdev->mon_lock); 224 225 cdp_ops = dp_mon_cdp_ops_get(soc); 226 if (cdp_ops && cdp_ops->soc_config_full_mon_mode) 227 cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev, 228 DP_FULL_MON_DISABLE); 229 mon_pdev->mvdev = NULL; 230 231 /* 232 * Lite monitor mode, smart monitor mode and monitor 233 * mode uses this APIs to filter reset and mode disable 234 */ 235 if (mon_pdev->mcopy_mode) { 236 #if defined(QCA_MCOPY_SUPPORT) 237 dp_pdev_disable_mcopy_code(pdev); 238 dp_mon_filter_reset_mcopy_mode(pdev); 239 #endif /* QCA_MCOPY_SUPPORT */ 240 } else if (special_monitor) { 241 #if defined(ATH_SUPPORT_NAC) 242 dp_mon_filter_reset_smart_monitor(pdev); 243 #endif /* ATH_SUPPORT_NAC */ 244 /* for mon 
2.0 we make use of lite mon to 245 * set filters for smart monitor use case. 246 */ 247 dp_monitor_lite_mon_disable_rx(pdev); 248 } else if (mon_pdev->undecoded_metadata_capture) { 249 #ifdef QCA_UNDECODED_METADATA_SUPPORT 250 dp_reset_undecoded_metadata_capture(pdev); 251 #endif 252 } else { 253 dp_mon_filter_reset_mon_mode(pdev); 254 } 255 status = dp_mon_filter_update(pdev); 256 if (status != QDF_STATUS_SUCCESS) { 257 dp_rx_mon_dest_err("%pK: Failed to reset monitor filters", 258 soc); 259 } 260 261 mon_pdev->monitor_configured = false; 262 263 qdf_spin_unlock_bh(&mon_pdev->mon_lock); 264 return QDF_STATUS_SUCCESS; 265 } 266 267 #ifdef QCA_ADVANCE_MON_FILTER_SUPPORT 268 QDF_STATUS 269 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 270 struct cdp_monitor_filter *filter_val) 271 { 272 /* Many monitor VAPs can exists in a system but only one can be up at 273 * anytime 274 */ 275 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 276 struct dp_vdev *vdev; 277 struct dp_pdev *pdev = 278 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 279 pdev_id); 280 QDF_STATUS status = QDF_STATUS_SUCCESS; 281 struct dp_mon_pdev *mon_pdev; 282 283 if (!pdev || !pdev->monitor_pdev) 284 return QDF_STATUS_E_FAILURE; 285 286 mon_pdev = pdev->monitor_pdev; 287 vdev = mon_pdev->mvdev; 288 289 if (!vdev) 290 return QDF_STATUS_E_FAILURE; 291 292 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, 293 "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK", 294 pdev, pdev_id, soc, vdev); 295 296 /*Check if current pdev's monitor_vdev exists */ 297 if (!mon_pdev->mvdev) { 298 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 299 "vdev=%pK", vdev); 300 qdf_assert(vdev); 301 } 302 303 /* update filter mode, type in pdev structure */ 304 mon_pdev->mon_filter_mode = filter_val->mode; 305 mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt; 306 mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl; 307 mon_pdev->fp_data_filter = filter_val->fp_data; 308 mon_pdev->mo_mgmt_filter = 
filter_val->mo_mgmt; 309 mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl; 310 mon_pdev->mo_data_filter = filter_val->mo_data; 311 312 dp_mon_filter_setup_mon_mode(pdev); 313 status = dp_mon_filter_update(pdev); 314 if (status != QDF_STATUS_SUCCESS) { 315 dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode", 316 soc); 317 dp_mon_filter_reset_mon_mode(pdev); 318 } 319 320 return status; 321 } 322 #endif 323 324 QDF_STATUS 325 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf) 326 { 327 struct dp_pdev *pdev = 328 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, 329 pdev_id); 330 331 if (!pdev) 332 return QDF_STATUS_E_FAILURE; 333 334 dp_deliver_mgmt_frm(pdev, nbuf); 335 336 return QDF_STATUS_SUCCESS; 337 } 338 339 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS 340 /** 341 * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct 342 * @mon_vdev: Datapath mon VDEV handle 343 * 344 * Return: 0 on success, not 0 on failure 345 */ 346 static inline QDF_STATUS 347 dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev) 348 { 349 mon_vdev->scan_spcl_vap_stats = 350 qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats)); 351 352 if (!mon_vdev->scan_spcl_vap_stats) { 353 dp_mon_err("scan spcl vap stats attach fail"); 354 return QDF_STATUS_E_NOMEM; 355 } 356 357 return QDF_STATUS_SUCCESS; 358 } 359 360 /** 361 * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct 362 * @mon_vdev: Datapath mon VDEV handle 363 * 364 * Return: void 365 */ 366 static inline void 367 dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev) 368 { 369 if (mon_vdev->scan_spcl_vap_stats) { 370 qdf_mem_free(mon_vdev->scan_spcl_vap_stats); 371 mon_vdev->scan_spcl_vap_stats = NULL; 372 } 373 } 374 375 /** 376 * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats 377 * @vdev: Datapath VDEV handle 378 * 379 * Return: void 380 */ 381 static inline void 382 dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev) 383 { 384 struct dp_mon_vdev 
*mon_vdev; 385 struct dp_mon_pdev *mon_pdev; 386 387 mon_pdev = vdev->pdev->monitor_pdev; 388 if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable) 389 return; 390 391 mon_vdev = vdev->monitor_vdev; 392 if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) 393 return; 394 395 qdf_mem_zero(mon_vdev->scan_spcl_vap_stats, 396 sizeof(struct cdp_scan_spcl_vap_stats)); 397 } 398 399 /** 400 * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats 401 * @soc_hdl: Datapath soc handle 402 * @vdev_id: vdev id 403 * @stats: structure to hold spcl vap stats 404 * 405 * Return: 0 on success, not 0 on failure 406 */ 407 static QDF_STATUS 408 dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 409 struct cdp_scan_spcl_vap_stats *stats) 410 { 411 struct dp_mon_vdev *mon_vdev = NULL; 412 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 413 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 414 DP_MOD_ID_CDP); 415 416 if (!vdev || !stats) { 417 if (vdev) 418 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 419 return QDF_STATUS_E_INVAL; 420 } 421 422 mon_vdev = vdev->monitor_vdev; 423 if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) { 424 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 425 return QDF_STATUS_E_INVAL; 426 } 427 428 qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats, 429 sizeof(struct cdp_scan_spcl_vap_stats)); 430 431 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 432 return QDF_STATUS_SUCCESS; 433 } 434 #else 435 static inline void 436 dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev) 437 { 438 } 439 440 static inline QDF_STATUS 441 dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev) 442 { 443 return QDF_STATUS_SUCCESS; 444 } 445 446 static inline void 447 dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev) 448 { 449 } 450 #endif 451 452 /** 453 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode 454 * @vdev_handle: Datapath VDEV handle 455 * @smart_monitor: Flag to denote if its smart monitor mode 456 * 457 * 
Return: 0 on success, not 0 on failure 458 */ 459 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc, 460 uint8_t vdev_id, 461 uint8_t special_monitor) 462 { 463 struct dp_soc *soc = (struct dp_soc *)dp_soc; 464 struct dp_pdev *pdev; 465 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 466 DP_MOD_ID_CDP); 467 QDF_STATUS status = QDF_STATUS_SUCCESS; 468 struct dp_mon_pdev *mon_pdev; 469 struct cdp_mon_ops *cdp_ops; 470 471 if (!vdev) 472 return QDF_STATUS_E_FAILURE; 473 474 pdev = vdev->pdev; 475 476 if (!pdev || !pdev->monitor_pdev) 477 return QDF_STATUS_E_FAILURE; 478 479 mon_pdev = pdev->monitor_pdev; 480 481 mon_pdev->mvdev = vdev; 482 483 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, 484 "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n", 485 pdev, pdev->pdev_id, pdev->soc, vdev); 486 487 /* 488 * do not configure monitor buf ring and filter for smart and 489 * lite monitor 490 * for smart monitor filters are added along with first NAC 491 * for lite monitor required configuration done through 492 * dp_set_pdev_param 493 */ 494 495 if (special_monitor) { 496 status = QDF_STATUS_SUCCESS; 497 goto fail; 498 } 499 500 if (mon_pdev->scan_spcl_vap_configured) 501 dp_reset_scan_spcl_vap_stats(vdev); 502 503 /*Check if current pdev's monitor_vdev exists */ 504 if (mon_pdev->monitor_configured) { 505 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 506 "monitor vap already created vdev=%pK\n", vdev); 507 status = QDF_STATUS_E_RESOURCES; 508 goto fail; 509 } 510 511 mon_pdev->monitor_configured = true; 512 513 /* disable lite mon if configured, monitor vap takes 514 * priority over lite mon when its created. Lite mon 515 * can be configured later again. 
516 */ 517 dp_monitor_lite_mon_disable_rx(pdev); 518 519 cdp_ops = dp_mon_cdp_ops_get(soc); 520 if (cdp_ops && cdp_ops->soc_config_full_mon_mode) 521 cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev, 522 DP_FULL_MON_ENABLE); 523 dp_mon_filter_setup_mon_mode(pdev); 524 status = dp_mon_filter_update(pdev); 525 if (status != QDF_STATUS_SUCCESS) { 526 dp_cdp_err("%pK: Failed to reset monitor filters", soc); 527 dp_mon_filter_reset_mon_mode(pdev); 528 mon_pdev->monitor_configured = false; 529 mon_pdev->mvdev = NULL; 530 } 531 532 fail: 533 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 534 return status; 535 } 536 537 #ifdef QCA_TX_CAPTURE_SUPPORT 538 static QDF_STATUS 539 dp_config_tx_capture_mode(struct dp_pdev *pdev) 540 { 541 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 542 543 mon_pdev->tx_sniffer_enable = 1; 544 mon_pdev->monitor_configured = false; 545 546 if (!mon_pdev->pktlog_ppdu_stats) 547 dp_h2t_cfg_stats_msg_send(pdev, 548 DP_PPDU_STATS_CFG_SNIFFER, 549 pdev->pdev_id); 550 551 return QDF_STATUS_SUCCESS; 552 } 553 #else 554 #ifdef QCA_MCOPY_SUPPORT 555 static QDF_STATUS 556 dp_config_tx_capture_mode(struct dp_pdev *pdev) 557 { 558 return QDF_STATUS_E_INVAL; 559 } 560 #endif 561 #endif 562 563 #if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT) 564 QDF_STATUS 565 dp_config_debug_sniffer(struct dp_pdev *pdev, int val) 566 { 567 QDF_STATUS status = QDF_STATUS_SUCCESS; 568 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 569 570 /* 571 * Note: The mirror copy mode cannot co-exist with any other 572 * monitor modes. Hence disabling the filter for this mode will 573 * reset the monitor destination ring filters. 574 */ 575 dp_reset_mcopy_mode(pdev); 576 switch (val) { 577 case 0: 578 mon_pdev->tx_sniffer_enable = 0; 579 mon_pdev->monitor_configured = false; 580 581 /* 582 * We don't need to reset the Rx monitor status ring or call 583 * the API dp_ppdu_ring_reset() if all debug sniffer mode is 584 * disabled. 
The Rx monitor status ring will be disabled when 585 * the last mode using the monitor status ring get disabled. 586 */ 587 if (!mon_pdev->pktlog_ppdu_stats && 588 !mon_pdev->enhanced_stats_en && 589 !mon_pdev->bpr_enable) { 590 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); 591 } else if (mon_pdev->enhanced_stats_en && 592 !mon_pdev->bpr_enable) { 593 dp_h2t_cfg_stats_msg_send(pdev, 594 DP_PPDU_STATS_CFG_ENH_STATS, 595 pdev->pdev_id); 596 } else if (!mon_pdev->enhanced_stats_en && 597 mon_pdev->bpr_enable) { 598 dp_h2t_cfg_stats_msg_send(pdev, 599 DP_PPDU_STATS_CFG_BPR_ENH, 600 pdev->pdev_id); 601 } else { 602 dp_h2t_cfg_stats_msg_send(pdev, 603 DP_PPDU_STATS_CFG_BPR, 604 pdev->pdev_id); 605 } 606 break; 607 608 case 1: 609 status = dp_config_tx_capture_mode(pdev); 610 break; 611 case 2: 612 case 4: 613 status = dp_config_mcopy_mode(pdev, val); 614 break; 615 616 default: 617 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 618 "Invalid value, mode not supported"); 619 status = QDF_STATUS_E_INVAL; 620 break; 621 } 622 return status; 623 } 624 #endif 625 626 #ifdef QCA_UNDECODED_METADATA_SUPPORT 627 QDF_STATUS 628 dp_mon_config_undecoded_metadata_capture(struct dp_pdev *pdev, int val) 629 { 630 QDF_STATUS status = QDF_STATUS_SUCCESS; 631 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 632 633 if (!mon_pdev->mvdev && !mon_pdev->scan_spcl_vap_configured) { 634 qdf_err("No monitor or Special vap, undecoded capture not supported"); 635 return QDF_STATUS_E_RESOURCES; 636 } 637 638 if (val) 639 status = dp_enable_undecoded_metadata_capture(pdev, val); 640 else 641 status = dp_reset_undecoded_metadata_capture(pdev); 642 643 return status; 644 } 645 #endif 646 647 /** 648 * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer 649 * ring based on target 650 * @soc: soc handle 651 * @mac_for_pdev: WIN- pdev_id, MCL- mac id 652 * @pdev: physical device handle 653 * @ring_num: mac id 654 * @htt_tlv_filter: tlv filter 655 * 656 * Return: zero on 
success, non-zero on failure 657 */ 658 static inline QDF_STATUS 659 dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev, 660 struct dp_pdev *pdev, uint8_t ring_num, 661 struct htt_rx_ring_tlv_filter htt_tlv_filter) 662 { 663 QDF_STATUS status; 664 665 if (soc->wlan_cfg_ctx->rxdma1_enable) 666 status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, 667 soc->rxdma_mon_buf_ring[ring_num] 668 .hal_srng, 669 RXDMA_MONITOR_BUF, 670 RX_MONITOR_BUFFER_SIZE, 671 &htt_tlv_filter); 672 else 673 status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, 674 pdev->rx_mac_buf_ring[ring_num] 675 .hal_srng, 676 RXDMA_BUF, RX_DATA_BUFFER_SIZE, 677 &htt_tlv_filter); 678 679 return status; 680 } 681 682 /** 683 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode 684 * @soc_hdl: datapath soc handle 685 * @pdev_id: physical device instance id 686 * 687 * Return: virtual interface id 688 */ 689 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl, 690 uint8_t pdev_id) 691 { 692 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 693 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 694 695 if (qdf_unlikely(!pdev || !pdev->monitor_pdev || 696 !pdev->monitor_pdev->mvdev)) 697 return -EINVAL; 698 699 return pdev->monitor_pdev->mvdev->vdev_id; 700 } 701 702 #if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT) 703 #ifndef WLAN_TX_PKT_CAPTURE_ENH 704 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf) 705 { 706 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 707 708 if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) { 709 dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc, 710 nbuf, HTT_INVALID_PEER, 711 WDI_NO_VAL, pdev->pdev_id); 712 } else { 713 if (!mon_pdev->bpr_enable) 714 qdf_nbuf_free(nbuf); 715 } 716 } 717 #endif 718 #endif 719 720 QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev) 721 { 722 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 723 724 
mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE); 725 726 if (!mon_pdev->ppdu_tlv_buf) { 727 QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail"); 728 return QDF_STATUS_E_NOMEM; 729 } 730 731 return QDF_STATUS_SUCCESS; 732 } 733 734 void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev) 735 { 736 struct ppdu_info *ppdu_info, *ppdu_info_next; 737 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 738 739 740 TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list, 741 ppdu_info_list_elem, ppdu_info_next) { 742 if (!ppdu_info) 743 break; 744 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, 745 ppdu_info, ppdu_info_list_elem); 746 mon_pdev->list_depth--; 747 qdf_assert_always(ppdu_info->nbuf); 748 qdf_nbuf_free(ppdu_info->nbuf); 749 qdf_mem_free(ppdu_info); 750 } 751 752 TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list, 753 ppdu_info_list_elem, ppdu_info_next) { 754 if (!ppdu_info) 755 break; 756 TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, 757 ppdu_info, ppdu_info_list_elem); 758 mon_pdev->sched_comp_list_depth--; 759 qdf_assert_always(ppdu_info->nbuf); 760 qdf_nbuf_free(ppdu_info->nbuf); 761 qdf_mem_free(ppdu_info); 762 } 763 764 if (mon_pdev->ppdu_tlv_buf) 765 qdf_mem_free(mon_pdev->ppdu_tlv_buf); 766 } 767 768 QDF_STATUS dp_pdev_get_rx_mon_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 769 struct cdp_pdev_mon_stats *stats) 770 { 771 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 772 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 773 struct dp_mon_pdev *mon_pdev; 774 775 if (!pdev) 776 return QDF_STATUS_E_FAILURE; 777 778 mon_pdev = pdev->monitor_pdev; 779 if (!mon_pdev) 780 return QDF_STATUS_E_FAILURE; 781 782 qdf_mem_copy(stats, &mon_pdev->rx_mon_stats, 783 sizeof(struct cdp_pdev_mon_stats)); 784 785 return QDF_STATUS_SUCCESS; 786 } 787 788 #ifdef QCA_UNDECODED_METADATA_SUPPORT 789 /** 790 * dp_pdev_get_undecoded_capture_stats() - Get undecoded metadata captured 791 * monitor pdev stats 792 * @mon_pdev: Monitor 
PDEV handle 793 * @rx_mon_stats: Monitor pdev status/destination ring stats 794 * 795 * Return: None 796 */ 797 static inline void 798 dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev, 799 struct cdp_pdev_mon_stats *rx_mon_stats) 800 { 801 char undecoded_error[DP_UNDECODED_ERR_LENGTH]; 802 uint8_t index = 0, i; 803 804 DP_PRINT_STATS("Rx Undecoded Frame count:%d", 805 rx_mon_stats->rx_undecoded_count); 806 index = 0; 807 for (i = 0; i < (CDP_PHYRX_ERR_MAX); i++) { 808 index += qdf_snprint(&undecoded_error[index], 809 DP_UNDECODED_ERR_LENGTH - index, 810 " %d", rx_mon_stats->rx_undecoded_error[i]); 811 } 812 DP_PRINT_STATS("Undecoded Error (0-63):%s", undecoded_error); 813 } 814 #else 815 static inline void 816 dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev, 817 struct cdp_pdev_mon_stats *rx_mon_stats) 818 { 819 } 820 #endif 821 822 void 823 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev) 824 { 825 struct cdp_pdev_mon_stats *rx_mon_stats; 826 uint32_t *stat_ring_ppdu_ids; 827 uint32_t *dest_ring_ppdu_ids; 828 int i, idx; 829 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 830 831 rx_mon_stats = &mon_pdev->rx_mon_stats; 832 833 DP_PRINT_STATS("PDEV Rx Monitor Stats:\n"); 834 835 DP_PRINT_STATS("status_ppdu_compl_cnt = %d", 836 rx_mon_stats->status_ppdu_compl); 837 DP_PRINT_STATS("status_ppdu_start_cnt = %d", 838 rx_mon_stats->status_ppdu_start); 839 DP_PRINT_STATS("status_ppdu_end_cnt = %d", 840 rx_mon_stats->status_ppdu_end); 841 DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d", 842 rx_mon_stats->status_ppdu_start_mis); 843 DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d", 844 rx_mon_stats->status_ppdu_end_mis); 845 DP_PRINT_STATS("status_ppdu_done_cnt = %d", 846 rx_mon_stats->status_ppdu_done); 847 DP_PRINT_STATS("dest_ppdu_done_cnt = %d", 848 rx_mon_stats->dest_ppdu_done); 849 DP_PRINT_STATS("dest_mpdu_done_cnt = %d", 850 rx_mon_stats->dest_mpdu_done); 851 DP_PRINT_STATS("tlv_tag_status_err_cnt = %u", 852 
rx_mon_stats->tlv_tag_status_err); 853 DP_PRINT_STATS("mon status DMA not done WAR count= %u", 854 rx_mon_stats->status_buf_done_war); 855 DP_PRINT_STATS("dest_mpdu_drop_cnt = %d", 856 rx_mon_stats->dest_mpdu_drop); 857 DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d", 858 rx_mon_stats->dup_mon_linkdesc_cnt); 859 DP_PRINT_STATS("dup_mon_buf_cnt = %d", 860 rx_mon_stats->dup_mon_buf_cnt); 861 DP_PRINT_STATS("mon_rx_buf_reaped = %u", 862 rx_mon_stats->mon_rx_bufs_reaped_dest); 863 DP_PRINT_STATS("mon_rx_buf_replenished = %u", 864 rx_mon_stats->mon_rx_bufs_replenished_dest); 865 DP_PRINT_STATS("ppdu_id_mismatch = %u", 866 rx_mon_stats->ppdu_id_mismatch); 867 DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d", 868 rx_mon_stats->ppdu_id_match); 869 DP_PRINT_STATS("ppdus dropped frm status ring = %d", 870 rx_mon_stats->status_ppdu_drop); 871 DP_PRINT_STATS("ppdus dropped frm dest ring = %d", 872 rx_mon_stats->dest_ppdu_drop); 873 DP_PRINT_STATS("mpdu_ppdu_id_mismatch_drop = %u", 874 rx_mon_stats->mpdu_ppdu_id_mismatch_drop); 875 DP_PRINT_STATS("mpdu_decap_type_invalid = %u", 876 rx_mon_stats->mpdu_decap_type_invalid); 877 stat_ring_ppdu_ids = 878 (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST); 879 dest_ring_ppdu_ids = 880 (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST); 881 882 if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) 883 DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n"); 884 885 qdf_spin_lock_bh(&mon_pdev->mon_lock); 886 idx = rx_mon_stats->ppdu_id_hist_idx; 887 qdf_mem_copy(stat_ring_ppdu_ids, 888 rx_mon_stats->stat_ring_ppdu_id_hist, 889 sizeof(uint32_t) * MAX_PPDU_ID_HIST); 890 qdf_mem_copy(dest_ring_ppdu_ids, 891 rx_mon_stats->dest_ring_ppdu_id_hist, 892 sizeof(uint32_t) * MAX_PPDU_ID_HIST); 893 qdf_spin_unlock_bh(&mon_pdev->mon_lock); 894 895 DP_PRINT_STATS("PPDU Id history:"); 896 DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids"); 897 for (i = 0; i < MAX_PPDU_ID_HIST; i++) { 898 idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1); 899 
DP_PRINT_STATS("%*u\t%*u", 16, 900 rx_mon_stats->stat_ring_ppdu_id_hist[idx], 16, 901 rx_mon_stats->dest_ring_ppdu_id_hist[idx]); 902 } 903 qdf_mem_free(stat_ring_ppdu_ids); 904 qdf_mem_free(dest_ring_ppdu_ids); 905 DP_PRINT_STATS("mon_rx_dest_stuck = %d", 906 rx_mon_stats->mon_rx_dest_stuck); 907 908 dp_pdev_get_undecoded_capture_stats(mon_pdev, rx_mon_stats); 909 dp_mon_rx_print_advanced_stats(pdev->soc, pdev); 910 } 911 912 #ifdef QCA_SUPPORT_BPR 913 QDF_STATUS 914 dp_set_bpr_enable(struct dp_pdev *pdev, int val) 915 { 916 struct dp_mon_ops *mon_ops; 917 918 mon_ops = dp_mon_ops_get(pdev->soc); 919 if (mon_ops && mon_ops->mon_set_bpr_enable) 920 return mon_ops->mon_set_bpr_enable(pdev, val); 921 922 return QDF_STATUS_E_FAILURE; 923 } 924 #endif 925 926 #ifdef WDI_EVENT_ENABLE 927 #ifdef BE_PKTLOG_SUPPORT 928 static bool 929 dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev, 930 struct dp_mon_pdev *mon_pdev, 931 struct dp_soc *soc) 932 { 933 if (mon_pdev->mvdev) { 934 /* Nothing needs to be done if monitor mode is 935 * enabled 936 */ 937 mon_pdev->pktlog_hybrid_mode = true; 938 return false; 939 } 940 941 if (!mon_pdev->pktlog_hybrid_mode) { 942 mon_pdev->pktlog_hybrid_mode = true; 943 dp_mon_filter_setup_pktlog_hybrid(pdev); 944 if (dp_mon_filter_update(pdev) != 945 QDF_STATUS_SUCCESS) { 946 dp_cdp_err("Set hybrid filters failed"); 947 dp_mon_filter_reset_pktlog_hybrid(pdev); 948 mon_pdev->rx_pktlog_mode = 949 DP_RX_PKTLOG_DISABLED; 950 return false; 951 } 952 953 dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_PKTLOG); 954 } 955 956 return true; 957 } 958 959 static void 960 dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev) 961 { 962 mon_pdev->pktlog_hybrid_mode = false; 963 } 964 #else 965 static void 966 dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev) 967 { 968 } 969 970 static bool 971 dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev, 972 struct dp_mon_pdev *mon_pdev, 973 struct dp_soc *soc) 974 { 975 dp_cdp_err("Hybrid mode is 
supported only on beryllium"); 976 return true; 977 } 978 #endif 979 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, 980 bool enable) 981 { 982 struct dp_soc *soc = NULL; 983 int max_mac_rings = wlan_cfg_get_num_mac_rings 984 (pdev->wlan_cfg_ctx); 985 uint8_t mac_id = 0; 986 struct dp_mon_soc *mon_soc; 987 struct dp_mon_ops *mon_ops; 988 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 989 990 soc = pdev->soc; 991 mon_soc = soc->monitor_soc; 992 mon_ops = dp_mon_ops_get(soc); 993 994 if (!mon_ops) 995 return 0; 996 997 dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings); 998 999 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 1000 FL("Max_mac_rings %d "), 1001 max_mac_rings); 1002 1003 if (enable) { 1004 switch (event) { 1005 case WDI_EVENT_RX_DESC: 1006 if (mon_pdev->mvdev) { 1007 /* Nothing needs to be done if monitor mode is 1008 * enabled 1009 */ 1010 mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL; 1011 return 0; 1012 } 1013 1014 if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL) 1015 break; 1016 1017 mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL; 1018 dp_mon_filter_setup_rx_pkt_log_full(pdev); 1019 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { 1020 dp_cdp_err("%pK: Pktlog full filters set failed", 1021 soc); 1022 dp_mon_filter_reset_rx_pkt_log_full(pdev); 1023 mon_pdev->rx_pktlog_mode = 1024 DP_RX_PKTLOG_DISABLED; 1025 return 0; 1026 } 1027 1028 dp_monitor_reap_timer_start(soc, 1029 CDP_MON_REAP_SOURCE_PKTLOG); 1030 break; 1031 1032 case WDI_EVENT_LITE_RX: 1033 if (mon_pdev->mvdev) { 1034 /* Nothing needs to be done if monitor mode is 1035 * enabled 1036 */ 1037 mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE; 1038 return 0; 1039 } 1040 1041 if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE) 1042 break; 1043 1044 mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE; 1045 1046 /* 1047 * Set the packet log lite mode filter. 
1048 */ 1049 dp_mon_filter_setup_rx_pkt_log_lite(pdev); 1050 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { 1051 dp_cdp_err("%pK: Pktlog lite filters set failed", 1052 soc); 1053 dp_mon_filter_reset_rx_pkt_log_lite(pdev); 1054 mon_pdev->rx_pktlog_mode = 1055 DP_RX_PKTLOG_DISABLED; 1056 return 0; 1057 } 1058 1059 dp_monitor_reap_timer_start(soc, 1060 CDP_MON_REAP_SOURCE_PKTLOG); 1061 break; 1062 case WDI_EVENT_LITE_T2H: 1063 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { 1064 int mac_for_pdev = dp_get_mac_id_for_pdev( 1065 mac_id, pdev->pdev_id); 1066 1067 mon_pdev->pktlog_ppdu_stats = true; 1068 dp_h2t_cfg_stats_msg_send(pdev, 1069 DP_PPDU_TXLITE_STATS_BITMASK_CFG, 1070 mac_for_pdev); 1071 } 1072 break; 1073 1074 case WDI_EVENT_RX_CBF: 1075 if (mon_pdev->mvdev) { 1076 /* Nothing needs to be done if monitor mode is 1077 * enabled 1078 */ 1079 dp_mon_info("Mon mode, CBF setting filters"); 1080 mon_pdev->rx_pktlog_cbf = true; 1081 return 0; 1082 } 1083 1084 if (mon_pdev->rx_pktlog_cbf) 1085 break; 1086 1087 mon_pdev->rx_pktlog_cbf = true; 1088 mon_pdev->monitor_configured = true; 1089 if (mon_ops->mon_vdev_set_monitor_mode_buf_rings) 1090 mon_ops->mon_vdev_set_monitor_mode_buf_rings( 1091 pdev); 1092 1093 /* 1094 * Set the packet log lite mode filter. 
1095 */ 1096 qdf_info("Non mon mode: Enable destination ring"); 1097 1098 dp_mon_filter_setup_rx_pkt_log_cbf(pdev); 1099 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { 1100 dp_mon_err("Pktlog set CBF filters failed"); 1101 dp_mon_filter_reset_rx_pktlog_cbf(pdev); 1102 mon_pdev->rx_pktlog_mode = 1103 DP_RX_PKTLOG_DISABLED; 1104 mon_pdev->monitor_configured = false; 1105 return 0; 1106 } 1107 1108 dp_monitor_reap_timer_start(soc, 1109 CDP_MON_REAP_SOURCE_PKTLOG); 1110 break; 1111 case WDI_EVENT_HYBRID_TX: 1112 if (!dp_set_hybrid_pktlog_enable(pdev, mon_pdev, soc)) 1113 return 0; 1114 break; 1115 1116 default: 1117 /* Nothing needs to be done for other pktlog types */ 1118 break; 1119 } 1120 } else { 1121 switch (event) { 1122 case WDI_EVENT_RX_DESC: 1123 case WDI_EVENT_LITE_RX: 1124 if (mon_pdev->mvdev) { 1125 /* Nothing needs to be done if monitor mode is 1126 * enabled 1127 */ 1128 mon_pdev->rx_pktlog_mode = 1129 DP_RX_PKTLOG_DISABLED; 1130 return 0; 1131 } 1132 1133 if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_DISABLED) 1134 break; 1135 1136 mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED; 1137 dp_mon_filter_reset_rx_pkt_log_full(pdev); 1138 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { 1139 dp_cdp_err("%pK: Pktlog filters reset failed", 1140 soc); 1141 return 0; 1142 } 1143 1144 dp_mon_filter_reset_rx_pkt_log_lite(pdev); 1145 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { 1146 dp_cdp_err("%pK: Pktlog filters reset failed", 1147 soc); 1148 return 0; 1149 } 1150 1151 dp_monitor_reap_timer_stop(soc, 1152 CDP_MON_REAP_SOURCE_PKTLOG); 1153 break; 1154 case WDI_EVENT_LITE_T2H: 1155 /* 1156 * To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW 1157 * passing value 0. 
Once these macros will define in htt 1158 * header file will use proper macros 1159 */ 1160 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { 1161 int mac_for_pdev = 1162 dp_get_mac_id_for_pdev(mac_id, 1163 pdev->pdev_id); 1164 1165 mon_pdev->pktlog_ppdu_stats = false; 1166 if (!mon_pdev->enhanced_stats_en && 1167 !mon_pdev->tx_sniffer_enable && 1168 !mon_pdev->mcopy_mode) { 1169 dp_h2t_cfg_stats_msg_send(pdev, 0, 1170 mac_for_pdev); 1171 } else if (mon_pdev->tx_sniffer_enable || 1172 mon_pdev->mcopy_mode) { 1173 dp_h2t_cfg_stats_msg_send(pdev, 1174 DP_PPDU_STATS_CFG_SNIFFER, 1175 mac_for_pdev); 1176 } else if (mon_pdev->enhanced_stats_en) { 1177 dp_h2t_cfg_stats_msg_send(pdev, 1178 DP_PPDU_STATS_CFG_ENH_STATS, 1179 mac_for_pdev); 1180 } 1181 } 1182 1183 break; 1184 case WDI_EVENT_RX_CBF: 1185 mon_pdev->rx_pktlog_cbf = false; 1186 break; 1187 1188 case WDI_EVENT_HYBRID_TX: 1189 dp_set_hybrid_pktlog_disable(mon_pdev); 1190 break; 1191 1192 default: 1193 /* Nothing needs to be done for other pktlog types */ 1194 break; 1195 } 1196 } 1197 return 0; 1198 } 1199 #endif 1200 1201 /* MCL specific functions */ 1202 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG) 1203 void dp_pktlogmod_exit(struct dp_pdev *pdev) 1204 { 1205 struct dp_soc *soc = pdev->soc; 1206 struct hif_opaque_softc *scn = soc->hif_handle; 1207 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1208 1209 if (!scn) { 1210 dp_mon_err("Invalid hif(scn) handle"); 1211 return; 1212 } 1213 1214 dp_monitor_reap_timer_stop(soc, CDP_MON_REAP_SOURCE_PKTLOG); 1215 pktlogmod_exit(scn); 1216 mon_pdev->pkt_log_init = false; 1217 } 1218 #endif /*DP_CON_MON*/ 1219 1220 #if defined(WDI_EVENT_ENABLE) && defined(QCA_ENHANCED_STATS_SUPPORT) 1221 #ifdef IPA_OFFLOAD 1222 void dp_peer_get_tx_rx_stats(struct dp_peer *peer, 1223 struct cdp_interface_peer_stats *peer_stats_intf) 1224 { 1225 struct dp_rx_tid *rx_tid = NULL; 1226 uint8_t i = 0; 1227 1228 for (i = 0; i < DP_MAX_TIDS; i++) { 1229 rx_tid = &peer->rx_tid[i]; 1230 
peer_stats_intf->rx_byte_count += 1231 rx_tid->rx_msdu_cnt.bytes; 1232 peer_stats_intf->rx_packet_count += 1233 rx_tid->rx_msdu_cnt.num; 1234 } 1235 peer_stats_intf->tx_packet_count = 1236 peer->monitor_peer->stats.tx.tx_ucast_success.num; 1237 peer_stats_intf->tx_byte_count = 1238 peer->monitor_peer->stats.tx.tx_ucast_success.bytes; 1239 } 1240 #else 1241 void dp_peer_get_tx_rx_stats(struct dp_peer *peer, 1242 struct cdp_interface_peer_stats *peer_stats_intf) 1243 { 1244 struct dp_txrx_peer *txrx_peer = NULL; 1245 struct dp_peer *tgt_peer = NULL; 1246 1247 tgt_peer = dp_get_tgt_peer_from_peer(peer); 1248 txrx_peer = tgt_peer->txrx_peer; 1249 peer_stats_intf->rx_packet_count = txrx_peer->to_stack.num; 1250 peer_stats_intf->rx_byte_count = txrx_peer->to_stack.bytes; 1251 peer_stats_intf->tx_packet_count = 1252 txrx_peer->stats.per_pkt_stats.tx.ucast.num; 1253 peer_stats_intf->tx_byte_count = 1254 txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes; 1255 } 1256 #endif 1257 1258 QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer) 1259 { 1260 struct cdp_interface_peer_stats peer_stats_intf = {0}; 1261 struct dp_mon_peer_stats *mon_peer_stats = NULL; 1262 struct dp_peer *tgt_peer = NULL; 1263 struct dp_txrx_peer *txrx_peer = NULL; 1264 1265 if (qdf_unlikely(!peer || !peer->vdev || !peer->monitor_peer)) 1266 return QDF_STATUS_E_FAULT; 1267 1268 tgt_peer = dp_get_tgt_peer_from_peer(peer); 1269 if (qdf_unlikely(!tgt_peer)) 1270 return QDF_STATUS_E_FAULT; 1271 1272 txrx_peer = tgt_peer->txrx_peer; 1273 if (!qdf_unlikely(txrx_peer)) 1274 return QDF_STATUS_E_FAULT; 1275 1276 mon_peer_stats = &peer->monitor_peer->stats; 1277 1278 if (mon_peer_stats->rx.last_snr != mon_peer_stats->rx.snr) 1279 peer_stats_intf.rssi_changed = true; 1280 1281 if ((mon_peer_stats->rx.snr && peer_stats_intf.rssi_changed) || 1282 (mon_peer_stats->tx.tx_rate && 1283 mon_peer_stats->tx.tx_rate != mon_peer_stats->tx.last_tx_rate)) { 1284 qdf_mem_copy(peer_stats_intf.peer_mac, 
peer->mac_addr.raw, 1285 QDF_MAC_ADDR_SIZE); 1286 peer_stats_intf.vdev_id = peer->vdev->vdev_id; 1287 peer_stats_intf.last_peer_tx_rate = 1288 mon_peer_stats->tx.last_tx_rate; 1289 peer_stats_intf.peer_tx_rate = mon_peer_stats->tx.tx_rate; 1290 peer_stats_intf.peer_rssi = mon_peer_stats->rx.snr; 1291 peer_stats_intf.ack_rssi = mon_peer_stats->tx.last_ack_rssi; 1292 dp_peer_get_tx_rx_stats(peer, &peer_stats_intf); 1293 peer_stats_intf.per = tgt_peer->stats.tx.last_per; 1294 peer_stats_intf.free_buff = INVALID_FREE_BUFF; 1295 dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc, 1296 (void *)&peer_stats_intf, 0, 1297 WDI_NO_VAL, dp_pdev->pdev_id); 1298 } 1299 1300 return QDF_STATUS_SUCCESS; 1301 } 1302 #endif 1303 1304 #ifdef FEATURE_NAC_RSSI 1305 /** 1306 * dp_rx_nac_filter(): Function to perform filtering of non-associated 1307 * clients 1308 * @pdev: DP pdev handle 1309 * @rx_pkt_hdr: Rx packet Header 1310 * 1311 * return: dp_vdev* 1312 */ 1313 static 1314 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev, 1315 uint8_t *rx_pkt_hdr) 1316 { 1317 struct ieee80211_frame *wh; 1318 struct dp_neighbour_peer *peer = NULL; 1319 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1320 1321 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1322 1323 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS) 1324 return NULL; 1325 1326 qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex); 1327 TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list, 1328 neighbour_peer_list_elem) { 1329 if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], 1330 wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) { 1331 dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x", 1332 pdev->soc, 1333 peer->neighbour_peers_macaddr.raw[0], 1334 peer->neighbour_peers_macaddr.raw[1], 1335 peer->neighbour_peers_macaddr.raw[2], 1336 peer->neighbour_peers_macaddr.raw[3], 1337 peer->neighbour_peers_macaddr.raw[4], 1338 peer->neighbour_peers_macaddr.raw[5]); 1339 1340 
qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1341 1342 return mon_pdev->mvdev; 1343 } 1344 } 1345 qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1346 1347 return NULL; 1348 } 1349 1350 QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev, 1351 uint8_t *rx_pkt_hdr) 1352 { 1353 struct dp_vdev *vdev = NULL; 1354 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1355 1356 if (mon_pdev->filter_neighbour_peers) { 1357 /* Next Hop scenario not yet handle */ 1358 vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr); 1359 if (vdev) { 1360 dp_rx_mon_deliver(pdev->soc, pdev->pdev_id, 1361 pdev->invalid_peer_head_msdu, 1362 pdev->invalid_peer_tail_msdu); 1363 1364 pdev->invalid_peer_head_msdu = NULL; 1365 pdev->invalid_peer_tail_msdu = NULL; 1366 return QDF_STATUS_SUCCESS; 1367 } 1368 } 1369 1370 return QDF_STATUS_E_FAILURE; 1371 } 1372 #endif 1373 1374 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) 1375 /* 1376 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients) 1377 * address for smart mesh filtering 1378 * @txrx_soc: cdp soc handle 1379 * @vdev_id: id of virtual device object 1380 * @cmd: Add/Del command 1381 * @macaddr: nac client mac address 1382 * 1383 * Return: success/failure 1384 */ 1385 static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl, 1386 uint8_t vdev_id, 1387 uint32_t cmd, uint8_t *macaddr) 1388 { 1389 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 1390 struct dp_pdev *pdev; 1391 struct dp_neighbour_peer *peer = NULL; 1392 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 1393 DP_MOD_ID_CDP); 1394 struct dp_mon_pdev *mon_pdev; 1395 1396 if (!vdev || !macaddr) 1397 goto fail0; 1398 1399 pdev = vdev->pdev; 1400 1401 if (!pdev) 1402 goto fail0; 1403 1404 mon_pdev = pdev->monitor_pdev; 1405 1406 /* Store address of NAC (neighbour peer) which will be checked 1407 * against TA of received packets. 
1408 */ 1409 if (cmd == DP_NAC_PARAM_ADD) { 1410 peer = (struct dp_neighbour_peer *)qdf_mem_malloc( 1411 sizeof(*peer)); 1412 1413 if (!peer) { 1414 dp_cdp_err("%pK: DP neighbour peer node memory allocation failed" 1415 , soc); 1416 goto fail0; 1417 } 1418 1419 qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0], 1420 macaddr, QDF_MAC_ADDR_SIZE); 1421 peer->vdev = vdev; 1422 1423 qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex); 1424 1425 /* add this neighbour peer into the list */ 1426 TAILQ_INSERT_TAIL(&mon_pdev->neighbour_peers_list, peer, 1427 neighbour_peer_list_elem); 1428 qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1429 1430 /* first neighbour */ 1431 if (!mon_pdev->neighbour_peers_added) { 1432 QDF_STATUS status = QDF_STATUS_SUCCESS; 1433 1434 mon_pdev->neighbour_peers_added = true; 1435 dp_mon_filter_setup_smart_monitor(pdev); 1436 status = dp_mon_filter_update(pdev); 1437 if (status != QDF_STATUS_SUCCESS) { 1438 dp_cdp_err("%pK: smart mon filter setup failed", 1439 soc); 1440 dp_mon_filter_reset_smart_monitor(pdev); 1441 mon_pdev->neighbour_peers_added = false; 1442 } 1443 } 1444 1445 } else if (cmd == DP_NAC_PARAM_DEL) { 1446 qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex); 1447 TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list, 1448 neighbour_peer_list_elem) { 1449 if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], 1450 macaddr, QDF_MAC_ADDR_SIZE)) { 1451 /* delete this peer from the list */ 1452 TAILQ_REMOVE(&mon_pdev->neighbour_peers_list, 1453 peer, neighbour_peer_list_elem); 1454 qdf_mem_free(peer); 1455 break; 1456 } 1457 } 1458 /* last neighbour deleted */ 1459 if (TAILQ_EMPTY(&mon_pdev->neighbour_peers_list)) { 1460 QDF_STATUS status = QDF_STATUS_SUCCESS; 1461 1462 dp_mon_filter_reset_smart_monitor(pdev); 1463 status = dp_mon_filter_update(pdev); 1464 if (status != QDF_STATUS_SUCCESS) { 1465 dp_cdp_err("%pK: smart mon filter clear failed", 1466 soc); 1467 } 1468 mon_pdev->neighbour_peers_added = false; 1469 } 1470 
qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1471 } 1472 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 1473 return 1; 1474 1475 fail0: 1476 if (vdev) 1477 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 1478 return 0; 1479 } 1480 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ 1481 1482 #ifdef ATH_SUPPORT_NAC_RSSI 1483 /** 1484 * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC 1485 * @soc_hdl: DP soc handle 1486 * @vdev_id: id of DP vdev handle 1487 * @mac_addr: neighbour mac 1488 * @rssi: rssi value 1489 * 1490 * Return: 0 for success. nonzero for failure. 1491 */ 1492 static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl, 1493 uint8_t vdev_id, 1494 char *mac_addr, 1495 uint8_t *rssi) 1496 { 1497 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1498 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 1499 DP_MOD_ID_CDP); 1500 struct dp_pdev *pdev; 1501 struct dp_neighbour_peer *peer = NULL; 1502 QDF_STATUS status = QDF_STATUS_E_FAILURE; 1503 struct dp_mon_pdev *mon_pdev; 1504 1505 if (!vdev) 1506 return status; 1507 1508 pdev = vdev->pdev; 1509 mon_pdev = pdev->monitor_pdev; 1510 1511 *rssi = 0; 1512 qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex); 1513 TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list, 1514 neighbour_peer_list_elem) { 1515 if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], 1516 mac_addr, QDF_MAC_ADDR_SIZE) == 0) { 1517 *rssi = peer->rssi; 1518 status = QDF_STATUS_SUCCESS; 1519 break; 1520 } 1521 } 1522 qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1523 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 1524 return status; 1525 } 1526 1527 static QDF_STATUS 1528 dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc, 1529 uint8_t vdev_id, 1530 enum cdp_nac_param_cmd cmd, char *bssid, 1531 char *client_macaddr, 1532 uint8_t chan_num) 1533 { 1534 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 1535 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 1536 DP_MOD_ID_CDP); 1537 struct 
dp_pdev *pdev; 1538 struct dp_mon_pdev *mon_pdev; 1539 1540 if (!vdev) 1541 return QDF_STATUS_E_FAILURE; 1542 1543 pdev = (struct dp_pdev *)vdev->pdev; 1544 1545 mon_pdev = pdev->monitor_pdev; 1546 mon_pdev->nac_rssi_filtering = 1; 1547 /* Store address of NAC (neighbour peer) which will be checked 1548 * against TA of received packets. 1549 */ 1550 1551 if (cmd == CDP_NAC_PARAM_ADD) { 1552 dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id, 1553 DP_NAC_PARAM_ADD, 1554 (uint8_t *)client_macaddr); 1555 } else if (cmd == CDP_NAC_PARAM_DEL) { 1556 dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id, 1557 DP_NAC_PARAM_DEL, 1558 (uint8_t *)client_macaddr); 1559 } 1560 1561 if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi) 1562 soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi 1563 (soc->ctrl_psoc, pdev->pdev_id, 1564 vdev->vdev_id, cmd, bssid, client_macaddr); 1565 1566 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 1567 return QDF_STATUS_SUCCESS; 1568 } 1569 #endif 1570 1571 bool 1572 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, 1573 enum cdp_mon_reap_source source, 1574 bool enable) 1575 { 1576 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1577 1578 if (enable) 1579 return dp_monitor_reap_timer_start(soc, source); 1580 else 1581 return dp_monitor_reap_timer_stop(soc, source); 1582 } 1583 1584 #if defined(DP_CON_MON) 1585 #ifndef REMOVE_PKT_LOG 1586 /** 1587 * dp_pkt_log_init() - API to initialize packet log 1588 * @soc_hdl: Datapath soc handle 1589 * @pdev_id: id of data path pdev handle 1590 * @scn: HIF context 1591 * 1592 * Return: none 1593 */ 1594 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn) 1595 { 1596 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1597 struct dp_pdev *handle = 1598 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1599 struct dp_mon_pdev *mon_pdev; 1600 1601 if (!handle) { 1602 dp_mon_err("pdev handle is NULL"); 1603 return; 1604 } 1605 1606 mon_pdev = handle->monitor_pdev; 1607 
1608 if (mon_pdev->pkt_log_init) { 1609 dp_mon_err("%pK: Packet log not initialized", soc); 1610 return; 1611 } 1612 1613 pktlog_sethandle(&mon_pdev->pl_dev, scn); 1614 pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id); 1615 pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION); 1616 1617 if (pktlogmod_init(scn)) { 1618 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1619 "%s: pktlogmod_init failed", __func__); 1620 mon_pdev->pkt_log_init = false; 1621 } else { 1622 mon_pdev->pkt_log_init = true; 1623 } 1624 } 1625 1626 /** 1627 * dp_pkt_log_con_service() - connect packet log service 1628 * @soc_hdl: Datapath soc handle 1629 * @pdev_id: id of data path pdev handle 1630 * @scn: device context 1631 * 1632 * Return: none 1633 */ 1634 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl, 1635 uint8_t pdev_id, void *scn) 1636 { 1637 dp_pkt_log_init(soc_hdl, pdev_id, scn); 1638 pktlog_htc_attach(); 1639 } 1640 1641 /** 1642 * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info 1643 * @soc_hdl: Datapath soc handle 1644 * @pdev_id: id of data path pdev handle 1645 * 1646 * Return: none 1647 */ 1648 static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1649 { 1650 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1651 struct dp_pdev *pdev = 1652 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1653 1654 if (!pdev) { 1655 dp_err("pdev handle is NULL"); 1656 return; 1657 } 1658 1659 dp_pktlogmod_exit(pdev); 1660 } 1661 1662 #else 1663 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl, 1664 uint8_t pdev_id, void *scn) 1665 { 1666 } 1667 1668 static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1669 { 1670 } 1671 #endif 1672 #endif 1673 1674 void dp_neighbour_peers_detach(struct dp_pdev *pdev) 1675 { 1676 struct dp_neighbour_peer *peer = NULL; 1677 struct dp_neighbour_peer *temp_peer = NULL; 1678 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1679 1680 TAILQ_FOREACH_SAFE(peer, 
&mon_pdev->neighbour_peers_list, 1681 neighbour_peer_list_elem, temp_peer) { 1682 /* delete this peer from the list */ 1683 TAILQ_REMOVE(&mon_pdev->neighbour_peers_list, 1684 peer, neighbour_peer_list_elem); 1685 qdf_mem_free(peer); 1686 } 1687 1688 qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex); 1689 } 1690 1691 #ifdef QCA_ENHANCED_STATS_SUPPORT 1692 /* 1693 * dp_mon_tx_enable_enhanced_stats() - Enable enhanced Tx stats 1694 * @pdev: Datapath pdev handle 1695 * 1696 * Return: void 1697 */ 1698 static void dp_mon_tx_enable_enhanced_stats(struct dp_pdev *pdev) 1699 { 1700 struct dp_soc *soc = pdev->soc; 1701 struct dp_mon_ops *mon_ops = NULL; 1702 1703 mon_ops = dp_mon_ops_get(soc); 1704 if (mon_ops && mon_ops->mon_tx_enable_enhanced_stats) 1705 mon_ops->mon_tx_enable_enhanced_stats(pdev); 1706 } 1707 1708 /* 1709 * dp_enable_enhanced_stats()- API to enable enhanced statistcs 1710 * @soc_handle: DP_SOC handle 1711 * @pdev_id: id of DP_PDEV handle 1712 * 1713 * Return: QDF_STATUS 1714 */ 1715 static QDF_STATUS 1716 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id) 1717 { 1718 struct dp_pdev *pdev = NULL; 1719 QDF_STATUS status = QDF_STATUS_SUCCESS; 1720 struct dp_mon_pdev *mon_pdev; 1721 1722 pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 1723 pdev_id); 1724 1725 if (!pdev) 1726 return QDF_STATUS_E_FAILURE; 1727 1728 mon_pdev = pdev->monitor_pdev; 1729 1730 if (!mon_pdev) 1731 return QDF_STATUS_E_FAILURE; 1732 1733 if (mon_pdev->enhanced_stats_en == 0) 1734 dp_cal_client_timer_start(mon_pdev->cal_client_ctx); 1735 1736 mon_pdev->enhanced_stats_en = 1; 1737 pdev->enhanced_stats_en = true; 1738 1739 dp_mon_filter_setup_enhanced_stats(pdev); 1740 status = dp_mon_filter_update(pdev); 1741 if (status != QDF_STATUS_SUCCESS) { 1742 dp_cdp_err("%pK: Failed to set enhanced mode filters", soc); 1743 dp_mon_filter_reset_enhanced_stats(pdev); 1744 dp_cal_client_timer_stop(mon_pdev->cal_client_ctx); 1745 mon_pdev->enhanced_stats_en = 0; 1746 
pdev->enhanced_stats_en = false; 1747 return QDF_STATUS_E_FAILURE; 1748 } 1749 1750 dp_mon_tx_enable_enhanced_stats(pdev); 1751 1752 return QDF_STATUS_SUCCESS; 1753 } 1754 1755 /* 1756 * dp_mon_tx_disable_enhanced_stats() - Disable enhanced Tx stats 1757 * @pdev: Datapath pdev handle 1758 * 1759 * Return: void 1760 */ 1761 static void dp_mon_tx_disable_enhanced_stats(struct dp_pdev *pdev) 1762 { 1763 struct dp_soc *soc = pdev->soc; 1764 struct dp_mon_ops *mon_ops = NULL; 1765 1766 mon_ops = dp_mon_ops_get(soc); 1767 if (mon_ops && mon_ops->mon_tx_disable_enhanced_stats) 1768 mon_ops->mon_tx_disable_enhanced_stats(pdev); 1769 } 1770 1771 /* 1772 * dp_disable_enhanced_stats()- API to disable enhanced statistcs 1773 * 1774 * @param soc - the soc handle 1775 * @param pdev_id - pdev_id of pdev 1776 * @return - QDF_STATUS 1777 */ 1778 static QDF_STATUS 1779 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id) 1780 { 1781 struct dp_pdev *pdev = 1782 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 1783 pdev_id); 1784 struct dp_mon_pdev *mon_pdev; 1785 1786 1787 if (!pdev || !pdev->monitor_pdev) 1788 return QDF_STATUS_E_FAILURE; 1789 1790 mon_pdev = pdev->monitor_pdev; 1791 1792 if (mon_pdev->enhanced_stats_en == 1) 1793 dp_cal_client_timer_stop(mon_pdev->cal_client_ctx); 1794 1795 mon_pdev->enhanced_stats_en = 0; 1796 pdev->enhanced_stats_en = false; 1797 1798 dp_mon_tx_disable_enhanced_stats(pdev); 1799 1800 dp_mon_filter_reset_enhanced_stats(pdev); 1801 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { 1802 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1803 FL("Failed to reset enhanced mode filters")); 1804 } 1805 1806 return QDF_STATUS_SUCCESS; 1807 } 1808 1809 #ifdef WDI_EVENT_ENABLE 1810 QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev, 1811 struct cdp_rx_stats_ppdu_user *ppdu_user) 1812 { 1813 struct cdp_interface_peer_qos_stats qos_stats_intf = {0}; 1814 1815 if (qdf_unlikely(ppdu_user->peer_id == HTT_INVALID_PEER)) { 1816 
dp_mon_warn("Invalid peer id"); 1817 return QDF_STATUS_E_FAILURE; 1818 } 1819 1820 qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr, 1821 QDF_MAC_ADDR_SIZE); 1822 qos_stats_intf.frame_control = ppdu_user->frame_control; 1823 qos_stats_intf.frame_control_info_valid = 1824 ppdu_user->frame_control_info_valid; 1825 qos_stats_intf.qos_control = ppdu_user->qos_control; 1826 qos_stats_intf.qos_control_info_valid = 1827 ppdu_user->qos_control_info_valid; 1828 qos_stats_intf.vdev_id = ppdu_user->vdev_id; 1829 dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc, 1830 (void *)&qos_stats_intf, 0, 1831 WDI_NO_VAL, dp_pdev->pdev_id); 1832 1833 return QDF_STATUS_SUCCESS; 1834 } 1835 #else 1836 static inline QDF_STATUS 1837 dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev, 1838 struct cdp_rx_stats_ppdu_user *ppdu_user) 1839 { 1840 return QDF_STATUS_SUCCESS; 1841 } 1842 #endif 1843 #endif /* QCA_ENHANCED_STATS_SUPPORT */ 1844 1845 /** 1846 * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering 1847 * for pktlog 1848 * @soc: cdp_soc handle 1849 * @pdev_id: id of dp pdev handle 1850 * @mac_addr: Peer mac address 1851 * @enb_dsb: Enable or disable peer based filtering 1852 * 1853 * Return: QDF_STATUS 1854 */ 1855 static int 1856 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id, 1857 uint8_t *mac_addr, uint8_t enb_dsb) 1858 { 1859 struct dp_peer *peer; 1860 QDF_STATUS status = QDF_STATUS_E_FAILURE; 1861 struct dp_pdev *pdev = 1862 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 1863 pdev_id); 1864 struct dp_mon_pdev *mon_pdev; 1865 1866 if (!pdev) 1867 return QDF_STATUS_E_FAILURE; 1868 1869 mon_pdev = pdev->monitor_pdev; 1870 1871 peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr, 1872 0, DP_VDEV_ALL, DP_MOD_ID_CDP); 1873 1874 if (!peer) { 1875 dp_mon_err("Invalid Peer"); 1876 return QDF_STATUS_E_FAILURE; 1877 } 1878 1879 if (!IS_MLO_DP_MLD_PEER(peer) && peer->monitor_peer) { 1880 
peer->monitor_peer->peer_based_pktlog_filter = enb_dsb; 1881 mon_pdev->dp_peer_based_pktlog = enb_dsb; 1882 status = QDF_STATUS_SUCCESS; 1883 } 1884 1885 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 1886 1887 return status; 1888 } 1889 1890 /** 1891 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer 1892 * @soc: DP_SOC handle 1893 * @pdev_id: id of DP_PDEV handle 1894 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode 1895 * @is_tx_pkt_cap_enable: enable/disable/delete/print 1896 * Tx packet capture in monitor mode 1897 * @peer_mac: MAC address for which the above need to be enabled/disabled 1898 * 1899 * Return: Success if Rx & Tx capture is enabled for peer, false otherwise 1900 */ 1901 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) 1902 static QDF_STATUS 1903 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc, 1904 uint8_t pdev_id, 1905 bool is_rx_pkt_cap_enable, 1906 uint8_t is_tx_pkt_cap_enable, 1907 uint8_t *peer_mac) 1908 { 1909 struct dp_peer *peer; 1910 QDF_STATUS status = QDF_STATUS_E_FAILURE; 1911 struct dp_pdev *pdev = 1912 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 1913 pdev_id); 1914 if (!pdev) 1915 return QDF_STATUS_E_FAILURE; 1916 1917 peer = dp_peer_find_hash_find((struct dp_soc *)soc, 1918 peer_mac, 0, DP_VDEV_ALL, 1919 DP_MOD_ID_CDP); 1920 if (!peer) 1921 return QDF_STATUS_E_FAILURE; 1922 1923 /* we need to set tx pkt capture for non associated peer */ 1924 if (!IS_MLO_DP_MLD_PEER(peer)) { 1925 status = dp_monitor_tx_peer_filter(pdev, peer, 1926 is_tx_pkt_cap_enable, 1927 peer_mac); 1928 1929 status = dp_peer_set_rx_capture_enabled(pdev, peer, 1930 is_rx_pkt_cap_enable, 1931 peer_mac); 1932 } 1933 1934 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 1935 1936 return status; 1937 } 1938 #endif 1939 1940 #ifdef QCA_MCOPY_SUPPORT 1941 QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev, 1942 uint16_t peer_id, 1943 uint32_t ppdu_id, 1944 uint8_t first_msdu) 1945 { 
1946 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1947 1948 if (mon_pdev->mcopy_mode) { 1949 if (mon_pdev->mcopy_mode == M_COPY) { 1950 if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) && 1951 (mon_pdev->m_copy_id.tx_peer_id == peer_id)) { 1952 return QDF_STATUS_E_INVAL; 1953 } 1954 } 1955 1956 if (!first_msdu) 1957 return QDF_STATUS_E_INVAL; 1958 1959 mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id; 1960 mon_pdev->m_copy_id.tx_peer_id = peer_id; 1961 } 1962 1963 return QDF_STATUS_SUCCESS; 1964 } 1965 #endif 1966 1967 #ifdef WDI_EVENT_ENABLE 1968 #ifndef REMOVE_PKT_LOG 1969 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1970 { 1971 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1972 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1973 1974 if (!pdev || !pdev->monitor_pdev) 1975 return NULL; 1976 1977 return pdev->monitor_pdev->pl_dev; 1978 } 1979 #else 1980 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 1981 { 1982 return NULL; 1983 } 1984 #endif 1985 #endif 1986 1987 QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc, 1988 uint32_t mac_id, 1989 uint32_t event, 1990 qdf_nbuf_t mpdu, 1991 uint32_t msdu_timestamp) 1992 { 1993 uint32_t data_size, hdr_size, ppdu_id, align4byte; 1994 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1995 uint32_t *msg_word; 1996 1997 if (!pdev) 1998 return QDF_STATUS_E_INVAL; 1999 2000 ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id; 2001 2002 hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE 2003 + qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload); 2004 2005 data_size = qdf_nbuf_len(mpdu); 2006 2007 qdf_nbuf_push_head(mpdu, hdr_size); 2008 2009 msg_word = (uint32_t *)qdf_nbuf_data(mpdu); 2010 /* 2011 * Populate the PPDU Stats Indication header 2012 */ 2013 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND); 2014 HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id); 2015 HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id); 
2016 align4byte = ((data_size + 2017 qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload) 2018 + 3) >> 2) << 2; 2019 HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte); 2020 msg_word++; 2021 HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id); 2022 msg_word++; 2023 2024 *msg_word = msdu_timestamp; 2025 msg_word++; 2026 /* Skip reserved field */ 2027 msg_word++; 2028 /* 2029 * Populate MGMT_CTRL Payload TLV first 2030 */ 2031 HTT_STATS_TLV_TAG_SET(*msg_word, 2032 HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV); 2033 2034 align4byte = ((data_size - sizeof(htt_tlv_hdr_t) + 2035 qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload) 2036 + 3) >> 2) << 2; 2037 HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte); 2038 msg_word++; 2039 2040 HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET( 2041 *msg_word, data_size); 2042 msg_word++; 2043 2044 dp_wdi_event_handler(event, soc, (void *)mpdu, 2045 HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id); 2046 2047 qdf_nbuf_pull_head(mpdu, hdr_size); 2048 2049 return QDF_STATUS_SUCCESS; 2050 } 2051 2052 #ifdef ATH_SUPPORT_EXT_STAT 2053 #ifdef WLAN_TELEMETRY_STATS_SUPPORT 2054 /* dp_pdev_clear_link_airtime_stats- clear pdev airtime stats for current index 2055 * @peer : Datapath peer 2056 */ 2057 static inline 2058 void dp_pdev_clear_link_airtime_stats(struct dp_pdev *pdev) 2059 { 2060 uint8_t ac; 2061 2062 for (ac = 0; ac < WME_AC_MAX; ac++) 2063 pdev->stats.telemetry_stats.link_airtime[ac] = 0; 2064 } 2065 2066 /* dp_peer_update_telemetry_stats- update peer telemetry stats 2067 * @peer : Datapath peer 2068 */ 2069 static inline 2070 void dp_peer_update_telemetry_stats(struct dp_peer *peer) 2071 { 2072 struct dp_pdev *pdev; 2073 struct dp_vdev *vdev; 2074 struct dp_mon_peer *mon_peer = NULL; 2075 uint8_t ac; 2076 2077 vdev = peer->vdev; 2078 if (!vdev) 2079 return; 2080 2081 pdev = vdev->pdev; 2082 if (!pdev) 2083 return; 2084 2085 mon_peer = peer->monitor_peer; 2086 if (qdf_likely(mon_peer)) { 2087 DP_STATS_INC(pdev, 
telemetry_stats.tx_mpdu_failed, 2088 mon_peer->stats.tx.retries); 2089 DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_total, 2090 mon_peer->stats.tx.tx_mpdus_tried); 2091 for (ac = 0; ac < WME_AC_MAX; ac++) { 2092 mon_peer->stats.airtime_consumption[ac].avg_consumption_per_sec = 2093 mon_peer->stats.airtime_consumption[ac].consumption; 2094 /* Store each peer airtime consumption in pdev 2095 * link_airtime to calculate pdev's total airtime 2096 * consumption 2097 */ 2098 DP_STATS_INC( 2099 pdev, 2100 telemetry_stats.link_airtime[ac], 2101 mon_peer->stats.airtime_consumption[ac].consumption); 2102 mon_peer->stats.airtime_consumption[ac].consumption = 0; 2103 } 2104 } 2105 } 2106 #else 2107 static inline 2108 void dp_pdev_clear_link_airtime_stats(struct dp_pdev *pdev) 2109 { } 2110 2111 static inline 2112 void dp_peer_update_telemetry_stats(struct dp_peer *peer) 2113 { } 2114 #endif 2115 2116 /*dp_peer_cal_clients_stats_update - update peer stats on cal client timer 2117 * @soc : Datapath SOC 2118 * @peer : Datapath peer 2119 * @arg : argument to iter function 2120 */ 2121 #ifdef IPA_OFFLOAD 2122 static void 2123 dp_peer_cal_clients_stats_update(struct dp_soc *soc, 2124 struct dp_peer *peer, 2125 void *arg) 2126 { 2127 struct cdp_calibr_stats_intf peer_stats_intf = {0}; 2128 struct dp_peer *tgt_peer = NULL; 2129 struct dp_txrx_peer *txrx_peer = NULL; 2130 2131 dp_peer_update_telemetry_stats(peer); 2132 2133 if (!dp_peer_is_primary_link_peer(peer)) 2134 return; 2135 2136 tgt_peer = dp_get_tgt_peer_from_peer(peer); 2137 if (!tgt_peer || !(tgt_peer->txrx_peer)) 2138 return; 2139 2140 txrx_peer = tgt_peer->txrx_peer; 2141 peer_stats_intf.to_stack = txrx_peer->to_stack; 2142 peer_stats_intf.tx_success = 2143 peer->monitor_peer->stats.tx.tx_ucast_success; 2144 peer_stats_intf.tx_ucast = 2145 peer->monitor_peer->stats.tx.tx_ucast_total; 2146 2147 dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf, 2148 &tgt_peer->stats); 2149 dp_peer_get_rxtid_stats_ipa(peer, 
dp_peer_update_tid_stats_from_reo); 2150 } 2151 #else 2152 static void 2153 dp_peer_cal_clients_stats_update(struct dp_soc *soc, 2154 struct dp_peer *peer, 2155 void *arg) 2156 { 2157 struct cdp_calibr_stats_intf peer_stats_intf = {0}; 2158 struct dp_peer *tgt_peer = NULL; 2159 struct dp_txrx_peer *txrx_peer = NULL; 2160 2161 dp_peer_update_telemetry_stats(peer); 2162 2163 if (!dp_peer_is_primary_link_peer(peer)) 2164 return; 2165 2166 tgt_peer = dp_get_tgt_peer_from_peer(peer); 2167 if (!tgt_peer || !(tgt_peer->txrx_peer)) 2168 return; 2169 2170 txrx_peer = tgt_peer->txrx_peer; 2171 peer_stats_intf.to_stack = txrx_peer->to_stack; 2172 peer_stats_intf.tx_success = 2173 txrx_peer->stats.per_pkt_stats.tx.tx_success; 2174 peer_stats_intf.tx_ucast = 2175 txrx_peer->stats.per_pkt_stats.tx.ucast; 2176 2177 dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf, 2178 &tgt_peer->stats); 2179 } 2180 #endif 2181 2182 /*dp_iterate_update_peer_list - update peer stats on cal client timer 2183 * @pdev_hdl: pdev handle 2184 */ 2185 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl) 2186 { 2187 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl; 2188 2189 /* Clear current airtime stats as the below API will increment the stats 2190 * for all peers on top of current value 2191 */ 2192 dp_pdev_clear_link_airtime_stats(pdev); 2193 dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL, 2194 DP_MOD_ID_CDP); 2195 } 2196 #else 2197 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl) 2198 { 2199 } 2200 #endif 2201 2202 #ifdef ATH_SUPPORT_NAC 2203 int dp_set_filter_neigh_peers(struct dp_pdev *pdev, 2204 bool val) 2205 { 2206 /* Enable/Disable smart mesh filtering. This flag will be checked 2207 * during rx processing to check if packets are from NAC clients. 
2208 */ 2209 pdev->monitor_pdev->filter_neighbour_peers = val; 2210 return 0; 2211 } 2212 #endif /* ATH_SUPPORT_NAC */ 2213 2214 #ifdef WLAN_ATF_ENABLE 2215 void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value) 2216 { 2217 if (!pdev) { 2218 dp_cdp_err("Invalid pdev"); 2219 return; 2220 } 2221 2222 pdev->monitor_pdev->dp_atf_stats_enable = value; 2223 } 2224 #endif 2225 2226 #ifdef QCA_ENHANCED_STATS_SUPPORT 2227 /* 2228 * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process 2229 * htt_ppdu_stats_tx_mgmtctrl_payload_tlv 2230 * @pdev: DP PDEV handle 2231 * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv 2232 * @length: tlv_length 2233 * 2234 * return:QDF_STATUS_SUCCESS if nbuf has to be freed in caller 2235 */ 2236 QDF_STATUS 2237 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev, 2238 qdf_nbuf_t tag_buf, 2239 uint32_t ppdu_id) 2240 { 2241 uint32_t *nbuf_ptr; 2242 uint8_t trim_size; 2243 size_t head_size; 2244 struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info; 2245 uint32_t *msg_word; 2246 uint32_t tsf_hdr; 2247 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 2248 2249 if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) && 2250 (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled)) 2251 return QDF_STATUS_SUCCESS; 2252 2253 /* 2254 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t 2255 */ 2256 msg_word = (uint32_t *)qdf_nbuf_data(tag_buf); 2257 msg_word = msg_word + 2; 2258 tsf_hdr = *msg_word; 2259 2260 trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf + 2261 HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) - 2262 qdf_nbuf_data(tag_buf)); 2263 2264 if (!qdf_nbuf_pull_head(tag_buf, trim_size)) 2265 return QDF_STATUS_SUCCESS; 2266 2267 qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) - 2268 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len); 2269 2270 if (mon_pdev->tx_capture_enabled) { 2271 head_size = sizeof(struct cdp_tx_mgmt_comp_info); 2272 if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) { 2273 
qdf_err("Fail to get headroom h_sz %zu h_avail %d\n", 2274 head_size, qdf_nbuf_headroom(tag_buf)); 2275 qdf_assert_always(0); 2276 return QDF_STATUS_E_NOMEM; 2277 } 2278 ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *) 2279 qdf_nbuf_push_head(tag_buf, head_size); 2280 qdf_assert_always(ptr_mgmt_comp_info); 2281 ptr_mgmt_comp_info->ppdu_id = ppdu_id; 2282 ptr_mgmt_comp_info->is_sgen_pkt = true; 2283 ptr_mgmt_comp_info->tx_tsf = tsf_hdr; 2284 } else { 2285 head_size = sizeof(ppdu_id); 2286 nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size); 2287 *nbuf_ptr = ppdu_id; 2288 } 2289 if (mon_pdev->bpr_enable) { 2290 dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc, 2291 tag_buf, HTT_INVALID_PEER, 2292 WDI_NO_VAL, pdev->pdev_id); 2293 } 2294 2295 dp_deliver_mgmt_frm(pdev, tag_buf); 2296 2297 return QDF_STATUS_E_ALREADY; 2298 } 2299 2300 /* 2301 * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv 2302 * bitmap for sniffer mode 2303 * @bitmap: received bitmap 2304 * 2305 * Return: expected bitmap value, returns zero if doesn't match with 2306 * either 64-bit Tx window or 256-bit window tlv bitmap 2307 */ 2308 int 2309 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap) 2310 { 2311 if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64)) 2312 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64; 2313 else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256)) 2314 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256; 2315 2316 return 0; 2317 } 2318 2319 /* 2320 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats. 2321 * @peer: Datapath peer handle 2322 * @ppdu: User PPDU Descriptor 2323 * @cur_ppdu_id: PPDU_ID 2324 * 2325 * Return: None 2326 * 2327 * on Tx data frame, we may get delayed ba set 2328 * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we 2329 * request Block Ack Request(BAR). Successful msdu is received only after Block 2330 * Ack. To populate peer stats we need successful msdu(data frame). 
2331 * So we hold the Tx data stats on delayed_ba for stats update. 2332 */ 2333 static void 2334 dp_peer_copy_delay_stats(struct dp_peer *peer, 2335 struct cdp_tx_completion_ppdu_user *ppdu, 2336 uint32_t cur_ppdu_id) 2337 { 2338 struct dp_pdev *pdev; 2339 struct dp_vdev *vdev; 2340 struct dp_mon_peer *mon_peer = peer->monitor_peer; 2341 2342 if (mon_peer->last_delayed_ba) { 2343 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2344 "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]", 2345 mon_peer->last_delayed_ba_ppduid, cur_ppdu_id); 2346 vdev = peer->vdev; 2347 if (vdev) { 2348 pdev = vdev->pdev; 2349 pdev->stats.cdp_delayed_ba_not_recev++; 2350 } 2351 } 2352 2353 mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size; 2354 mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc; 2355 mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re; 2356 mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf; 2357 mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw; 2358 mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss; 2359 mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi; 2360 mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; 2361 mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc; 2362 mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; 2363 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = 2364 ppdu->mpdu_tried_ucast; 2365 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = 2366 ppdu->mpdu_tried_mcast; 2367 mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl; 2368 mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl; 2369 mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; 2370 2371 mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start; 2372 mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones; 2373 mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast; 2374 2375 mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos; 2376 mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id; 2377 2378 mon_peer->last_delayed_ba = true; 2379 2380 
ppdu->debug_copied = true; 2381 } 2382 2383 /* 2384 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats. 2385 * @peer: Datapath peer handle 2386 * @ppdu: PPDU Descriptor 2387 * 2388 * Return: None 2389 * 2390 * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info 2391 * from Tx BAR frame not required to populate peer stats. 2392 * But we need successful MPDU and MSDU to update previous 2393 * transmitted Tx data frame. Overwrite ppdu stats with the previous 2394 * stored ppdu stats. 2395 */ 2396 static void 2397 dp_peer_copy_stats_to_bar(struct dp_peer *peer, 2398 struct cdp_tx_completion_ppdu_user *ppdu) 2399 { 2400 struct dp_mon_peer *mon_peer = peer->monitor_peer; 2401 2402 ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size; 2403 ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc; 2404 ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re; 2405 ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf; 2406 ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw; 2407 ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss; 2408 ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi; 2409 ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm; 2410 ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc; 2411 ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm; 2412 ppdu->mpdu_tried_ucast = 2413 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast; 2414 ppdu->mpdu_tried_mcast = 2415 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast; 2416 ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl; 2417 ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl; 2418 ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm; 2419 2420 ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start; 2421 ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones; 2422 ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast; 2423 2424 ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos; 2425 ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id; 2426 2427 mon_peer->last_delayed_ba = false; 2428 
2429 ppdu->debug_copied = true; 2430 } 2431 2432 /* 2433 * dp_tx_rate_stats_update() - Update rate per-peer statistics 2434 * @peer: Datapath peer handle 2435 * @ppdu: PPDU Descriptor 2436 * 2437 * Return: None 2438 */ 2439 static void 2440 dp_tx_rate_stats_update(struct dp_peer *peer, 2441 struct cdp_tx_completion_ppdu_user *ppdu) 2442 { 2443 uint32_t ratekbps = 0; 2444 uint64_t ppdu_tx_rate = 0; 2445 uint32_t rix; 2446 uint16_t ratecode = 0; 2447 struct dp_mon_peer *mon_peer = NULL; 2448 2449 if (!peer || !ppdu) 2450 return; 2451 2452 if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) 2453 return; 2454 2455 mon_peer = peer->monitor_peer; 2456 if (!mon_peer) 2457 return; 2458 2459 ratekbps = dp_getrateindex(ppdu->gi, 2460 ppdu->mcs, 2461 ppdu->nss, 2462 ppdu->preamble, 2463 ppdu->bw, 2464 ppdu->punc_mode, 2465 &rix, 2466 &ratecode); 2467 2468 if (!ratekbps) 2469 return; 2470 2471 /* Calculate goodput in non-training period 2472 * In training period, don't do anything as 2473 * pending pkt is send as goodput. 2474 */ 2475 if ((!peer->bss_peer) && (!ppdu->sa_is_training)) { 2476 ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) * 2477 (CDP_PERCENT_MACRO - ppdu->current_rate_per)); 2478 } 2479 ppdu->rix = rix; 2480 ppdu->tx_ratekbps = ratekbps; 2481 ppdu->tx_ratecode = ratecode; 2482 DP_STATS_UPD(mon_peer, tx.tx_rate, ratekbps); 2483 mon_peer->stats.tx.avg_tx_rate = 2484 dp_ath_rate_lpf(mon_peer->stats.tx.avg_tx_rate, ratekbps); 2485 ppdu_tx_rate = dp_ath_rate_out(mon_peer->stats.tx.avg_tx_rate); 2486 DP_STATS_UPD(mon_peer, tx.rnd_avg_tx_rate, ppdu_tx_rate); 2487 2488 mon_peer->stats.tx.bw_info = ppdu->bw; 2489 mon_peer->stats.tx.gi_info = ppdu->gi; 2490 mon_peer->stats.tx.nss_info = ppdu->nss; 2491 mon_peer->stats.tx.mcs_info = ppdu->mcs; 2492 mon_peer->stats.tx.preamble_info = ppdu->preamble; 2493 if (peer->vdev) { 2494 /* 2495 * In STA mode: 2496 * We get ucast stats as BSS peer stats. 
2497 * 2498 * In AP mode: 2499 * We get mcast stats as BSS peer stats. 2500 * We get ucast stats as assoc peer stats. 2501 */ 2502 if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) { 2503 peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps; 2504 peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs; 2505 } else { 2506 peer->vdev->stats.tx.last_tx_rate = ratekbps; 2507 peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs; 2508 } 2509 } 2510 } 2511 2512 #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE) 2513 void dp_send_stats_event(struct dp_pdev *pdev, struct dp_peer *peer, 2514 uint16_t peer_id) 2515 { 2516 struct cdp_interface_peer_stats peer_stats_intf = {0}; 2517 struct dp_mon_peer *mon_peer = peer->monitor_peer; 2518 struct dp_txrx_peer *txrx_peer = NULL; 2519 2520 if (qdf_unlikely(!mon_peer)) 2521 return; 2522 2523 mon_peer->stats.rx.rx_snr_measured_time = qdf_system_ticks(); 2524 peer_stats_intf.rx_avg_snr = mon_peer->stats.rx.avg_snr; 2525 2526 txrx_peer = dp_get_txrx_peer(peer); 2527 if (qdf_likely(txrx_peer)) { 2528 peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes; 2529 peer_stats_intf.tx_byte_count = 2530 txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes; 2531 } 2532 2533 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, 2534 &peer_stats_intf, peer_id, 2535 UPDATE_PEER_STATS, pdev->pdev_id); 2536 } 2537 #endif 2538 2539 #ifdef WLAN_FEATURE_11BE 2540 /* 2541 * dp_get_ru_index_frm_ru_tones() - get ru index 2542 * @ru_tones: ru tones 2543 * 2544 * Return: ru index 2545 */ 2546 static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones) 2547 { 2548 enum cdp_ru_index ru_index; 2549 2550 switch (ru_tones) { 2551 case RU_26: 2552 ru_index = RU_26_INDEX; 2553 break; 2554 case RU_52: 2555 ru_index = RU_52_INDEX; 2556 break; 2557 case RU_52_26: 2558 ru_index = RU_52_26_INDEX; 2559 break; 2560 case RU_106: 2561 ru_index = RU_106_INDEX; 2562 break; 2563 case RU_106_26: 2564 ru_index = RU_106_26_INDEX; 2565 
break; 2566 case RU_242: 2567 ru_index = RU_242_INDEX; 2568 break; 2569 case RU_484: 2570 ru_index = RU_484_INDEX; 2571 break; 2572 case RU_484_242: 2573 ru_index = RU_484_242_INDEX; 2574 break; 2575 case RU_996: 2576 ru_index = RU_996_INDEX; 2577 break; 2578 case RU_996_484: 2579 ru_index = RU_996_484_INDEX; 2580 break; 2581 case RU_996_484_242: 2582 ru_index = RU_996_484_242_INDEX; 2583 break; 2584 case RU_2X996: 2585 ru_index = RU_2X996_INDEX; 2586 break; 2587 case RU_2X996_484: 2588 ru_index = RU_2X996_484_INDEX; 2589 break; 2590 case RU_3X996: 2591 ru_index = RU_3X996_INDEX; 2592 break; 2593 case RU_3X996_484: 2594 ru_index = RU_2X996_484_INDEX; 2595 break; 2596 case RU_4X996: 2597 ru_index = RU_4X996_INDEX; 2598 break; 2599 default: 2600 ru_index = RU_INDEX_MAX; 2601 break; 2602 } 2603 2604 return ru_index; 2605 } 2606 2607 /* 2608 * dp_mon_get_ru_width_from_ru_size() - get ru_width from ru_size enum 2609 * @ru_size: HTT ru_size enum 2610 * 2611 * Return: ru_width of uint32_t type 2612 */ 2613 static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size) 2614 { 2615 uint32_t width = 0; 2616 2617 switch (ru_size) { 2618 case HTT_PPDU_STATS_RU_26: 2619 width = RU_26; 2620 break; 2621 case HTT_PPDU_STATS_RU_52: 2622 width = RU_52; 2623 break; 2624 case HTT_PPDU_STATS_RU_52_26: 2625 width = RU_52_26; 2626 break; 2627 case HTT_PPDU_STATS_RU_106: 2628 width = RU_106; 2629 break; 2630 case HTT_PPDU_STATS_RU_106_26: 2631 width = RU_106_26; 2632 break; 2633 case HTT_PPDU_STATS_RU_242: 2634 width = RU_242; 2635 break; 2636 case HTT_PPDU_STATS_RU_484: 2637 width = RU_484; 2638 break; 2639 case HTT_PPDU_STATS_RU_484_242: 2640 width = RU_484_242; 2641 break; 2642 case HTT_PPDU_STATS_RU_996: 2643 width = RU_996; 2644 break; 2645 case HTT_PPDU_STATS_RU_996_484: 2646 width = RU_996_484; 2647 break; 2648 case HTT_PPDU_STATS_RU_996_484_242: 2649 width = RU_996_484_242; 2650 break; 2651 case HTT_PPDU_STATS_RU_996x2: 2652 width = RU_2X996; 2653 break; 2654 case 
HTT_PPDU_STATS_RU_996x2_484: 2655 width = RU_2X996_484; 2656 break; 2657 case HTT_PPDU_STATS_RU_996x3: 2658 width = RU_3X996; 2659 break; 2660 case HTT_PPDU_STATS_RU_996x3_484: 2661 width = RU_3X996_484; 2662 break; 2663 case HTT_PPDU_STATS_RU_996x4: 2664 width = RU_4X996; 2665 break; 2666 default: 2667 dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size); 2668 } 2669 2670 return width; 2671 } 2672 #else 2673 static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones) 2674 { 2675 enum cdp_ru_index ru_index; 2676 2677 switch (ru_tones) { 2678 case RU_26: 2679 ru_index = RU_26_INDEX; 2680 break; 2681 case RU_52: 2682 ru_index = RU_52_INDEX; 2683 break; 2684 case RU_106: 2685 ru_index = RU_106_INDEX; 2686 break; 2687 case RU_242: 2688 ru_index = RU_242_INDEX; 2689 break; 2690 case RU_484: 2691 ru_index = RU_484_INDEX; 2692 break; 2693 case RU_996: 2694 ru_index = RU_996_INDEX; 2695 break; 2696 default: 2697 ru_index = RU_INDEX_MAX; 2698 break; 2699 } 2700 2701 return ru_index; 2702 } 2703 2704 static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size) 2705 { 2706 uint32_t width = 0; 2707 2708 switch (ru_size) { 2709 case HTT_PPDU_STATS_RU_26: 2710 width = RU_26; 2711 break; 2712 case HTT_PPDU_STATS_RU_52: 2713 width = RU_52; 2714 break; 2715 case HTT_PPDU_STATS_RU_106: 2716 width = RU_106; 2717 break; 2718 case HTT_PPDU_STATS_RU_242: 2719 width = RU_242; 2720 break; 2721 case HTT_PPDU_STATS_RU_484: 2722 width = RU_484; 2723 break; 2724 case HTT_PPDU_STATS_RU_996: 2725 width = RU_996; 2726 break; 2727 default: 2728 dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size); 2729 } 2730 2731 return width; 2732 } 2733 #endif 2734 2735 /* 2736 * dp_tx_stats_update() - Update per-peer statistics 2737 * @pdev: Datapath pdev handle 2738 * @peer: Datapath peer handle 2739 * @ppdu: PPDU Descriptor 2740 * @ack_rssi: RSSI of last ack received 2741 * 2742 * Return: None 2743 */ 2744 static void 2745 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer 
*peer, 2746 struct cdp_tx_completion_ppdu_user *ppdu, 2747 uint32_t ack_rssi) 2748 { 2749 uint8_t preamble, mcs; 2750 uint16_t num_msdu; 2751 uint16_t num_mpdu; 2752 uint16_t mpdu_tried; 2753 uint16_t mpdu_failed; 2754 struct dp_mon_ops *mon_ops; 2755 enum cdp_ru_index ru_index; 2756 struct dp_mon_peer *mon_peer = NULL; 2757 uint32_t ratekbps = 0; 2758 uint64_t tx_byte_count; 2759 2760 preamble = ppdu->preamble; 2761 mcs = ppdu->mcs; 2762 num_msdu = ppdu->num_msdu; 2763 num_mpdu = ppdu->mpdu_success; 2764 mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast; 2765 mpdu_failed = mpdu_tried - num_mpdu; 2766 tx_byte_count = ppdu->success_bytes; 2767 2768 /* If the peer statistics are already processed as part of 2769 * per-MSDU completion handler, do not process these again in per-PPDU 2770 * indications 2771 */ 2772 if (pdev->soc->process_tx_status) 2773 return; 2774 2775 mon_peer = peer->monitor_peer; 2776 if (!mon_peer) 2777 return; 2778 2779 if (!ppdu->is_mcast) { 2780 DP_STATS_INC(mon_peer, tx.tx_ucast_total.num, num_msdu); 2781 DP_STATS_INC(mon_peer, tx.tx_ucast_total.bytes, 2782 tx_byte_count); 2783 } 2784 2785 if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) { 2786 /* 2787 * All failed mpdu will be retried, so incrementing 2788 * retries mpdu based on mpdu failed. Even for 2789 * ack failure i.e for long retries we get 2790 * mpdu failed equal mpdu tried. 
2791 */ 2792 DP_STATS_INC(mon_peer, tx.retries, mpdu_failed); 2793 return; 2794 } 2795 2796 if (ppdu->is_ppdu_cookie_valid) 2797 DP_STATS_INC(mon_peer, tx.num_ppdu_cookie_valid, 1); 2798 2799 if (ppdu->mu_group_id <= MAX_MU_GROUP_ID && 2800 ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) { 2801 if (qdf_unlikely(ppdu->mu_group_id && 2802 !(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1)))) 2803 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2804 "mu_group_id out of bound!!\n"); 2805 else 2806 DP_STATS_UPD(mon_peer, tx.mu_group_id[ppdu->mu_group_id], 2807 (ppdu->user_pos + 1)); 2808 } 2809 2810 if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA || 2811 ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) { 2812 DP_STATS_UPD(mon_peer, tx.ru_tones, ppdu->ru_tones); 2813 DP_STATS_UPD(mon_peer, tx.ru_start, ppdu->ru_start); 2814 ru_index = dp_get_ru_index_frm_ru_tones(ppdu->ru_tones); 2815 if (ru_index != RU_INDEX_MAX) { 2816 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_msdu, 2817 num_msdu); 2818 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_mpdu, 2819 num_mpdu); 2820 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].mpdu_tried, 2821 mpdu_tried); 2822 } 2823 } 2824 2825 /* 2826 * All failed mpdu will be retried, so incrementing 2827 * retries mpdu based on mpdu failed. Even for 2828 * ack failure i.e for long retries we get 2829 * mpdu failed equal mpdu tried. 
2830 */ 2831 DP_STATS_INC(mon_peer, tx.retries, mpdu_failed); 2832 2833 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_msdu, 2834 num_msdu); 2835 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu, 2836 num_mpdu); 2837 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried, 2838 mpdu_tried); 2839 2840 DP_STATS_INC(mon_peer, tx.sgi_count[ppdu->gi], num_msdu); 2841 DP_STATS_INC(mon_peer, tx.bw[ppdu->bw], num_msdu); 2842 DP_STATS_INC(mon_peer, tx.nss[ppdu->nss], num_msdu); 2843 if (ppdu->tid < CDP_DATA_TID_MAX) 2844 DP_STATS_INC(mon_peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], 2845 num_msdu); 2846 DP_STATS_INCC(mon_peer, tx.stbc, num_msdu, ppdu->stbc); 2847 DP_STATS_INCC(mon_peer, tx.ldpc, num_msdu, ppdu->ldpc); 2848 if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid) 2849 DP_STATS_UPD(mon_peer, tx.last_ack_rssi, ack_rssi); 2850 2851 if (!ppdu->is_mcast) { 2852 DP_STATS_INC(mon_peer, tx.tx_ucast_success.num, num_msdu); 2853 DP_STATS_INC(mon_peer, tx.tx_ucast_success.bytes, 2854 tx_byte_count); 2855 } 2856 2857 DP_STATS_INCC(mon_peer, 2858 tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 2859 ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A))); 2860 DP_STATS_INCC(mon_peer, 2861 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 2862 ((mcs < MAX_MCS_11A) && (preamble == DOT11_A))); 2863 DP_STATS_INCC(mon_peer, 2864 tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 2865 ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B))); 2866 DP_STATS_INCC(mon_peer, 2867 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 2868 ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B))); 2869 DP_STATS_INCC(mon_peer, 2870 tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 2871 ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N))); 2872 DP_STATS_INCC(mon_peer, 2873 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 2874 ((mcs < MAX_MCS_11A) && (preamble == DOT11_N))); 2875 DP_STATS_INCC(mon_peer, 2876 tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 2877 
((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC))); 2878 DP_STATS_INCC(mon_peer, 2879 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 2880 ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC))); 2881 DP_STATS_INCC(mon_peer, 2882 tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 2883 ((mcs >= MAX_MCS_11AX) && (preamble == DOT11_AX))); 2884 DP_STATS_INCC(mon_peer, 2885 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 2886 ((mcs < MAX_MCS_11AX) && (preamble == DOT11_AX))); 2887 DP_STATS_INCC(mon_peer, tx.ampdu_cnt, num_mpdu, ppdu->is_ampdu); 2888 DP_STATS_INCC(mon_peer, tx.non_ampdu_cnt, num_mpdu, !(ppdu->is_ampdu)); 2889 DP_STATS_INCC(mon_peer, tx.pream_punct_cnt, 1, ppdu->pream_punct); 2890 DP_STATS_INC(mon_peer, tx.tx_ppdus, 1); 2891 DP_STATS_INC(mon_peer, tx.tx_mpdus_success, num_mpdu); 2892 DP_STATS_INC(mon_peer, tx.tx_mpdus_tried, mpdu_tried); 2893 2894 mon_ops = dp_mon_ops_get(pdev->soc); 2895 if (mon_ops && mon_ops->mon_tx_stats_update) 2896 mon_ops->mon_tx_stats_update(mon_peer, ppdu); 2897 2898 dp_tx_rate_stats_update(peer, ppdu); 2899 2900 dp_peer_stats_notify(pdev, peer); 2901 2902 ratekbps = mon_peer->stats.tx.tx_rate; 2903 DP_STATS_UPD(mon_peer, tx.last_tx_rate, ratekbps); 2904 2905 dp_send_stats_event(pdev, peer, ppdu->peer_id); 2906 } 2907 2908 /* 2909 * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU, 2910 * if a new peer id arrives in a PPDU 2911 * pdev: DP pdev handle 2912 * @peer_id : peer unique identifier 2913 * @ppdu_info: per ppdu tlv structure 2914 * 2915 * return:user index to be populated 2916 */ 2917 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev, 2918 uint16_t peer_id, 2919 struct ppdu_info *ppdu_info) 2920 { 2921 uint8_t user_index = 0; 2922 struct cdp_tx_completion_ppdu *ppdu_desc; 2923 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 2924 2925 ppdu_desc = 2926 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 2927 2928 while ((user_index + 1) <= ppdu_info->last_user) { 
2929 ppdu_user_desc = &ppdu_desc->user[user_index]; 2930 if (ppdu_user_desc->peer_id != peer_id) { 2931 user_index++; 2932 continue; 2933 } else { 2934 /* Max users possible is 8 so user array index should 2935 * not exceed 7 2936 */ 2937 qdf_assert_always(user_index <= (ppdu_desc->max_users - 1)); 2938 return user_index; 2939 } 2940 } 2941 2942 ppdu_info->last_user++; 2943 /* Max users possible is 8 so last user should not exceed 8 */ 2944 qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users); 2945 return ppdu_info->last_user - 1; 2946 } 2947 2948 /* 2949 * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv 2950 * pdev: DP pdev handle 2951 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv 2952 * @ppdu_info: per ppdu tlv structure 2953 * 2954 * return:void 2955 */ 2956 static void 2957 dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev, 2958 uint32_t *tag_buf, 2959 struct ppdu_info *ppdu_info) 2960 { 2961 uint16_t frame_type; 2962 uint16_t frame_ctrl; 2963 uint16_t freq; 2964 struct dp_soc *soc = NULL; 2965 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 2966 uint64_t ppdu_start_timestamp; 2967 uint32_t *start_tag_buf; 2968 2969 start_tag_buf = tag_buf; 2970 ppdu_desc = 2971 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 2972 2973 ppdu_desc->ppdu_id = ppdu_info->ppdu_id; 2974 2975 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID); 2976 ppdu_info->sched_cmdid = 2977 HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf); 2978 ppdu_desc->num_users = 2979 HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf); 2980 2981 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); 2982 2983 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE); 2984 frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf); 2985 ppdu_desc->htt_frame_type = frame_type; 2986 2987 frame_ctrl = ppdu_desc->frame_ctrl; 2988 2989 ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id; 2990 2991 switch 
(frame_type) { 2992 case HTT_STATS_FTYPE_TIDQ_DATA_SU: 2993 case HTT_STATS_FTYPE_TIDQ_DATA_MU: 2994 case HTT_STATS_FTYPE_SGEN_QOS_NULL: 2995 /* 2996 * for management packet, frame type come as DATA_SU 2997 * need to check frame_ctrl before setting frame_type 2998 */ 2999 if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL) 3000 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; 3001 else 3002 ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA; 3003 break; 3004 case HTT_STATS_FTYPE_SGEN_MU_BAR: 3005 case HTT_STATS_FTYPE_SGEN_BAR: 3006 case HTT_STATS_FTYPE_SGEN_BE_MU_BAR: 3007 ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR; 3008 break; 3009 default: 3010 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; 3011 break; 3012 } 3013 3014 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US); 3015 ppdu_desc->tx_duration = *tag_buf; 3016 3017 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US); 3018 ppdu_desc->ppdu_start_timestamp = *tag_buf; 3019 3020 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE); 3021 freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf); 3022 if (freq != ppdu_desc->channel) { 3023 soc = pdev->soc; 3024 ppdu_desc->channel = freq; 3025 pdev->operating_channel.freq = freq; 3026 if (soc && soc->cdp_soc.ol_ops->freq_to_channel) 3027 pdev->operating_channel.num = 3028 soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc, 3029 pdev->pdev_id, 3030 freq); 3031 3032 if (soc && soc->cdp_soc.ol_ops->freq_to_band) 3033 pdev->operating_channel.band = 3034 soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc, 3035 pdev->pdev_id, 3036 freq); 3037 } 3038 3039 ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf); 3040 3041 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM); 3042 ppdu_desc->phy_ppdu_tx_time_us = 3043 HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf); 3044 ppdu_desc->beam_change = 3045 HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf); 3046 ppdu_desc->doppler = 3047 
HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf); 3048 ppdu_desc->spatial_reuse = 3049 HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf); 3050 3051 dp_tx_capture_htt_frame_counter(pdev, frame_type); 3052 3053 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US); 3054 ppdu_start_timestamp = *tag_buf; 3055 ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp << 3056 HTT_SHIFT_UPPER_TIMESTAMP) & 3057 HTT_MASK_UPPER_TIMESTAMP); 3058 3059 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + 3060 ppdu_desc->tx_duration; 3061 /* Ack time stamp is same as end time stamp*/ 3062 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; 3063 3064 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + 3065 ppdu_desc->tx_duration; 3066 3067 ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp; 3068 ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp; 3069 ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration; 3070 3071 /* Ack time stamp is same as end time stamp*/ 3072 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; 3073 3074 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR); 3075 ppdu_desc->bss_color = 3076 HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf); 3077 } 3078 3079 /* 3080 * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common 3081 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv 3082 * @ppdu_info: per ppdu tlv structure 3083 * 3084 * return:void 3085 */ 3086 static void dp_process_ppdu_stats_user_common_tlv( 3087 struct dp_pdev *pdev, uint32_t *tag_buf, 3088 struct ppdu_info *ppdu_info) 3089 { 3090 uint16_t peer_id; 3091 struct cdp_tx_completion_ppdu *ppdu_desc; 3092 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3093 uint8_t curr_user_index = 0; 3094 struct dp_peer *peer; 3095 struct dp_vdev *vdev; 3096 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3097 3098 ppdu_desc = 3099 (struct 
cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3100 3101 tag_buf++; 3102 peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); 3103 3104 curr_user_index = 3105 dp_get_ppdu_info_user_index(pdev, 3106 peer_id, ppdu_info); 3107 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3108 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3109 3110 ppdu_desc->vdev_id = 3111 HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf); 3112 3113 ppdu_user_desc->peer_id = peer_id; 3114 3115 tag_buf++; 3116 3117 if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) { 3118 ppdu_user_desc->delayed_ba = 1; 3119 ppdu_desc->delayed_ba = 1; 3120 } 3121 3122 if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) { 3123 ppdu_user_desc->is_mcast = true; 3124 ppdu_user_desc->mpdu_tried_mcast = 3125 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); 3126 ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast; 3127 } else { 3128 ppdu_user_desc->mpdu_tried_ucast = 3129 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); 3130 } 3131 3132 ppdu_user_desc->is_seq_num_valid = 3133 HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf); 3134 tag_buf++; 3135 3136 ppdu_user_desc->qos_ctrl = 3137 HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf); 3138 ppdu_user_desc->frame_ctrl = 3139 HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf); 3140 ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl; 3141 3142 if (ppdu_user_desc->delayed_ba) 3143 ppdu_user_desc->mpdu_success = 0; 3144 3145 tag_buf += 3; 3146 3147 if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) { 3148 ppdu_user_desc->ppdu_cookie = 3149 HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf); 3150 ppdu_user_desc->is_ppdu_cookie_valid = 1; 3151 } 3152 3153 /* returning earlier causes other feilds unpopulated */ 3154 if (peer_id == DP_SCAN_PEER_ID) { 3155 vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id, 3156 DP_MOD_ID_TX_PPDU_STATS); 3157 if (!vdev) 3158 return; 3159 
		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
	} else {
		peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		if (!peer) {
			/*
			 * fw sends peer_id which is about to removed but
			 * it was already removed in host.
			 * eg: for disassoc, fw send ppdu stats
			 * with peer id equal to previously associated
			 * peer's peer_id but it was removed
			 */
			vdev = dp_vdev_get_ref_by_id(pdev->soc,
						     ppdu_desc->vdev_id,
						     DP_MOD_ID_TX_PPDU_STATS);
			if (!vdev)
				return;
			/* fall back to the vdev MAC when the peer is gone */
			qdf_mem_copy(ppdu_user_desc->mac_addr,
				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
			dp_vdev_unref_delete(pdev->soc, vdev,
					     DP_MOD_ID_TX_PPDU_STATS);
			return;
		}
		qdf_mem_copy(ppdu_user_desc->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
	}
}

/**
 * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
 * @pdev: DP pdev handle
 * @tag_buf: T2H message buffer carrying the user rate TLV
 * @ppdu_info: per ppdu tlv structure
 *
 * Parses per-user rate information (RU allocation, PPDU type, NSS/MCS/BW,
 * guard interval, LDPC/STBC flags, puncture bitmap) into the per-user
 * descriptor of the PPDU completion structure.
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
				    uint32_t *tag_buf,
				    struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_vdev *vdev;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
	uint8_t bw, ru_format;
	uint16_t ru_size;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	/*
	 * For a scan peer, only validate that the vdev still exists; the
	 * reference is dropped immediately and parsing continues.
	 */
	if (peer_id == DP_SCAN_PEER_ID) {
		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		if (!vdev)
			return;
		dp_vdev_unref_delete(pdev->soc, vdev,
				     DP_MOD_ID_TX_PPDU_STATS);
	}
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);

	tag_buf += 1;

	ppdu_user_desc->user_pos =
		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
	ppdu_user_desc->mu_group_id =
		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);

	ru_format = HTT_PPDU_STATS_USER_RATE_TLV_RU_FORMAT_GET(*tag_buf);

	tag_buf += 1;

	if (!ru_format) {
		/* ru_format = 0: ru_end, ru_start */
		ppdu_user_desc->ru_start =
			HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
		ppdu_user_desc->ru_tones =
			(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
			HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
	} else if (ru_format == 1) {
		/* ru_format = 1: ru_index, ru_size */
		ru_size = HTT_PPDU_STATS_USER_RATE_TLV_RU_SIZE_GET(*tag_buf);
		ppdu_user_desc->ru_tones =
			dp_mon_get_ru_width_from_ru_size(ru_size);
	} else {
		dp_mon_debug("Unsupported ru_format: %d rcvd", ru_format);
	}
	ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;

	tag_buf += 2;

	ppdu_user_desc->ppdu_type =
		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);

	tag_buf++;
	ppdu_user_desc->tx_rate = *tag_buf;

	ppdu_user_desc->ltf_size =
		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
	ppdu_user_desc->stbc =
		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
	ppdu_user_desc->he_re =
		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
	ppdu_user_desc->txbf =
		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
	bw = HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf);
	/*
	 * Align bw value as per host data structures.
	 * NOTE(review): presumably FW bw enum starts two (or three, for
	 * 320MHz) positions ahead of the host CMN_BW_* enum; a FW bw value
	 * below 2 would underflow here — confirm against the HTT interface.
	 */
	if (bw == HTT_PPDU_STATS_BANDWIDTH_320MHZ)
		ppdu_user_desc->bw = bw - 3;
	else
		ppdu_user_desc->bw = bw - 2;
	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
	ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
	ppdu_user_desc->preamble =
		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);

	tag_buf += 2;
	ppdu_user_desc->punc_pattern_bitmap =
		HTT_PPDU_STATS_USER_RATE_TLV_PUNC_PATTERN_BITMAP_GET(*tag_buf);
}

/**
 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv() - Process
 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Copies the 64-bit MPDU enqueue bitmap into the per-user descriptor and
 * updates the failed-MPDU bitmap accounting.
 *
 * Return: void
 */
static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;

	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint16_t peer_id;
	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;

	peer_id =
		HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev,
						      peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);

	dp_process_ppdu_stats_update_failed_bitmap(pdev,
						   (void *)ppdu_user_desc,
						   ppdu_info->ppdu_id,
						   size);
}

/**
 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv() - Process
 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Same as the 64-bit variant above, but copies a 256-bit MPDU enqueue
 * bitmap into the per-user descriptor.
 *
 * Return: void
 */
static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;

	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint16_t peer_id;
	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;

	peer_id =
		HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);

	dp_process_ppdu_stats_update_failed_bitmap(pdev,
						   (void *)ppdu_user_desc,
						   ppdu_info->ppdu_id,
						   size);
}

/**
 * dp_process_ppdu_stats_user_cmpltn_common_tlv() - Process
 * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Parses per-user completion data: completion status, ACK RSSI, MPDU
 * success/failure counts, retries, RTS/protection results, per-chain RSSI
 * and smart-antenna training info.
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint8_t bw_iter;
	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->completion_status =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
				*tag_buf);

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);

	tag_buf++;
	/* ACK RSSI is only meaningful for a successful completion */
	if (qdf_likely(ppdu_user_desc->completion_status ==
			HTT_PPDU_STATS_USER_STATUS_OK)) {
		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
		ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
		ppdu_user_desc->ack_rssi_valid = 1;
	} else {
		ppdu_user_desc->ack_rssi_valid = 0;
	}

	tag_buf++;

	ppdu_user_desc->mpdu_success =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);

	/* failed = tried - succeeded */
	ppdu_user_desc->mpdu_failed =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
	ppdu_user_desc->mpdu_success;

	tag_buf++;

	ppdu_user_desc->long_retries =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);

	ppdu_user_desc->short_retries =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
	ppdu_user_desc->retry_mpdus =
		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;

	ppdu_user_desc->is_ampdu =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;

	ppdu_desc->resp_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
	ppdu_desc->mprot_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
	ppdu_desc->rts_success =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
	ppdu_desc->rts_failure =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);

	/* mirror the PPDU-level protection results into the user record */
	ppdu_user_desc->mprot_type = ppdu_desc->mprot_type;
	ppdu_user_desc->rts_success = ppdu_desc->rts_success;
	ppdu_user_desc->rts_failure = ppdu_desc->rts_failure;

	ppdu_user_desc->pream_punct =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);

	ppdu_info->compltn_common_tlv++;

	/*
	 * MU BAR may send request to n users but we may received ack only from
	 * m users. To have count of number of users respond back, we have a
	 * separate counter bar_num_users per PPDU that get increment for every
	 * htt_ppdu_stats_user_cmpltn_common_tlv
	 */
	ppdu_desc->bar_num_users++;

	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
		ppdu_user_desc->rssi_chain[bw_iter] =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
		tag_buf++;
	}

	ppdu_user_desc->sa_tx_antenna =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);

	tag_buf++;
	ppdu_user_desc->sa_is_training =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
	if (ppdu_user_desc->sa_is_training) {
		ppdu_user_desc->sa_goodput =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
	}

	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
		ppdu_user_desc->sa_max_rates[bw_iter] =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
	}

	tag_buf += CDP_NUM_SA_BW;
	ppdu_user_desc->current_rate_per =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
}

/**
 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv() - Process
 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Copies the 64-bit block-ack bitmap and BA sequence number into the
 * per-user descriptor.
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint8_t curr_user_index = 0;
	uint16_t peer_id;
	uint32_t
	tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;

	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
	/* ba_size is expressed in bits (dwords * 32) */
	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
}

/**
 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv() - Process
 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Same as the 64-bit variant above, but for a 256-bit block-ack bitmap.
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint8_t curr_user_index = 0;
	uint16_t peer_id;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;

	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
}

/**
 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv() - Process
 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Parses the asynchronous ACK/BA status TLV: MPDU/MSDU counts, start
 * sequence number and success bytes. A duplicate TLV for the same user is
 * counted in pdev stats and dropped.
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf += 2;
	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	/* drop duplicate ack/ba TLVs for the same user */
	if (!ppdu_user_desc->ack_ba_tlv) {
		ppdu_user_desc->ack_ba_tlv = 1;
	} else {
		pdev->stats.ack_ba_comes_twice++;
		return;
	}

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;
	/* not to update ppdu_desc->tid from this TLV */
	ppdu_user_desc->num_mpdu =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);

	ppdu_user_desc->num_msdu =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);

	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;

	tag_buf++;
	ppdu_user_desc->start_seq =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
			*tag_buf);

	tag_buf++;
	ppdu_user_desc->success_bytes = *tag_buf;

	/* increase ack ba tlv counter on successful mpdu */
	if (ppdu_user_desc->num_mpdu)
		ppdu_info->ack_ba_tlv++;

	/* no BA bitmap TLV seen yet: synthesize a minimal one-bit BA */
	if (ppdu_user_desc->ba_size == 0) {
		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
		ppdu_user_desc->ba_bitmap[0] = 1;
		ppdu_user_desc->ba_size = 1;
	}
}

/**
 * dp_process_ppdu_stats_user_common_array_tlv() - Process
 * htt_ppdu_stats_user_common_array_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv_v
 * @ppdu_info: per ppdu tlv structure
 *
 * Parses per-user success/retry/failed byte and MSDU counters. The TLV is
 * ignored if the peer id it carries is not valid on this soc.
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_common_array_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint32_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct htt_tx_ppdu_stats_info *dp_stats_buf;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
	tag_buf += 3;
	peer_id =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);

	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid peer");
		return;
	}

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;

	tag_buf++;

	ppdu_user_desc->success_msdus =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
	ppdu_user_desc->retry_msdus =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
	tag_buf++;
	ppdu_user_desc->failed_msdus =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
}

/**
 * dp_process_ppdu_stats_user_compltn_flush_tlv() - Process
 * htt_ppdu_stats_flush_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Marks the PPDU as flushed, records the drop reason and counts, updates
 * excess-retry stats for the affected peer, and moves the ppdu_info from
 * the pending list to the sched-completion list.
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
					     uint32_t *tag_buf,
					     struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint32_t peer_id;
	uint8_t tid;
	struct dp_peer *peer;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_peer *mon_peer = NULL;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);
	ppdu_desc->is_flush = 1;

	tag_buf++;
	ppdu_desc->drop_reason = *tag_buf;

	tag_buf++;
	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);

	tag_buf++;
	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);

	/* a flush TLV describes a single user */
	ppdu_desc->num_users = 1;
	ppdu_desc->user[0].peer_id = peer_id;
	ppdu_desc->user[0].tid = tid;

	ppdu_desc->queue_type =
		HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);

	peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
				     DP_MOD_ID_TX_PPDU_STATS);
	if (!peer)
		goto add_ppdu_to_sched_list;

	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
		mon_peer = peer->monitor_peer;
		DP_STATS_INC(mon_peer,
			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
			     ppdu_desc->num_msdu);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);

add_ppdu_to_sched_list:
	ppdu_info->done = 1;
	TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
	mon_pdev->list_depth--;
	TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
			  ppdu_info_list_elem);
	mon_pdev->sched_comp_list_depth++;
}

/**
 * dp_process_ppdu_stats_sch_cmd_status_tlv() - Process schedule command
 * status tlv
 * Here we are not going to process the buffer.
 * @pdev: DP PDEV handle
 * @ppdu_info: per ppdu tlv structure
 *
 * Finalizes PPDU bookkeeping on schedule completion: propagates phy mode /
 * frame control from the first user, saves or applies delayed-BA state per
 * peer, and moves the ppdu_info to the sched-completion list.
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
					 struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct dp_peer *peer;
	uint8_t num_users;
	uint8_t i;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	num_users = ppdu_desc->bar_num_users;

	for (i = 0; i < num_users; i++) {
		if (ppdu_desc->user[i].user_pos == 0) {
			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
				/* update phy mode for bar frame */
				ppdu_desc->phy_mode =
					ppdu_desc->user[i].preamble;
				ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
				break;
			}
			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
				ppdu_desc->frame_ctrl =
					ppdu_desc->user[i].frame_ctrl;
				break;
			}
		}
	}

	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
	    ppdu_desc->delayed_ba) {
		qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);

		for (i = 0; i < ppdu_desc->num_users; i++) {
			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
			uint64_t start_tsf;
			uint64_t end_tsf;
			uint32_t ppdu_id;
			struct dp_mon_peer *mon_peer;

			ppdu_id = ppdu_desc->ppdu_id;
			peer = dp_peer_get_ref_by_id
				(pdev->soc, ppdu_desc->user[i].peer_id,
				 DP_MOD_ID_TX_PPDU_STATS);
			/*
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			if (!peer->monitor_peer) {
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_TX_PPDU_STATS);
				continue;
			}

			mon_peer = peer->monitor_peer;
			delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
			start_tsf = ppdu_desc->ppdu_start_timestamp;
			end_tsf = ppdu_desc->ppdu_end_timestamp;
			/* save delayed ba user info */
			if (ppdu_desc->user[i].delayed_ba) {
				dp_peer_copy_delay_stats(peer,
							 &ppdu_desc->user[i],
							 ppdu_id);
				mon_peer->last_delayed_ba_ppduid = ppdu_id;
				delay_ppdu->ppdu_start_timestamp = start_tsf;
				delay_ppdu->ppdu_end_timestamp = end_tsf;
			}
			ppdu_desc->user[i].peer_last_delayed_ba =
				mon_peer->last_delayed_ba;

			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);

			if (ppdu_desc->user[i].delayed_ba &&
			    !ppdu_desc->user[i].debug_copied) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO_MED,
					  "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
					  __func__, __LINE__,
					  ppdu_desc->ppdu_id,
					  ppdu_desc->bar_ppdu_id,
					  ppdu_desc->num_users,
					  i,
					  ppdu_desc->htt_frame_type);
			}
		}
	}

	/*
	 * when frame type is BAR and STATS_COMMON_TLV is set
	 * copy the store peer delayed info to BAR status
	 */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
			uint64_t start_tsf;
			uint64_t end_tsf;
			struct dp_mon_peer *mon_peer;

			peer = dp_peer_get_ref_by_id
				(pdev->soc,
				 ppdu_desc->user[i].peer_id,
				 DP_MOD_ID_TX_PPDU_STATS);
			/*
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			if (!peer->monitor_peer) {
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_TX_PPDU_STATS);
				continue;
			}

			mon_peer = peer->monitor_peer;
			if (ppdu_desc->user[i].completion_status !=
			    HTT_PPDU_STATS_USER_STATUS_OK) {
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_TX_PPDU_STATS);
				continue;
			}

			delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
			start_tsf = delay_ppdu->ppdu_start_timestamp;
			end_tsf = delay_ppdu->ppdu_end_timestamp;

			if (mon_peer->last_delayed_ba) {
				dp_peer_copy_stats_to_bar(peer,
							  &ppdu_desc->user[i]);
				ppdu_desc->ppdu_id =
					mon_peer->last_delayed_ba_ppduid;
				ppdu_desc->ppdu_start_timestamp = start_tsf;
				ppdu_desc->ppdu_end_timestamp = end_tsf;
			}
			ppdu_desc->user[i].peer_last_delayed_ba =
				mon_peer->last_delayed_ba;
			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
		}
	}

	TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
	mon_pdev->list_depth--;
	TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
			  ppdu_info_list_elem);
	mon_pdev->sched_comp_list_depth++;
}

/**
 * dp_validate_fix_ppdu_tlv() - Function to validate the length of PPDU
 *
 * If the TLV length sent as part of PPDU TLV is less that expected size i.e
 * size of corresponding data structure, pad the remaining bytes with zeros
 * and continue processing the TLVs
 *
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_expected_size: Expected size of Tag
 * @tlv_len: TLV length received from FW
 *
 * Return: Pointer to updated TLV
 */
static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
						 uint32_t *tag_buf,
						 uint16_t tlv_expected_size,
						 uint16_t tlv_len)
{
	uint32_t *tlv_desc = tag_buf;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	qdf_assert_always(tlv_len != 0);

	/*
	 * Short TLV: copy into the pre-zeroed scratch buffer so that the
	 * caller can safely read the full expected structure size.
	 */
	if (tlv_len < tlv_expected_size) {
		qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size);
		qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len);
		tlv_desc = mon_pdev->ppdu_tlv_buf;
	}

	return tlv_desc;
}

/**
 * dp_process_ppdu_tag() - Function to process the PPDU TLVs
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_len: length of tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Dispatches each known PPDU-stats TLV tag to its handler after
 * length-validating (and zero-padding, if short) the TLV. Unknown tags are
 * silently ignored.
 *
 * Return: void
 */
static void dp_process_ppdu_tag(struct dp_pdev *pdev,
				uint32_t *tag_buf,
				uint32_t tlv_len,
				struct ppdu_info *ppdu_info)
{
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
	uint16_t tlv_expected_size;
	uint32_t *tlv_desc;

	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
						      ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
						    ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_cmpltn_common_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_array_tlv(
				pdev,
				tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
							     ppdu_info);
		break;
	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
		break;
	default:
		break;
	}
}

#ifdef WLAN_TELEMETRY_STATS_SUPPORT
/*
 * dp_ppdu_desc_user_airtime_consumption_update() - Accumulate the user's
 * phy tx time into the monitor peer's per-AC airtime consumption counter.
 * @peer: DP peer handle
 * @user: per-user PPDU completion descriptor
 */
static inline
void dp_ppdu_desc_user_airtime_consumption_update(
			struct dp_peer *peer,
			struct cdp_tx_completion_ppdu_user *user)
{
	struct dp_mon_peer *mon_peer = NULL;
	uint8_t ac = 0;

	mon_peer = peer->monitor_peer;
	if (qdf_unlikely(!mon_peer))
		return;

	ac = TID_TO_WME_AC(user->tid);
	DP_STATS_INC(mon_peer, airtime_consumption[ac].consumption,
		     user->phy_tx_time_us);
}
#else
static inline
void dp_ppdu_desc_user_airtime_consumption_update(
			struct dp_peer *peer,
			struct cdp_tx_completion_ppdu_user *user)
{ }
#endif

#if defined(WLAN_ATF_ENABLE) || defined(WLAN_TELEMETRY_STATS_SUPPORT)
/*
 * dp_ppdu_desc_user_phy_tx_time_update() - Compute the per-user phy tx
 * time share of a data PPDU and feed it to airtime accounting.
 * @pdev: DP pdev handle
 * @peer: DP peer handle
 * @ppdu_desc: PPDU completion descriptor
 * @user: per-user PPDU completion descriptor
 */
static void
dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
				     struct dp_peer *peer,
				     struct cdp_tx_completion_ppdu *ppdu_desc,
				     struct cdp_tx_completion_ppdu_user *user)
{
	uint32_t nss_ru_width_sum = 0;
	struct dp_mon_peer *mon_peer = NULL;

	if (!pdev || !ppdu_desc || !user || !peer)
		return;

	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
		return;

	mon_peer = peer->monitor_peer;
	if (qdf_unlikely(!mon_peer))
		return;

	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
	/* guard the division below against a zero denominator */
	if (!nss_ru_width_sum)
		nss_ru_width_sum = 1;

	/*
	 * For SU-MIMO PPDU phy Tx time is same for the single user.
	 * For MU-MIMO phy Tx time is calculated per user as below
	 * user phy tx time =
	 *	 Entire PPDU duration * MU Ratio * OFDMA Ratio
	 * MU Ratio = usr_nss / Sum_of_nss_of_all_users
	 * OFDMA_ratio = usr_ru_width / Sum_of_ru_width_of_all_users
	 * usr_ru_width = ru_end - ru_start + 1
	 */
	if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
		user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
	} else {
		user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
				user->nss * user->ru_tones) / nss_ru_width_sum;
	}

	dp_ppdu_desc_user_airtime_consumption_update(peer, user);
}
#else
static void
dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
				     struct dp_peer *peer,
				     struct cdp_tx_completion_ppdu *ppdu_desc,
				     struct cdp_tx_completion_ppdu_user *user)
{
}
#endif

#ifdef WLAN_SUPPORT_CTRL_FRAME_STATS
/*
 * dp_tx_ctrl_stats_update() - Update RTS and control-frame (NDPA/BAR)
 * counters on the monitor peer from the user's completion info.
 * @pdev: DP pdev handle
 * @peer: DP peer handle
 * @user: per-user PPDU completion descriptor
 */
static void
dp_tx_ctrl_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
			struct cdp_tx_completion_ppdu_user *user)
{
	struct dp_mon_peer *mon_peer = NULL;
	uint16_t fc = 0;

	if (!pdev || !peer || !user)
		return;

	mon_peer = peer->monitor_peer;
	if (qdf_unlikely(!mon_peer))
		return;

	if (user->mprot_type) {
		DP_STATS_INCC(mon_peer,
			      tx.rts_success, 1, user->rts_success);
		DP_STATS_INCC(mon_peer,
			      tx.rts_failure, 1, user->rts_failure);
	}
	fc = user->frame_ctrl;
	if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_TYPE_MASK) ==
	    QDF_IEEE80211_FC0_TYPE_CTL) {
		if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
		    QDF_IEEE80211_FC0_SUBTYPE_VHT_NDP_AN)
			DP_STATS_INC(mon_peer, tx.ndpa_cnt, 1);
		if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
		    QDF_IEEE80211_FC0_SUBTYPE_BAR)
			DP_STATS_INC(mon_peer, tx.bar_cnt, 1);
	}
}
#else
static void
dp_tx_ctrl_stats_update(struct dp_pdev *pdev,
struct dp_peer *peer, 4208 struct cdp_tx_completion_ppdu_user *user) 4209 { 4210 } 4211 #endif /* WLAN_SUPPORT_CTRL_FRAME_STATS */ 4212 4213 /** 4214 * dp_ppdu_desc_user_stats_update(): Function to update TX user stats 4215 * @pdev: DP pdev handle 4216 * @ppdu_info: per PPDU TLV descriptor 4217 * 4218 * return: void 4219 */ 4220 void 4221 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev, 4222 struct ppdu_info *ppdu_info) 4223 { 4224 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4225 struct dp_peer *peer = NULL; 4226 uint32_t tlv_bitmap_expected; 4227 uint32_t tlv_bitmap_default; 4228 uint16_t i; 4229 uint32_t num_users; 4230 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4231 4232 ppdu_desc = (struct cdp_tx_completion_ppdu *) 4233 qdf_nbuf_data(ppdu_info->nbuf); 4234 4235 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR) 4236 ppdu_desc->ppdu_id = ppdu_info->ppdu_id; 4237 4238 tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; 4239 if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode || 4240 mon_pdev->tx_capture_enabled) { 4241 if (ppdu_info->is_ampdu) 4242 tlv_bitmap_expected = 4243 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( 4244 ppdu_info->tlv_bitmap); 4245 } 4246 4247 tlv_bitmap_default = tlv_bitmap_expected; 4248 4249 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { 4250 num_users = ppdu_desc->bar_num_users; 4251 ppdu_desc->num_users = ppdu_desc->bar_num_users; 4252 } else { 4253 num_users = ppdu_desc->num_users; 4254 } 4255 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); 4256 4257 for (i = 0; i < num_users; i++) { 4258 ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu; 4259 ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu; 4260 4261 peer = dp_peer_get_ref_by_id(pdev->soc, 4262 ppdu_desc->user[i].peer_id, 4263 DP_MOD_ID_TX_PPDU_STATS); 4264 /** 4265 * This check is to make sure peer is not deleted 4266 * after processing the TLVs. 
4267 */ 4268 if (!peer) 4269 continue; 4270 4271 ppdu_desc->user[i].is_bss_peer = peer->bss_peer; 4272 4273 dp_ppdu_desc_user_phy_tx_time_update(pdev, peer, ppdu_desc, 4274 &ppdu_desc->user[i]); 4275 4276 dp_tx_ctrl_stats_update(pdev, peer, &ppdu_desc->user[i]); 4277 4278 /* 4279 * different frame like DATA, BAR or CTRL has different 4280 * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we 4281 * receive other tlv in-order/sequential from fw. 4282 * Since ACK_BA_STATUS TLV come from Hardware it is 4283 * asynchronous So we need to depend on some tlv to confirm 4284 * all tlv is received for a ppdu. 4285 * So we depend on both SCHED_CMD_STATUS_TLV and 4286 * ACK_BA_STATUS_TLV. for failure packet we won't get 4287 * ACK_BA_STATUS_TLV. 4288 */ 4289 if (!(ppdu_info->tlv_bitmap & 4290 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) || 4291 (!(ppdu_info->tlv_bitmap & 4292 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) && 4293 (ppdu_desc->user[i].completion_status == 4294 HTT_PPDU_STATS_USER_STATUS_OK))) { 4295 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 4296 continue; 4297 } 4298 4299 /** 4300 * Update tx stats for data frames having Qos as well as 4301 * non-Qos data tid 4302 */ 4303 4304 if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX || 4305 (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) || 4306 (ppdu_desc->htt_frame_type == 4307 HTT_STATS_FTYPE_SGEN_QOS_NULL) || 4308 ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) && 4309 (ppdu_desc->num_mpdu > 1))) && 4310 (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) { 4311 dp_tx_stats_update(pdev, peer, 4312 &ppdu_desc->user[i], 4313 ppdu_desc->ack_rssi); 4314 } 4315 4316 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 4317 tlv_bitmap_expected = tlv_bitmap_default; 4318 } 4319 } 4320 4321 #if !defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(QCA_MONITOR_2_0_SUPPORT) 4322 /* 4323 * dp_tx_ppdu_desc_notify() - Notify to upper layer about PPDU via WDI 4324 * 4325 * @pdev: Datapath pdev handle 4326 * @nbuf: Buffer to be 
delivered to upper layer 4327 * 4328 * Return: void 4329 */ 4330 static void dp_tx_ppdu_desc_notify(struct dp_pdev *pdev, qdf_nbuf_t nbuf) 4331 { 4332 struct dp_soc *soc = pdev->soc; 4333 struct dp_mon_ops *mon_ops = NULL; 4334 4335 mon_ops = dp_mon_ops_get(soc); 4336 if (mon_ops && mon_ops->mon_ppdu_desc_notify) 4337 mon_ops->mon_ppdu_desc_notify(pdev, nbuf); 4338 else 4339 qdf_nbuf_free(nbuf); 4340 } 4341 4342 void dp_ppdu_desc_deliver(struct dp_pdev *pdev, 4343 struct ppdu_info *ppdu_info) 4344 { 4345 struct ppdu_info *s_ppdu_info = NULL; 4346 struct ppdu_info *ppdu_info_next = NULL; 4347 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4348 qdf_nbuf_t nbuf; 4349 uint32_t time_delta = 0; 4350 bool starved = 0; 4351 bool matched = 0; 4352 bool recv_ack_ba_done = 0; 4353 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4354 4355 if (ppdu_info->tlv_bitmap & 4356 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && 4357 ppdu_info->done) 4358 recv_ack_ba_done = 1; 4359 4360 mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid; 4361 4362 s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list); 4363 4364 TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list, 4365 ppdu_info_list_elem, ppdu_info_next) { 4366 if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32) 4367 time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) + 4368 ppdu_info->tsf_l32; 4369 else 4370 time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32; 4371 4372 if (!s_ppdu_info->done && !recv_ack_ba_done) { 4373 if (time_delta < MAX_SCHED_STARVE) { 4374 dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]", 4375 pdev->pdev_id, 4376 s_ppdu_info->ppdu_id, 4377 s_ppdu_info->sched_cmdid, 4378 s_ppdu_info->tlv_bitmap, 4379 s_ppdu_info->tsf_l32, 4380 s_ppdu_info->done); 4381 break; 4382 } 4383 starved = 1; 4384 } 4385 4386 mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid; 4387 TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info, 4388 ppdu_info_list_elem); 4389 
mon_pdev->sched_comp_list_depth--; 4390 4391 nbuf = s_ppdu_info->nbuf; 4392 qdf_assert_always(nbuf); 4393 ppdu_desc = (struct cdp_tx_completion_ppdu *) 4394 qdf_nbuf_data(nbuf); 4395 ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap; 4396 4397 if (starved) { 4398 dp_mon_info("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n", 4399 ppdu_desc->frame_ctrl, 4400 ppdu_desc->htt_frame_type, 4401 ppdu_desc->tlv_bitmap, 4402 ppdu_desc->user[0].completion_status); 4403 starved = 0; 4404 } 4405 4406 if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id && 4407 ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid) 4408 matched = 1; 4409 4410 dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info); 4411 4412 qdf_mem_free(s_ppdu_info); 4413 4414 dp_tx_ppdu_desc_notify(pdev, nbuf); 4415 4416 if (matched) 4417 break; 4418 } 4419 } 4420 #endif 4421 4422 /* 4423 * dp_tx_ppdu_desc_deliver() - Deliver PPDU desc to upper layer 4424 * 4425 * @pdev: Datapath pdev handle 4426 * @ppdu_info: per PPDU TLV descriptor 4427 * 4428 * Return: void 4429 */ 4430 static void dp_tx_ppdu_desc_deliver(struct dp_pdev *pdev, 4431 struct ppdu_info *ppdu_info) 4432 { 4433 struct dp_soc *soc = pdev->soc; 4434 struct dp_mon_ops *mon_ops = NULL; 4435 4436 mon_ops = dp_mon_ops_get(soc); 4437 4438 if (mon_ops && mon_ops->mon_ppdu_desc_deliver) { 4439 mon_ops->mon_ppdu_desc_deliver(pdev, ppdu_info); 4440 } else { 4441 qdf_nbuf_free(ppdu_info->nbuf); 4442 ppdu_info->nbuf = NULL; 4443 qdf_mem_free(ppdu_info); 4444 } 4445 } 4446 4447 /** 4448 * dp_get_ppdu_desc(): Function to allocate new PPDU status 4449 * desc for new ppdu id 4450 * @pdev: DP pdev handle 4451 * @ppdu_id: PPDU unique identifier 4452 * @tlv_type: TLV type received 4453 * @tsf_l32: timestamp received along with ppdu stats indication header 4454 * @max_users: Maximum user for that particular ppdu 4455 * 4456 * return: ppdu_info per ppdu tlv structure 4457 */ 4458 static 4459 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id, 4460 
uint8_t tlv_type, uint32_t tsf_l32, 4461 uint8_t max_users) 4462 { 4463 struct ppdu_info *ppdu_info = NULL; 4464 struct ppdu_info *s_ppdu_info = NULL; 4465 struct ppdu_info *ppdu_info_next = NULL; 4466 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4467 uint32_t size = 0; 4468 struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL; 4469 struct cdp_tx_completion_ppdu_user *tmp_user; 4470 uint32_t time_delta; 4471 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4472 4473 /* 4474 * Find ppdu_id node exists or not 4475 */ 4476 TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list, 4477 ppdu_info_list_elem, ppdu_info_next) { 4478 if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) { 4479 if (ppdu_info->tsf_l32 > tsf_l32) 4480 time_delta = (MAX_TSF_32 - 4481 ppdu_info->tsf_l32) + tsf_l32; 4482 else 4483 time_delta = tsf_l32 - ppdu_info->tsf_l32; 4484 4485 if (time_delta > WRAP_DROP_TSF_DELTA) { 4486 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, 4487 ppdu_info, ppdu_info_list_elem); 4488 mon_pdev->list_depth--; 4489 pdev->stats.ppdu_wrap_drop++; 4490 tmp_ppdu_desc = 4491 (struct cdp_tx_completion_ppdu *) 4492 qdf_nbuf_data(ppdu_info->nbuf); 4493 tmp_user = &tmp_ppdu_desc->user[0]; 4494 dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n", 4495 ppdu_info->ppdu_id, 4496 ppdu_info->tsf_l32, 4497 ppdu_info->tlv_bitmap, 4498 tmp_user->completion_status, 4499 ppdu_info->compltn_common_tlv, 4500 ppdu_info->ack_ba_tlv, 4501 ppdu_id, tsf_l32, 4502 tlv_type); 4503 qdf_nbuf_free(ppdu_info->nbuf); 4504 ppdu_info->nbuf = NULL; 4505 qdf_mem_free(ppdu_info); 4506 } else { 4507 break; 4508 } 4509 } 4510 } 4511 4512 /* 4513 * check if it is ack ba tlv and if it is not there in ppdu info 4514 * list then check it in sched completion ppdu list 4515 */ 4516 if (!ppdu_info && 4517 tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) { 4518 TAILQ_FOREACH(s_ppdu_info, 4519 &mon_pdev->sched_comp_ppdu_list, 4520 
ppdu_info_list_elem) { 4521 if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) { 4522 if (s_ppdu_info->tsf_l32 > tsf_l32) 4523 time_delta = (MAX_TSF_32 - 4524 s_ppdu_info->tsf_l32) + 4525 tsf_l32; 4526 else 4527 time_delta = tsf_l32 - 4528 s_ppdu_info->tsf_l32; 4529 if (time_delta < WRAP_DROP_TSF_DELTA) { 4530 ppdu_info = s_ppdu_info; 4531 break; 4532 } 4533 } else { 4534 /* 4535 * ACK BA STATUS TLV comes sequential order 4536 * if we received ack ba status tlv for second 4537 * ppdu and first ppdu is still waiting for 4538 * ACK BA STATUS TLV. Based on fw comment 4539 * we won't receive it tlv later. So we can 4540 * set ppdu info done. 4541 */ 4542 if (s_ppdu_info) 4543 s_ppdu_info->done = 1; 4544 } 4545 } 4546 } 4547 4548 if (ppdu_info) { 4549 if (ppdu_info->tlv_bitmap & (1 << tlv_type)) { 4550 /** 4551 * if we get tlv_type that is already been processed 4552 * for ppdu, that means we got a new ppdu with same 4553 * ppdu id. Hence Flush the older ppdu 4554 * for MUMIMO and OFDMA, In a PPDU we have 4555 * multiple user with same tlv types. 
tlv bitmap is 4556 * used to check whether SU or MU_MIMO/OFDMA 4557 */ 4558 if (!(ppdu_info->tlv_bitmap & 4559 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) 4560 return ppdu_info; 4561 4562 ppdu_desc = (struct cdp_tx_completion_ppdu *) 4563 qdf_nbuf_data(ppdu_info->nbuf); 4564 4565 /** 4566 * apart from ACK BA STATUS TLV rest all comes in order 4567 * so if tlv type not ACK BA STATUS TLV we can deliver 4568 * ppdu_info 4569 */ 4570 if ((tlv_type == 4571 HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && 4572 ((ppdu_desc->htt_frame_type == 4573 HTT_STATS_FTYPE_SGEN_MU_BAR) || 4574 (ppdu_desc->htt_frame_type == 4575 HTT_STATS_FTYPE_SGEN_BE_MU_BAR))) 4576 return ppdu_info; 4577 4578 dp_tx_ppdu_desc_deliver(pdev, ppdu_info); 4579 } else { 4580 return ppdu_info; 4581 } 4582 } 4583 4584 /** 4585 * Flush the head ppdu descriptor if ppdu desc list reaches max 4586 * threshold 4587 */ 4588 if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 4589 ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list); 4590 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, 4591 ppdu_info, ppdu_info_list_elem); 4592 mon_pdev->list_depth--; 4593 pdev->stats.ppdu_drop++; 4594 qdf_nbuf_free(ppdu_info->nbuf); 4595 ppdu_info->nbuf = NULL; 4596 qdf_mem_free(ppdu_info); 4597 } 4598 4599 size = sizeof(struct cdp_tx_completion_ppdu) + 4600 (max_users * sizeof(struct cdp_tx_completion_ppdu_user)); 4601 4602 /* 4603 * Allocate new ppdu_info node 4604 */ 4605 ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info)); 4606 if (!ppdu_info) 4607 return NULL; 4608 4609 ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size, 4610 0, 4, TRUE); 4611 if (!ppdu_info->nbuf) { 4612 qdf_mem_free(ppdu_info); 4613 return NULL; 4614 } 4615 4616 ppdu_info->ppdu_desc = 4617 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 4618 qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size); 4619 4620 if (!qdf_nbuf_put_tail(ppdu_info->nbuf, size)) { 4621 dp_mon_err("No tailroom for HTT PPDU"); 4622 qdf_nbuf_free(ppdu_info->nbuf); 4623 
ppdu_info->nbuf = NULL; 4624 ppdu_info->last_user = 0; 4625 qdf_mem_free(ppdu_info); 4626 return NULL; 4627 } 4628 4629 ppdu_info->ppdu_desc->max_users = max_users; 4630 ppdu_info->tsf_l32 = tsf_l32; 4631 /** 4632 * No lock is needed because all PPDU TLVs are processed in 4633 * same context and this list is updated in same context 4634 */ 4635 TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info, 4636 ppdu_info_list_elem); 4637 mon_pdev->list_depth++; 4638 return ppdu_info; 4639 } 4640 4641 /** 4642 * dp_htt_process_tlv(): Function to process each PPDU TLVs 4643 * @pdev: DP pdev handle 4644 * @htt_t2h_msg: HTT target to host message 4645 * 4646 * return: ppdu_info per ppdu tlv structure 4647 */ 4648 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev, 4649 qdf_nbuf_t htt_t2h_msg) 4650 { 4651 uint32_t length; 4652 uint32_t ppdu_id; 4653 uint8_t tlv_type; 4654 uint32_t tlv_length, tlv_bitmap_expected; 4655 uint8_t *tlv_buf; 4656 struct ppdu_info *ppdu_info = NULL; 4657 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4658 uint8_t max_users = CDP_MU_MAX_USERS; 4659 uint32_t tsf_l32; 4660 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4661 4662 uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg); 4663 4664 length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word); 4665 4666 msg_word = msg_word + 1; 4667 ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word); 4668 4669 msg_word = msg_word + 1; 4670 tsf_l32 = (uint32_t)(*msg_word); 4671 4672 msg_word = msg_word + 2; 4673 while (length > 0) { 4674 tlv_buf = (uint8_t *)msg_word; 4675 tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word); 4676 tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word); 4677 if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG)) 4678 pdev->stats.ppdu_stats_counter[tlv_type]++; 4679 4680 if (tlv_length == 0) 4681 break; 4682 4683 tlv_length += HTT_TLV_HDR_LEN; 4684 4685 /** 4686 * Not allocating separate ppdu descriptor for MGMT Payload 4687 * TLV as this is sent as separate WDI indication and 
it 4688 * doesn't contain any ppdu information 4689 */ 4690 if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) { 4691 mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf; 4692 mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id; 4693 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 4694 HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET 4695 (*(msg_word + 1)); 4696 msg_word = 4697 (uint32_t *)((uint8_t *)tlv_buf + tlv_length); 4698 length -= (tlv_length); 4699 continue; 4700 } 4701 4702 /* 4703 * retrieve max_users if it's USERS_INFO, 4704 * else, it's 1 for COMPLTN_FLUSH, 4705 * else, use CDP_MU_MAX_USERS 4706 */ 4707 if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) { 4708 max_users = 4709 HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1)); 4710 } else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) { 4711 max_users = 1; 4712 } 4713 4714 ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type, 4715 tsf_l32, max_users); 4716 if (!ppdu_info) 4717 return NULL; 4718 4719 ppdu_info->ppdu_id = ppdu_id; 4720 ppdu_info->tlv_bitmap |= (1 << tlv_type); 4721 4722 dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info); 4723 4724 /** 4725 * Increment pdev level tlv count to monitor 4726 * missing TLVs 4727 */ 4728 mon_pdev->tlv_count++; 4729 ppdu_info->last_tlv_cnt = mon_pdev->tlv_count; 4730 msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length); 4731 length -= (tlv_length); 4732 } 4733 4734 if (!ppdu_info) 4735 return NULL; 4736 4737 mon_pdev->last_ppdu_id = ppdu_id; 4738 4739 tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; 4740 4741 if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode || 4742 mon_pdev->tx_capture_enabled) { 4743 if (ppdu_info->is_ampdu) 4744 tlv_bitmap_expected = 4745 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( 4746 ppdu_info->tlv_bitmap); 4747 } 4748 4749 ppdu_desc = ppdu_info->ppdu_desc; 4750 4751 if (!ppdu_desc) 4752 return NULL; 4753 4754 if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status != 4755 HTT_PPDU_STATS_USER_STATUS_OK) { 4756 
tlv_bitmap_expected = tlv_bitmap_expected & 0xFF; 4757 } 4758 4759 /* 4760 * for frame type DATA and BAR, we update stats based on MSDU, 4761 * successful msdu and mpdu are populate from ACK BA STATUS TLV 4762 * which comes out of order. successful mpdu also populated from 4763 * COMPLTN COMMON TLV which comes in order. for every ppdu_info 4764 * we store successful mpdu from both tlv and compare before delivering 4765 * to make sure we received ACK BA STATUS TLV. For some self generated 4766 * frame we won't get ack ba status tlv so no need to wait for 4767 * ack ba status tlv. 4768 */ 4769 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL && 4770 ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) { 4771 /* 4772 * most of the time bar frame will have duplicate ack ba 4773 * status tlv 4774 */ 4775 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR && 4776 (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)) 4777 return NULL; 4778 /* 4779 * For data frame, compltn common tlv should match ack ba status 4780 * tlv and completion status. Reason we are checking first user 4781 * for ofdma, completion seen at next MU BAR frm, for mimo 4782 * only for first user completion will be immediate. 4783 */ 4784 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && 4785 (ppdu_desc->user[0].completion_status == 0 && 4786 (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))) 4787 return NULL; 4788 } 4789 4790 /** 4791 * Once all the TLVs for a given PPDU has been processed, 4792 * return PPDU status to be delivered to higher layer. 4793 * tlv_bitmap_expected can't be available for different frame type. 4794 * But SCHED CMD STATS TLV is the last TLV from the FW for a ppdu. 4795 * apart from ACK BA TLV, FW sends other TLV in sequential order. 4796 * flush tlv comes separate. 
4797 */ 4798 if ((ppdu_info->tlv_bitmap != 0 && 4799 (ppdu_info->tlv_bitmap & 4800 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) || 4801 (ppdu_info->tlv_bitmap & 4802 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) { 4803 ppdu_info->done = 1; 4804 return ppdu_info; 4805 } 4806 4807 return NULL; 4808 } 4809 #endif /* QCA_ENHANCED_STATS_SUPPORT */ 4810 4811 #ifdef QCA_ENHANCED_STATS_SUPPORT 4812 /** 4813 * dp_tx_ppdu_stats_feat_enable_check() - Check if feature(s) is enabled to 4814 * consume stats received from FW via HTT 4815 * @pdev: Datapath pdev handle 4816 * 4817 * Return: void 4818 */ 4819 static bool dp_tx_ppdu_stats_feat_enable_check(struct dp_pdev *pdev) 4820 { 4821 struct dp_soc *soc = pdev->soc; 4822 struct dp_mon_ops *mon_ops = NULL; 4823 4824 mon_ops = dp_mon_ops_get(soc); 4825 if (mon_ops && mon_ops->mon_ppdu_stats_feat_enable_check) 4826 return mon_ops->mon_ppdu_stats_feat_enable_check(pdev); 4827 else 4828 return false; 4829 } 4830 #endif 4831 4832 #if defined(WDI_EVENT_ENABLE) 4833 #ifdef QCA_ENHANCED_STATS_SUPPORT 4834 /** 4835 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW 4836 * @soc: DP SOC handle 4837 * @pdev_id: pdev id 4838 * @htt_t2h_msg: HTT message nbuf 4839 * 4840 * return:void 4841 */ 4842 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, 4843 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) 4844 { 4845 struct dp_pdev *pdev; 4846 struct ppdu_info *ppdu_info = NULL; 4847 bool free_buf = true; 4848 struct dp_mon_pdev *mon_pdev; 4849 4850 if (pdev_id >= MAX_PDEV_CNT) 4851 return true; 4852 4853 pdev = soc->pdev_list[pdev_id]; 4854 if (!pdev) 4855 return true; 4856 4857 mon_pdev = pdev->monitor_pdev; 4858 if (!mon_pdev) 4859 return true; 4860 4861 if (!dp_tx_ppdu_stats_feat_enable_check(pdev)) 4862 return free_buf; 4863 4864 qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock); 4865 ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg); 4866 4867 if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) { 4868 if 
(dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv 4869 (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) != 4870 QDF_STATUS_SUCCESS) 4871 free_buf = false; 4872 } 4873 4874 if (ppdu_info) 4875 dp_tx_ppdu_desc_deliver(pdev, ppdu_info); 4876 4877 mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL; 4878 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0; 4879 mon_pdev->mgmtctrl_frm_info.ppdu_id = 0; 4880 4881 qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock); 4882 4883 return free_buf; 4884 } 4885 #elif (!defined(REMOVE_PKT_LOG)) 4886 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, 4887 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) 4888 { 4889 return true; 4890 } 4891 #endif/* QCA_ENHANCED_STATS_SUPPORT */ 4892 #endif 4893 4894 #if defined(WDI_EVENT_ENABLE) &&\ 4895 (defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG)) 4896 /* 4897 * dp_ppdu_stats_ind_handler() - PPDU stats msg handler 4898 * @htt_soc: HTT SOC handle 4899 * @msg_word: Pointer to payload 4900 * @htt_t2h_msg: HTT msg nbuf 4901 * 4902 * Return: True if buffer should be freed by caller. 
4903 */ 4904 bool 4905 dp_ppdu_stats_ind_handler(struct htt_soc *soc, 4906 uint32_t *msg_word, 4907 qdf_nbuf_t htt_t2h_msg) 4908 { 4909 u_int8_t pdev_id; 4910 u_int8_t target_pdev_id; 4911 bool free_buf; 4912 4913 target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word); 4914 pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, 4915 target_pdev_id); 4916 dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc, 4917 htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL, 4918 pdev_id); 4919 4920 free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id, 4921 htt_t2h_msg); 4922 4923 return free_buf; 4924 } 4925 #endif 4926 4927 void 4928 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) 4929 { 4930 pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor; 4931 } 4932 4933 bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) 4934 { 4935 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 4936 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4937 4938 if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) || 4939 (mon_pdev->mo_data_filter & FILTER_DATA_UCAST)) 4940 return true; 4941 4942 return false; 4943 } 4944 4945 bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) 4946 { 4947 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 4948 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4949 4950 if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) || 4951 (mon_pdev->mo_data_filter & FILTER_DATA_MCAST)) 4952 return true; 4953 4954 return false; 4955 } 4956 4957 bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) 4958 { 4959 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 4960 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4961 4962 if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) || 4963 (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) { 4964 if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) || 4965 (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) { 4966 return true; 4967 } 4968 } 4969 4970 return false; 4971 } 4972 4973 
/*
 * dp_mon_soc_cfg_init() - Apply target-specific monitor SOC configuration:
 * delayed-replenish entry count, HW NAC monitor support, full monitor mode
 * (QCN9000, via INI) and monitor mode v2 selection.
 * @soc: DP SOC handle
 *
 * Return: QDF_STATUS_SUCCESS (asserts on an unknown target type)
 */
QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc)
{
	int target_type;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct cdp_mon_ops *cdp_ops;

	cdp_ops = dp_mon_cdp_ops_get(soc);
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
		/* do nothing */
		break;
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCN9000:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		/* Full monitor mode is INI-controlled on this target */
		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) {
			if (cdp_ops  && cdp_ops->config_full_mon_mode)
				cdp_ops->config_full_mon_mode((struct cdp_soc_t *)soc, 1);
		}
		break;
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
	case TARGET_TYPE_QCN9160:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCN9224:
	case TARGET_TYPE_QCA5332:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		/* BE targets use the v2 (2.0) monitor architecture */
		mon_soc->monitor_mode_v2 = 1;
		break;
	default:
		dp_mon_info("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	dp_mon_info("hw_nac_monitor_support = %d",
		    mon_soc->hw_nac_monitor_support);

	return QDF_STATUS_SUCCESS;
}
5035 5036 /** 5037 * dp_mon_pdev_per_target_config() - Target specific monitor pdev configuration 5038 * @pdev: PDEV handle [Should be valid] 5039 * 5040 * Return: None 5041 */ 5042 static void dp_mon_pdev_per_target_config(struct dp_pdev *pdev) 5043 { 5044 struct dp_soc *soc = pdev->soc; 5045 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 5046 int target_type; 5047 5048 target_type = hal_get_target_type(soc->hal_soc); 5049 switch (target_type) { 5050 case TARGET_TYPE_KIWI: 5051 case TARGET_TYPE_MANGO: 5052 mon_pdev->is_tlv_hdr_64_bit = true; 5053 break; 5054 default: 5055 mon_pdev->is_tlv_hdr_64_bit = false; 5056 break; 5057 } 5058 } 5059 5060 QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev) 5061 { 5062 struct dp_soc *soc; 5063 struct dp_mon_pdev *mon_pdev; 5064 struct dp_mon_ops *mon_ops; 5065 qdf_size_t mon_pdev_context_size; 5066 5067 if (!pdev) { 5068 dp_mon_err("pdev is NULL"); 5069 goto fail0; 5070 } 5071 5072 soc = pdev->soc; 5073 5074 mon_pdev_context_size = soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_PDEV); 5075 mon_pdev = dp_context_alloc_mem(soc, DP_MON_PDEV_TYPE, mon_pdev_context_size); 5076 if (!mon_pdev) { 5077 dp_mon_err("%pK: MONITOR pdev allocation failed", pdev); 5078 goto fail0; 5079 } 5080 5081 pdev->monitor_pdev = mon_pdev; 5082 mon_ops = dp_mon_ops_get(pdev->soc); 5083 if (!mon_ops) { 5084 dp_mon_err("%pK: Invalid monitor ops", pdev); 5085 goto fail1; 5086 } 5087 5088 if (mon_ops->mon_pdev_alloc) { 5089 if (mon_ops->mon_pdev_alloc(pdev)) { 5090 dp_mon_err("%pK: MONITOR pdev alloc failed", pdev); 5091 goto fail1; 5092 } 5093 } 5094 5095 if (mon_ops->mon_rings_alloc) { 5096 if (mon_ops->mon_rings_alloc(pdev)) { 5097 dp_mon_err("%pK: MONITOR rings setup failed", pdev); 5098 goto fail2; 5099 } 5100 } 5101 5102 /* Rx monitor mode specific init */ 5103 if (mon_ops->rx_mon_desc_pool_alloc) { 5104 if (mon_ops->rx_mon_desc_pool_alloc(pdev)) { 5105 dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev); 5106 goto fail3; 5107 } 
5108 } 5109 5110 if (mon_ops->mon_rx_ppdu_info_cache_create) { 5111 if (mon_ops->mon_rx_ppdu_info_cache_create(pdev)) { 5112 dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev); 5113 goto fail4; 5114 } 5115 } 5116 pdev->monitor_pdev = mon_pdev; 5117 dp_mon_pdev_per_target_config(pdev); 5118 5119 return QDF_STATUS_SUCCESS; 5120 fail4: 5121 if (mon_ops->rx_mon_desc_pool_free) 5122 mon_ops->rx_mon_desc_pool_free(pdev); 5123 fail3: 5124 if (mon_ops->mon_rings_free) 5125 mon_ops->mon_rings_free(pdev); 5126 fail2: 5127 if (mon_ops->mon_pdev_free) 5128 mon_ops->mon_pdev_free(pdev); 5129 fail1: 5130 pdev->monitor_pdev = NULL; 5131 dp_context_free_mem(soc, DP_MON_PDEV_TYPE, mon_pdev); 5132 fail0: 5133 return QDF_STATUS_E_NOMEM; 5134 } 5135 5136 QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev) 5137 { 5138 struct dp_mon_pdev *mon_pdev; 5139 struct dp_mon_ops *mon_ops = NULL; 5140 5141 if (!pdev) { 5142 dp_mon_err("pdev is NULL"); 5143 return QDF_STATUS_E_FAILURE; 5144 } 5145 5146 mon_pdev = pdev->monitor_pdev; 5147 if (!mon_pdev) { 5148 dp_mon_err("Monitor pdev is NULL"); 5149 return QDF_STATUS_E_FAILURE; 5150 } 5151 5152 mon_ops = dp_mon_ops_get(pdev->soc); 5153 if (!mon_ops) { 5154 dp_mon_err("Monitor ops is NULL"); 5155 return QDF_STATUS_E_FAILURE; 5156 } 5157 5158 if (mon_ops->mon_rx_ppdu_info_cache_destroy) 5159 mon_ops->mon_rx_ppdu_info_cache_destroy(pdev); 5160 if (mon_ops->rx_mon_desc_pool_free) 5161 mon_ops->rx_mon_desc_pool_free(pdev); 5162 if (mon_ops->mon_rings_free) 5163 mon_ops->mon_rings_free(pdev); 5164 if (mon_ops->mon_pdev_free) 5165 mon_ops->mon_pdev_free(pdev); 5166 5167 dp_context_free_mem(pdev->soc, DP_MON_PDEV_TYPE, mon_pdev); 5168 pdev->monitor_pdev = NULL; 5169 return QDF_STATUS_SUCCESS; 5170 } 5171 5172 QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev) 5173 { 5174 struct dp_soc *soc; 5175 struct dp_mon_pdev *mon_pdev; 5176 struct dp_mon_ops *mon_ops = NULL; 5177 5178 if (!pdev) { 5179 dp_mon_err("pdev is NULL"); 5180 return QDF_STATUS_E_FAILURE; 
5181 } 5182 5183 soc = pdev->soc; 5184 mon_pdev = pdev->monitor_pdev; 5185 5186 mon_pdev->invalid_mon_peer = qdf_mem_malloc(sizeof(struct dp_mon_peer)); 5187 if (!mon_pdev->invalid_mon_peer) { 5188 dp_mon_err("%pK: Memory allocation failed for invalid " 5189 "monitor peer", pdev); 5190 return QDF_STATUS_E_NOMEM; 5191 } 5192 5193 mon_ops = dp_mon_ops_get(pdev->soc); 5194 if (!mon_ops) { 5195 dp_mon_err("Monitor ops is NULL"); 5196 goto fail0; 5197 } 5198 5199 mon_pdev->filter = dp_mon_filter_alloc(mon_pdev); 5200 if (!mon_pdev->filter) { 5201 dp_mon_err("%pK: Memory allocation failed for monitor filter", 5202 pdev); 5203 goto fail0; 5204 } 5205 5206 if (mon_ops->tx_mon_filter_alloc) { 5207 if (mon_ops->tx_mon_filter_alloc(pdev)) { 5208 dp_mon_err("%pK: Memory allocation failed for tx monitor " 5209 "filter", pdev); 5210 goto fail1; 5211 } 5212 } 5213 5214 qdf_spinlock_create(&mon_pdev->ppdu_stats_lock); 5215 qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex); 5216 mon_pdev->monitor_configured = false; 5217 mon_pdev->mon_chan_band = REG_BAND_UNKNOWN; 5218 5219 TAILQ_INIT(&mon_pdev->neighbour_peers_list); 5220 mon_pdev->neighbour_peers_added = false; 5221 mon_pdev->monitor_configured = false; 5222 /* Monitor filter init */ 5223 mon_pdev->mon_filter_mode = MON_FILTER_ALL; 5224 mon_pdev->fp_mgmt_filter = FILTER_MGMT_ALL; 5225 mon_pdev->fp_ctrl_filter = FILTER_CTRL_ALL; 5226 mon_pdev->fp_data_filter = FILTER_DATA_ALL; 5227 mon_pdev->mo_mgmt_filter = FILTER_MGMT_ALL; 5228 mon_pdev->mo_ctrl_filter = FILTER_CTRL_ALL; 5229 mon_pdev->mo_data_filter = FILTER_DATA_ALL; 5230 5231 /* 5232 * initialize ppdu tlv list 5233 */ 5234 TAILQ_INIT(&mon_pdev->ppdu_info_list); 5235 TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list); 5236 5237 mon_pdev->list_depth = 0; 5238 mon_pdev->tlv_count = 0; 5239 /* initlialize cal client timer */ 5240 dp_cal_client_attach(&mon_pdev->cal_client_ctx, 5241 dp_pdev_to_cdp_pdev(pdev), 5242 pdev->soc->osdev, 5243 &dp_iterate_update_peer_list); 5244 if 
(dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS) 5245 goto fail2; 5246 5247 if (mon_ops->mon_lite_mon_alloc) { 5248 if (mon_ops->mon_lite_mon_alloc(pdev) != QDF_STATUS_SUCCESS) { 5249 dp_mon_err("%pK: lite mon alloc failed", pdev); 5250 goto fail3; 5251 } 5252 } 5253 5254 if (mon_ops->mon_rings_init) { 5255 if (mon_ops->mon_rings_init(pdev)) { 5256 dp_mon_err("%pK: MONITOR rings setup failed", pdev); 5257 goto fail4; 5258 } 5259 } 5260 5261 /* initialize sw monitor rx descriptors */ 5262 if (mon_ops->rx_mon_desc_pool_init) 5263 mon_ops->rx_mon_desc_pool_init(pdev); 5264 5265 /* allocate buffers and replenish the monitor RxDMA ring */ 5266 if (mon_ops->rx_mon_buffers_alloc) { 5267 if (mon_ops->rx_mon_buffers_alloc(pdev)) { 5268 dp_mon_err("%pK: rx mon buffers alloc failed", pdev); 5269 goto fail5; 5270 } 5271 } 5272 5273 /* attach monitor function */ 5274 dp_monitor_tx_ppdu_stats_attach(pdev); 5275 5276 /* mon pdev extended init */ 5277 if (mon_ops->mon_pdev_ext_init) 5278 mon_ops->mon_pdev_ext_init(pdev); 5279 5280 mon_pdev->is_dp_mon_pdev_initialized = true; 5281 5282 return QDF_STATUS_SUCCESS; 5283 5284 fail5: 5285 if (mon_ops->rx_mon_desc_pool_deinit) 5286 mon_ops->rx_mon_desc_pool_deinit(pdev); 5287 5288 if (mon_ops->mon_rings_deinit) 5289 mon_ops->mon_rings_deinit(pdev); 5290 fail4: 5291 if (mon_ops->mon_lite_mon_dealloc) 5292 mon_ops->mon_lite_mon_dealloc(pdev); 5293 fail3: 5294 dp_htt_ppdu_stats_detach(pdev); 5295 fail2: 5296 qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex); 5297 qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock); 5298 if (mon_ops->tx_mon_filter_dealloc) 5299 mon_ops->tx_mon_filter_dealloc(pdev); 5300 fail1: 5301 dp_mon_filter_dealloc(mon_pdev); 5302 fail0: 5303 qdf_mem_free(mon_pdev->invalid_mon_peer); 5304 return QDF_STATUS_E_FAILURE; 5305 } 5306 5307 QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev) 5308 { 5309 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 5310 struct dp_mon_ops *mon_ops = NULL; 5311 5312 mon_ops = 
dp_mon_ops_get(pdev->soc); 5313 if (!mon_ops) { 5314 dp_mon_err("Monitor ops is NULL"); 5315 return QDF_STATUS_E_FAILURE; 5316 } 5317 5318 if (!mon_pdev->is_dp_mon_pdev_initialized) 5319 return QDF_STATUS_SUCCESS; 5320 5321 dp_mon_filters_reset(pdev); 5322 5323 /* mon pdev extended deinit */ 5324 if (mon_ops->mon_pdev_ext_deinit) 5325 mon_ops->mon_pdev_ext_deinit(pdev); 5326 5327 /* detach monitor function */ 5328 dp_monitor_tx_ppdu_stats_detach(pdev); 5329 5330 if (mon_ops->rx_mon_buffers_free) 5331 mon_ops->rx_mon_buffers_free(pdev); 5332 if (mon_ops->rx_mon_desc_pool_deinit) 5333 mon_ops->rx_mon_desc_pool_deinit(pdev); 5334 if (mon_ops->mon_rings_deinit) 5335 mon_ops->mon_rings_deinit(pdev); 5336 dp_cal_client_detach(&mon_pdev->cal_client_ctx); 5337 if (mon_ops->mon_lite_mon_dealloc) 5338 mon_ops->mon_lite_mon_dealloc(pdev); 5339 dp_htt_ppdu_stats_detach(pdev); 5340 qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock); 5341 dp_neighbour_peers_detach(pdev); 5342 dp_pktlogmod_exit(pdev); 5343 if (mon_ops->tx_mon_filter_dealloc) 5344 mon_ops->tx_mon_filter_dealloc(pdev); 5345 if (mon_pdev->filter) 5346 dp_mon_filter_dealloc(mon_pdev); 5347 if (mon_ops->mon_rings_deinit) 5348 mon_ops->mon_rings_deinit(pdev); 5349 if (mon_pdev->invalid_mon_peer) 5350 qdf_mem_free(mon_pdev->invalid_mon_peer); 5351 mon_pdev->is_dp_mon_pdev_initialized = false; 5352 5353 return QDF_STATUS_SUCCESS; 5354 } 5355 5356 QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev) 5357 { 5358 struct dp_mon_vdev *mon_vdev; 5359 struct dp_pdev *pdev = vdev->pdev; 5360 5361 mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev)); 5362 if (!mon_vdev) { 5363 dp_mon_err("%pK: Monitor vdev allocation failed", vdev); 5364 return QDF_STATUS_E_NOMEM; 5365 } 5366 5367 if (pdev && pdev->monitor_pdev && 5368 pdev->monitor_pdev->scan_spcl_vap_configured) 5369 dp_scan_spcl_vap_stats_attach(mon_vdev); 5370 5371 vdev->monitor_vdev = mon_vdev; 5372 5373 return QDF_STATUS_SUCCESS; 5374 } 5375 5376 QDF_STATUS 
dp_mon_vdev_detach(struct dp_vdev *vdev) 5377 { 5378 struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev; 5379 struct dp_pdev *pdev = vdev->pdev; 5380 struct dp_mon_ops *mon_ops = dp_mon_ops_get(pdev->soc); 5381 5382 if (!mon_ops) 5383 return QDF_STATUS_E_FAILURE; 5384 5385 if (!mon_vdev) 5386 return QDF_STATUS_E_FAILURE; 5387 5388 if (pdev->monitor_pdev->scan_spcl_vap_configured) 5389 dp_scan_spcl_vap_stats_detach(mon_vdev); 5390 5391 qdf_mem_free(mon_vdev); 5392 vdev->monitor_vdev = NULL; 5393 /* set mvdev to NULL only if detach is called for monitor/special vap 5394 */ 5395 if (pdev->monitor_pdev->mvdev == vdev) 5396 pdev->monitor_pdev->mvdev = NULL; 5397 5398 if (mon_ops->mon_lite_mon_vdev_delete) 5399 mon_ops->mon_lite_mon_vdev_delete(pdev, vdev); 5400 5401 return QDF_STATUS_SUCCESS; 5402 } 5403 5404 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE 5405 /** 5406 * dp_mon_peer_attach_notify() - Raise WDI event for peer create 5407 * @peer: DP Peer handle 5408 * 5409 * Return: none 5410 */ 5411 static inline 5412 void dp_mon_peer_attach_notify(struct dp_peer *peer) 5413 { 5414 struct dp_mon_peer *mon_peer = peer->monitor_peer; 5415 struct dp_pdev *pdev; 5416 struct dp_soc *soc; 5417 struct cdp_peer_cookie peer_cookie; 5418 5419 pdev = peer->vdev->pdev; 5420 soc = pdev->soc; 5421 5422 qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw, 5423 QDF_MAC_ADDR_SIZE); 5424 5425 peer_cookie.ctx = NULL; 5426 peer_cookie.pdev_id = pdev->pdev_id; 5427 peer_cookie.cookie = pdev->next_peer_cookie++; 5428 5429 dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, soc, 5430 (void *)&peer_cookie, 5431 peer->peer_id, WDI_NO_VAL, pdev->pdev_id); 5432 5433 if (soc->peerstats_enabled) { 5434 if (!peer_cookie.ctx) { 5435 pdev->next_peer_cookie--; 5436 qdf_err("Failed to initialize peer rate stats"); 5437 mon_peer->peerstats_ctx = NULL; 5438 } else { 5439 mon_peer->peerstats_ctx = 5440 (struct cdp_peer_rate_stats_ctx *) 5441 peer_cookie.ctx; 5442 } 5443 } 5444 } 5445 5446 /** 5447 * 
dp_mon_peer_detach_notify() - Raise WDI event for peer destroy 5448 * @peer: DP Peer handle 5449 * 5450 * Return: none 5451 */ 5452 static inline 5453 void dp_mon_peer_detach_notify(struct dp_peer *peer) 5454 { 5455 struct dp_mon_peer *mon_peer = peer->monitor_peer; 5456 struct dp_pdev *pdev; 5457 struct dp_soc *soc; 5458 struct cdp_peer_cookie peer_cookie; 5459 5460 pdev = peer->vdev->pdev; 5461 soc = pdev->soc; 5462 /* send peer destroy event to upper layer */ 5463 qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw, 5464 QDF_MAC_ADDR_SIZE); 5465 peer_cookie.ctx = NULL; 5466 peer_cookie.ctx = (struct cdp_stats_cookie *)mon_peer->peerstats_ctx; 5467 5468 dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY, 5469 soc, 5470 (void *)&peer_cookie, 5471 peer->peer_id, 5472 WDI_NO_VAL, 5473 pdev->pdev_id); 5474 5475 mon_peer->peerstats_ctx = NULL; 5476 } 5477 #else 5478 static inline 5479 void dp_mon_peer_attach_notify(struct dp_peer *peer) 5480 { 5481 peer->monitor_peer->peerstats_ctx = NULL; 5482 } 5483 5484 static inline 5485 void dp_mon_peer_detach_notify(struct dp_peer *peer) 5486 { 5487 peer->monitor_peer->peerstats_ctx = NULL; 5488 } 5489 #endif 5490 5491 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO) 5492 QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer) 5493 { 5494 struct dp_mon_peer *mon_peer; 5495 struct dp_pdev *pdev; 5496 5497 mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer)); 5498 if (!mon_peer) { 5499 dp_mon_err("%pK: MONITOR peer allocation failed", peer); 5500 return QDF_STATUS_E_NOMEM; 5501 } 5502 5503 peer->monitor_peer = mon_peer; 5504 pdev = peer->vdev->pdev; 5505 /* 5506 * In tx_monitor mode, filter may be set for unassociated peer 5507 * when unassociated peer get associated peer need to 5508 * update tx_cap_enabled flag to support peer filter. 
	 */
	dp_monitor_peer_tx_capture_filter_check(pdev, peer);

	/* seed stats; avg_snr starts at the "invalid" sentinel */
	DP_STATS_INIT(mon_peer);
	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);

	/* raise WDI peer-create event / set up the peer rate-stats ctx */
	dp_mon_peer_attach_notify(peer);

	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_mon_peer_detach() - Free the monitor context of a DP peer
 * @peer: DP Peer handle
 *
 * Raises the WDI peer-destroy notification before freeing. Safe to call
 * when no monitor peer was ever attached.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;

	if (!mon_peer)
		return QDF_STATUS_SUCCESS;

	dp_mon_peer_detach_notify(peer);

	qdf_mem_free(mon_peer);
	peer->monitor_peer = NULL;

	return QDF_STATUS_SUCCESS;
}

#ifndef DISABLE_MON_CONFIG
/**
 * dp_mon_register_intr_ops() - Register monitor interrupt handling ops
 * @soc: DP soc handle
 *
 * Delegates to the target-specific mon_register_intr_ops callback, if one
 * is provided in the monitor ops table.
 *
 * Return: none
 */
void dp_mon_register_intr_ops(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		return;
	}
	if (mon_ops->mon_register_intr_ops)
		mon_ops->mon_register_intr_ops(soc);
}
#endif

/**
 * dp_mon_peer_get_peerstats_ctx() - Get a peer's rate-stats context
 * @peer: DP Peer handle
 *
 * Return: the peer rate-stats context, or NULL when the peer has no
 *	   monitor peer context
 */
struct cdp_peer_rate_stats_ctx *dp_mon_peer_get_peerstats_ctx(struct
							      dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;

	if (mon_peer)
		return mon_peer->peerstats_ctx;
	else
		return NULL;
}

#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_mon_peer_reset_stats() - Clear a peer's monitor statistics
 * @peer: DP Peer handle
 *
 * Return: none
 */
void dp_mon_peer_reset_stats(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = NULL;

	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return;

	DP_STATS_CLR(mon_peer);
	/* avg_snr restarts from the "invalid" sentinel after a clear */
	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
}

/**
 * dp_mon_peer_get_stats() - Fold a peer's monitor stats into @arg
 * @peer: DP Peer handle
 * @arg: destination stats structure; its real type is selected by @type
 * @type: UPDATE_PEER_STATS (@arg is struct cdp_peer_stats *) or
 *	  UPDATE_VDEV_STATS (@arg is struct cdp_vdev_stats *)
 *
 * Return: none
 */
void dp_mon_peer_get_stats(struct dp_peer *peer, void *arg,
			   enum cdp_stat_update_type type)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_mon_peer_stats *mon_peer_stats;

	if (!mon_peer || !arg)
		return;

	mon_peer_stats = &mon_peer->stats;

	switch (type) {
	case UPDATE_PEER_STATS:
	{
		struct cdp_peer_stats *peer_stats =
				(struct cdp_peer_stats *)arg;
		DP_UPDATE_MON_STATS(peer_stats, mon_peer_stats);
		break;
	}
	case UPDATE_VDEV_STATS:
	{
		struct cdp_vdev_stats *vdev_stats =
				(struct cdp_vdev_stats *)arg;
		DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats);
		break;
	}
	default:
		dp_mon_err("Invalid stats_update_type");
	}
}

/**
 * dp_mon_invalid_peer_update_pdev_stats() - Fold the catch-all "invalid"
 * monitor peer's stats into the pdev stats
 * @pdev: DP pdev handle
 *
 * Return: none
 */
void dp_mon_invalid_peer_update_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_mon_peer *mon_peer;
	struct dp_mon_peer_stats *mon_peer_stats;
	struct cdp_pdev_stats *pdev_stats;

	if (!pdev || !pdev->monitor_pdev)
		return;

	mon_peer = pdev->monitor_pdev->invalid_mon_peer;
	if (!mon_peer)
		return;

	mon_peer_stats = &mon_peer->stats;
	pdev_stats = &pdev->stats;
	DP_UPDATE_MON_STATS(pdev_stats, mon_peer_stats);
}

/**
 * dp_mon_peer_get_stats_param() - Fetch a single monitor stat for a peer
 * @peer: DP Peer handle
 * @type: which statistic to fetch
 * @buf: out-parameter receiving the value
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE when the
 *	   peer has no monitor context or @type is unknown
 */
QDF_STATUS
dp_mon_peer_get_stats_param(struct dp_peer *peer, enum cdp_peer_stats_type type,
			    cdp_peer_stats_param_t *buf)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_mon_peer *mon_peer;

	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return QDF_STATUS_E_FAILURE;

	switch (type) {
	case cdp_peer_tx_rate:
		buf->tx_rate = mon_peer->stats.tx.tx_rate;
		break;
	case cdp_peer_tx_last_tx_rate:
		buf->last_tx_rate = mon_peer->stats.tx.last_tx_rate;
		break;
	case cdp_peer_tx_ratecode:
		buf->tx_ratecode = mon_peer->stats.tx.tx_ratecode;
		break;
	case cdp_peer_rx_rate:
		buf->rx_rate = mon_peer->stats.rx.rx_rate;
		break;
	case cdp_peer_rx_last_rx_rate:
		buf->last_rx_rate = mon_peer->stats.rx.last_rx_rate;
		break;
	case cdp_peer_rx_ratecode:
		buf->rx_ratecode = mon_peer->stats.rx.rx_ratecode;
		break;
	case cdp_peer_rx_avg_snr:
		buf->rx_avg_snr = mon_peer->stats.rx.avg_snr;
		break;
	case cdp_peer_rx_snr:
		buf->rx_snr = mon_peer->stats.rx.snr;
		break;
	default:
		dp_err("Invalid stats type requested");
		ret =
QDF_STATUS_E_FAILURE;
	}

	return ret;
}
#endif

/**
 * dp_mon_ops_register() - Install the target-specific monitor ops table
 * @soc: DP soc handle
 *
 * Selects the 1.0 or 2.0 (BE targets) monitor ops based on the HAL
 * target type; asserts on an unknown target.
 *
 * Return: none
 */
void dp_mon_ops_register(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	uint32_t target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCN9160:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		dp_mon_ops_register_1_0(mon_soc);
		break;
	case TARGET_TYPE_QCN9224:
	case TARGET_TYPE_QCA5332:
#ifdef QCA_MONITOR_2_0_SUPPORT
		dp_mon_ops_register_2_0(mon_soc);
#endif
		break;
	default:
		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
		qdf_assert_always(0);
		break;
	}
}

#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
/**
 * dp_mon_ops_free() - Free per-soc monitor ops tables
 * @soc: DP soc handle
 *
 * Only meaningful when ops tables are allocated per soc; otherwise the
 * tables are static and the non-per-soc variant below is a no-op.
 *
 * Return: none
 */
void dp_mon_ops_free(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;
	struct cdp_mon_ops *cdp_mon_ops = ops->mon_ops;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_ops *mon_ops = mon_soc->mon_ops;

	if (cdp_mon_ops)
		qdf_mem_free(cdp_mon_ops);

	if (mon_ops)
		qdf_mem_free(mon_ops);
}
#else
/* monitor ops are static tables in this configuration: nothing to free */
void dp_mon_ops_free(struct dp_soc *soc)
{
}
#endif

/**
 * dp_mon_cdp_ops_register() - Hook monitor entry points into the CDP ops
 * @soc: DP soc handle
 *
 * Installs target-specific CDP monitor ops (1.0 vs 2.0 families) plus the
 * target-independent monitor-mode, pktlog and enhanced-stats callbacks.
 *
 * Return: none
 */
void dp_mon_cdp_ops_register(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;
	uint32_t target_type;

	if (!ops) {
		dp_mon_err("cdp_ops is NULL");
		return;
	}

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCN9160:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		dp_mon_cdp_ops_register_1_0(ops);
#ifdef ATH_SUPPORT_NAC_RSSI
		ops->ctrl_ops->txrx_vdev_config_for_nac_rssi =
				dp_config_for_nac_rssi;
		ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
				dp_vdev_get_neighbour_rssi;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
		ops->ctrl_ops->txrx_update_filter_neighbour_peers =
				dp_update_filter_neighbour_peers;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
		dp_cfr_filter_register_1_0(ops);
#endif
		break;
	case TARGET_TYPE_QCN9224:
	case TARGET_TYPE_QCA5332:
#ifdef QCA_MONITOR_2_0_SUPPORT
		dp_mon_cdp_ops_register_2_0(ops);
		/* BE targets route NAC config through lite monitor */
#ifdef ATH_SUPPORT_NAC_RSSI
		ops->ctrl_ops->txrx_vdev_config_for_nac_rssi =
				dp_lite_mon_config_nac_rssi_peer;
		ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
				dp_lite_mon_get_nac_peer_rssi;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
		ops->ctrl_ops->txrx_update_filter_neighbour_peers =
				dp_lite_mon_config_nac_peer;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
		dp_cfr_filter_register_2_0(ops);
#endif
#endif /* QCA_MONITOR_2_0_SUPPORT */
		break;
	default:
		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	/* target-independent monitor callbacks */
	ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode;
	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev =
				dp_get_mon_vdev_from_pdev_wifi3;
#ifdef DP_PEER_EXTENDED_API
	ops->misc_ops->pkt_log_init = dp_pkt_log_init;
	ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service;
	ops->misc_ops->pkt_log_exit = dp_pkt_log_exit;
#endif
	ops->ctrl_ops->enable_peer_based_pktlog =
				dp_enable_peer_based_pktlog;
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	ops->ctrl_ops->txrx_update_peer_pkt_capture_params =
				dp_peer_update_pkt_capture_params;
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef QCA_ENHANCED_STATS_SUPPORT
	ops->host_stats_ops->txrx_enable_enhanced_stats =
				dp_enable_enhanced_stats;
	ops->host_stats_ops->txrx_disable_enhanced_stats =
				dp_disable_enhanced_stats;
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = dp_get_pldev;
#endif
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
	ops->host_stats_ops->txrx_get_scan_spcl_vap_stats =
				dp_get_scan_spcl_vap_stats;
#endif
	return;
}

#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
/* per-soc tables: free the CDP monitor ops allocation as well */
static inline void
dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
{
	if (ops->mon_ops) {
		qdf_mem_free(ops->mon_ops);
		ops->mon_ops = NULL;
	}
}
#else
/* static tables: just drop the reference */
static inline void
dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
{
	ops->mon_ops = NULL;
}
#endif

/**
 * dp_mon_cdp_ops_deregister() - Remove monitor entry points from CDP ops
 * @soc: DP soc handle
 *
 * Reverses dp_mon_cdp_ops_register() by NULLing the installed callbacks.
 *
 * Return: none
 */
void dp_mon_cdp_ops_deregister(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;

	if (!ops) {
		dp_mon_err("cdp_ops is NULL");
		return;
	}

	dp_mon_cdp_mon_ops_deregister(ops);

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	ops->cfr_ops->txrx_cfr_filter = NULL;
#endif
	ops->cmn_drv_ops->txrx_set_monitor_mode = NULL;
	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL;
#ifdef DP_PEER_EXTENDED_API
	ops->misc_ops->pkt_log_init = NULL;
	ops->misc_ops->pkt_log_con_service = NULL;
	ops->misc_ops->pkt_log_exit = NULL;
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = NULL;
	ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = NULL;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	ops->ctrl_ops->txrx_update_filter_neighbour_peers = NULL;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
	ops->ctrl_ops->enable_peer_based_pktlog = NULL;
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL;
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef FEATURE_PERPKT_INFO
	ops->host_stats_ops->txrx_enable_enhanced_stats = NULL;
	ops->host_stats_ops->txrx_disable_enhanced_stats = NULL;
#endif /* FEATURE_PERPKT_INFO */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = NULL;
#endif
	return;
}

#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
/* clear the PPDU stats indication handler installed for WDI/pktlog */
static inline
void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler = NULL;
}
#else
/* handler never registered in this configuration: nothing to clear */
static inline
void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
{
}
#endif

#ifdef QCA_RSSI_DB2DBM
/*
 * dp_mon_compute_min_nf() - calculate the min nf value in the
 *                      active chains 20MHZ subbands.
 * computation: Need to calculate nfInDbm[][] to A_MIN(nfHwDbm[][])
 *              considering row index as active chains and column
 *              index as 20MHZ subbands per chain.
 * example: chain_mask = 0x07 (consider 3 active chains 0,1,2 index)
 *          BandWidth = 40MHZ (40MHZ includes two 20MHZ subbands so need to
 *          consider 0,1 index calculate min_nf value)
 *
 *@conv_params: cdp_rssi_dbm_conv_param_dp structure value
 *@min_nf: out-parameter receiving the minimum noise floor (dBm)
 *@chain_idx: active chain index in nfHwdbm array
 *
 * Return: QDF_STATUS_SUCCESS if value set successfully
 *         QDF_STATUS_E_INVAL false if error
 */
static QDF_STATUS
dp_mon_compute_min_nf(struct cdp_rssi_dbm_conv_param_dp *conv_params,
		      int8_t *min_nf, int chain_idx)
{
	int j;
	/* start from subband 0; narrow bandwidths need no further scan */
	*min_nf = conv_params->nf_hw_dbm[chain_idx][0];

	switch (conv_params->curr_bw) {
	case CHAN_WIDTH_20:
	case CHAN_WIDTH_5:
	case CHAN_WIDTH_10:
		break;
	case CHAN_WIDTH_40:
		for (j = 1; j < SUB40BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	case CHAN_WIDTH_80:
		for (j = 1; j < SUB80BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	case CHAN_WIDTH_160:
	case CHAN_WIDTH_80P80:
	case CHAN_WIDTH_165:
		for (j = 1; j < SUB160BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	case CHAN_WIDTH_160P160:
	case CHAN_WIDTH_320:
		for (j = 1; j < SUB320BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	default:
		dp_cdp_err("Invalid bandwidth %u", conv_params->curr_bw);
		return QDF_STATUS_E_INVAL;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_mon_pdev_params_rssi_dbm_conv() --> to set rssi in dbm conversion
 *                                     params into monitor pdev.
 *@cdp_soc: dp soc handle.
 *@params: cdp_rssi_db2dbm_param_dp structure value.
 *
 * Return: QDF_STATUS_SUCCESS if value set successfully
 *         QDF_STATUS_E_INVAL false if error
 */
QDF_STATUS
dp_mon_pdev_params_rssi_dbm_conv(struct cdp_soc_t *cdp_soc,
				 struct cdp_rssi_db2dbm_param_dp *params)
{
	struct cdp_rssi_db2dbm_param_dp *dp_rssi_params = params;
	uint8_t pdev_id = params->pdev_id;
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;
	struct cdp_rssi_temp_off_param_dp temp_off_param;
	struct cdp_rssi_dbm_conv_param_dp conv_params;
	int8_t min_nf = 0;
	int i;

	if (!soc->features.rssi_dbm_conv_support) {
		dp_cdp_err("rssi dbm conversion support is false");
		return QDF_STATUS_E_INVAL;
	}
	if (!pdev || !pdev->monitor_pdev) {
		dp_cdp_err("Invalid pdev_id %u", pdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	mon_pdev = pdev->monitor_pdev;
	mon_pdev->rssi_dbm_conv_support =
				soc->features.rssi_dbm_conv_support;

	if (dp_rssi_params->rssi_temp_off_present) {
		temp_off_param = dp_rssi_params->temp_off_param;
		mon_pdev->rssi_offsets.rssi_temp_offset =
				temp_off_param.rssi_temp_offset;
	}
	if (dp_rssi_params->rssi_dbm_info_present) {
		conv_params = dp_rssi_params->rssi_dbm_param;
		/* min_nf ends up as the minimum over all active chains,
		 * since each dp_mon_compute_min_nf call starts from the
		 * previous value only via its own chain's subband 0.
		 * NOTE(review): each call overwrites min_nf with that
		 * chain's minimum, so the final value is the LAST active
		 * chain's minimum — confirm this is the intent.
		 */
		for (i = 0; i < CDP_MAX_NUM_ANTENNA; i++) {
			if (conv_params.curr_rx_chainmask & (0x01 << i)) {
				if (QDF_STATUS_E_INVAL == dp_mon_compute_min_nf
					(&conv_params, &min_nf, i))
					return QDF_STATUS_E_INVAL;
			} else {
				continue;
			}
		}
		mon_pdev->rssi_offsets.xlna_bypass_offset =
					conv_params.xlna_bypass_offset;
		mon_pdev->rssi_offsets.xlna_bypass_threshold =
					conv_params.xlna_bypass_threshold;
		mon_pdev->rssi_offsets.xbar_config = conv_params.xbar_config;
		mon_pdev->rssi_offsets.min_nf_dbm = min_nf;
		mon_pdev->rssi_offsets.rssi_offset =
			mon_pdev->rssi_offsets.min_nf_dbm +
			mon_pdev->rssi_offsets.rssi_temp_offset;
	}
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_mon_intr_ops_deregister() - Clear monitor interrupt callbacks
 * @soc: DP soc handle
 *
 * Return: none
 */
void dp_mon_intr_ops_deregister(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	mon_soc->mon_rx_process = NULL;
	dp_mon_ppdu_stats_handler_deregister(mon_soc);
}

/**
 * dp_mon_feature_ops_deregister() - NULL out the optional monitor feature
 * callbacks in the soc's monitor ops table
 * @soc: DP soc handle
 *
 * Return: none
 */
void dp_mon_feature_ops_deregister(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);

	if (!mon_ops) {
		dp_err("mon_ops is NULL");
		return;
	}

	mon_ops->mon_config_debug_sniffer = NULL;
	mon_ops->mon_peer_tx_init = NULL;
	mon_ops->mon_peer_tx_cleanup = NULL;
	mon_ops->mon_htt_ppdu_stats_attach = NULL;
	mon_ops->mon_htt_ppdu_stats_detach = NULL;
	mon_ops->mon_print_pdev_rx_mon_stats = NULL;
	mon_ops->mon_set_bsscolor = NULL;
	mon_ops->mon_pdev_get_filter_ucast_data = NULL;
	mon_ops->mon_pdev_get_filter_mcast_data = NULL;
	mon_ops->mon_pdev_get_filter_non_data = NULL;
	mon_ops->mon_neighbour_peer_add_ast = NULL;
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_ppdu_stats_attach = NULL;
	mon_ops->mon_tx_ppdu_stats_detach = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_peer_tx_capture_filter_check = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
	mon_ops->mon_config_enh_tx_capture = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_config_enh_rx_capture = NULL;
#endif
#ifdef QCA_SUPPORT_BPR
	mon_ops->mon_set_bpr_enable = NULL;
#endif
#ifdef ATH_SUPPORT_NAC
	mon_ops->mon_set_filter_neigh_peers = NULL;
#endif
#ifdef WLAN_ATF_ENABLE
	mon_ops->mon_set_atf_stats_enable = NULL;
#endif
#ifdef FEATURE_NAC_RSSI
	mon_ops->mon_filter_neighbour_peer = NULL;
#endif
#ifdef QCA_MCOPY_SUPPORT
	mon_ops->mon_filter_setup_mcopy_mode = NULL;
	mon_ops->mon_filter_reset_mcopy_mode = NULL;
	mon_ops->mon_mcopy_check_deliver = NULL;
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_filter_setup_enhanced_stats = NULL;
	mon_ops->mon_tx_enable_enhanced_stats = NULL;
	mon_ops->mon_tx_disable_enhanced_stats = NULL;
	mon_ops->mon_ppdu_desc_deliver = NULL;
	mon_ops->mon_ppdu_desc_notify = NULL;
	mon_ops->mon_ppdu_stats_feat_enable_check = NULL;
#ifdef WLAN_FEATURE_11BE
	mon_ops->mon_tx_stats_update = NULL;
#endif
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	mon_ops->mon_filter_setup_smart_monitor = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
#endif
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_set_pktlog_wifi3 = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_full = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_full = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_lite = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_lite = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_cbf = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_cbf = NULL;
#ifdef BE_PKTLOG_SUPPORT
	mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
	mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
#endif
#endif
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
	mon_ops->mon_pktlogmod_exit = NULL;
#endif
	mon_ops->rx_hdr_length_set = NULL;
	mon_ops->rx_packet_length_set = NULL;
	mon_ops->rx_wmask_subscribe = NULL;
	mon_ops->rx_enable_mpdu_logging = NULL;
	mon_ops->rx_enable_fpmo = NULL;
	mon_ops->mon_neighbour_peers_detach = NULL;
	mon_ops->mon_vdev_set_monitor_mode_buf_rings = NULL;
	mon_ops->mon_vdev_set_monitor_mode_rings = NULL;
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_rx_stats_update = NULL;
	mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
	mon_ops->mon_rx_populate_ppdu_info = NULL;
#endif
}

/**
 * dp_mon_soc_attach() - Allocate and set up the soc monitor context
 * @soc: DP soc handle
 *
 * Allocates dp_mon_soc, then registers the monitor, interrupt, CDP and
 * feature ops for the detected target.
 *
 * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_FAILURE on NULL soc,
 *	   QDF_STATUS_E_NOMEM on allocation failure
 */
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
	if (!mon_soc) {
		dp_mon_err("%pK: mem allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	/* register monitor ops */
	soc->monitor_soc = mon_soc;
	dp_mon_ops_register(soc);
	dp_mon_register_intr_ops(soc);

	dp_mon_cdp_ops_register(soc);
	dp_mon_register_feature_ops(soc);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_mon_soc_detach() - Tear down and free the soc monitor context
 * @soc: DP soc handle
 *
 * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_FAILURE on NULL soc
 */
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = soc->monitor_soc;
	dp_monitor_vdev_timer_deinit(soc);
	dp_mon_cdp_ops_deregister(soc);
	soc->monitor_soc = NULL;
	qdf_mem_free(mon_soc);
	return QDF_STATUS_SUCCESS;
}