/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <dp_types.h>
#include "dp_rx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_mon_filter.h>
#include <dp_mon.h>
#include <dp_rx_mon.h>
#include <dp_internal.h>
#include "htt_ppdu_stats.h"
#include "dp_cal_client_api.h"
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#define DP_INTR_POLL_TIMER_MS 5
#define INVALID_FREE_BUFF 0xffffffff

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
#define MAX_STRING_LEN_PER_FIELD 6
#define DP_UNDECODED_ERR_LENGTH (MAX_STRING_LEN_PER_FIELD * CDP_PHYRX_ERR_MAX)
#endif

#ifdef QCA_MCOPY_SUPPORT
static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->mcopy_mode = M_COPY_DISABLED;
	mon_pdev->mvdev = NULL;
}

static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (mon_pdev->mcopy_mode) {
		cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
		if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
			cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
							  DP_FULL_MON_ENABLE);
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to reset M copy mode filters"));
		}
		mon_pdev->monitor_configured = false;
	}
}

static QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops;
	struct cdp_mon_ops *cdp_ops;

	if (mon_pdev->mvdev)
		return QDF_STATUS_E_RESOURCES;

	mon_pdev->mcopy_mode = val;
	mon_pdev->tx_sniffer_enable = 0;
	mon_pdev->monitor_configured = true;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx)) {
		if (mon_ops && mon_ops->mon_vdev_set_monitor_mode_rings)
			mon_ops->mon_vdev_set_monitor_mode_rings(pdev, true);
	}

	/*
	 * Setup the M copy mode filter.
	 */
	cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_ENABLE);
	dp_mon_filter_setup_mcopy_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set M_copy mode filters"));
		dp_mon_filter_reset_mcopy_mode(pdev);
		dp_pdev_disable_mcopy_code(pdev);
		return status;
	}

	if (!mon_pdev->pktlog_ppdu_stats)
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_SNIFFER,
					  pdev->pdev_id);

	return status;
}
#else
static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* QCA_MCOPY_SUPPORT */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
static QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->undecoded_metadata_capture) {
		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Undecoded capture filter reset failed"));
		}
	}
	mon_pdev->undecoded_metadata_capture = 0;
	return status;
}

static QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops;

	if (!mon_pdev->mvdev) {
		qdf_err("monitor vdev is not configured");
		return QDF_STATUS_E_RESOURCES;
	}

	mon_pdev->undecoded_metadata_capture = val;
	mon_pdev->monitor_configured = true;

	mon_ops = dp_mon_ops_get(pdev->soc);

	/* Setup the undecoded metadata capture mode filter. */
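	/*
	 * Added note: as elsewhere in this file, the setup call below only
	 * stages the undecoded-metadata filter; it takes effect when
	 * dp_mon_filter_update() pushes it to the target and is rolled back
	 * if that update fails.
	 */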
	dp_mon_filter_setup_undecoded_metadata_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set Undecoded capture filters"));
		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
		return status;
	}

	return status;
}
#else
static inline QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* QCA_UNDECODED_METADATA_SUPPORT */

QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
				 uint8_t pdev_id,
				 uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	qdf_spin_lock_bh(&mon_pdev->mon_lock);

	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_DISABLE);
	mon_pdev->mvdev = NULL;

	/*
	 * Lite monitor mode, smart monitor mode and monitor mode all use
	 * this API to reset their filters and disable the mode.
	 */
	if (mon_pdev->mcopy_mode) {
#if defined(QCA_MCOPY_SUPPORT)
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
#endif /* QCA_MCOPY_SUPPORT */
	} else if (special_monitor) {
#if defined(ATH_SUPPORT_NAC)
		dp_mon_filter_reset_smart_monitor(pdev);
#endif /* ATH_SUPPORT_NAC */
	} else if (mon_pdev->undecoded_metadata_capture) {
#ifdef QCA_UNDECODED_METADATA_SUPPORT
		dp_reset_undecoded_metadata_capture(pdev);
#endif
	} else {
		dp_mon_filter_reset_mon_mode(pdev);
	}
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
				   soc);
	}

	mon_pdev->monitor_configured = false;

	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_ADVANCE_MON_FILTER_SUPPORT
QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				   struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exist in a system, but only one can be up
	 * at any time.
	 */
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev || !pdev->monitor_pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	vdev = mon_pdev->mvdev;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		  pdev, pdev_id, soc, vdev);

	/* Check if the current pdev's monitor_vdev exists */
	if (!mon_pdev->mvdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "vdev=%pK", vdev);
		qdf_assert(vdev);
	}

	/* update filter mode, type in pdev structure */
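	/*
	 * Illustrative example (not part of the original source): a caller
	 * that only wants filter-pass (FP) management frames could fill the
	 * cdp_monitor_filter roughly as
	 *     filter_val->mode    = MON_FILTER_PASS;
	 *     filter_val->fp_mgmt = <mgmt subtype mask, target specific>;
	 * and leave the remaining FP and MO masks at zero.
	 */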
mon_pdev->mon_filter_mode = filter_val->mode; 298 mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt; 299 mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl; 300 mon_pdev->fp_data_filter = filter_val->fp_data; 301 mon_pdev->mo_mgmt_filter = filter_val->mo_mgmt; 302 mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl; 303 mon_pdev->mo_data_filter = filter_val->mo_data; 304 305 dp_mon_filter_setup_mon_mode(pdev); 306 status = dp_mon_filter_update(pdev); 307 if (status != QDF_STATUS_SUCCESS) { 308 dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode", 309 soc); 310 dp_mon_filter_reset_mon_mode(pdev); 311 } 312 313 return status; 314 } 315 #endif 316 317 QDF_STATUS 318 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf) 319 { 320 struct dp_pdev *pdev = 321 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, 322 pdev_id); 323 324 if (!pdev) 325 return QDF_STATUS_E_FAILURE; 326 327 dp_deliver_mgmt_frm(pdev, nbuf); 328 329 return QDF_STATUS_SUCCESS; 330 } 331 332 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS 333 /** 334 * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct 335 * @mon_vdev: Datapath mon VDEV handle 336 * 337 * Return: 0 on success, not 0 on failure 338 */ 339 static inline QDF_STATUS 340 dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev) 341 { 342 mon_vdev->scan_spcl_vap_stats = 343 qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats)); 344 345 if (!mon_vdev->scan_spcl_vap_stats) { 346 dp_mon_err("scan spcl vap stats attach fail"); 347 return QDF_STATUS_E_NOMEM; 348 } 349 350 return QDF_STATUS_SUCCESS; 351 } 352 353 /** 354 * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct 355 * @mon_vdev: Datapath mon VDEV handle 356 * 357 * Return: void 358 */ 359 static inline void 360 dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev) 361 { 362 if (mon_vdev->scan_spcl_vap_stats) { 363 qdf_mem_free(mon_vdev->scan_spcl_vap_stats); 364 mon_vdev->scan_spcl_vap_stats = NULL; 365 } 366 } 367 368 /** 369 * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats 370 * @vdev: Datapath VDEV handle 371 * 372 * Return: void 373 */ 374 static inline void 375 dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev) 376 { 377 struct dp_mon_vdev *mon_vdev; 378 struct dp_mon_pdev *mon_pdev; 379 380 mon_pdev = vdev->pdev->monitor_pdev; 381 if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable) 382 return; 383 384 mon_vdev = vdev->monitor_vdev; 385 if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) 386 return; 387 388 qdf_mem_zero(mon_vdev->scan_spcl_vap_stats, 389 sizeof(struct cdp_scan_spcl_vap_stats)); 390 } 391 392 /** 393 * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats 394 * @soc_hdl: Datapath soc handle 395 * @vdev_id: vdev id 396 * @stats: structure to hold spcl vap stats 397 * 398 * Return: 0 on success, not 0 on failure 399 */ 400 static QDF_STATUS 401 dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 402 struct cdp_scan_spcl_vap_stats *stats) 403 { 404 struct dp_mon_vdev *mon_vdev = NULL; 405 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 406 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 407 DP_MOD_ID_CDP); 408 409 if (!vdev || !stats) { 410 if (vdev) 411 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 412 return QDF_STATUS_E_INVAL; 413 } 414 415 mon_vdev = vdev->monitor_vdev; 416 if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) { 417 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 418 return QDF_STATUS_E_INVAL; 419 } 420 421 qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats, 422 
sizeof(struct cdp_scan_spcl_vap_stats)); 423 424 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 425 return QDF_STATUS_SUCCESS; 426 } 427 #else 428 static inline void 429 dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev) 430 { 431 } 432 433 static inline QDF_STATUS 434 dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev) 435 { 436 return QDF_STATUS_SUCCESS; 437 } 438 439 static inline void 440 dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev) 441 { 442 } 443 #endif 444 445 /** 446 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode 447 * @vdev_handle: Datapath VDEV handle 448 * @smart_monitor: Flag to denote if its smart monitor mode 449 * 450 * Return: 0 on success, not 0 on failure 451 */ 452 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc, 453 uint8_t vdev_id, 454 uint8_t special_monitor) 455 { 456 struct dp_soc *soc = (struct dp_soc *)dp_soc; 457 struct dp_pdev *pdev; 458 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 459 DP_MOD_ID_CDP); 460 QDF_STATUS status = QDF_STATUS_SUCCESS; 461 struct dp_mon_pdev *mon_pdev; 462 struct cdp_mon_ops *cdp_ops; 463 464 if (!vdev) 465 return QDF_STATUS_E_FAILURE; 466 467 pdev = vdev->pdev; 468 469 if (!pdev || !pdev->monitor_pdev) 470 return QDF_STATUS_E_FAILURE; 471 472 mon_pdev = pdev->monitor_pdev; 473 474 mon_pdev->mvdev = vdev; 475 476 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, 477 "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n", 478 pdev, pdev->pdev_id, pdev->soc, vdev); 479 480 /* 481 * do not configure monitor buf ring and filter for smart and 482 * lite monitor 483 * for smart monitor filters are added along with first NAC 484 * for lite monitor required configuration done through 485 * dp_set_pdev_param 486 */ 487 488 if (special_monitor) { 489 status = QDF_STATUS_SUCCESS; 490 goto fail; 491 } 492 493 if (mon_pdev->scan_spcl_vap_configured) 494 dp_reset_scan_spcl_vap_stats(vdev); 495 496 /*Check if current pdev's monitor_vdev exists */ 497 if (mon_pdev->monitor_configured) { 498 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 499 "monitor vap already created vdev=%pK\n", vdev); 500 status = QDF_STATUS_E_RESOURCES; 501 goto fail; 502 } 503 504 mon_pdev->monitor_configured = true; 505 506 cdp_ops = dp_mon_cdp_ops_get(soc); 507 if (cdp_ops && cdp_ops->soc_config_full_mon_mode) 508 cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev, 509 DP_FULL_MON_ENABLE); 510 dp_mon_filter_setup_mon_mode(pdev); 511 status = dp_mon_filter_update(pdev); 512 if (status != QDF_STATUS_SUCCESS) { 513 dp_cdp_err("%pK: Failed to reset monitor filters", soc); 514 dp_mon_filter_reset_mon_mode(pdev); 515 mon_pdev->monitor_configured = false; 516 mon_pdev->mvdev = NULL; 517 } 518 519 fail: 520 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 521 return status; 522 } 523 524 #ifdef QCA_TX_CAPTURE_SUPPORT 525 static QDF_STATUS 526 dp_config_tx_capture_mode(struct dp_pdev *pdev) 527 { 528 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 529 530 mon_pdev->tx_sniffer_enable = 1; 531 mon_pdev->monitor_configured = false; 532 533 if (!mon_pdev->pktlog_ppdu_stats) 534 dp_h2t_cfg_stats_msg_send(pdev, 535 DP_PPDU_STATS_CFG_SNIFFER, 536 pdev->pdev_id); 537 538 return QDF_STATUS_SUCCESS; 539 } 540 #else 541 #ifdef QCA_MCOPY_SUPPORT 542 static QDF_STATUS 543 dp_config_tx_capture_mode(struct dp_pdev *pdev) 544 { 545 return QDF_STATUS_E_INVAL; 546 } 547 #endif 548 #endif 549 550 #if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT) 551 QDF_STATUS 552 dp_config_debug_sniffer(struct dp_pdev *pdev, int 
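/* val: 0 disables sniffing, 1 enables Tx capture, 2/4 select M copy modes (per the switch below) */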
val) 553 { 554 QDF_STATUS status = QDF_STATUS_SUCCESS; 555 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 556 557 /* 558 * Note: The mirror copy mode cannot co-exist with any other 559 * monitor modes. Hence disabling the filter for this mode will 560 * reset the monitor destination ring filters. 561 */ 562 dp_reset_mcopy_mode(pdev); 563 switch (val) { 564 case 0: 565 mon_pdev->tx_sniffer_enable = 0; 566 mon_pdev->monitor_configured = false; 567 568 /* 569 * We don't need to reset the Rx monitor status ring or call 570 * the API dp_ppdu_ring_reset() if all debug sniffer mode is 571 * disabled. The Rx monitor status ring will be disabled when 572 * the last mode using the monitor status ring get disabled. 573 */ 574 if (!mon_pdev->pktlog_ppdu_stats && 575 !mon_pdev->enhanced_stats_en && 576 !mon_pdev->bpr_enable) { 577 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); 578 } else if (mon_pdev->enhanced_stats_en && 579 !mon_pdev->bpr_enable) { 580 dp_h2t_cfg_stats_msg_send(pdev, 581 DP_PPDU_STATS_CFG_ENH_STATS, 582 pdev->pdev_id); 583 } else if (!mon_pdev->enhanced_stats_en && 584 mon_pdev->bpr_enable) { 585 dp_h2t_cfg_stats_msg_send(pdev, 586 DP_PPDU_STATS_CFG_BPR_ENH, 587 pdev->pdev_id); 588 } else { 589 dp_h2t_cfg_stats_msg_send(pdev, 590 DP_PPDU_STATS_CFG_BPR, 591 pdev->pdev_id); 592 } 593 break; 594 595 case 1: 596 status = dp_config_tx_capture_mode(pdev); 597 break; 598 case 2: 599 case 4: 600 status = dp_config_mcopy_mode(pdev, val); 601 break; 602 603 default: 604 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 605 "Invalid value, mode not supported"); 606 status = QDF_STATUS_E_INVAL; 607 break; 608 } 609 return status; 610 } 611 #endif 612 613 #ifdef QCA_UNDECODED_METADATA_SUPPORT 614 QDF_STATUS 615 dp_mon_config_undecoded_metadata_capture(struct dp_pdev *pdev, int val) 616 { 617 QDF_STATUS status = QDF_STATUS_SUCCESS; 618 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 619 620 if (!mon_pdev->mvdev && !mon_pdev->scan_spcl_vap_configured) { 621 qdf_err("No monitor or Special vap, undecoded capture not supported"); 622 return QDF_STATUS_E_RESOURCES; 623 } 624 625 if (val) 626 status = dp_enable_undecoded_metadata_capture(pdev, val); 627 else 628 status = dp_reset_undecoded_metadata_capture(pdev); 629 630 return status; 631 } 632 #endif 633 634 /** 635 * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer 636 * ring based on target 637 * @soc: soc handle 638 * @mac_for_pdev: WIN- pdev_id, MCL- mac id 639 * @pdev: physical device handle 640 * @ring_num: mac id 641 * @htt_tlv_filter: tlv filter 642 * 643 * Return: zero on success, non-zero on failure 644 */ 645 static inline QDF_STATUS 646 dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev, 647 struct dp_pdev *pdev, uint8_t ring_num, 648 struct htt_rx_ring_tlv_filter htt_tlv_filter) 649 { 650 QDF_STATUS status; 651 652 if (soc->wlan_cfg_ctx->rxdma1_enable) 653 status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, 654 soc->rxdma_mon_buf_ring[ring_num] 655 .hal_srng, 656 RXDMA_MONITOR_BUF, 657 RX_MONITOR_BUFFER_SIZE, 658 &htt_tlv_filter); 659 else 660 status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, 661 pdev->rx_mac_buf_ring[ring_num] 662 .hal_srng, 663 RXDMA_BUF, RX_DATA_BUFFER_SIZE, 664 &htt_tlv_filter); 665 666 return status; 667 } 668 669 /** 670 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode 671 * @soc_hdl: datapath soc handle 672 * @pdev_id: physical device instance id 673 * 674 * Return: virtual interface id 675 */ 676 static uint8_t 
dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl, 677 uint8_t pdev_id) 678 { 679 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 680 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 681 682 if (qdf_unlikely(!pdev || !pdev->monitor_pdev || 683 !pdev->monitor_pdev->mvdev)) 684 return -EINVAL; 685 686 return pdev->monitor_pdev->mvdev->vdev_id; 687 } 688 689 #if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT) 690 #ifndef WLAN_TX_PKT_CAPTURE_ENH 691 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf) 692 { 693 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 694 695 if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) { 696 dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc, 697 nbuf, HTT_INVALID_PEER, 698 WDI_NO_VAL, pdev->pdev_id); 699 } else { 700 if (!mon_pdev->bpr_enable) 701 qdf_nbuf_free(nbuf); 702 } 703 } 704 #endif 705 #endif 706 707 QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev) 708 { 709 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 710 711 mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE); 712 713 if (!mon_pdev->ppdu_tlv_buf) { 714 QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail"); 715 return QDF_STATUS_E_NOMEM; 716 } 717 718 return QDF_STATUS_SUCCESS; 719 } 720 721 void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev) 722 { 723 struct ppdu_info *ppdu_info, *ppdu_info_next; 724 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 725 726 727 TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list, 728 ppdu_info_list_elem, ppdu_info_next) { 729 if (!ppdu_info) 730 break; 731 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, 732 ppdu_info, ppdu_info_list_elem); 733 mon_pdev->list_depth--; 734 qdf_assert_always(ppdu_info->nbuf); 735 qdf_nbuf_free(ppdu_info->nbuf); 736 qdf_mem_free(ppdu_info); 737 } 738 739 TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list, 740 ppdu_info_list_elem, ppdu_info_next) { 741 if (!ppdu_info) 742 break; 743 TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, 744 ppdu_info, ppdu_info_list_elem); 745 mon_pdev->sched_comp_list_depth--; 746 qdf_assert_always(ppdu_info->nbuf); 747 qdf_nbuf_free(ppdu_info->nbuf); 748 qdf_mem_free(ppdu_info); 749 } 750 751 if (mon_pdev->ppdu_tlv_buf) 752 qdf_mem_free(mon_pdev->ppdu_tlv_buf); 753 } 754 755 QDF_STATUS dp_pdev_get_rx_mon_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 756 struct cdp_pdev_mon_stats *stats) 757 { 758 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 759 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 760 struct dp_mon_pdev *mon_pdev; 761 762 if (!pdev) 763 return QDF_STATUS_E_FAILURE; 764 765 mon_pdev = pdev->monitor_pdev; 766 if (!mon_pdev) 767 return QDF_STATUS_E_FAILURE; 768 769 qdf_mem_copy(stats, &mon_pdev->rx_mon_stats, 770 sizeof(struct cdp_pdev_mon_stats)); 771 772 return QDF_STATUS_SUCCESS; 773 } 774 775 #ifdef QCA_UNDECODED_METADATA_SUPPORT 776 /** 777 * dp_pdev_get_undecoded_capture_stats() - Get undecoded metadata captured 778 * monitor pdev stats 779 * @mon_pdev: Monitor PDEV handle 780 * @rx_mon_stats: Monitor pdev status/destination ring stats 781 * 782 * Return: None 783 */ 784 static inline void 785 dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev, 786 struct cdp_pdev_mon_stats *rx_mon_stats) 787 { 788 char undecoded_error[DP_UNDECODED_ERR_LENGTH]; 789 uint8_t index = 0, i; 790 791 DP_PRINT_STATS("Rx Undecoded Frame count:%d", 792 rx_mon_stats->rx_undecoded_count); 793 index = 0; 794 for (i = 0; i < (CDP_PHYRX_ERR_MAX); i++) { 795 index += 
qdf_snprint(&undecoded_error[index], 796 DP_UNDECODED_ERR_LENGTH - index, 797 " %d", rx_mon_stats->rx_undecoded_error[i]); 798 } 799 DP_PRINT_STATS("Undecoded Error (0-63):%s", undecoded_error); 800 } 801 #else 802 static inline void 803 dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev, 804 struct cdp_pdev_mon_stats *rx_mon_stats) 805 { 806 } 807 #endif 808 809 void 810 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev) 811 { 812 struct cdp_pdev_mon_stats *rx_mon_stats; 813 uint32_t *stat_ring_ppdu_ids; 814 uint32_t *dest_ring_ppdu_ids; 815 int i, idx; 816 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 817 818 rx_mon_stats = &mon_pdev->rx_mon_stats; 819 820 DP_PRINT_STATS("PDEV Rx Monitor Stats:\n"); 821 822 DP_PRINT_STATS("status_ppdu_compl_cnt = %d", 823 rx_mon_stats->status_ppdu_compl); 824 DP_PRINT_STATS("status_ppdu_start_cnt = %d", 825 rx_mon_stats->status_ppdu_start); 826 DP_PRINT_STATS("status_ppdu_end_cnt = %d", 827 rx_mon_stats->status_ppdu_end); 828 DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d", 829 rx_mon_stats->status_ppdu_start_mis); 830 DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d", 831 rx_mon_stats->status_ppdu_end_mis); 832 DP_PRINT_STATS("status_ppdu_done_cnt = %d", 833 rx_mon_stats->status_ppdu_done); 834 DP_PRINT_STATS("dest_ppdu_done_cnt = %d", 835 rx_mon_stats->dest_ppdu_done); 836 DP_PRINT_STATS("dest_mpdu_done_cnt = %d", 837 rx_mon_stats->dest_mpdu_done); 838 DP_PRINT_STATS("tlv_tag_status_err_cnt = %u", 839 rx_mon_stats->tlv_tag_status_err); 840 DP_PRINT_STATS("mon status DMA not done WAR count= %u", 841 rx_mon_stats->status_buf_done_war); 842 DP_PRINT_STATS("dest_mpdu_drop_cnt = %d", 843 rx_mon_stats->dest_mpdu_drop); 844 DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d", 845 rx_mon_stats->dup_mon_linkdesc_cnt); 846 DP_PRINT_STATS("dup_mon_buf_cnt = %d", 847 rx_mon_stats->dup_mon_buf_cnt); 848 DP_PRINT_STATS("mon_rx_buf_reaped = %u", 849 rx_mon_stats->mon_rx_bufs_reaped_dest); 850 DP_PRINT_STATS("mon_rx_buf_replenished = %u", 851 rx_mon_stats->mon_rx_bufs_replenished_dest); 852 DP_PRINT_STATS("ppdu_id_mismatch = %u", 853 rx_mon_stats->ppdu_id_mismatch); 854 DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d", 855 rx_mon_stats->ppdu_id_match); 856 DP_PRINT_STATS("ppdus dropped frm status ring = %d", 857 rx_mon_stats->status_ppdu_drop); 858 DP_PRINT_STATS("ppdus dropped frm dest ring = %d", 859 rx_mon_stats->dest_ppdu_drop); 860 stat_ring_ppdu_ids = 861 (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST); 862 dest_ring_ppdu_ids = 863 (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST); 864 865 if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) 866 DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n"); 867 868 qdf_spin_lock_bh(&mon_pdev->mon_lock); 869 idx = rx_mon_stats->ppdu_id_hist_idx; 870 qdf_mem_copy(stat_ring_ppdu_ids, 871 rx_mon_stats->stat_ring_ppdu_id_hist, 872 sizeof(uint32_t) * MAX_PPDU_ID_HIST); 873 qdf_mem_copy(dest_ring_ppdu_ids, 874 rx_mon_stats->dest_ring_ppdu_id_hist, 875 sizeof(uint32_t) * MAX_PPDU_ID_HIST); 876 qdf_spin_unlock_bh(&mon_pdev->mon_lock); 877 878 DP_PRINT_STATS("PPDU Id history:"); 879 DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids"); 880 for (i = 0; i < MAX_PPDU_ID_HIST; i++) { 881 idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1); 882 DP_PRINT_STATS("%*u\t%*u", 16, 883 rx_mon_stats->stat_ring_ppdu_id_hist[idx], 16, 884 rx_mon_stats->dest_ring_ppdu_id_hist[idx]); 885 } 886 qdf_mem_free(stat_ring_ppdu_ids); 887 qdf_mem_free(dest_ring_ppdu_ids); 888 DP_PRINT_STATS("mon_rx_dest_stuck = %d", 889 
rx_mon_stats->mon_rx_dest_stuck); 890 891 dp_pdev_get_undecoded_capture_stats(mon_pdev, rx_mon_stats); 892 } 893 894 #ifdef QCA_SUPPORT_BPR 895 QDF_STATUS 896 dp_set_bpr_enable(struct dp_pdev *pdev, int val) 897 { 898 struct dp_mon_ops *mon_ops; 899 900 mon_ops = dp_mon_ops_get(pdev->soc); 901 if (mon_ops && mon_ops->mon_set_bpr_enable) 902 return mon_ops->mon_set_bpr_enable(pdev, val); 903 904 return QDF_STATUS_E_FAILURE; 905 } 906 #endif 907 908 #ifdef WDI_EVENT_ENABLE 909 #ifdef BE_PKTLOG_SUPPORT 910 static bool 911 dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev, 912 struct dp_mon_pdev *mon_pdev, 913 struct dp_mon_soc *mon_soc) 914 { 915 if (mon_pdev->mvdev) { 916 /* Nothing needs to be done if monitor mode is 917 * enabled 918 */ 919 mon_pdev->pktlog_hybrid_mode = true; 920 return false; 921 } 922 923 if (!mon_pdev->pktlog_hybrid_mode) { 924 mon_pdev->pktlog_hybrid_mode = true; 925 dp_mon_filter_setup_pktlog_hybrid(pdev); 926 if (dp_mon_filter_update(pdev) != 927 QDF_STATUS_SUCCESS) { 928 dp_cdp_err("Set hybrid filters failed"); 929 dp_mon_filter_reset_pktlog_hybrid(pdev); 930 mon_pdev->rx_pktlog_mode = 931 DP_RX_PKTLOG_DISABLED; 932 return false; 933 } 934 935 if (mon_soc->reap_timer_init && 936 !dp_mon_is_enable_reap_timer_non_pkt(pdev)) 937 qdf_timer_mod(&mon_soc->mon_reap_timer, 938 DP_INTR_POLL_TIMER_MS); 939 } 940 941 return true; 942 } 943 944 static void 945 dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev) 946 { 947 mon_pdev->pktlog_hybrid_mode = false; 948 } 949 #else 950 static void 951 dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev) 952 { 953 } 954 955 static bool 956 dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev, 957 struct dp_mon_pdev *mon_pdev, 958 struct dp_mon_soc *mon_soc) 959 { 960 dp_cdp_err("Hybrid mode is supported only on beryllium"); 961 return true; 962 } 963 #endif 964 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, 965 bool enable) 966 { 967 struct dp_soc *soc = NULL; 968 int max_mac_rings = wlan_cfg_get_num_mac_rings 969 (pdev->wlan_cfg_ctx); 970 uint8_t mac_id = 0; 971 struct dp_mon_soc *mon_soc; 972 struct dp_mon_ops *mon_ops; 973 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 974 975 soc = pdev->soc; 976 mon_soc = soc->monitor_soc; 977 mon_ops = dp_mon_ops_get(soc); 978 979 if (!mon_ops) 980 return 0; 981 982 dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings); 983 984 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 985 FL("Max_mac_rings %d "), 986 max_mac_rings); 987 988 if (enable) { 989 switch (event) { 990 case WDI_EVENT_RX_DESC: 991 if (mon_pdev->mvdev) { 992 /* Nothing needs to be done if monitor mode is 993 * enabled 994 */ 995 mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL; 996 return 0; 997 } 998 999 if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) { 1000 mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL; 1001 dp_mon_filter_setup_rx_pkt_log_full(pdev); 1002 if (dp_mon_filter_update(pdev) != 1003 QDF_STATUS_SUCCESS) { 1004 dp_cdp_err("%pK: Pktlog full filters set failed", soc); 1005 dp_mon_filter_reset_rx_pkt_log_full(pdev); 1006 mon_pdev->rx_pktlog_mode = 1007 DP_RX_PKTLOG_DISABLED; 1008 return 0; 1009 } 1010 1011 if (mon_soc->reap_timer_init && 1012 (!dp_mon_is_enable_reap_timer_non_pkt(pdev))) 1013 qdf_timer_mod(&mon_soc->mon_reap_timer, 1014 DP_INTR_POLL_TIMER_MS); 1015 } 1016 break; 1017 1018 case WDI_EVENT_LITE_RX: 1019 if (mon_pdev->mvdev) { 1020 /* Nothing needs to be done if monitor mode is 1021 * enabled 1022 */ 1023 mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE; 1024 return 0; 1025 } 1026 if 
(mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) { 1027 mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE; 1028 1029 /* 1030 * Set the packet log lite mode filter. 1031 */ 1032 dp_mon_filter_setup_rx_pkt_log_lite(pdev); 1033 if (dp_mon_filter_update(pdev) != 1034 QDF_STATUS_SUCCESS) { 1035 dp_cdp_err("%pK: Pktlog lite filters set failed", soc); 1036 dp_mon_filter_reset_rx_pkt_log_lite(pdev); 1037 mon_pdev->rx_pktlog_mode = 1038 DP_RX_PKTLOG_DISABLED; 1039 return 0; 1040 } 1041 1042 if (mon_soc->reap_timer_init && 1043 (!dp_mon_is_enable_reap_timer_non_pkt(pdev))) 1044 qdf_timer_mod(&mon_soc->mon_reap_timer, 1045 DP_INTR_POLL_TIMER_MS); 1046 } 1047 break; 1048 1049 case WDI_EVENT_LITE_T2H: 1050 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { 1051 int mac_for_pdev = dp_get_mac_id_for_pdev( 1052 mac_id, pdev->pdev_id); 1053 1054 mon_pdev->pktlog_ppdu_stats = true; 1055 dp_h2t_cfg_stats_msg_send(pdev, 1056 DP_PPDU_TXLITE_STATS_BITMASK_CFG, 1057 mac_for_pdev); 1058 } 1059 break; 1060 1061 case WDI_EVENT_RX_CBF: 1062 if (mon_pdev->mvdev) { 1063 /* Nothing needs to be done if monitor mode is 1064 * enabled 1065 */ 1066 dp_mon_info("Mon mode, CBF setting filters"); 1067 mon_pdev->rx_pktlog_cbf = true; 1068 return 0; 1069 } 1070 if (!mon_pdev->rx_pktlog_cbf) { 1071 mon_pdev->rx_pktlog_cbf = true; 1072 mon_pdev->monitor_configured = true; 1073 if (mon_ops->mon_vdev_set_monitor_mode_buf_rings) 1074 mon_ops->mon_vdev_set_monitor_mode_buf_rings(pdev); 1075 /* 1076 * Set the packet log lite mode filter. 1077 */ 1078 qdf_info("Non mon mode: Enable destination ring"); 1079 1080 dp_mon_filter_setup_rx_pkt_log_cbf(pdev); 1081 if (dp_mon_filter_update(pdev) != 1082 QDF_STATUS_SUCCESS) { 1083 dp_mon_err("Pktlog set CBF filters failed"); 1084 dp_mon_filter_reset_rx_pktlog_cbf(pdev); 1085 mon_pdev->rx_pktlog_mode = 1086 DP_RX_PKTLOG_DISABLED; 1087 mon_pdev->monitor_configured = false; 1088 return 0; 1089 } 1090 1091 if (mon_soc->reap_timer_init && 1092 !dp_mon_is_enable_reap_timer_non_pkt(pdev)) 1093 qdf_timer_mod(&mon_soc->mon_reap_timer, 1094 DP_INTR_POLL_TIMER_MS); 1095 } 1096 break; 1097 1098 case WDI_EVENT_HYBRID_TX: 1099 if (!dp_set_hybrid_pktlog_enable(pdev, 1100 mon_pdev, mon_soc)) 1101 return 0; 1102 break; 1103 1104 default: 1105 /* Nothing needs to be done for other pktlog types */ 1106 break; 1107 } 1108 } else { 1109 switch (event) { 1110 case WDI_EVENT_RX_DESC: 1111 case WDI_EVENT_LITE_RX: 1112 if (mon_pdev->mvdev) { 1113 /* Nothing needs to be done if monitor mode is 1114 * enabled 1115 */ 1116 mon_pdev->rx_pktlog_mode = 1117 DP_RX_PKTLOG_DISABLED; 1118 return 0; 1119 } 1120 if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) { 1121 mon_pdev->rx_pktlog_mode = 1122 DP_RX_PKTLOG_DISABLED; 1123 dp_mon_filter_reset_rx_pkt_log_full(pdev); 1124 if (dp_mon_filter_update(pdev) != 1125 QDF_STATUS_SUCCESS) { 1126 dp_cdp_err("%pK: Pktlog filters reset failed", soc); 1127 return 0; 1128 } 1129 1130 dp_mon_filter_reset_rx_pkt_log_lite(pdev); 1131 if (dp_mon_filter_update(pdev) != 1132 QDF_STATUS_SUCCESS) { 1133 dp_cdp_err("%pK: Pktlog filters reset failed", soc); 1134 return 0; 1135 } 1136 1137 if (mon_soc->reap_timer_init && 1138 (!dp_mon_is_enable_reap_timer_non_pkt(pdev))) 1139 qdf_timer_stop(&mon_soc->mon_reap_timer); 1140 } 1141 break; 1142 case WDI_EVENT_LITE_T2H: 1143 /* 1144 * To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW 1145 * passing value 0. 
Once these macros will define in htt 1146 * header file will use proper macros 1147 */ 1148 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { 1149 int mac_for_pdev = 1150 dp_get_mac_id_for_pdev(mac_id, 1151 pdev->pdev_id); 1152 1153 mon_pdev->pktlog_ppdu_stats = false; 1154 if (!mon_pdev->enhanced_stats_en && 1155 !mon_pdev->tx_sniffer_enable && 1156 !mon_pdev->mcopy_mode) { 1157 dp_h2t_cfg_stats_msg_send(pdev, 0, 1158 mac_for_pdev); 1159 } else if (mon_pdev->tx_sniffer_enable || 1160 mon_pdev->mcopy_mode) { 1161 dp_h2t_cfg_stats_msg_send(pdev, 1162 DP_PPDU_STATS_CFG_SNIFFER, 1163 mac_for_pdev); 1164 } else if (mon_pdev->enhanced_stats_en) { 1165 dp_h2t_cfg_stats_msg_send(pdev, 1166 DP_PPDU_STATS_CFG_ENH_STATS, 1167 mac_for_pdev); 1168 } 1169 } 1170 1171 break; 1172 case WDI_EVENT_RX_CBF: 1173 mon_pdev->rx_pktlog_cbf = false; 1174 break; 1175 1176 case WDI_EVENT_HYBRID_TX: 1177 dp_set_hybrid_pktlog_disable(mon_pdev); 1178 break; 1179 1180 default: 1181 /* Nothing needs to be done for other pktlog types */ 1182 break; 1183 } 1184 } 1185 return 0; 1186 } 1187 #endif 1188 1189 /* MCL specific functions */ 1190 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG) 1191 void dp_pktlogmod_exit(struct dp_pdev *pdev) 1192 { 1193 struct dp_soc *soc = pdev->soc; 1194 struct hif_opaque_softc *scn = soc->hif_handle; 1195 struct dp_mon_soc *mon_soc = soc->monitor_soc; 1196 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1197 1198 if (!scn) { 1199 dp_mon_err("Invalid hif(scn) handle"); 1200 return; 1201 } 1202 1203 /* stop mon_reap_timer if it has been started */ 1204 if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED && 1205 mon_soc->reap_timer_init && 1206 (!dp_mon_is_enable_reap_timer_non_pkt(pdev))) 1207 qdf_timer_sync_cancel(&mon_soc->mon_reap_timer); 1208 1209 pktlogmod_exit(scn); 1210 mon_pdev->pkt_log_init = false; 1211 } 1212 #endif /*DP_CON_MON*/ 1213 1214 #if defined(WDI_EVENT_ENABLE) && defined(QCA_ENHANCED_STATS_SUPPORT) 1215 QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer) 1216 { 1217 struct cdp_interface_peer_stats peer_stats_intf; 1218 struct dp_mon_peer_stats *mon_peer_stats = NULL; 1219 struct dp_peer *tgt_peer = NULL; 1220 struct dp_txrx_peer *txrx_peer = NULL; 1221 1222 if (!peer || !peer->vdev || !peer->monitor_peer) 1223 return QDF_STATUS_E_FAULT; 1224 1225 tgt_peer = dp_get_tgt_peer_from_peer(peer); 1226 if (!tgt_peer) 1227 return QDF_STATUS_E_FAULT; 1228 1229 txrx_peer = tgt_peer->txrx_peer; 1230 if (!txrx_peer) 1231 return QDF_STATUS_E_FAULT; 1232 1233 mon_peer_stats = &peer->monitor_peer->stats; 1234 1235 qdf_mem_zero(&peer_stats_intf, sizeof(peer_stats_intf)); 1236 if (mon_peer_stats->rx.last_snr != mon_peer_stats->rx.snr) 1237 peer_stats_intf.rssi_changed = true; 1238 1239 if ((mon_peer_stats->rx.snr && peer_stats_intf.rssi_changed) || 1240 (mon_peer_stats->tx.tx_rate && 1241 mon_peer_stats->tx.tx_rate != mon_peer_stats->tx.last_tx_rate)) { 1242 qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw, 1243 QDF_MAC_ADDR_SIZE); 1244 peer_stats_intf.vdev_id = peer->vdev->vdev_id; 1245 peer_stats_intf.last_peer_tx_rate = 1246 mon_peer_stats->tx.last_tx_rate; 1247 peer_stats_intf.peer_tx_rate = mon_peer_stats->tx.tx_rate; 1248 peer_stats_intf.peer_rssi = mon_peer_stats->rx.snr; 1249 peer_stats_intf.ack_rssi = mon_peer_stats->tx.last_ack_rssi; 1250 peer_stats_intf.rx_packet_count = txrx_peer->to_stack.num; 1251 peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes; 1252 peer_stats_intf.tx_packet_count = 1253 
txrx_peer->stats.per_pkt_stats.tx.ucast.num; 1254 peer_stats_intf.tx_byte_count = 1255 txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes; 1256 peer_stats_intf.per = tgt_peer->stats.tx.last_per; 1257 peer_stats_intf.free_buff = INVALID_FREE_BUFF; 1258 dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc, 1259 (void *)&peer_stats_intf, 0, 1260 WDI_NO_VAL, dp_pdev->pdev_id); 1261 } 1262 1263 return QDF_STATUS_SUCCESS; 1264 } 1265 #endif 1266 1267 #ifdef FEATURE_NAC_RSSI 1268 /** 1269 * dp_rx_nac_filter(): Function to perform filtering of non-associated 1270 * clients 1271 * @pdev: DP pdev handle 1272 * @rx_pkt_hdr: Rx packet Header 1273 * 1274 * return: dp_vdev* 1275 */ 1276 static 1277 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev, 1278 uint8_t *rx_pkt_hdr) 1279 { 1280 struct ieee80211_frame *wh; 1281 struct dp_neighbour_peer *peer = NULL; 1282 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1283 1284 wh = (struct ieee80211_frame *)rx_pkt_hdr; 1285 1286 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS) 1287 return NULL; 1288 1289 qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex); 1290 TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list, 1291 neighbour_peer_list_elem) { 1292 if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], 1293 wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) { 1294 dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x", 1295 pdev->soc, 1296 peer->neighbour_peers_macaddr.raw[0], 1297 peer->neighbour_peers_macaddr.raw[1], 1298 peer->neighbour_peers_macaddr.raw[2], 1299 peer->neighbour_peers_macaddr.raw[3], 1300 peer->neighbour_peers_macaddr.raw[4], 1301 peer->neighbour_peers_macaddr.raw[5]); 1302 1303 qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1304 1305 return mon_pdev->mvdev; 1306 } 1307 } 1308 qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1309 1310 return NULL; 1311 } 1312 1313 QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev, 1314 uint8_t *rx_pkt_hdr) 1315 { 1316 struct dp_vdev *vdev = NULL; 1317 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1318 1319 if (mon_pdev->filter_neighbour_peers) { 1320 /* Next Hop scenario not yet handle */ 1321 vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr); 1322 if (vdev) { 1323 dp_rx_mon_deliver(pdev->soc, pdev->pdev_id, 1324 pdev->invalid_peer_head_msdu, 1325 pdev->invalid_peer_tail_msdu); 1326 1327 pdev->invalid_peer_head_msdu = NULL; 1328 pdev->invalid_peer_tail_msdu = NULL; 1329 return QDF_STATUS_SUCCESS; 1330 } 1331 } 1332 1333 return QDF_STATUS_E_FAILURE; 1334 } 1335 #endif 1336 1337 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) 1338 /* 1339 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients) 1340 * address for smart mesh filtering 1341 * @txrx_soc: cdp soc handle 1342 * @vdev_id: id of virtual device object 1343 * @cmd: Add/Del command 1344 * @macaddr: nac client mac address 1345 * 1346 * Return: success/failure 1347 */ 1348 static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl, 1349 uint8_t vdev_id, 1350 uint32_t cmd, uint8_t *macaddr) 1351 { 1352 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 1353 struct dp_pdev *pdev; 1354 struct dp_neighbour_peer *peer = NULL; 1355 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 1356 DP_MOD_ID_CDP); 1357 struct dp_mon_pdev *mon_pdev; 1358 1359 if (!vdev || !macaddr) 1360 goto fail0; 1361 1362 pdev = vdev->pdev; 1363 1364 if (!pdev) 1365 goto fail0; 1366 1367 mon_pdev = pdev->monitor_pdev; 1368 1369 /* Store address of NAC (neighbour peer) which will be checked 
1370 * against TA of received packets. 1371 */ 1372 if (cmd == DP_NAC_PARAM_ADD) { 1373 peer = (struct dp_neighbour_peer *)qdf_mem_malloc( 1374 sizeof(*peer)); 1375 1376 if (!peer) { 1377 dp_cdp_err("%pK: DP neighbour peer node memory allocation failed" 1378 , soc); 1379 goto fail0; 1380 } 1381 1382 qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0], 1383 macaddr, QDF_MAC_ADDR_SIZE); 1384 peer->vdev = vdev; 1385 1386 qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex); 1387 1388 /* add this neighbour peer into the list */ 1389 TAILQ_INSERT_TAIL(&mon_pdev->neighbour_peers_list, peer, 1390 neighbour_peer_list_elem); 1391 qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1392 1393 /* first neighbour */ 1394 if (!mon_pdev->neighbour_peers_added) { 1395 QDF_STATUS status = QDF_STATUS_SUCCESS; 1396 1397 mon_pdev->neighbour_peers_added = true; 1398 dp_mon_filter_setup_smart_monitor(pdev); 1399 status = dp_mon_filter_update(pdev); 1400 if (status != QDF_STATUS_SUCCESS) { 1401 dp_cdp_err("%pK: smart mon filter setup failed", 1402 soc); 1403 dp_mon_filter_reset_smart_monitor(pdev); 1404 mon_pdev->neighbour_peers_added = false; 1405 } 1406 } 1407 1408 } else if (cmd == DP_NAC_PARAM_DEL) { 1409 qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex); 1410 TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list, 1411 neighbour_peer_list_elem) { 1412 if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], 1413 macaddr, QDF_MAC_ADDR_SIZE)) { 1414 /* delete this peer from the list */ 1415 TAILQ_REMOVE(&mon_pdev->neighbour_peers_list, 1416 peer, neighbour_peer_list_elem); 1417 qdf_mem_free(peer); 1418 break; 1419 } 1420 } 1421 /* last neighbour deleted */ 1422 if (TAILQ_EMPTY(&mon_pdev->neighbour_peers_list)) { 1423 QDF_STATUS status = QDF_STATUS_SUCCESS; 1424 1425 dp_mon_filter_reset_smart_monitor(pdev); 1426 status = dp_mon_filter_update(pdev); 1427 if (status != QDF_STATUS_SUCCESS) { 1428 dp_cdp_err("%pK: smart mon filter clear failed", 1429 soc); 1430 } 1431 mon_pdev->neighbour_peers_added = false; 1432 } 1433 qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1434 } 1435 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 1436 return 1; 1437 1438 fail0: 1439 if (vdev) 1440 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 1441 return 0; 1442 } 1443 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ 1444 1445 #ifdef ATH_SUPPORT_NAC_RSSI 1446 /** 1447 * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC 1448 * @soc_hdl: DP soc handle 1449 * @vdev_id: id of DP vdev handle 1450 * @mac_addr: neighbour mac 1451 * @rssi: rssi value 1452 * 1453 * Return: 0 for success. nonzero for failure. 
1454 */ 1455 static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl, 1456 uint8_t vdev_id, 1457 char *mac_addr, 1458 uint8_t *rssi) 1459 { 1460 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1461 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 1462 DP_MOD_ID_CDP); 1463 struct dp_pdev *pdev; 1464 struct dp_neighbour_peer *peer = NULL; 1465 QDF_STATUS status = QDF_STATUS_E_FAILURE; 1466 struct dp_mon_pdev *mon_pdev; 1467 1468 if (!vdev) 1469 return status; 1470 1471 pdev = vdev->pdev; 1472 mon_pdev = pdev->monitor_pdev; 1473 1474 *rssi = 0; 1475 qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex); 1476 TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list, 1477 neighbour_peer_list_elem) { 1478 if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], 1479 mac_addr, QDF_MAC_ADDR_SIZE) == 0) { 1480 *rssi = peer->rssi; 1481 status = QDF_STATUS_SUCCESS; 1482 break; 1483 } 1484 } 1485 qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1486 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 1487 return status; 1488 } 1489 1490 static QDF_STATUS 1491 dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc, 1492 uint8_t vdev_id, 1493 enum cdp_nac_param_cmd cmd, char *bssid, 1494 char *client_macaddr, 1495 uint8_t chan_num) 1496 { 1497 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 1498 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 1499 DP_MOD_ID_CDP); 1500 struct dp_pdev *pdev; 1501 struct dp_mon_pdev *mon_pdev; 1502 1503 if (!vdev) 1504 return QDF_STATUS_E_FAILURE; 1505 1506 pdev = (struct dp_pdev *)vdev->pdev; 1507 1508 mon_pdev = pdev->monitor_pdev; 1509 mon_pdev->nac_rssi_filtering = 1; 1510 /* Store address of NAC (neighbour peer) which will be checked 1511 * against TA of received packets. 1512 */ 1513 1514 if (cmd == CDP_NAC_PARAM_ADD) { 1515 dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id, 1516 DP_NAC_PARAM_ADD, 1517 (uint8_t *)client_macaddr); 1518 } else if (cmd == CDP_NAC_PARAM_DEL) { 1519 dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id, 1520 DP_NAC_PARAM_DEL, 1521 (uint8_t *)client_macaddr); 1522 } 1523 1524 if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi) 1525 soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi 1526 (soc->ctrl_psoc, pdev->pdev_id, 1527 vdev->vdev_id, cmd, bssid, client_macaddr); 1528 1529 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 1530 return QDF_STATUS_SUCCESS; 1531 } 1532 #endif 1533 1534 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) 1535 /* 1536 * dp_cfr_filter() - Configure HOST RX monitor status ring for CFR 1537 * @soc_hdl: Datapath soc handle 1538 * @pdev_id: id of data path pdev handle 1539 * @enable: Enable/Disable CFR 1540 * @filter_val: Flag to select Filter for monitor mode 1541 */ 1542 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl, 1543 uint8_t pdev_id, 1544 bool enable, 1545 struct cdp_monitor_filter *filter_val) 1546 { 1547 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 1548 struct dp_pdev *pdev = NULL; 1549 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0}; 1550 int max_mac_rings; 1551 uint8_t mac_id = 0; 1552 struct dp_mon_pdev *mon_pdev; 1553 1554 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 1555 if (!pdev) { 1556 dp_mon_err("pdev is NULL"); 1557 return; 1558 } 1559 1560 mon_pdev = pdev->monitor_pdev; 1561 1562 if (mon_pdev->mvdev) { 1563 dp_mon_info("No action is needed since mon mode is enabled\n"); 1564 return; 1565 } 1566 soc = pdev->soc; 1567 pdev->cfr_rcc_mode = false; 1568 max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx); 1569 
	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	dp_mon_debug("Max_mac_rings %d", max_mac_rings);
	dp_mon_info("enable : %d, mode: 0x%x", enable, filter_val->mode);

	if (enable) {
		pdev->cfr_rcc_mode = true;

		htt_tlv_filter.ppdu_start = 1;
		htt_tlv_filter.ppdu_end = 1;
		htt_tlv_filter.ppdu_end_user_stats = 1;
		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
		htt_tlv_filter.ppdu_end_status_done = 1;
		htt_tlv_filter.mpdu_start = 1;
		htt_tlv_filter.offset_valid = false;

		htt_tlv_filter.enable_fp =
			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
		htt_tlv_filter.enable_md = 0;
		htt_tlv_filter.enable_mo =
			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
	}

	for (mac_id = 0;
	     mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
	     mac_id++) {
		int mac_for_pdev =
			dp_get_mac_id_for_pdev(mac_id,
					       pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle,
				    mac_for_pdev,
				    soc->rxdma_mon_status_ring[mac_id]
				    .hal_srng,
				    RXDMA_MONITOR_STATUS,
				    RX_MON_STATUS_BUF_SIZE,
				    &htt_tlv_filter);
	}
}
#endif

void
dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			 bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = NULL;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return;
	}

	mon_pdev = pdev->monitor_pdev;
	mon_pdev->enable_reap_timer_non_pkt = enable;
	if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
		dp_mon_debug("pktlog enabled %d", mon_pdev->rx_pktlog_mode);
		return;
	}

	if (!mon_soc->reap_timer_init) {
		dp_mon_err("reap timer not init");
		return;
	}

	if (enable)
		qdf_timer_mod(&mon_soc->mon_reap_timer,
			      DP_INTR_POLL_TIMER_MS);
	else
		qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);
}

#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *handle =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!handle) {
		dp_mon_err("pdev handle is NULL");
		return;
	}

	mon_pdev = handle->monitor_pdev;

	if (mon_pdev->pkt_log_init) {
		dp_mon_err("%pK: Packet log already initialized", soc);
		return;
	}

	pktlog_sethandle(&mon_pdev->pl_dev, scn);
	pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id);
	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		mon_pdev->pkt_log_init = false;
	} else {
		mon_pdev->pkt_log_init = true;
	}
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	dp_pkt_log_init(soc_hdl, pdev_id, scn);
	pktlog_htc_attach();
}

/**
 * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: none
 */
static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev handle is NULL");
		return;
	}

	dp_pktlogmod_exit(pdev);
}

#else
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
}

static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
}
#endif
#endif

void dp_neighbour_peers_detach(struct dp_pdev *pdev)
{
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	TAILQ_FOREACH_SAFE(peer, &mon_pdev->neighbour_peers_list,
			   neighbour_peer_list_elem, temp_peer) {
		/* delete this peer from the list */
		TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
			     peer, neighbour_peer_list_elem);
		qdf_mem_free(peer);
	}

	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
}

#ifdef QCA_ENHANCED_STATS_SUPPORT
/*
 * dp_mon_tx_enable_enhanced_stats() - Enable enhanced Tx stats
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */
static void dp_mon_tx_enable_enhanced_stats(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_tx_enable_enhanced_stats)
		mon_ops->mon_tx_enable_enhanced_stats(pdev);
}

/*
 * dp_enable_enhanced_stats() - API to enable enhanced statistics
 * @soc_handle: DP_SOC handle
 * @pdev_id: id of DP_PDEV handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev = NULL;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev)
		return QDF_STATUS_E_FAILURE;

	if (mon_pdev->enhanced_stats_en == 0)
		dp_cal_client_timer_start(mon_pdev->cal_client_ctx);

	mon_pdev->enhanced_stats_en = 1;
	pdev->enhanced_stats_en = true;

	dp_mon_filter_setup_enhanced_stats(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
		dp_mon_filter_reset_enhanced_stats(pdev);
		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
		mon_pdev->enhanced_stats_en = 0;
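		/*
		 * Added note: clear both the monitor-pdev and pdev level
		 * flags so a later enable attempt starts from a clean state
		 * after the filter-update failure above.
		 */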
		pdev->enhanced_stats_en = false;
		return QDF_STATUS_E_FAILURE;
	}

	dp_mon_tx_enable_enhanced_stats(pdev);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_mon_tx_disable_enhanced_stats() - Disable enhanced Tx stats
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */
static void dp_mon_tx_disable_enhanced_stats(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_tx_disable_enhanced_stats)
		mon_ops->mon_tx_disable_enhanced_stats(pdev);
}

/*
 * dp_disable_enhanced_stats() - API to disable enhanced statistics
 *
 * @param soc - the soc handle
 * @param pdev_id - pdev_id of pdev
 * @return - QDF_STATUS
 */
static QDF_STATUS
dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev || !pdev->monitor_pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->enhanced_stats_en == 1)
		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);

	mon_pdev->enhanced_stats_en = 0;
	pdev->enhanced_stats_en = false;

	dp_mon_tx_disable_enhanced_stats(pdev);

	dp_mon_filter_reset_enhanced_stats(pdev);
	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to reset enhanced mode filters"));
	}

	return QDF_STATUS_SUCCESS;
}

#ifdef WDI_EVENT_ENABLE
QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
				    struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	struct cdp_interface_peer_qos_stats qos_stats_intf;

	if (ppdu_user->peer_id == HTT_INVALID_PEER) {
		dp_mon_warn("Invalid peer id");
		return QDF_STATUS_E_FAILURE;
	}
	qdf_mem_zero(&qos_stats_intf, sizeof(qos_stats_intf));

	qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
		     QDF_MAC_ADDR_SIZE);
	qos_stats_intf.frame_control = ppdu_user->frame_control;
	qos_stats_intf.frame_control_info_valid =
		ppdu_user->frame_control_info_valid;
	qos_stats_intf.qos_control = ppdu_user->qos_control;
	qos_stats_intf.qos_control_info_valid =
		ppdu_user->qos_control_info_valid;
	qos_stats_intf.vdev_id = ppdu_user->vdev_id;
	dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc,
			     (void *)&qos_stats_intf, 0,
			     WDI_NO_VAL, dp_pdev->pdev_id);

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
			 struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#endif /* QCA_ENHANCED_STATS_SUPPORT */

/**
 * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
 * for pktlog
 * @soc: cdp_soc handle
 * @pdev_id: id of dp pdev handle
 * @mac_addr: Peer mac address
 * @enb_dsb: Enable or disable peer based filtering
 *
 * Return: QDF_STATUS
 */
static int
dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
			    uint8_t *mac_addr, uint8_t enb_dsb)
{
	struct dp_peer *peer;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct dp_pdev *pdev =
dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 1930 pdev_id); 1931 struct dp_mon_pdev *mon_pdev; 1932 1933 if (!pdev) 1934 return QDF_STATUS_E_FAILURE; 1935 1936 mon_pdev = pdev->monitor_pdev; 1937 1938 peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr, 1939 0, DP_VDEV_ALL, DP_MOD_ID_CDP); 1940 1941 if (!peer) { 1942 dp_mon_err("Invalid Peer"); 1943 return QDF_STATUS_E_FAILURE; 1944 } 1945 1946 if (!IS_MLO_DP_MLD_PEER(peer) && peer->monitor_peer) { 1947 peer->monitor_peer->peer_based_pktlog_filter = enb_dsb; 1948 mon_pdev->dp_peer_based_pktlog = enb_dsb; 1949 status = QDF_STATUS_SUCCESS; 1950 } 1951 1952 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 1953 1954 return status; 1955 } 1956 1957 /** 1958 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer 1959 * @soc: DP_SOC handle 1960 * @pdev_id: id of DP_PDEV handle 1961 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode 1962 * @is_tx_pkt_cap_enable: enable/disable/delete/print 1963 * Tx packet capture in monitor mode 1964 * @peer_mac: MAC address for which the above need to be enabled/disabled 1965 * 1966 * Return: Success if Rx & Tx capture is enabled for peer, false otherwise 1967 */ 1968 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) 1969 static QDF_STATUS 1970 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc, 1971 uint8_t pdev_id, 1972 bool is_rx_pkt_cap_enable, 1973 uint8_t is_tx_pkt_cap_enable, 1974 uint8_t *peer_mac) 1975 { 1976 struct dp_peer *peer; 1977 QDF_STATUS status = QDF_STATUS_E_FAILURE; 1978 struct dp_pdev *pdev = 1979 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 1980 pdev_id); 1981 if (!pdev) 1982 return QDF_STATUS_E_FAILURE; 1983 1984 peer = dp_peer_find_hash_find((struct dp_soc *)soc, 1985 peer_mac, 0, DP_VDEV_ALL, 1986 DP_MOD_ID_CDP); 1987 if (!peer) 1988 return QDF_STATUS_E_FAILURE; 1989 1990 /* we need to set tx pkt capture for non associated peer */ 1991 if (!IS_MLO_DP_MLD_PEER(peer)) { 1992 status = dp_monitor_tx_peer_filter(pdev, peer, 1993 is_tx_pkt_cap_enable, 1994 peer_mac); 1995 1996 status = dp_peer_set_rx_capture_enabled(pdev, peer, 1997 is_rx_pkt_cap_enable, 1998 peer_mac); 1999 } 2000 2001 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 2002 2003 return status; 2004 } 2005 #endif 2006 2007 #ifdef QCA_MCOPY_SUPPORT 2008 QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev, 2009 uint16_t peer_id, 2010 uint32_t ppdu_id, 2011 uint8_t first_msdu) 2012 { 2013 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 2014 2015 if (mon_pdev->mcopy_mode) { 2016 if (mon_pdev->mcopy_mode == M_COPY) { 2017 if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) && 2018 (mon_pdev->m_copy_id.tx_peer_id == peer_id)) { 2019 return QDF_STATUS_E_INVAL; 2020 } 2021 } 2022 2023 if (!first_msdu) 2024 return QDF_STATUS_E_INVAL; 2025 2026 mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id; 2027 mon_pdev->m_copy_id.tx_peer_id = peer_id; 2028 } 2029 2030 return QDF_STATUS_SUCCESS; 2031 } 2032 #endif 2033 2034 #ifdef WDI_EVENT_ENABLE 2035 #ifndef REMOVE_PKT_LOG 2036 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 2037 { 2038 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 2039 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 2040 2041 if (!pdev || !pdev->monitor_pdev) 2042 return NULL; 2043 2044 return pdev->monitor_pdev->pl_dev; 2045 } 2046 #else 2047 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) 2048 { 2049 return NULL; 2050 } 2051 #endif 2052 #endif 2053 2054 QDF_STATUS 
dp_rx_populate_cbf_hdr(struct dp_soc *soc, 2055 uint32_t mac_id, 2056 uint32_t event, 2057 qdf_nbuf_t mpdu, 2058 uint32_t msdu_timestamp) 2059 { 2060 uint32_t data_size, hdr_size, ppdu_id, align4byte; 2061 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 2062 uint32_t *msg_word; 2063 2064 if (!pdev) 2065 return QDF_STATUS_E_INVAL; 2066 2067 ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id; 2068 2069 hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE 2070 + qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload); 2071 2072 data_size = qdf_nbuf_len(mpdu); 2073 2074 qdf_nbuf_push_head(mpdu, hdr_size); 2075 2076 msg_word = (uint32_t *)qdf_nbuf_data(mpdu); 2077 /* 2078 * Populate the PPDU Stats Indication header 2079 */ 2080 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND); 2081 HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id); 2082 HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id); 2083 align4byte = ((data_size + 2084 qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload) 2085 + 3) >> 2) << 2; 2086 HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte); 2087 msg_word++; 2088 HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id); 2089 msg_word++; 2090 2091 *msg_word = msdu_timestamp; 2092 msg_word++; 2093 /* Skip reserved field */ 2094 msg_word++; 2095 /* 2096 * Populate MGMT_CTRL Payload TLV first 2097 */ 2098 HTT_STATS_TLV_TAG_SET(*msg_word, 2099 HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV); 2100 2101 align4byte = ((data_size - sizeof(htt_tlv_hdr_t) + 2102 qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload) 2103 + 3) >> 2) << 2; 2104 HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte); 2105 msg_word++; 2106 2107 HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET( 2108 *msg_word, data_size); 2109 msg_word++; 2110 2111 dp_wdi_event_handler(event, soc, (void *)mpdu, 2112 HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id); 2113 2114 qdf_nbuf_pull_head(mpdu, hdr_size); 2115 2116 return QDF_STATUS_SUCCESS; 2117 } 2118 2119 #ifdef ATH_SUPPORT_EXT_STAT 2120 /*dp_peer_cal_clients_stats_update - update peer stats on cal client timer 2121 * @soc : Datapath SOC 2122 * @peer : Datapath peer 2123 * @arg : argument to iter function 2124 */ 2125 static void 2126 dp_peer_cal_clients_stats_update(struct dp_soc *soc, 2127 struct dp_peer *peer, 2128 void *arg) 2129 { 2130 struct cdp_calibr_stats_intf peer_stats_intf = {0}; 2131 struct dp_peer *tgt_peer = NULL; 2132 struct dp_txrx_peer *txrx_peer = NULL; 2133 2134 if (!dp_peer_is_primary_link_peer(peer)) 2135 return; 2136 2137 tgt_peer = dp_get_tgt_peer_from_peer(peer); 2138 if (!tgt_peer || !(tgt_peer->txrx_peer)) 2139 return; 2140 2141 txrx_peer = tgt_peer->txrx_peer; 2142 peer_stats_intf.to_stack = txrx_peer->to_stack; 2143 peer_stats_intf.tx_success = 2144 txrx_peer->stats.per_pkt_stats.tx.tx_success; 2145 peer_stats_intf.tx_ucast = 2146 txrx_peer->stats.per_pkt_stats.tx.ucast; 2147 2148 dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf, 2149 &tgt_peer->stats); 2150 } 2151 2152 /*dp_iterate_update_peer_list - update peer stats on cal client timer 2153 * @pdev_hdl: pdev handle 2154 */ 2155 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl) 2156 { 2157 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl; 2158 2159 dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL, 2160 DP_MOD_ID_CDP); 2161 } 2162 #else 2163 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl) 2164 { 2165 } 2166 #endif 2167 2168 #ifdef ATH_SUPPORT_NAC 2169 int dp_set_filter_neigh_peers(struct dp_pdev *pdev, 
2170 bool val) 2171 { 2172 /* Enable/Disable smart mesh filtering. This flag will be checked 2173 * during rx processing to check if packets are from NAC clients. 2174 */ 2175 pdev->monitor_pdev->filter_neighbour_peers = val; 2176 return 0; 2177 } 2178 #endif /* ATH_SUPPORT_NAC */ 2179 2180 #ifdef WLAN_ATF_ENABLE 2181 void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value) 2182 { 2183 if (!pdev) { 2184 dp_cdp_err("Invalid pdev"); 2185 return; 2186 } 2187 2188 pdev->monitor_pdev->dp_atf_stats_enable = value; 2189 } 2190 #endif 2191 2192 #ifdef QCA_ENHANCED_STATS_SUPPORT 2193 /* 2194 * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process 2195 * htt_ppdu_stats_tx_mgmtctrl_payload_tlv 2196 * @pdev: DP PDEV handle 2197 * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv 2198 * @length: tlv_length 2199 * 2200 * return:QDF_STATUS_SUCCESS if nbuf has to be freed in caller 2201 */ 2202 QDF_STATUS 2203 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev, 2204 qdf_nbuf_t tag_buf, 2205 uint32_t ppdu_id) 2206 { 2207 uint32_t *nbuf_ptr; 2208 uint8_t trim_size; 2209 size_t head_size; 2210 struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info; 2211 uint32_t *msg_word; 2212 uint32_t tsf_hdr; 2213 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 2214 2215 if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) && 2216 (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled)) 2217 return QDF_STATUS_SUCCESS; 2218 2219 /* 2220 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t 2221 */ 2222 msg_word = (uint32_t *)qdf_nbuf_data(tag_buf); 2223 msg_word = msg_word + 2; 2224 tsf_hdr = *msg_word; 2225 2226 trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf + 2227 HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) - 2228 qdf_nbuf_data(tag_buf)); 2229 2230 if (!qdf_nbuf_pull_head(tag_buf, trim_size)) 2231 return QDF_STATUS_SUCCESS; 2232 2233 qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) - 2234 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len); 2235 2236 if (mon_pdev->tx_capture_enabled) { 2237 head_size = sizeof(struct cdp_tx_mgmt_comp_info); 2238 if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) { 2239 qdf_err("Fail to get headroom h_sz %zu h_avail %d\n", 2240 head_size, qdf_nbuf_headroom(tag_buf)); 2241 qdf_assert_always(0); 2242 return QDF_STATUS_E_NOMEM; 2243 } 2244 ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *) 2245 qdf_nbuf_push_head(tag_buf, head_size); 2246 qdf_assert_always(ptr_mgmt_comp_info); 2247 ptr_mgmt_comp_info->ppdu_id = ppdu_id; 2248 ptr_mgmt_comp_info->is_sgen_pkt = true; 2249 ptr_mgmt_comp_info->tx_tsf = tsf_hdr; 2250 } else { 2251 head_size = sizeof(ppdu_id); 2252 nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size); 2253 *nbuf_ptr = ppdu_id; 2254 } 2255 if (mon_pdev->bpr_enable) { 2256 dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc, 2257 tag_buf, HTT_INVALID_PEER, 2258 WDI_NO_VAL, pdev->pdev_id); 2259 } 2260 2261 dp_deliver_mgmt_frm(pdev, tag_buf); 2262 2263 return QDF_STATUS_E_ALREADY; 2264 } 2265 2266 /* 2267 * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv 2268 * bitmap for sniffer mode 2269 * @bitmap: received bitmap 2270 * 2271 * Return: expected bitmap value, returns zero if doesn't match with 2272 * either 64-bit Tx window or 256-bit window tlv bitmap 2273 */ 2274 int 2275 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap) 2276 { 2277 if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64)) 2278 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64; 2279 else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256)) 2280 
return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256; 2281 2282 return 0; 2283 } 2284 2285 /* 2286 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats. 2287 * @peer: Datapath peer handle 2288 * @ppdu: User PPDU Descriptor 2289 * @cur_ppdu_id: PPDU_ID 2290 * 2291 * Return: None 2292 * 2293 * on Tx data frame, we may get delayed ba set 2294 * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we 2295 * request Block Ack Request(BAR). Successful msdu is received only after Block 2296 * Ack. To populate peer stats we need successful msdu(data frame). 2297 * So we hold the Tx data stats on delayed_ba for stats update. 2298 */ 2299 static void 2300 dp_peer_copy_delay_stats(struct dp_peer *peer, 2301 struct cdp_tx_completion_ppdu_user *ppdu, 2302 uint32_t cur_ppdu_id) 2303 { 2304 struct dp_pdev *pdev; 2305 struct dp_vdev *vdev; 2306 struct dp_mon_peer *mon_peer = peer->monitor_peer; 2307 2308 if (mon_peer->last_delayed_ba) { 2309 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2310 "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]", 2311 mon_peer->last_delayed_ba_ppduid, cur_ppdu_id); 2312 vdev = peer->vdev; 2313 if (vdev) { 2314 pdev = vdev->pdev; 2315 pdev->stats.cdp_delayed_ba_not_recev++; 2316 } 2317 } 2318 2319 mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size; 2320 mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc; 2321 mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re; 2322 mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf; 2323 mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw; 2324 mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss; 2325 mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi; 2326 mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; 2327 mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc; 2328 mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; 2329 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = 2330 ppdu->mpdu_tried_ucast; 2331 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = 2332 ppdu->mpdu_tried_mcast; 2333 mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl; 2334 mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl; 2335 mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; 2336 2337 mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start; 2338 mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones; 2339 mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast; 2340 2341 mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos; 2342 mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id; 2343 2344 mon_peer->last_delayed_ba = true; 2345 2346 ppdu->debug_copied = true; 2347 } 2348 2349 /* 2350 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats. 2351 * @peer: Datapath peer handle 2352 * @ppdu: PPDU Descriptor 2353 * 2354 * Return: None 2355 * 2356 * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info 2357 * from Tx BAR frame not required to populate peer stats. 2358 * But we need successful MPDU and MSDU to update previous 2359 * transmitted Tx data frame. Overwrite ppdu stats with the previous 2360 * stored ppdu stats. 
2361 */ 2362 static void 2363 dp_peer_copy_stats_to_bar(struct dp_peer *peer, 2364 struct cdp_tx_completion_ppdu_user *ppdu) 2365 { 2366 struct dp_mon_peer *mon_peer = peer->monitor_peer; 2367 2368 ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size; 2369 ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc; 2370 ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re; 2371 ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf; 2372 ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw; 2373 ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss; 2374 ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi; 2375 ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm; 2376 ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc; 2377 ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm; 2378 ppdu->mpdu_tried_ucast = 2379 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast; 2380 ppdu->mpdu_tried_mcast = 2381 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast; 2382 ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl; 2383 ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl; 2384 ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm; 2385 2386 ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start; 2387 ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones; 2388 ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast; 2389 2390 ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos; 2391 ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id; 2392 2393 mon_peer->last_delayed_ba = false; 2394 2395 ppdu->debug_copied = true; 2396 } 2397 2398 /* 2399 * dp_tx_rate_stats_update() - Update rate per-peer statistics 2400 * @peer: Datapath peer handle 2401 * @ppdu: PPDU Descriptor 2402 * 2403 * Return: None 2404 */ 2405 static void 2406 dp_tx_rate_stats_update(struct dp_peer *peer, 2407 struct cdp_tx_completion_ppdu_user *ppdu) 2408 { 2409 uint32_t ratekbps = 0; 2410 uint64_t ppdu_tx_rate = 0; 2411 uint32_t rix; 2412 uint16_t ratecode = 0; 2413 enum PUNCTURED_MODES punc_mode = NO_PUNCTURE; 2414 struct dp_mon_peer *mon_peer = NULL; 2415 2416 if (!peer || !ppdu) 2417 return; 2418 2419 if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) 2420 return; 2421 2422 mon_peer = peer->monitor_peer; 2423 if (!mon_peer) 2424 return; 2425 2426 ratekbps = dp_getrateindex(ppdu->gi, 2427 ppdu->mcs, 2428 ppdu->nss, 2429 ppdu->preamble, 2430 ppdu->bw, 2431 punc_mode, 2432 &rix, 2433 &ratecode); 2434 2435 DP_STATS_UPD(mon_peer, tx.last_tx_rate, ratekbps); 2436 2437 if (!ratekbps) 2438 return; 2439 2440 /* Calculate goodput in non-training period 2441 * In training period, don't do anything as 2442 * pending pkt is send as goodput. 2443 */ 2444 if ((!peer->bss_peer) && (!ppdu->sa_is_training)) { 2445 ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) * 2446 (CDP_PERCENT_MACRO - ppdu->current_rate_per)); 2447 } 2448 ppdu->rix = rix; 2449 ppdu->tx_ratekbps = ratekbps; 2450 ppdu->tx_ratecode = ratecode; 2451 mon_peer->stats.tx.avg_tx_rate = 2452 dp_ath_rate_lpf(mon_peer->stats.tx.avg_tx_rate, ratekbps); 2453 ppdu_tx_rate = dp_ath_rate_out(mon_peer->stats.tx.avg_tx_rate); 2454 DP_STATS_UPD(mon_peer, tx.rnd_avg_tx_rate, ppdu_tx_rate); 2455 2456 mon_peer->stats.tx.bw_info = ppdu->bw; 2457 mon_peer->stats.tx.gi_info = ppdu->gi; 2458 mon_peer->stats.tx.nss_info = ppdu->nss; 2459 mon_peer->stats.tx.mcs_info = ppdu->mcs; 2460 mon_peer->stats.tx.preamble_info = ppdu->preamble; 2461 if (peer->vdev) { 2462 /* 2463 * In STA mode: 2464 * We get ucast stats as BSS peer stats. 2465 * 2466 * In AP mode: 2467 * We get mcast stats as BSS peer stats. 
		 * We get ucast stats as assoc peer stats.
		 */
		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
		} else {
			peer->vdev->stats.tx.last_tx_rate = ratekbps;
			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
		}
	}
}

#if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
void dp_send_stats_event(struct dp_pdev *pdev, struct dp_peer *peer,
			 uint16_t peer_id)
{
	struct cdp_interface_peer_stats peer_stats_intf;
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_txrx_peer *txrx_peer = NULL;

	if (!mon_peer)
		return;

	qdf_mem_zero(&peer_stats_intf,
		     sizeof(struct cdp_interface_peer_stats));
	mon_peer->stats.rx.rx_snr_measured_time = qdf_system_ticks();
	peer_stats_intf.rx_avg_snr = mon_peer->stats.rx.avg_snr;

	txrx_peer = dp_get_txrx_peer(peer);
	if (txrx_peer) {
		peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes;
		peer_stats_intf.tx_byte_count =
			txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
	}

	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer_stats_intf, peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
}
#endif

/*
 * dp_get_ru_index_frm_ru_tones() - get ru index
 * @ru_tones: ru tones
 *
 * Return: ru index
 */
#ifdef WLAN_FEATURE_11BE
static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
{
	enum cdp_ru_index ru_index;

	switch (ru_tones) {
	case RU_26:
		ru_index = RU_26_INDEX;
		break;
	case RU_52:
		ru_index = RU_52_INDEX;
		break;
	case RU_52_26:
		ru_index = RU_52_26_INDEX;
		break;
	case RU_106:
		ru_index = RU_106_INDEX;
		break;
	case RU_106_26:
		ru_index = RU_106_26_INDEX;
		break;
	case RU_242:
		ru_index = RU_242_INDEX;
		break;
	case RU_484:
		ru_index = RU_484_INDEX;
		break;
	case RU_484_242:
		ru_index = RU_484_242_INDEX;
		break;
	case RU_996:
		ru_index = RU_996_INDEX;
		break;
	case RU_996_484:
		ru_index = RU_996_484_INDEX;
		break;
	case RU_996_484_242:
		ru_index = RU_996_484_242_INDEX;
		break;
	case RU_2X996:
		ru_index = RU_2X996_INDEX;
		break;
	case RU_2X996_484:
		ru_index = RU_2X996_484_INDEX;
		break;
	case RU_3X996:
		ru_index = RU_3X996_INDEX;
		break;
	case RU_3X996_484:
		ru_index = RU_3X996_484_INDEX;
		break;
	case RU_4X996:
		ru_index = RU_4X996_INDEX;
		break;
	default:
		ru_index = RU_INDEX_MAX;
		break;
	}

	return ru_index;
}
#else
static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
{
	enum cdp_ru_index ru_index;

	switch (ru_tones) {
	case RU_26:
		ru_index = RU_26_INDEX;
		break;
	case RU_52:
		ru_index = RU_52_INDEX;
		break;
	case RU_106:
		ru_index = RU_106_INDEX;
		break;
	case RU_242:
		ru_index = RU_242_INDEX;
		break;
	case RU_484:
		ru_index = RU_484_INDEX;
		break;
	case RU_996:
		ru_index = RU_996_INDEX;
		break;
	default:
		ru_index = RU_INDEX_MAX;
		break;
	}

	return ru_index;
}
#endif

/*
 * dp_tx_stats_update() - Update per-peer statistics
 * @pdev: Datapath pdev handle
 *
@peer: Datapath peer handle 2613 * @ppdu: PPDU Descriptor 2614 * @ack_rssi: RSSI of last ack received 2615 * 2616 * Return: None 2617 */ 2618 static void 2619 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer, 2620 struct cdp_tx_completion_ppdu_user *ppdu, 2621 uint32_t ack_rssi) 2622 { 2623 uint8_t preamble, mcs; 2624 uint16_t num_msdu; 2625 uint16_t num_mpdu; 2626 uint16_t mpdu_tried; 2627 uint16_t mpdu_failed; 2628 struct dp_mon_ops *mon_ops; 2629 enum cdp_ru_index ru_index; 2630 struct dp_mon_peer *mon_peer = NULL; 2631 2632 preamble = ppdu->preamble; 2633 mcs = ppdu->mcs; 2634 num_msdu = ppdu->num_msdu; 2635 num_mpdu = ppdu->mpdu_success; 2636 mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast; 2637 mpdu_failed = mpdu_tried - num_mpdu; 2638 2639 /* If the peer statistics are already processed as part of 2640 * per-MSDU completion handler, do not process these again in per-PPDU 2641 * indications 2642 */ 2643 if (pdev->soc->process_tx_status) 2644 return; 2645 2646 mon_peer = peer->monitor_peer; 2647 if (!mon_peer) 2648 return; 2649 2650 if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) { 2651 /* 2652 * All failed mpdu will be retried, so incrementing 2653 * retries mpdu based on mpdu failed. Even for 2654 * ack failure i.e for long retries we get 2655 * mpdu failed equal mpdu tried. 2656 */ 2657 DP_STATS_INC(mon_peer, tx.retries, mpdu_failed); 2658 return; 2659 } 2660 2661 if (ppdu->is_ppdu_cookie_valid) 2662 DP_STATS_INC(mon_peer, tx.num_ppdu_cookie_valid, 1); 2663 2664 if (ppdu->mu_group_id <= MAX_MU_GROUP_ID && 2665 ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) { 2666 if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1)))) 2667 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2668 "mu_group_id out of bound!!\n"); 2669 else 2670 DP_STATS_UPD(mon_peer, tx.mu_group_id[ppdu->mu_group_id], 2671 (ppdu->user_pos + 1)); 2672 } 2673 2674 if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA || 2675 ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) { 2676 DP_STATS_UPD(mon_peer, tx.ru_tones, ppdu->ru_tones); 2677 DP_STATS_UPD(mon_peer, tx.ru_start, ppdu->ru_start); 2678 ru_index = dp_get_ru_index_frm_ru_tones(ppdu->ru_tones); 2679 if (ru_index != RU_INDEX_MAX) { 2680 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_msdu, 2681 num_msdu); 2682 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_mpdu, 2683 num_mpdu); 2684 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].mpdu_tried, 2685 mpdu_tried); 2686 } 2687 } 2688 2689 /* 2690 * All failed mpdu will be retried, so incrementing 2691 * retries mpdu based on mpdu failed. Even for 2692 * ack failure i.e for long retries we get 2693 * mpdu failed equal mpdu tried. 
2694 */ 2695 DP_STATS_INC(mon_peer, tx.retries, mpdu_failed); 2696 2697 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_msdu, 2698 num_msdu); 2699 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu, 2700 num_mpdu); 2701 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried, 2702 mpdu_tried); 2703 2704 DP_STATS_UPD(mon_peer, tx.tx_rate, ppdu->tx_rate); 2705 DP_STATS_INC(mon_peer, tx.sgi_count[ppdu->gi], num_msdu); 2706 DP_STATS_INC(mon_peer, tx.bw[ppdu->bw], num_msdu); 2707 DP_STATS_INC(mon_peer, tx.nss[ppdu->nss], num_msdu); 2708 if (ppdu->tid < CDP_DATA_TID_MAX) 2709 DP_STATS_INC(mon_peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], 2710 num_msdu); 2711 DP_STATS_INCC(mon_peer, tx.stbc, num_msdu, ppdu->stbc); 2712 DP_STATS_INCC(mon_peer, tx.ldpc, num_msdu, ppdu->ldpc); 2713 if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid) 2714 DP_STATS_UPD(mon_peer, tx.last_ack_rssi, ack_rssi); 2715 2716 DP_STATS_INCC(mon_peer, 2717 tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 2718 ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A))); 2719 DP_STATS_INCC(mon_peer, 2720 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 2721 ((mcs < MAX_MCS_11A) && (preamble == DOT11_A))); 2722 DP_STATS_INCC(mon_peer, 2723 tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 2724 ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B))); 2725 DP_STATS_INCC(mon_peer, 2726 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 2727 ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B))); 2728 DP_STATS_INCC(mon_peer, 2729 tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 2730 ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N))); 2731 DP_STATS_INCC(mon_peer, 2732 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 2733 ((mcs < MAX_MCS_11A) && (preamble == DOT11_N))); 2734 DP_STATS_INCC(mon_peer, 2735 tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 2736 ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC))); 2737 DP_STATS_INCC(mon_peer, 2738 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 2739 ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC))); 2740 DP_STATS_INCC(mon_peer, 2741 tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 2742 ((mcs >= MAX_MCS_11AX) && (preamble == DOT11_AX))); 2743 DP_STATS_INCC(mon_peer, 2744 tx.pkt_type[preamble].mcs_count[mcs], num_msdu, 2745 ((mcs < MAX_MCS_11AX) && (preamble == DOT11_AX))); 2746 DP_STATS_INCC(mon_peer, tx.ampdu_cnt, num_mpdu, ppdu->is_ampdu); 2747 DP_STATS_INCC(mon_peer, tx.non_ampdu_cnt, num_mpdu, !(ppdu->is_ampdu)); 2748 DP_STATS_INCC(mon_peer, tx.pream_punct_cnt, 1, ppdu->pream_punct); 2749 DP_STATS_INC(mon_peer, tx.tx_ppdus, 1); 2750 DP_STATS_INC(mon_peer, tx.tx_mpdus_success, num_mpdu); 2751 DP_STATS_INC(mon_peer, tx.tx_mpdus_tried, mpdu_tried); 2752 2753 mon_ops = dp_mon_ops_get(pdev->soc); 2754 if (mon_ops && mon_ops->mon_tx_stats_update) 2755 mon_ops->mon_tx_stats_update(mon_peer, ppdu); 2756 2757 dp_peer_stats_notify(pdev, peer); 2758 2759 dp_send_stats_event(pdev, peer, ppdu->peer_id); 2760 } 2761 2762 /* 2763 * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU, 2764 * if a new peer id arrives in a PPDU 2765 * pdev: DP pdev handle 2766 * @peer_id : peer unique identifier 2767 * @ppdu_info: per ppdu tlv structure 2768 * 2769 * return:user index to be populated 2770 */ 2771 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev, 2772 uint16_t peer_id, 2773 struct ppdu_info *ppdu_info) 2774 { 2775 uint8_t user_index = 0; 2776 struct cdp_tx_completion_ppdu *ppdu_desc; 2777 struct cdp_tx_completion_ppdu_user 
*ppdu_user_desc; 2778 2779 ppdu_desc = 2780 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 2781 2782 while ((user_index + 1) <= ppdu_info->last_user) { 2783 ppdu_user_desc = &ppdu_desc->user[user_index]; 2784 if (ppdu_user_desc->peer_id != peer_id) { 2785 user_index++; 2786 continue; 2787 } else { 2788 /* Max users possible is 8 so user array index should 2789 * not exceed 7 2790 */ 2791 qdf_assert_always(user_index <= (ppdu_desc->max_users - 1)); 2792 return user_index; 2793 } 2794 } 2795 2796 ppdu_info->last_user++; 2797 /* Max users possible is 8 so last user should not exceed 8 */ 2798 qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users); 2799 return ppdu_info->last_user - 1; 2800 } 2801 2802 /* 2803 * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv 2804 * pdev: DP pdev handle 2805 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv 2806 * @ppdu_info: per ppdu tlv structure 2807 * 2808 * return:void 2809 */ 2810 static void 2811 dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev, 2812 uint32_t *tag_buf, 2813 struct ppdu_info *ppdu_info) 2814 { 2815 uint16_t frame_type; 2816 uint16_t frame_ctrl; 2817 uint16_t freq; 2818 struct dp_soc *soc = NULL; 2819 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 2820 uint64_t ppdu_start_timestamp; 2821 uint32_t *start_tag_buf; 2822 2823 start_tag_buf = tag_buf; 2824 ppdu_desc = 2825 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 2826 2827 ppdu_desc->ppdu_id = ppdu_info->ppdu_id; 2828 2829 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID); 2830 ppdu_info->sched_cmdid = 2831 HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf); 2832 ppdu_desc->num_users = 2833 HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf); 2834 2835 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); 2836 2837 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE); 2838 frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf); 2839 ppdu_desc->htt_frame_type = frame_type; 2840 2841 frame_ctrl = ppdu_desc->frame_ctrl; 2842 2843 ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id; 2844 2845 switch (frame_type) { 2846 case HTT_STATS_FTYPE_TIDQ_DATA_SU: 2847 case HTT_STATS_FTYPE_TIDQ_DATA_MU: 2848 case HTT_STATS_FTYPE_SGEN_QOS_NULL: 2849 /* 2850 * for management packet, frame type come as DATA_SU 2851 * need to check frame_ctrl before setting frame_type 2852 */ 2853 if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL) 2854 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; 2855 else 2856 ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA; 2857 break; 2858 case HTT_STATS_FTYPE_SGEN_MU_BAR: 2859 case HTT_STATS_FTYPE_SGEN_BAR: 2860 ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR; 2861 break; 2862 default: 2863 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; 2864 break; 2865 } 2866 2867 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US); 2868 ppdu_desc->tx_duration = *tag_buf; 2869 2870 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US); 2871 ppdu_desc->ppdu_start_timestamp = *tag_buf; 2872 2873 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE); 2874 freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf); 2875 if (freq != ppdu_desc->channel) { 2876 soc = pdev->soc; 2877 ppdu_desc->channel = freq; 2878 pdev->operating_channel.freq = freq; 2879 if (soc && soc->cdp_soc.ol_ops->freq_to_channel) 2880 pdev->operating_channel.num = 2881 soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc, 2882 pdev->pdev_id, 2883 freq); 2884 2885 if (soc 
&& soc->cdp_soc.ol_ops->freq_to_band) 2886 pdev->operating_channel.band = 2887 soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc, 2888 pdev->pdev_id, 2889 freq); 2890 } 2891 2892 ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf); 2893 2894 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM); 2895 ppdu_desc->phy_ppdu_tx_time_us = 2896 HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf); 2897 ppdu_desc->beam_change = 2898 HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf); 2899 ppdu_desc->doppler = 2900 HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf); 2901 ppdu_desc->spatial_reuse = 2902 HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf); 2903 2904 dp_tx_capture_htt_frame_counter(pdev, frame_type); 2905 2906 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US); 2907 ppdu_start_timestamp = *tag_buf; 2908 ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp << 2909 HTT_SHIFT_UPPER_TIMESTAMP) & 2910 HTT_MASK_UPPER_TIMESTAMP); 2911 2912 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + 2913 ppdu_desc->tx_duration; 2914 /* Ack time stamp is same as end time stamp*/ 2915 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; 2916 2917 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + 2918 ppdu_desc->tx_duration; 2919 2920 ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp; 2921 ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp; 2922 ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration; 2923 2924 /* Ack time stamp is same as end time stamp*/ 2925 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; 2926 2927 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR); 2928 ppdu_desc->bss_color = 2929 HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf); 2930 } 2931 2932 /* 2933 * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common 2934 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv 2935 * @ppdu_info: per ppdu tlv structure 2936 * 2937 * return:void 2938 */ 2939 static void dp_process_ppdu_stats_user_common_tlv( 2940 struct dp_pdev *pdev, uint32_t *tag_buf, 2941 struct ppdu_info *ppdu_info) 2942 { 2943 uint16_t peer_id; 2944 struct cdp_tx_completion_ppdu *ppdu_desc; 2945 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 2946 uint8_t curr_user_index = 0; 2947 struct dp_peer *peer; 2948 struct dp_vdev *vdev; 2949 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 2950 2951 ppdu_desc = 2952 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 2953 2954 tag_buf++; 2955 peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); 2956 2957 curr_user_index = 2958 dp_get_ppdu_info_user_index(pdev, 2959 peer_id, ppdu_info); 2960 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 2961 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 2962 2963 ppdu_desc->vdev_id = 2964 HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf); 2965 2966 ppdu_user_desc->peer_id = peer_id; 2967 2968 tag_buf++; 2969 2970 if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) { 2971 ppdu_user_desc->delayed_ba = 1; 2972 ppdu_desc->delayed_ba = 1; 2973 } 2974 2975 if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) { 2976 ppdu_user_desc->is_mcast = true; 2977 ppdu_user_desc->mpdu_tried_mcast = 2978 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); 2979 ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast; 2980 } else { 2981 ppdu_user_desc->mpdu_tried_ucast = 2982 
			HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
	}

	ppdu_user_desc->is_seq_num_valid =
	HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
	tag_buf++;

	ppdu_user_desc->qos_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
	ppdu_user_desc->frame_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

	if (ppdu_user_desc->delayed_ba)
		ppdu_user_desc->mpdu_success = 0;

	tag_buf += 3;

	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
		ppdu_user_desc->ppdu_cookie =
			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
		ppdu_user_desc->is_ppdu_cookie_valid = 1;
	}

	/* returning earlier causes other fields to be left unpopulated */
	if (peer_id == DP_SCAN_PEER_ID) {
		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		if (!vdev)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
	} else {
		peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		if (!peer) {
			/*
			 * FW can send a peer_id that is about to be removed
			 * and has already been removed on the host.
			 * e.g. on disassoc, FW may send PPDU stats with the
			 * peer_id of the previously associated peer even
			 * though that peer has already been deleted.
			 */
			vdev = dp_vdev_get_ref_by_id(pdev->soc,
						     ppdu_desc->vdev_id,
						     DP_MOD_ID_TX_PPDU_STATS);
			if (!vdev)
				return;
			qdf_mem_copy(ppdu_user_desc->mac_addr,
				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
			dp_vdev_unref_delete(pdev->soc, vdev,
					     DP_MOD_ID_TX_PPDU_STATS);
			return;
		}
		qdf_mem_copy(ppdu_user_desc->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
	}
}

/**
 * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
 * @pdev: DP pdev handle
 * @tag_buf: T2H message buffer carrying the user rate TLV
 * @ppdu_info: per ppdu tlv structure
 *
 * return:void
 */
static void
dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
				    uint32_t *tag_buf,
				    struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_vdev *vdev;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
	uint8_t bw;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	if (peer_id == DP_SCAN_PEER_ID) {
		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		if (!vdev)
			return;
		dp_vdev_unref_delete(pdev->soc, vdev,
				     DP_MOD_ID_TX_PPDU_STATS);
	}
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);

	tag_buf += 1;

	ppdu_user_desc->user_pos =
		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
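	/*
	 * Note: user_pos and mu_group_id locate this user within an MU
	 * transmission (group and position inside the group); they are
	 * meaningful only for MU PPDUs.
	 */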
ppdu_user_desc->mu_group_id = 3093 HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf); 3094 3095 tag_buf += 1; 3096 3097 ppdu_user_desc->ru_start = 3098 HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf); 3099 ppdu_user_desc->ru_tones = 3100 (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) - 3101 HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1; 3102 ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones; 3103 3104 tag_buf += 2; 3105 3106 ppdu_user_desc->ppdu_type = 3107 HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf); 3108 3109 tag_buf++; 3110 ppdu_user_desc->tx_rate = *tag_buf; 3111 3112 ppdu_user_desc->ltf_size = 3113 HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf); 3114 ppdu_user_desc->stbc = 3115 HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf); 3116 ppdu_user_desc->he_re = 3117 HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf); 3118 ppdu_user_desc->txbf = 3119 HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf); 3120 bw = HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf); 3121 /* Align bw value as per host data structures */ 3122 if (bw == HTT_PPDU_STATS_BANDWIDTH_320MHZ) 3123 ppdu_user_desc->bw = bw - 3; 3124 else 3125 ppdu_user_desc->bw = bw - 2; 3126 ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf); 3127 ppdu_desc->usr_nss_sum += ppdu_user_desc->nss; 3128 ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf); 3129 ppdu_user_desc->preamble = 3130 HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf); 3131 ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf); 3132 ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf); 3133 ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf); 3134 } 3135 3136 /* 3137 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process 3138 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv 3139 * pdev: DP PDEV handle 3140 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv 3141 * @ppdu_info: per ppdu tlv structure 3142 * 3143 * return:void 3144 */ 3145 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( 3146 struct dp_pdev *pdev, uint32_t *tag_buf, 3147 struct ppdu_info *ppdu_info) 3148 { 3149 htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf = 3150 (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf; 3151 3152 struct cdp_tx_completion_ppdu *ppdu_desc; 3153 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3154 uint8_t curr_user_index = 0; 3155 uint16_t peer_id; 3156 uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS; 3157 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3158 3159 ppdu_desc = 3160 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3161 3162 tag_buf++; 3163 3164 peer_id = 3165 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 3166 3167 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 3168 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3169 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3170 ppdu_user_desc->peer_id = peer_id; 3171 3172 ppdu_user_desc->start_seq = dp_stats_buf->start_seq; 3173 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, 3174 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); 3175 3176 dp_process_ppdu_stats_update_failed_bitmap(pdev, 3177 (void *)ppdu_user_desc, 3178 ppdu_info->ppdu_id, 3179 size); 3180 } 3181 3182 /* 3183 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process 3184 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv 3185 * soc: DP SOC handle 3186 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv 3187 * 
@ppdu_info: per ppdu tlv structure 3188 * 3189 * return:void 3190 */ 3191 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( 3192 struct dp_pdev *pdev, uint32_t *tag_buf, 3193 struct ppdu_info *ppdu_info) 3194 { 3195 htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf = 3196 (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf; 3197 3198 struct cdp_tx_completion_ppdu *ppdu_desc; 3199 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3200 uint8_t curr_user_index = 0; 3201 uint16_t peer_id; 3202 uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS; 3203 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3204 3205 ppdu_desc = 3206 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3207 3208 tag_buf++; 3209 3210 peer_id = 3211 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 3212 3213 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 3214 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3215 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3216 ppdu_user_desc->peer_id = peer_id; 3217 3218 ppdu_user_desc->start_seq = dp_stats_buf->start_seq; 3219 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, 3220 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); 3221 3222 dp_process_ppdu_stats_update_failed_bitmap(pdev, 3223 (void *)ppdu_user_desc, 3224 ppdu_info->ppdu_id, 3225 size); 3226 } 3227 3228 /* 3229 * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process 3230 * htt_ppdu_stats_user_cmpltn_common_tlv 3231 * soc: DP SOC handle 3232 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv 3233 * @ppdu_info: per ppdu tlv structure 3234 * 3235 * return:void 3236 */ 3237 static void dp_process_ppdu_stats_user_cmpltn_common_tlv( 3238 struct dp_pdev *pdev, uint32_t *tag_buf, 3239 struct ppdu_info *ppdu_info) 3240 { 3241 uint16_t peer_id; 3242 struct cdp_tx_completion_ppdu *ppdu_desc; 3243 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3244 uint8_t curr_user_index = 0; 3245 uint8_t bw_iter; 3246 htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf = 3247 (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf; 3248 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3249 3250 ppdu_desc = 3251 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3252 3253 tag_buf++; 3254 peer_id = 3255 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf); 3256 3257 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 3258 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3259 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3260 ppdu_user_desc->peer_id = peer_id; 3261 3262 ppdu_user_desc->completion_status = 3263 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET( 3264 *tag_buf); 3265 3266 ppdu_user_desc->tid = 3267 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf); 3268 3269 tag_buf++; 3270 if (qdf_likely(ppdu_user_desc->completion_status == 3271 HTT_PPDU_STATS_USER_STATUS_OK)) { 3272 ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi; 3273 ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi; 3274 ppdu_user_desc->ack_rssi_valid = 1; 3275 } else { 3276 ppdu_user_desc->ack_rssi_valid = 0; 3277 } 3278 3279 tag_buf++; 3280 3281 ppdu_user_desc->mpdu_success = 3282 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf); 3283 3284 ppdu_user_desc->mpdu_failed = 3285 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) - 3286 ppdu_user_desc->mpdu_success; 3287 3288 tag_buf++; 3289 3290 ppdu_user_desc->long_retries = 3291 
HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf); 3292 3293 ppdu_user_desc->short_retries = 3294 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf); 3295 ppdu_user_desc->retry_mpdus = 3296 ppdu_user_desc->long_retries + ppdu_user_desc->short_retries; 3297 3298 ppdu_user_desc->is_ampdu = 3299 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf); 3300 ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu; 3301 3302 ppdu_desc->resp_type = 3303 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf); 3304 ppdu_desc->mprot_type = 3305 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf); 3306 ppdu_desc->rts_success = 3307 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf); 3308 ppdu_desc->rts_failure = 3309 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf); 3310 ppdu_user_desc->pream_punct = 3311 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf); 3312 3313 ppdu_info->compltn_common_tlv++; 3314 3315 /* 3316 * MU BAR may send request to n users but we may received ack only from 3317 * m users. To have count of number of users respond back, we have a 3318 * separate counter bar_num_users per PPDU that get increment for every 3319 * htt_ppdu_stats_user_cmpltn_common_tlv 3320 */ 3321 ppdu_desc->bar_num_users++; 3322 3323 tag_buf++; 3324 for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) { 3325 ppdu_user_desc->rssi_chain[bw_iter] = 3326 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf); 3327 tag_buf++; 3328 } 3329 3330 ppdu_user_desc->sa_tx_antenna = 3331 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf); 3332 3333 tag_buf++; 3334 ppdu_user_desc->sa_is_training = 3335 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf); 3336 if (ppdu_user_desc->sa_is_training) { 3337 ppdu_user_desc->sa_goodput = 3338 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf); 3339 } 3340 3341 tag_buf++; 3342 for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) { 3343 ppdu_user_desc->sa_max_rates[bw_iter] = 3344 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]); 3345 } 3346 3347 tag_buf += CDP_NUM_SA_BW; 3348 ppdu_user_desc->current_rate_per = 3349 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf); 3350 } 3351 3352 /* 3353 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process 3354 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv 3355 * pdev: DP PDEV handle 3356 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv 3357 * @ppdu_info: per ppdu tlv structure 3358 * 3359 * return:void 3360 */ 3361 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( 3362 struct dp_pdev *pdev, uint32_t *tag_buf, 3363 struct ppdu_info *ppdu_info) 3364 { 3365 htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf = 3366 (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf; 3367 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3368 struct cdp_tx_completion_ppdu *ppdu_desc; 3369 uint8_t curr_user_index = 0; 3370 uint16_t peer_id; 3371 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3372 3373 ppdu_desc = 3374 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3375 3376 tag_buf++; 3377 3378 peer_id = 3379 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 3380 3381 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 3382 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3383 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3384 
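	/*
	 * The fields copied below record the Block Ack window reported by
	 * the target for this user: ba_seq_no is the starting sequence
	 * number and ba_bitmap covers a 64-frame window
	 * (CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32 sequence numbers).
	 */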
ppdu_user_desc->peer_id = peer_id; 3385 3386 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; 3387 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, 3388 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); 3389 ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32; 3390 } 3391 3392 /* 3393 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process 3394 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv 3395 * pdev: DP PDEV handle 3396 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv 3397 * @ppdu_info: per ppdu tlv structure 3398 * 3399 * return:void 3400 */ 3401 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( 3402 struct dp_pdev *pdev, uint32_t *tag_buf, 3403 struct ppdu_info *ppdu_info) 3404 { 3405 htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf = 3406 (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf; 3407 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3408 struct cdp_tx_completion_ppdu *ppdu_desc; 3409 uint8_t curr_user_index = 0; 3410 uint16_t peer_id; 3411 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3412 3413 ppdu_desc = 3414 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3415 3416 tag_buf++; 3417 3418 peer_id = 3419 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); 3420 3421 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 3422 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3423 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3424 ppdu_user_desc->peer_id = peer_id; 3425 3426 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; 3427 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, 3428 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); 3429 ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32; 3430 } 3431 3432 /* 3433 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process 3434 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv 3435 * pdev: DP PDE handle 3436 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv 3437 * @ppdu_info: per ppdu tlv structure 3438 * 3439 * return:void 3440 */ 3441 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( 3442 struct dp_pdev *pdev, uint32_t *tag_buf, 3443 struct ppdu_info *ppdu_info) 3444 { 3445 uint16_t peer_id; 3446 struct cdp_tx_completion_ppdu *ppdu_desc; 3447 struct cdp_tx_completion_ppdu_user *ppdu_user_desc; 3448 uint8_t curr_user_index = 0; 3449 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3450 3451 ppdu_desc = 3452 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 3453 3454 tag_buf += 2; 3455 peer_id = 3456 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf); 3457 3458 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); 3459 ppdu_user_desc = &ppdu_desc->user[curr_user_index]; 3460 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); 3461 if (!ppdu_user_desc->ack_ba_tlv) { 3462 ppdu_user_desc->ack_ba_tlv = 1; 3463 } else { 3464 pdev->stats.ack_ba_comes_twice++; 3465 return; 3466 } 3467 3468 ppdu_user_desc->peer_id = peer_id; 3469 3470 tag_buf++; 3471 /* not to update ppdu_desc->tid from this TLV */ 3472 ppdu_user_desc->num_mpdu = 3473 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf); 3474 3475 ppdu_user_desc->num_msdu = 3476 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf); 3477 3478 ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu; 3479 3480 tag_buf++; 3481 ppdu_user_desc->start_seq = 3482 
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
								*tag_buf);

	tag_buf++;
	ppdu_user_desc->success_bytes = *tag_buf;

	/* increase ack ba tlv counter on successful mpdu */
	if (ppdu_user_desc->num_mpdu)
		ppdu_info->ack_ba_tlv++;

	if (ppdu_user_desc->ba_size == 0) {
		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
		ppdu_user_desc->ba_bitmap[0] = 1;
		ppdu_user_desc->ba_size = 1;
	}
}

/*
 * dp_process_ppdu_stats_user_common_array_tlv: Process
 * htt_ppdu_stats_user_common_array_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_common_array_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_common_array_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint32_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct htt_tx_ppdu_stats_info *dp_stats_buf;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
	tag_buf += 3;
	peer_id =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);

	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid peer");
		return;
	}

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;

	tag_buf++;

	ppdu_user_desc->success_msdus =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
	ppdu_user_desc->retry_msdus =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
	tag_buf++;
	ppdu_user_desc->failed_msdus =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
}

/*
 * dp_process_ppdu_stats_user_compltn_flush_tlv: Process
 * htt_ppdu_stats_flush_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * return:void
 */
static void
dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
					     uint32_t *tag_buf,
					     struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint32_t peer_id;
	uint8_t tid;
	struct dp_peer *peer;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_peer *mon_peer = NULL;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(ppdu_info->nbuf);
	ppdu_desc->is_flush = 1;

	tag_buf++;
	ppdu_desc->drop_reason = *tag_buf;

	tag_buf++;
	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);

	tag_buf++;
	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);

	ppdu_desc->num_users = 1;
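	/*
	 * A flush TLV describes a single flushed peer/TID queue, so only
	 * user[0] is populated below.
	 */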
ppdu_desc->user[0].peer_id = peer_id; 3592 ppdu_desc->user[0].tid = tid; 3593 3594 ppdu_desc->queue_type = 3595 HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf); 3596 3597 peer = dp_peer_get_ref_by_id(pdev->soc, peer_id, 3598 DP_MOD_ID_TX_PPDU_STATS); 3599 if (!peer) 3600 goto add_ppdu_to_sched_list; 3601 3602 if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) { 3603 mon_peer = peer->monitor_peer; 3604 DP_STATS_INC(mon_peer, 3605 tx.excess_retries_per_ac[TID_TO_WME_AC(tid)], 3606 ppdu_desc->num_msdu); 3607 } 3608 3609 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 3610 3611 add_ppdu_to_sched_list: 3612 ppdu_info->done = 1; 3613 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem); 3614 mon_pdev->list_depth--; 3615 TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info, 3616 ppdu_info_list_elem); 3617 mon_pdev->sched_comp_list_depth++; 3618 } 3619 3620 /** 3621 * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv 3622 * Here we are not going to process the buffer. 3623 * @pdev: DP PDEV handle 3624 * @ppdu_info: per ppdu tlv structure 3625 * 3626 * return:void 3627 */ 3628 static void 3629 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev, 3630 struct ppdu_info *ppdu_info) 3631 { 3632 struct cdp_tx_completion_ppdu *ppdu_desc; 3633 struct dp_peer *peer; 3634 uint8_t num_users; 3635 uint8_t i; 3636 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 3637 3638 ppdu_desc = (struct cdp_tx_completion_ppdu *) 3639 qdf_nbuf_data(ppdu_info->nbuf); 3640 3641 num_users = ppdu_desc->bar_num_users; 3642 3643 for (i = 0; i < num_users; i++) { 3644 if (ppdu_desc->user[i].user_pos == 0) { 3645 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { 3646 /* update phy mode for bar frame */ 3647 ppdu_desc->phy_mode = 3648 ppdu_desc->user[i].preamble; 3649 ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs; 3650 break; 3651 } 3652 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) { 3653 ppdu_desc->frame_ctrl = 3654 ppdu_desc->user[i].frame_ctrl; 3655 break; 3656 } 3657 } 3658 } 3659 3660 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && 3661 ppdu_desc->delayed_ba) { 3662 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); 3663 3664 for (i = 0; i < ppdu_desc->num_users; i++) { 3665 struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu; 3666 uint64_t start_tsf; 3667 uint64_t end_tsf; 3668 uint32_t ppdu_id; 3669 struct dp_mon_peer *mon_peer; 3670 3671 ppdu_id = ppdu_desc->ppdu_id; 3672 peer = dp_peer_get_ref_by_id 3673 (pdev->soc, ppdu_desc->user[i].peer_id, 3674 DP_MOD_ID_TX_PPDU_STATS); 3675 /** 3676 * This check is to make sure peer is not deleted 3677 * after processing the TLVs. 
3678 */ 3679 if (!peer) 3680 continue; 3681 3682 mon_peer = peer->monitor_peer; 3683 delay_ppdu = &mon_peer->delayed_ba_ppdu_stats; 3684 start_tsf = ppdu_desc->ppdu_start_timestamp; 3685 end_tsf = ppdu_desc->ppdu_end_timestamp; 3686 /** 3687 * save delayed ba user info 3688 */ 3689 if (ppdu_desc->user[i].delayed_ba) { 3690 dp_peer_copy_delay_stats(peer, 3691 &ppdu_desc->user[i], 3692 ppdu_id); 3693 mon_peer->last_delayed_ba_ppduid = ppdu_id; 3694 delay_ppdu->ppdu_start_timestamp = start_tsf; 3695 delay_ppdu->ppdu_end_timestamp = end_tsf; 3696 } 3697 ppdu_desc->user[i].peer_last_delayed_ba = 3698 mon_peer->last_delayed_ba; 3699 3700 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 3701 3702 if (ppdu_desc->user[i].delayed_ba && 3703 !ppdu_desc->user[i].debug_copied) { 3704 QDF_TRACE(QDF_MODULE_ID_TXRX, 3705 QDF_TRACE_LEVEL_INFO_MED, 3706 "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n", 3707 __func__, __LINE__, 3708 ppdu_desc->ppdu_id, 3709 ppdu_desc->bar_ppdu_id, 3710 ppdu_desc->num_users, 3711 i, 3712 ppdu_desc->htt_frame_type); 3713 } 3714 } 3715 } 3716 3717 /* 3718 * when frame type is BAR and STATS_COMMON_TLV is set 3719 * copy the store peer delayed info to BAR status 3720 */ 3721 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { 3722 for (i = 0; i < ppdu_desc->bar_num_users; i++) { 3723 struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu; 3724 uint64_t start_tsf; 3725 uint64_t end_tsf; 3726 struct dp_mon_peer *mon_peer; 3727 3728 peer = dp_peer_get_ref_by_id 3729 (pdev->soc, 3730 ppdu_desc->user[i].peer_id, 3731 DP_MOD_ID_TX_PPDU_STATS); 3732 /** 3733 * This check is to make sure peer is not deleted 3734 * after processing the TLVs. 3735 */ 3736 if (!peer) 3737 continue; 3738 3739 mon_peer = peer->monitor_peer; 3740 if (ppdu_desc->user[i].completion_status != 3741 HTT_PPDU_STATS_USER_STATUS_OK) { 3742 dp_peer_unref_delete(peer, 3743 DP_MOD_ID_TX_PPDU_STATS); 3744 continue; 3745 } 3746 3747 delay_ppdu = &mon_peer->delayed_ba_ppdu_stats; 3748 start_tsf = delay_ppdu->ppdu_start_timestamp; 3749 end_tsf = delay_ppdu->ppdu_end_timestamp; 3750 3751 if (mon_peer->last_delayed_ba) { 3752 dp_peer_copy_stats_to_bar(peer, 3753 &ppdu_desc->user[i]); 3754 ppdu_desc->ppdu_id = 3755 mon_peer->last_delayed_ba_ppduid; 3756 ppdu_desc->ppdu_start_timestamp = start_tsf; 3757 ppdu_desc->ppdu_end_timestamp = end_tsf; 3758 } 3759 ppdu_desc->user[i].peer_last_delayed_ba = 3760 mon_peer->last_delayed_ba; 3761 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 3762 } 3763 } 3764 3765 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem); 3766 mon_pdev->list_depth--; 3767 TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info, 3768 ppdu_info_list_elem); 3769 mon_pdev->sched_comp_list_depth++; 3770 } 3771 3772 /** 3773 * dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU 3774 * 3775 * If the TLV length sent as part of PPDU TLV is less that expected size i.e 3776 * size of corresponding data structure, pad the remaining bytes with zeros 3777 * and continue processing the TLVs 3778 * 3779 * @pdev: DP pdev handle 3780 * @tag_buf: TLV buffer 3781 * @tlv_expected_size: Expected size of Tag 3782 * @tlv_len: TLV length received from FW 3783 * 3784 * Return: Pointer to updated TLV 3785 */ 3786 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev, 3787 uint32_t *tag_buf, 3788 uint16_t tlv_expected_size, 3789 uint16_t tlv_len) 3790 { 3791 uint32_t *tlv_desc = tag_buf; 3792 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 
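/*
 * Worked example (hypothetical sizes): if the host structure for a TLV
 * is 40 bytes but firmware reported tlv_len as 32, the 32 received
 * bytes are copied into mon_pdev->ppdu_tlv_buf, the trailing 8 bytes
 * stay zeroed, and the caller parses that scratch buffer instead of
 * the original tag_buf.
 */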
3793 3794 qdf_assert_always(tlv_len != 0); 3795 3796 if (tlv_len < tlv_expected_size) { 3797 qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size); 3798 qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len); 3799 tlv_desc = mon_pdev->ppdu_tlv_buf; 3800 } 3801 3802 return tlv_desc; 3803 } 3804 3805 /** 3806 * dp_process_ppdu_tag(): Function to process the PPDU TLVs 3807 * @pdev: DP pdev handle 3808 * @tag_buf: TLV buffer 3809 * @tlv_len: length of tlv 3810 * @ppdu_info: per ppdu tlv structure 3811 * 3812 * return: void 3813 */ 3814 static void dp_process_ppdu_tag(struct dp_pdev *pdev, 3815 uint32_t *tag_buf, 3816 uint32_t tlv_len, 3817 struct ppdu_info *ppdu_info) 3818 { 3819 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); 3820 uint16_t tlv_expected_size; 3821 uint32_t *tlv_desc; 3822 3823 switch (tlv_type) { 3824 case HTT_PPDU_STATS_COMMON_TLV: 3825 tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv); 3826 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3827 tlv_expected_size, tlv_len); 3828 dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info); 3829 break; 3830 case HTT_PPDU_STATS_USR_COMMON_TLV: 3831 tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv); 3832 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3833 tlv_expected_size, tlv_len); 3834 dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc, 3835 ppdu_info); 3836 break; 3837 case HTT_PPDU_STATS_USR_RATE_TLV: 3838 tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv); 3839 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3840 tlv_expected_size, tlv_len); 3841 dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc, 3842 ppdu_info); 3843 break; 3844 case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV: 3845 tlv_expected_size = 3846 sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv); 3847 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3848 tlv_expected_size, tlv_len); 3849 dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( 3850 pdev, tlv_desc, ppdu_info); 3851 break; 3852 case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV: 3853 tlv_expected_size = 3854 sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv); 3855 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3856 tlv_expected_size, tlv_len); 3857 dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( 3858 pdev, tlv_desc, ppdu_info); 3859 break; 3860 case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV: 3861 tlv_expected_size = 3862 sizeof(htt_ppdu_stats_user_cmpltn_common_tlv); 3863 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3864 tlv_expected_size, tlv_len); 3865 dp_process_ppdu_stats_user_cmpltn_common_tlv( 3866 pdev, tlv_desc, ppdu_info); 3867 break; 3868 case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV: 3869 tlv_expected_size = 3870 sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv); 3871 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3872 tlv_expected_size, tlv_len); 3873 dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( 3874 pdev, tlv_desc, ppdu_info); 3875 break; 3876 case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV: 3877 tlv_expected_size = 3878 sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv); 3879 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3880 tlv_expected_size, tlv_len); 3881 dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( 3882 pdev, tlv_desc, ppdu_info); 3883 break; 3884 case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV: 3885 tlv_expected_size = 3886 sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv); 3887 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3888 tlv_expected_size, tlv_len); 3889 
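/*
 * Note: unlike the other TLVs, which firmware sends in order, the
 * ACK/BA status TLV is derived from hardware completion and can
 * arrive out of order; the done/bitmap checks in dp_htt_process_tlv()
 * and dp_get_ppdu_desc() account for this.
 */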
dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( 3890 pdev, tlv_desc, ppdu_info); 3891 break; 3892 case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV: 3893 tlv_expected_size = 3894 sizeof(htt_ppdu_stats_usr_common_array_tlv_v); 3895 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3896 tlv_expected_size, tlv_len); 3897 dp_process_ppdu_stats_user_common_array_tlv( 3898 pdev, tlv_desc, ppdu_info); 3899 break; 3900 case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV: 3901 tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv); 3902 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, 3903 tlv_expected_size, tlv_len); 3904 dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc, 3905 ppdu_info); 3906 break; 3907 case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV: 3908 dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info); 3909 break; 3910 default: 3911 break; 3912 } 3913 } 3914 3915 #ifdef WLAN_ATF_ENABLE 3916 static void 3917 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev, 3918 struct cdp_tx_completion_ppdu *ppdu_desc, 3919 struct cdp_tx_completion_ppdu_user *user) 3920 { 3921 uint32_t nss_ru_width_sum = 0; 3922 struct dp_mon_pdev *mon_pdev = NULL; 3923 3924 if (!pdev || !ppdu_desc || !user) 3925 return; 3926 3927 mon_pdev = pdev->monitor_pdev; 3928 3929 if (!mon_pdev || !mon_pdev->dp_atf_stats_enable) 3930 return; 3931 3932 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA) 3933 return; 3934 3935 nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum; 3936 if (!nss_ru_width_sum) 3937 nss_ru_width_sum = 1; 3938 3939 /** 3940 * For SU-MIMO PPDU phy Tx time is same for the single user. 3941 * For MU-MIMO phy Tx time is calculated per user as below 3942 * user phy tx time = 3943 * Entire PPDU duration * MU Ratio * OFDMA Ratio 3944 * MU Ratio = usr_nss / Sum_of_nss_of_all_users 3945 * OFDMA_ratio = usr_ru_width / Sum_of_ru_width_of_all_users 3946 * usr_ru_widt = ru_end – ru_start + 1 3947 */ 3948 if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) { 3949 user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us; 3950 } else { 3951 user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us * 3952 user->nss * user->ru_tones) / nss_ru_width_sum; 3953 } 3954 } 3955 #else 3956 static void 3957 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev, 3958 struct cdp_tx_completion_ppdu *ppdu_desc, 3959 struct cdp_tx_completion_ppdu_user *user) 3960 { 3961 } 3962 #endif 3963 3964 /** 3965 * dp_ppdu_desc_user_stats_update(): Function to update TX user stats 3966 * @pdev: DP pdev handle 3967 * @ppdu_info: per PPDU TLV descriptor 3968 * 3969 * return: void 3970 */ 3971 void 3972 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev, 3973 struct ppdu_info *ppdu_info) 3974 { 3975 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 3976 struct dp_peer *peer = NULL; 3977 uint32_t tlv_bitmap_expected; 3978 uint32_t tlv_bitmap_default; 3979 uint16_t i; 3980 uint32_t num_users; 3981 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 3982 3983 ppdu_desc = (struct cdp_tx_completion_ppdu *) 3984 qdf_nbuf_data(ppdu_info->nbuf); 3985 3986 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR) 3987 ppdu_desc->ppdu_id = ppdu_info->ppdu_id; 3988 3989 tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; 3990 if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode || 3991 mon_pdev->tx_capture_enabled) { 3992 if (ppdu_info->is_ampdu) 3993 tlv_bitmap_expected = 3994 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( 3995 ppdu_info->tlv_bitmap); 3996 } 3997 3998 tlv_bitmap_default = tlv_bitmap_expected; 3999 4000 if 
(ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { 4001 num_users = ppdu_desc->bar_num_users; 4002 ppdu_desc->num_users = ppdu_desc->bar_num_users; 4003 } else { 4004 num_users = ppdu_desc->num_users; 4005 } 4006 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); 4007 4008 for (i = 0; i < num_users; i++) { 4009 ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu; 4010 ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu; 4011 4012 peer = dp_peer_get_ref_by_id(pdev->soc, 4013 ppdu_desc->user[i].peer_id, 4014 DP_MOD_ID_TX_PPDU_STATS); 4015 /** 4016 * This check is to make sure peer is not deleted 4017 * after processing the TLVs. 4018 */ 4019 if (!peer) 4020 continue; 4021 4022 ppdu_desc->user[i].is_bss_peer = peer->bss_peer; 4023 /* 4024 * different frame like DATA, BAR or CTRL has different 4025 * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we 4026 * receive other tlv in-order/sequential from fw. 4027 * Since ACK_BA_STATUS TLV come from Hardware it is 4028 * asynchronous So we need to depend on some tlv to confirm 4029 * all tlv is received for a ppdu. 4030 * So we depend on both SCHED_CMD_STATUS_TLV and 4031 * ACK_BA_STATUS_TLV. for failure packet we won't get 4032 * ACK_BA_STATUS_TLV. 4033 */ 4034 if (!(ppdu_info->tlv_bitmap & 4035 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) || 4036 (!(ppdu_info->tlv_bitmap & 4037 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) && 4038 (ppdu_desc->user[i].completion_status == 4039 HTT_PPDU_STATS_USER_STATUS_OK))) { 4040 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 4041 continue; 4042 } 4043 4044 /** 4045 * Update tx stats for data frames having Qos as well as 4046 * non-Qos data tid 4047 */ 4048 4049 if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX || 4050 (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) || 4051 (ppdu_desc->htt_frame_type == 4052 HTT_STATS_FTYPE_SGEN_QOS_NULL) || 4053 ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) && 4054 (ppdu_desc->num_mpdu > 1))) && 4055 (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) { 4056 dp_tx_stats_update(pdev, peer, 4057 &ppdu_desc->user[i], 4058 ppdu_desc->ack_rssi); 4059 dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]); 4060 } 4061 4062 dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc, 4063 &ppdu_desc->user[i]); 4064 4065 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); 4066 tlv_bitmap_expected = tlv_bitmap_default; 4067 } 4068 } 4069 4070 #if !defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(QCA_MONITOR_2_0_SUPPORT) 4071 /* 4072 * dp_tx_ppdu_desc_notify() - Notify to upper layer about PPDU via WDI 4073 * 4074 * @pdev: Datapath pdev handle 4075 * @nbuf: Buffer to be delivered to upper layer 4076 * 4077 * Return: void 4078 */ 4079 static void dp_tx_ppdu_desc_notify(struct dp_pdev *pdev, qdf_nbuf_t nbuf) 4080 { 4081 struct dp_soc *soc = pdev->soc; 4082 struct dp_mon_ops *mon_ops = NULL; 4083 4084 mon_ops = dp_mon_ops_get(soc); 4085 if (mon_ops && mon_ops->mon_ppdu_desc_notify) 4086 mon_ops->mon_ppdu_desc_notify(pdev, nbuf); 4087 else 4088 qdf_nbuf_free(nbuf); 4089 } 4090 4091 void dp_ppdu_desc_deliver(struct dp_pdev *pdev, 4092 struct ppdu_info *ppdu_info) 4093 { 4094 struct ppdu_info *s_ppdu_info = NULL; 4095 struct ppdu_info *ppdu_info_next = NULL; 4096 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4097 qdf_nbuf_t nbuf; 4098 uint32_t time_delta = 0; 4099 bool starved = 0; 4100 bool matched = 0; 4101 bool recv_ack_ba_done = 0; 4102 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4103 4104 if (ppdu_info->tlv_bitmap & 4105 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && 
4106 ppdu_info->done) 4107 recv_ack_ba_done = 1; 4108 4109 mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid; 4110 4111 s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list); 4112 4113 TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list, 4114 ppdu_info_list_elem, ppdu_info_next) { 4115 if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32) 4116 time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) + 4117 ppdu_info->tsf_l32; 4118 else 4119 time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32; 4120 4121 if (!s_ppdu_info->done && !recv_ack_ba_done) { 4122 if (time_delta < MAX_SCHED_STARVE) { 4123 dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]", 4124 pdev->pdev_id, 4125 s_ppdu_info->ppdu_id, 4126 s_ppdu_info->sched_cmdid, 4127 s_ppdu_info->tlv_bitmap, 4128 s_ppdu_info->tsf_l32, 4129 s_ppdu_info->done); 4130 break; 4131 } 4132 starved = 1; 4133 } 4134 4135 mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid; 4136 TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info, 4137 ppdu_info_list_elem); 4138 mon_pdev->sched_comp_list_depth--; 4139 4140 nbuf = s_ppdu_info->nbuf; 4141 qdf_assert_always(nbuf); 4142 ppdu_desc = (struct cdp_tx_completion_ppdu *) 4143 qdf_nbuf_data(nbuf); 4144 ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap; 4145 4146 if (starved) { 4147 dp_mon_err("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n", 4148 ppdu_desc->frame_ctrl, 4149 ppdu_desc->htt_frame_type, 4150 ppdu_desc->tlv_bitmap, 4151 ppdu_desc->user[0].completion_status); 4152 starved = 0; 4153 } 4154 4155 if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id && 4156 ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid) 4157 matched = 1; 4158 4159 dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info); 4160 4161 qdf_mem_free(s_ppdu_info); 4162 4163 dp_tx_ppdu_desc_notify(pdev, nbuf); 4164 4165 if (matched) 4166 break; 4167 } 4168 } 4169 #endif 4170 4171 /* 4172 * dp_tx_ppdu_desc_deliver() - Deliver PPDU desc to upper layer 4173 * 4174 * @pdev: Datapath pdev handle 4175 * @ppdu_info: per PPDU TLV descriptor 4176 * 4177 * Return: void 4178 */ 4179 static void dp_tx_ppdu_desc_deliver(struct dp_pdev *pdev, 4180 struct ppdu_info *ppdu_info) 4181 { 4182 struct dp_soc *soc = pdev->soc; 4183 struct dp_mon_ops *mon_ops = NULL; 4184 4185 mon_ops = dp_mon_ops_get(soc); 4186 4187 if (mon_ops && mon_ops->mon_ppdu_desc_deliver) { 4188 mon_ops->mon_ppdu_desc_deliver(pdev, ppdu_info); 4189 } else { 4190 qdf_nbuf_free(ppdu_info->nbuf); 4191 ppdu_info->nbuf = NULL; 4192 qdf_mem_free(ppdu_info); 4193 } 4194 } 4195 4196 /** 4197 * dp_get_ppdu_desc(): Function to allocate new PPDU status 4198 * desc for new ppdu id 4199 * @pdev: DP pdev handle 4200 * @ppdu_id: PPDU unique identifier 4201 * @tlv_type: TLV type received 4202 * @tsf_l32: timestamp received along with ppdu stats indication header 4203 * @max_users: Maximum user for that particular ppdu 4204 * 4205 * return: ppdu_info per ppdu tlv structure 4206 */ 4207 static 4208 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id, 4209 uint8_t tlv_type, uint32_t tsf_l32, 4210 uint8_t max_users) 4211 { 4212 struct ppdu_info *ppdu_info = NULL; 4213 struct ppdu_info *s_ppdu_info = NULL; 4214 struct ppdu_info *ppdu_info_next = NULL; 4215 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4216 uint32_t size = 0; 4217 struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL; 4218 struct cdp_tx_completion_ppdu_user *tmp_user; 4219 uint32_t time_delta; 4220 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4221 4222 /* 4223 * Find ppdu_id 
node exists or not 4224 */ 4225 TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list, 4226 ppdu_info_list_elem, ppdu_info_next) { 4227 if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) { 4228 if (ppdu_info->tsf_l32 > tsf_l32) 4229 time_delta = (MAX_TSF_32 - 4230 ppdu_info->tsf_l32) + tsf_l32; 4231 else 4232 time_delta = tsf_l32 - ppdu_info->tsf_l32; 4233 4234 if (time_delta > WRAP_DROP_TSF_DELTA) { 4235 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, 4236 ppdu_info, ppdu_info_list_elem); 4237 mon_pdev->list_depth--; 4238 pdev->stats.ppdu_wrap_drop++; 4239 tmp_ppdu_desc = 4240 (struct cdp_tx_completion_ppdu *) 4241 qdf_nbuf_data(ppdu_info->nbuf); 4242 tmp_user = &tmp_ppdu_desc->user[0]; 4243 dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n", 4244 ppdu_info->ppdu_id, 4245 ppdu_info->tsf_l32, 4246 ppdu_info->tlv_bitmap, 4247 tmp_user->completion_status, 4248 ppdu_info->compltn_common_tlv, 4249 ppdu_info->ack_ba_tlv, 4250 ppdu_id, tsf_l32, 4251 tlv_type); 4252 qdf_nbuf_free(ppdu_info->nbuf); 4253 ppdu_info->nbuf = NULL; 4254 qdf_mem_free(ppdu_info); 4255 } else { 4256 break; 4257 } 4258 } 4259 } 4260 4261 /* 4262 * check if it is ack ba tlv and if it is not there in ppdu info 4263 * list then check it in sched completion ppdu list 4264 */ 4265 if (!ppdu_info && 4266 tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) { 4267 TAILQ_FOREACH(s_ppdu_info, 4268 &mon_pdev->sched_comp_ppdu_list, 4269 ppdu_info_list_elem) { 4270 if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) { 4271 if (s_ppdu_info->tsf_l32 > tsf_l32) 4272 time_delta = (MAX_TSF_32 - 4273 s_ppdu_info->tsf_l32) + 4274 tsf_l32; 4275 else 4276 time_delta = tsf_l32 - 4277 s_ppdu_info->tsf_l32; 4278 if (time_delta < WRAP_DROP_TSF_DELTA) { 4279 ppdu_info = s_ppdu_info; 4280 break; 4281 } 4282 } else { 4283 /* 4284 * ACK BA STATUS TLV comes sequential order 4285 * if we received ack ba status tlv for second 4286 * ppdu and first ppdu is still waiting for 4287 * ACK BA STATUS TLV. Based on fw comment 4288 * we won't receive it tlv later. So we can 4289 * set ppdu info done. 4290 */ 4291 if (s_ppdu_info) 4292 s_ppdu_info->done = 1; 4293 } 4294 } 4295 } 4296 4297 if (ppdu_info) { 4298 if (ppdu_info->tlv_bitmap & (1 << tlv_type)) { 4299 /** 4300 * if we get tlv_type that is already been processed 4301 * for ppdu, that means we got a new ppdu with same 4302 * ppdu id. Hence Flush the older ppdu 4303 * for MUMIMO and OFDMA, In a PPDU we have 4304 * multiple user with same tlv types. 
tlv bitmap is 4305 * used to check whether SU or MU_MIMO/OFDMA 4306 */ 4307 if (!(ppdu_info->tlv_bitmap & 4308 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) 4309 return ppdu_info; 4310 4311 ppdu_desc = (struct cdp_tx_completion_ppdu *) 4312 qdf_nbuf_data(ppdu_info->nbuf); 4313 4314 /** 4315 * apart from ACK BA STATUS TLV rest all comes in order 4316 * so if tlv type not ACK BA STATUS TLV we can deliver 4317 * ppdu_info 4318 */ 4319 if ((tlv_type == 4320 HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && 4321 (ppdu_desc->htt_frame_type == 4322 HTT_STATS_FTYPE_SGEN_MU_BAR)) 4323 return ppdu_info; 4324 4325 dp_tx_ppdu_desc_deliver(pdev, ppdu_info); 4326 } else { 4327 return ppdu_info; 4328 } 4329 } 4330 4331 /** 4332 * Flush the head ppdu descriptor if ppdu desc list reaches max 4333 * threshold 4334 */ 4335 if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 4336 ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list); 4337 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, 4338 ppdu_info, ppdu_info_list_elem); 4339 mon_pdev->list_depth--; 4340 pdev->stats.ppdu_drop++; 4341 qdf_nbuf_free(ppdu_info->nbuf); 4342 ppdu_info->nbuf = NULL; 4343 qdf_mem_free(ppdu_info); 4344 } 4345 4346 size = sizeof(struct cdp_tx_completion_ppdu) + 4347 (max_users * sizeof(struct cdp_tx_completion_ppdu_user)); 4348 4349 /* 4350 * Allocate new ppdu_info node 4351 */ 4352 ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info)); 4353 if (!ppdu_info) 4354 return NULL; 4355 4356 ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size, 4357 0, 4, TRUE); 4358 if (!ppdu_info->nbuf) { 4359 qdf_mem_free(ppdu_info); 4360 return NULL; 4361 } 4362 4363 ppdu_info->ppdu_desc = 4364 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); 4365 qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size); 4366 4367 if (!qdf_nbuf_put_tail(ppdu_info->nbuf, size)) { 4368 dp_mon_err("No tailroom for HTT PPDU"); 4369 qdf_nbuf_free(ppdu_info->nbuf); 4370 ppdu_info->nbuf = NULL; 4371 ppdu_info->last_user = 0; 4372 qdf_mem_free(ppdu_info); 4373 return NULL; 4374 } 4375 4376 ppdu_info->ppdu_desc->max_users = max_users; 4377 ppdu_info->tsf_l32 = tsf_l32; 4378 /** 4379 * No lock is needed because all PPDU TLVs are processed in 4380 * same context and this list is updated in same context 4381 */ 4382 TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info, 4383 ppdu_info_list_elem); 4384 mon_pdev->list_depth++; 4385 return ppdu_info; 4386 } 4387 4388 /** 4389 * dp_htt_process_tlv(): Function to process each PPDU TLVs 4390 * @pdev: DP pdev handle 4391 * @htt_t2h_msg: HTT target to host message 4392 * 4393 * return: ppdu_info per ppdu tlv structure 4394 */ 4395 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev, 4396 qdf_nbuf_t htt_t2h_msg) 4397 { 4398 uint32_t length; 4399 uint32_t ppdu_id; 4400 uint8_t tlv_type; 4401 uint32_t tlv_length, tlv_bitmap_expected; 4402 uint8_t *tlv_buf; 4403 struct ppdu_info *ppdu_info = NULL; 4404 struct cdp_tx_completion_ppdu *ppdu_desc = NULL; 4405 uint8_t max_users = CDP_MU_MAX_USERS; 4406 uint32_t tsf_l32; 4407 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4408 4409 uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg); 4410 4411 length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word); 4412 4413 msg_word = msg_word + 1; 4414 ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word); 4415 4416 msg_word = msg_word + 1; 4417 tsf_l32 = (uint32_t)(*msg_word); 4418 4419 msg_word = msg_word + 2; 4420 while (length > 0) { 4421 tlv_buf = (uint8_t *)msg_word; 4422 tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word); 4423 tlv_length = 
HTT_STATS_TLV_LENGTH_GET(*msg_word); 4424 if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG)) 4425 pdev->stats.ppdu_stats_counter[tlv_type]++; 4426 4427 if (tlv_length == 0) 4428 break; 4429 4430 tlv_length += HTT_TLV_HDR_LEN; 4431 4432 /** 4433 * Not allocating separate ppdu descriptor for MGMT Payload 4434 * TLV as this is sent as separate WDI indication and it 4435 * doesn't contain any ppdu information 4436 */ 4437 if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) { 4438 mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf; 4439 mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id; 4440 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 4441 HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET 4442 (*(msg_word + 1)); 4443 msg_word = 4444 (uint32_t *)((uint8_t *)tlv_buf + tlv_length); 4445 length -= (tlv_length); 4446 continue; 4447 } 4448 4449 /* 4450 * retrieve max_users if it's USERS_INFO, 4451 * else, it's 1 for COMPLTN_FLUSH, 4452 * else, use CDP_MU_MAX_USERS 4453 */ 4454 if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) { 4455 max_users = 4456 HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1)); 4457 } else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) { 4458 max_users = 1; 4459 } 4460 4461 ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type, 4462 tsf_l32, max_users); 4463 if (!ppdu_info) 4464 return NULL; 4465 4466 ppdu_info->ppdu_id = ppdu_id; 4467 ppdu_info->tlv_bitmap |= (1 << tlv_type); 4468 4469 dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info); 4470 4471 /** 4472 * Increment pdev level tlv count to monitor 4473 * missing TLVs 4474 */ 4475 mon_pdev->tlv_count++; 4476 ppdu_info->last_tlv_cnt = mon_pdev->tlv_count; 4477 msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length); 4478 length -= (tlv_length); 4479 } 4480 4481 if (!ppdu_info) 4482 return NULL; 4483 4484 mon_pdev->last_ppdu_id = ppdu_id; 4485 4486 tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; 4487 4488 if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode || 4489 mon_pdev->tx_capture_enabled) { 4490 if (ppdu_info->is_ampdu) 4491 tlv_bitmap_expected = 4492 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( 4493 ppdu_info->tlv_bitmap); 4494 } 4495 4496 ppdu_desc = ppdu_info->ppdu_desc; 4497 4498 if (!ppdu_desc) 4499 return NULL; 4500 4501 if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status != 4502 HTT_PPDU_STATS_USER_STATUS_OK) { 4503 tlv_bitmap_expected = tlv_bitmap_expected & 0xFF; 4504 } 4505 4506 /* 4507 * for frame type DATA and BAR, we update stats based on MSDU, 4508 * successful msdu and mpdu are populate from ACK BA STATUS TLV 4509 * which comes out of order. successful mpdu also populated from 4510 * COMPLTN COMMON TLV which comes in order. for every ppdu_info 4511 * we store successful mpdu from both tlv and compare before delivering 4512 * to make sure we received ACK BA STATUS TLV. For some self generated 4513 * frame we won't get ack ba status tlv so no need to wait for 4514 * ack ba status tlv. 4515 */ 4516 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL && 4517 ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) { 4518 /* 4519 * most of the time bar frame will have duplicate ack ba 4520 * status tlv 4521 */ 4522 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR && 4523 (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)) 4524 return NULL; 4525 /* 4526 * For data frame, compltn common tlv should match ack ba status 4527 * tlv and completion status. 
Reason we are checking first user 4528 * for ofdma, completion seen at next MU BAR frm, for mimo 4529 * only for first user completion will be immediate. 4530 */ 4531 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && 4532 (ppdu_desc->user[0].completion_status == 0 && 4533 (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))) 4534 return NULL; 4535 } 4536 4537 /** 4538 * Once all the TLVs for a given PPDU has been processed, 4539 * return PPDU status to be delivered to higher layer. 4540 * tlv_bitmap_expected can't be available for different frame type. 4541 * But SCHED CMD STATS TLV is the last TLV from the FW for a ppdu. 4542 * apart from ACK BA TLV, FW sends other TLV in sequential order. 4543 * flush tlv comes separate. 4544 */ 4545 if ((ppdu_info->tlv_bitmap != 0 && 4546 (ppdu_info->tlv_bitmap & 4547 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) || 4548 (ppdu_info->tlv_bitmap & 4549 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) { 4550 ppdu_info->done = 1; 4551 return ppdu_info; 4552 } 4553 4554 return NULL; 4555 } 4556 #endif /* QCA_ENHANCED_STATS_SUPPORT */ 4557 4558 #if defined(WDI_EVENT_ENABLE) 4559 #ifdef QCA_ENHANCED_STATS_SUPPORT 4560 /** 4561 * dp_tx_ppdu_stats_feat_enable_check() - Check if feature(s) is enabled to 4562 * consume stats received from FW via HTT 4563 * @pdev: Datapath pdev handle 4564 * 4565 * Return: void 4566 */ 4567 static bool dp_tx_ppdu_stats_feat_enable_check(struct dp_pdev *pdev) 4568 { 4569 struct dp_soc *soc = pdev->soc; 4570 struct dp_mon_ops *mon_ops = NULL; 4571 4572 mon_ops = dp_mon_ops_get(soc); 4573 if (mon_ops && mon_ops->mon_ppdu_stats_feat_enable_check) 4574 return mon_ops->mon_ppdu_stats_feat_enable_check(pdev); 4575 else 4576 return false; 4577 } 4578 4579 /** 4580 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW 4581 * @soc: DP SOC handle 4582 * @pdev_id: pdev id 4583 * @htt_t2h_msg: HTT message nbuf 4584 * 4585 * return:void 4586 */ 4587 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, 4588 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) 4589 { 4590 struct dp_pdev *pdev; 4591 struct ppdu_info *ppdu_info = NULL; 4592 bool free_buf = true; 4593 struct dp_mon_pdev *mon_pdev; 4594 4595 if (pdev_id >= MAX_PDEV_CNT) 4596 return true; 4597 4598 pdev = soc->pdev_list[pdev_id]; 4599 if (!pdev) 4600 return true; 4601 4602 mon_pdev = pdev->monitor_pdev; 4603 if (!mon_pdev) 4604 return true; 4605 4606 if (!dp_tx_ppdu_stats_feat_enable_check(pdev)) 4607 return free_buf; 4608 4609 qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock); 4610 ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg); 4611 4612 if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) { 4613 if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv 4614 (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) != 4615 QDF_STATUS_SUCCESS) 4616 free_buf = false; 4617 } 4618 4619 if (ppdu_info) 4620 dp_tx_ppdu_desc_deliver(pdev, ppdu_info); 4621 4622 mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL; 4623 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0; 4624 mon_pdev->mgmtctrl_frm_info.ppdu_id = 0; 4625 4626 qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock); 4627 4628 return free_buf; 4629 } 4630 #else 4631 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, 4632 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) 4633 { 4634 return true; 4635 } 4636 #endif/* QCA_ENHANCED_STATS_SUPPORT */ 4637 #endif 4638 4639 #if defined(WDI_EVENT_ENABLE) &&\ 4640 (defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG)) 4641 /* 4642 * dp_ppdu_stats_ind_handler() - PPDU stats msg handler 4643 * @htt_soc: HTT 
SOC handle 4644 * @msg_word: Pointer to payload 4645 * @htt_t2h_msg: HTT msg nbuf 4646 * 4647 * Return: True if buffer should be freed by caller. 4648 */ 4649 bool 4650 dp_ppdu_stats_ind_handler(struct htt_soc *soc, 4651 uint32_t *msg_word, 4652 qdf_nbuf_t htt_t2h_msg) 4653 { 4654 u_int8_t pdev_id; 4655 u_int8_t target_pdev_id; 4656 bool free_buf; 4657 4658 target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word); 4659 pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, 4660 target_pdev_id); 4661 dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc, 4662 htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL, 4663 pdev_id); 4664 4665 free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id, 4666 htt_t2h_msg); 4667 4668 return free_buf; 4669 } 4670 #endif 4671 4672 void 4673 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) 4674 { 4675 pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor; 4676 } 4677 4678 bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) 4679 { 4680 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 4681 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4682 4683 if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) || 4684 (mon_pdev->mo_data_filter & FILTER_DATA_UCAST)) 4685 return true; 4686 4687 return false; 4688 } 4689 4690 bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) 4691 { 4692 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 4693 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4694 4695 if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) || 4696 (mon_pdev->mo_data_filter & FILTER_DATA_MCAST)) 4697 return true; 4698 4699 return false; 4700 } 4701 4702 bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) 4703 { 4704 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 4705 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4706 4707 if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) || 4708 (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) { 4709 if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) || 4710 (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) { 4711 return true; 4712 } 4713 } 4714 4715 return false; 4716 } 4717 4718 QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc) 4719 { 4720 int target_type; 4721 struct dp_mon_soc *mon_soc = soc->monitor_soc; 4722 struct cdp_mon_ops *cdp_ops; 4723 4724 cdp_ops = dp_mon_cdp_ops_get(soc); 4725 target_type = hal_get_target_type(soc->hal_soc); 4726 switch (target_type) { 4727 case TARGET_TYPE_QCA6290: 4728 case TARGET_TYPE_QCA6390: 4729 case TARGET_TYPE_QCA6490: 4730 case TARGET_TYPE_QCA6750: 4731 case TARGET_TYPE_KIWI: 4732 /* do nothing */ 4733 break; 4734 case TARGET_TYPE_QCA8074: 4735 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, 4736 MON_BUF_MIN_ENTRIES); 4737 break; 4738 case TARGET_TYPE_QCA8074V2: 4739 case TARGET_TYPE_QCA6018: 4740 case TARGET_TYPE_QCA9574: 4741 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, 4742 MON_BUF_MIN_ENTRIES); 4743 mon_soc->hw_nac_monitor_support = 1; 4744 break; 4745 case TARGET_TYPE_QCN9000: 4746 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, 4747 MON_BUF_MIN_ENTRIES); 4748 mon_soc->hw_nac_monitor_support = 1; 4749 if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) { 4750 if (cdp_ops && cdp_ops->config_full_mon_mode) 4751 cdp_ops->config_full_mon_mode((struct cdp_soc_t *)soc, 1); 4752 } 4753 break; 4754 case TARGET_TYPE_QCA5018: 4755 case TARGET_TYPE_QCN6122: 4756 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, 4757 MON_BUF_MIN_ENTRIES); 4758 mon_soc->hw_nac_monitor_support 
= 1; 4759 break; 4760 case TARGET_TYPE_QCN9224: 4761 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, 4762 MON_BUF_MIN_ENTRIES); 4763 mon_soc->hw_nac_monitor_support = 1; 4764 mon_soc->monitor_mode_v2 = 1; 4765 break; 4766 default: 4767 dp_mon_info("%s: Unknown tgt type %d\n", __func__, target_type); 4768 qdf_assert_always(0); 4769 break; 4770 } 4771 4772 dp_mon_info("hw_nac_monitor_support = %d", 4773 mon_soc->hw_nac_monitor_support); 4774 4775 return QDF_STATUS_SUCCESS; 4776 } 4777 4778 /** 4779 * dp_mon_pdev_per_target_config() - Target specific monitor pdev configuration 4780 * @pdev: PDEV handle [Should be valid] 4781 * 4782 * Return: None 4783 */ 4784 static void dp_mon_pdev_per_target_config(struct dp_pdev *pdev) 4785 { 4786 struct dp_soc *soc = pdev->soc; 4787 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 4788 int target_type; 4789 4790 target_type = hal_get_target_type(soc->hal_soc); 4791 switch (target_type) { 4792 case TARGET_TYPE_KIWI: 4793 mon_pdev->is_tlv_hdr_64_bit = true; 4794 break; 4795 default: 4796 mon_pdev->is_tlv_hdr_64_bit = false; 4797 break; 4798 } 4799 } 4800 4801 QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev) 4802 { 4803 struct dp_soc *soc; 4804 struct dp_mon_pdev *mon_pdev; 4805 struct dp_mon_ops *mon_ops; 4806 qdf_size_t mon_pdev_context_size; 4807 4808 if (!pdev) { 4809 dp_mon_err("pdev is NULL"); 4810 goto fail0; 4811 } 4812 4813 soc = pdev->soc; 4814 4815 mon_pdev_context_size = soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_PDEV); 4816 mon_pdev = dp_context_alloc_mem(soc, DP_MON_PDEV_TYPE, mon_pdev_context_size); 4817 if (!mon_pdev) { 4818 dp_mon_err("%pK: MONITOR pdev allocation failed", pdev); 4819 goto fail0; 4820 } 4821 4822 pdev->monitor_pdev = mon_pdev; 4823 mon_ops = dp_mon_ops_get(pdev->soc); 4824 if (!mon_ops) { 4825 dp_mon_err("%pK: Invalid monitor ops", pdev); 4826 goto fail1; 4827 } 4828 4829 if (mon_ops->mon_pdev_alloc) { 4830 if (mon_ops->mon_pdev_alloc(pdev)) { 4831 dp_mon_err("%pK: MONITOR pdev alloc failed", pdev); 4832 goto fail1; 4833 } 4834 } 4835 4836 if (mon_ops->mon_rings_alloc) { 4837 if (mon_ops->mon_rings_alloc(pdev)) { 4838 dp_mon_err("%pK: MONITOR rings setup failed", pdev); 4839 goto fail2; 4840 } 4841 } 4842 4843 /* Rx monitor mode specific init */ 4844 if (mon_ops->rx_mon_desc_pool_alloc) { 4845 if (mon_ops->rx_mon_desc_pool_alloc(pdev)) { 4846 dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev); 4847 goto fail3; 4848 } 4849 } 4850 4851 pdev->monitor_pdev = mon_pdev; 4852 dp_mon_pdev_per_target_config(pdev); 4853 4854 return QDF_STATUS_SUCCESS; 4855 fail3: 4856 if (mon_ops->mon_rings_free) 4857 mon_ops->mon_rings_free(pdev); 4858 fail2: 4859 if (mon_ops->mon_pdev_free) 4860 mon_ops->mon_pdev_free(pdev); 4861 fail1: 4862 pdev->monitor_pdev = NULL; 4863 qdf_mem_free(mon_pdev); 4864 fail0: 4865 return QDF_STATUS_E_NOMEM; 4866 } 4867 4868 QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev) 4869 { 4870 struct dp_mon_pdev *mon_pdev; 4871 struct dp_mon_ops *mon_ops = NULL; 4872 4873 if (!pdev) { 4874 dp_mon_err("pdev is NULL"); 4875 return QDF_STATUS_E_FAILURE; 4876 } 4877 4878 mon_pdev = pdev->monitor_pdev; 4879 if (!mon_pdev) { 4880 dp_mon_err("Monitor pdev is NULL"); 4881 return QDF_STATUS_E_FAILURE; 4882 } 4883 4884 mon_ops = dp_mon_ops_get(pdev->soc); 4885 if (!mon_ops) { 4886 dp_mon_err("Monitor ops is NULL"); 4887 return QDF_STATUS_E_FAILURE; 4888 } 4889 4890 if (mon_ops->rx_mon_desc_pool_free) 4891 mon_ops->rx_mon_desc_pool_free(pdev); 4892 if (mon_ops->mon_rings_free) 4893 
mon_ops->mon_rings_free(pdev); 4894 if (mon_ops->mon_pdev_free) 4895 mon_ops->mon_pdev_free(pdev); 4896 4897 qdf_mem_free(mon_pdev); 4898 pdev->monitor_pdev = NULL; 4899 return QDF_STATUS_SUCCESS; 4900 } 4901 4902 QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev) 4903 { 4904 struct dp_soc *soc; 4905 struct dp_mon_pdev *mon_pdev; 4906 struct dp_mon_ops *mon_ops = NULL; 4907 4908 if (!pdev) { 4909 dp_mon_err("pdev is NULL"); 4910 return QDF_STATUS_E_FAILURE; 4911 } 4912 4913 soc = pdev->soc; 4914 mon_pdev = pdev->monitor_pdev; 4915 4916 mon_pdev->invalid_mon_peer = qdf_mem_malloc(sizeof(struct dp_mon_peer)); 4917 if (!mon_pdev->invalid_mon_peer) { 4918 dp_mon_err("%pK: Memory allocation failed for invalid " 4919 "monitor peer", pdev); 4920 return QDF_STATUS_E_NOMEM; 4921 } 4922 4923 mon_ops = dp_mon_ops_get(pdev->soc); 4924 if (!mon_ops) { 4925 dp_mon_err("Monitor ops is NULL"); 4926 goto fail0; 4927 } 4928 4929 mon_pdev->filter = dp_mon_filter_alloc(mon_pdev); 4930 if (!mon_pdev->filter) { 4931 dp_mon_err("%pK: Memory allocation failed for monitor filter", 4932 pdev); 4933 goto fail0; 4934 } 4935 4936 if (mon_ops->tx_mon_filter_alloc) { 4937 if (mon_ops->tx_mon_filter_alloc(pdev)) { 4938 dp_mon_err("%pK: Memory allocation failed for tx monitor " 4939 "filter", pdev); 4940 goto fail1; 4941 } 4942 } 4943 4944 qdf_spinlock_create(&mon_pdev->ppdu_stats_lock); 4945 qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex); 4946 mon_pdev->monitor_configured = false; 4947 mon_pdev->mon_chan_band = REG_BAND_UNKNOWN; 4948 4949 TAILQ_INIT(&mon_pdev->neighbour_peers_list); 4950 mon_pdev->neighbour_peers_added = false; 4951 mon_pdev->monitor_configured = false; 4952 /* Monitor filter init */ 4953 mon_pdev->mon_filter_mode = MON_FILTER_ALL; 4954 mon_pdev->fp_mgmt_filter = FILTER_MGMT_ALL; 4955 mon_pdev->fp_ctrl_filter = FILTER_CTRL_ALL; 4956 mon_pdev->fp_data_filter = FILTER_DATA_ALL; 4957 mon_pdev->mo_mgmt_filter = FILTER_MGMT_ALL; 4958 mon_pdev->mo_ctrl_filter = FILTER_CTRL_ALL; 4959 mon_pdev->mo_data_filter = FILTER_DATA_ALL; 4960 4961 /* 4962 * initialize ppdu tlv list 4963 */ 4964 TAILQ_INIT(&mon_pdev->ppdu_info_list); 4965 TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list); 4966 4967 mon_pdev->list_depth = 0; 4968 mon_pdev->tlv_count = 0; 4969 /* initlialize cal client timer */ 4970 dp_cal_client_attach(&mon_pdev->cal_client_ctx, 4971 dp_pdev_to_cdp_pdev(pdev), 4972 pdev->soc->osdev, 4973 &dp_iterate_update_peer_list); 4974 if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS) 4975 goto fail2; 4976 4977 if (mon_ops->mon_rings_init) { 4978 if (mon_ops->mon_rings_init(pdev)) { 4979 dp_mon_err("%pK: MONITOR rings setup failed", pdev); 4980 goto fail3; 4981 } 4982 } 4983 4984 /* initialize sw monitor rx descriptors */ 4985 if (mon_ops->rx_mon_desc_pool_init) 4986 mon_ops->rx_mon_desc_pool_init(pdev); 4987 4988 /* allocate buffers and replenish the monitor RxDMA ring */ 4989 if (mon_ops->rx_mon_buffers_alloc) { 4990 if (mon_ops->rx_mon_buffers_alloc(pdev)) { 4991 dp_mon_err("%pK: rx mon buffers alloc failed", pdev); 4992 goto fail4; 4993 } 4994 } 4995 4996 /* attach monitor function */ 4997 dp_monitor_tx_ppdu_stats_attach(pdev); 4998 4999 /* mon pdev extended init */ 5000 if (mon_ops->mon_pdev_ext_init) 5001 mon_ops->mon_pdev_ext_init(pdev); 5002 5003 mon_pdev->is_dp_mon_pdev_initialized = true; 5004 5005 return QDF_STATUS_SUCCESS; 5006 5007 fail4: 5008 if (mon_ops->rx_mon_desc_pool_deinit) 5009 mon_ops->rx_mon_desc_pool_deinit(pdev); 5010 5011 if (mon_ops->mon_rings_deinit) 5012 
mon_ops->mon_rings_deinit(pdev); 5013 5014 fail3: 5015 dp_htt_ppdu_stats_detach(pdev); 5016 fail2: 5017 qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex); 5018 qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock); 5019 if (mon_ops->tx_mon_filter_dealloc) 5020 mon_ops->tx_mon_filter_dealloc(pdev); 5021 fail1: 5022 dp_mon_filter_dealloc(mon_pdev); 5023 fail0: 5024 qdf_mem_free(mon_pdev->invalid_mon_peer); 5025 return QDF_STATUS_E_FAILURE; 5026 } 5027 5028 QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev) 5029 { 5030 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 5031 struct dp_mon_ops *mon_ops = NULL; 5032 5033 mon_ops = dp_mon_ops_get(pdev->soc); 5034 if (!mon_ops) { 5035 dp_mon_err("Monitor ops is NULL"); 5036 return QDF_STATUS_E_FAILURE; 5037 } 5038 5039 if (!mon_pdev->is_dp_mon_pdev_initialized) 5040 return QDF_STATUS_SUCCESS; 5041 5042 dp_mon_filters_reset(pdev); 5043 /* detach monitor function */ 5044 dp_monitor_tx_ppdu_stats_detach(pdev); 5045 5046 if (mon_ops->rx_mon_buffers_free) 5047 mon_ops->rx_mon_buffers_free(pdev); 5048 if (mon_ops->rx_mon_desc_pool_deinit) 5049 mon_ops->rx_mon_desc_pool_deinit(pdev); 5050 if (mon_ops->mon_rings_deinit) 5051 mon_ops->mon_rings_deinit(pdev); 5052 dp_cal_client_detach(&mon_pdev->cal_client_ctx); 5053 dp_htt_ppdu_stats_detach(pdev); 5054 qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock); 5055 dp_neighbour_peers_detach(pdev); 5056 dp_pktlogmod_exit(pdev); 5057 if (mon_ops->tx_mon_filter_dealloc) 5058 mon_ops->tx_mon_filter_dealloc(pdev); 5059 if (mon_pdev->filter) 5060 dp_mon_filter_dealloc(mon_pdev); 5061 if (mon_ops->mon_rings_deinit) 5062 mon_ops->mon_rings_deinit(pdev); 5063 if (mon_pdev->invalid_mon_peer) 5064 qdf_mem_free(mon_pdev->invalid_mon_peer); 5065 mon_pdev->is_dp_mon_pdev_initialized = false; 5066 5067 return QDF_STATUS_SUCCESS; 5068 } 5069 5070 QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev) 5071 { 5072 struct dp_mon_vdev *mon_vdev; 5073 struct dp_pdev *pdev = vdev->pdev; 5074 5075 mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev)); 5076 if (!mon_vdev) { 5077 dp_mon_err("%pK: Monitor vdev allocation failed", vdev); 5078 return QDF_STATUS_E_NOMEM; 5079 } 5080 5081 if (pdev->monitor_pdev->scan_spcl_vap_configured) 5082 dp_scan_spcl_vap_stats_attach(mon_vdev); 5083 5084 vdev->monitor_vdev = mon_vdev; 5085 5086 return QDF_STATUS_SUCCESS; 5087 } 5088 5089 QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev) 5090 { 5091 struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev; 5092 struct dp_pdev *pdev = vdev->pdev; 5093 5094 if (!mon_vdev) 5095 return QDF_STATUS_E_FAILURE; 5096 5097 if (pdev->monitor_pdev->scan_spcl_vap_configured) 5098 dp_scan_spcl_vap_stats_detach(mon_vdev); 5099 5100 qdf_mem_free(mon_vdev); 5101 vdev->monitor_vdev = NULL; 5102 /* set mvdev to NULL only if detach is called for monitor/special vap 5103 */ 5104 if (pdev->monitor_pdev->mvdev == vdev) 5105 pdev->monitor_pdev->mvdev = NULL; 5106 5107 return QDF_STATUS_SUCCESS; 5108 } 5109 5110 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE 5111 /** 5112 * dp_mon_peer_attach_notify() - Raise WDI event for peer create 5113 * @peer: DP Peer handle 5114 * 5115 * Return: none 5116 */ 5117 static inline 5118 void dp_mon_peer_attach_notify(struct dp_peer *peer) 5119 { 5120 struct dp_mon_peer *mon_peer = peer->monitor_peer; 5121 struct dp_pdev *pdev; 5122 struct dp_soc *soc; 5123 struct cdp_peer_cookie peer_cookie; 5124 5125 pdev = peer->vdev->pdev; 5126 soc = pdev->soc; 5127 5128 qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw, 5129 QDF_MAC_ADDR_SIZE); 
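/*
 * Sketch of the flow (mirrors the code below): the cookie carries the
 * peer MAC, pdev_id and a running cookie value to the upper layer via
 * WDI_EVENT_PEER_CREATE; when rdkstats is enabled the upper layer is
 * expected to fill peer_cookie.ctx, which is then cached as
 * mon_peer->rdkstats_ctx for later rate-stats updates.
 */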
5130 5131 peer_cookie.ctx = NULL; 5132 peer_cookie.pdev_id = pdev->pdev_id; 5133 peer_cookie.cookie = pdev->next_peer_cookie++; 5134 5135 dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, soc, 5136 (void *)&peer_cookie, 5137 peer->peer_id, WDI_NO_VAL, pdev->pdev_id); 5138 5139 if (soc->rdkstats_enabled) { 5140 if (!peer_cookie.ctx) { 5141 pdev->next_peer_cookie--; 5142 qdf_err("Failed to initialize peer rate stats"); 5143 mon_peer->rdkstats_ctx = NULL; 5144 } else { 5145 mon_peer->rdkstats_ctx = (struct cdp_peer_rate_stats_ctx *) 5146 peer_cookie.ctx; 5147 } 5148 } 5149 } 5150 5151 /** 5152 * dp_mon_peer_detach_notify() - Raise WDI event for peer destroy 5153 * @peer: DP Peer handle 5154 * 5155 * Return: none 5156 */ 5157 static inline 5158 void dp_mon_peer_detach_notify(struct dp_peer *peer) 5159 { 5160 struct dp_mon_peer *mon_peer = peer->monitor_peer; 5161 struct dp_pdev *pdev; 5162 struct dp_soc *soc; 5163 struct cdp_peer_cookie peer_cookie; 5164 5165 pdev = peer->vdev->pdev; 5166 soc = pdev->soc; 5167 /* send peer destroy event to upper layer */ 5168 qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw, 5169 QDF_MAC_ADDR_SIZE); 5170 peer_cookie.ctx = NULL; 5171 peer_cookie.ctx = (struct cdp_stats_cookie *)mon_peer->rdkstats_ctx; 5172 5173 dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY, 5174 soc, 5175 (void *)&peer_cookie, 5176 peer->peer_id, 5177 WDI_NO_VAL, 5178 pdev->pdev_id); 5179 5180 mon_peer->rdkstats_ctx = NULL; 5181 } 5182 #else 5183 static inline 5184 void dp_mon_peer_attach_notify(struct dp_peer *peer) 5185 { 5186 peer->monitor_peer->rdkstats_ctx = NULL; 5187 } 5188 5189 static inline 5190 void dp_mon_peer_detach_notify(struct dp_peer *peer) 5191 { 5192 peer->monitor_peer->rdkstats_ctx = NULL; 5193 } 5194 #endif 5195 5196 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO) 5197 QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer) 5198 { 5199 struct dp_mon_peer *mon_peer; 5200 struct dp_pdev *pdev; 5201 5202 mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer)); 5203 if (!mon_peer) { 5204 dp_mon_err("%pK: MONITOR peer allocation failed", peer); 5205 return QDF_STATUS_E_NOMEM; 5206 } 5207 5208 peer->monitor_peer = mon_peer; 5209 pdev = peer->vdev->pdev; 5210 /* 5211 * In tx_monitor mode, filter may be set for unassociated peer 5212 * when unassociated peer get associated peer need to 5213 * update tx_cap_enabled flag to support peer filter. 
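 * The helper invoked just below re-evaluates any pending tx-capture
 * filter for this peer now that the peer object exists.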
5214 */ 5215 dp_monitor_peer_tx_capture_filter_check(pdev, peer); 5216 5217 DP_STATS_INIT(mon_peer); 5218 DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR); 5219 5220 dp_mon_peer_attach_notify(peer); 5221 5222 return QDF_STATUS_SUCCESS; 5223 } 5224 #endif 5225 5226 QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer) 5227 { 5228 struct dp_mon_peer *mon_peer = peer->monitor_peer; 5229 5230 if (!mon_peer) 5231 return QDF_STATUS_SUCCESS; 5232 5233 dp_mon_peer_detach_notify(peer); 5234 5235 qdf_mem_free(mon_peer); 5236 peer->monitor_peer = NULL; 5237 5238 return QDF_STATUS_SUCCESS; 5239 } 5240 5241 #ifndef DISABLE_MON_CONFIG 5242 void dp_mon_register_intr_ops(struct dp_soc *soc) 5243 { 5244 struct dp_mon_ops *mon_ops = NULL; 5245 5246 mon_ops = dp_mon_ops_get(soc); 5247 if (!mon_ops) { 5248 dp_mon_err("Monitor ops is NULL"); 5249 return; 5250 } 5251 if (mon_ops->mon_register_intr_ops) 5252 mon_ops->mon_register_intr_ops(soc); 5253 } 5254 #endif 5255 5256 struct cdp_peer_rate_stats_ctx *dp_mon_peer_get_rdkstats_ctx(struct dp_peer *peer) 5257 { 5258 struct dp_mon_peer *mon_peer = peer->monitor_peer; 5259 5260 if (mon_peer) 5261 return mon_peer->rdkstats_ctx; 5262 else 5263 return NULL; 5264 } 5265 5266 #ifdef QCA_ENHANCED_STATS_SUPPORT 5267 void dp_mon_peer_reset_stats(struct dp_peer *peer) 5268 { 5269 struct dp_mon_peer *mon_peer = NULL; 5270 5271 mon_peer = peer->monitor_peer; 5272 if (!mon_peer) 5273 return; 5274 5275 DP_STATS_CLR(mon_peer); 5276 DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR); 5277 } 5278 5279 void dp_mon_peer_get_stats(struct dp_peer *peer, void *arg, 5280 enum cdp_stat_update_type type) 5281 { 5282 struct dp_mon_peer *mon_peer = peer->monitor_peer; 5283 struct dp_mon_peer_stats *mon_peer_stats; 5284 5285 if (!mon_peer || !arg) 5286 return; 5287 5288 mon_peer_stats = &mon_peer->stats; 5289 5290 switch (type) { 5291 case UPDATE_PEER_STATS: 5292 { 5293 struct cdp_peer_stats *peer_stats = 5294 (struct cdp_peer_stats *)arg; 5295 DP_UPDATE_MON_STATS(peer_stats, mon_peer_stats); 5296 break; 5297 } 5298 case UPDATE_VDEV_STATS: 5299 { 5300 struct cdp_vdev_stats *vdev_stats = 5301 (struct cdp_vdev_stats *)arg; 5302 DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats); 5303 break; 5304 } 5305 default: 5306 dp_mon_err("Invalid stats_update_type"); 5307 } 5308 } 5309 5310 void dp_mon_invalid_peer_update_pdev_stats(struct dp_pdev *pdev) 5311 { 5312 struct dp_mon_peer *mon_peer; 5313 struct dp_mon_peer_stats *mon_peer_stats; 5314 struct cdp_pdev_stats *pdev_stats; 5315 5316 if (!pdev || !pdev->monitor_pdev) 5317 return; 5318 5319 mon_peer = pdev->monitor_pdev->invalid_mon_peer; 5320 if (!mon_peer) 5321 return; 5322 5323 mon_peer_stats = &mon_peer->stats; 5324 pdev_stats = &pdev->stats; 5325 DP_UPDATE_MON_STATS(pdev_stats, mon_peer_stats); 5326 } 5327 5328 QDF_STATUS 5329 dp_mon_peer_get_stats_param(struct dp_peer *peer, enum cdp_peer_stats_type type, 5330 cdp_peer_stats_param_t *buf) 5331 { 5332 QDF_STATUS ret = QDF_STATUS_SUCCESS; 5333 struct dp_mon_peer *mon_peer; 5334 5335 mon_peer = peer->monitor_peer; 5336 if (!mon_peer) 5337 return QDF_STATUS_E_FAILURE; 5338 5339 switch (type) { 5340 case cdp_peer_tx_rate: 5341 buf->tx_rate = mon_peer->stats.tx.tx_rate; 5342 break; 5343 case cdp_peer_tx_last_tx_rate: 5344 buf->last_tx_rate = mon_peer->stats.tx.last_tx_rate; 5345 break; 5346 case cdp_peer_tx_ratecode: 5347 buf->tx_ratecode = mon_peer->stats.tx.tx_ratecode; 5348 break; 5349 case cdp_peer_rx_rate: 5350 buf->rx_rate = mon_peer->stats.rx.rx_rate; 5351 break; 5352 case 
cdp_peer_rx_last_rx_rate: 5353 buf->last_rx_rate = mon_peer->stats.rx.last_rx_rate; 5354 break; 5355 case cdp_peer_rx_ratecode: 5356 buf->rx_ratecode = mon_peer->stats.rx.rx_ratecode; 5357 break; 5358 case cdp_peer_rx_avg_snr: 5359 buf->rx_avg_snr = mon_peer->stats.rx.avg_snr; 5360 break; 5361 case cdp_peer_rx_snr: 5362 buf->rx_snr = mon_peer->stats.rx.snr; 5363 break; 5364 default: 5365 dp_err("Invalid stats type requested"); 5366 ret = QDF_STATUS_E_FAILURE; 5367 } 5368 5369 return ret; 5370 } 5371 #endif 5372 5373 void dp_mon_ops_register(struct dp_soc *soc) 5374 { 5375 struct dp_mon_soc *mon_soc = soc->monitor_soc; 5376 uint32_t target_type; 5377 5378 target_type = hal_get_target_type(soc->hal_soc); 5379 switch (target_type) { 5380 case TARGET_TYPE_QCA6290: 5381 case TARGET_TYPE_QCA6390: 5382 case TARGET_TYPE_QCA6490: 5383 case TARGET_TYPE_QCA6750: 5384 case TARGET_TYPE_KIWI: 5385 case TARGET_TYPE_QCA8074: 5386 case TARGET_TYPE_QCA8074V2: 5387 case TARGET_TYPE_QCA6018: 5388 case TARGET_TYPE_QCA9574: 5389 case TARGET_TYPE_QCN9000: 5390 case TARGET_TYPE_QCA5018: 5391 case TARGET_TYPE_QCN6122: 5392 dp_mon_ops_register_1_0(mon_soc); 5393 break; 5394 case TARGET_TYPE_QCN9224: 5395 #ifdef QCA_MONITOR_2_0_SUPPORT 5396 dp_mon_ops_register_2_0(mon_soc); 5397 #endif 5398 break; 5399 default: 5400 dp_mon_err("%s: Unknown tgt type %d", __func__, target_type); 5401 qdf_assert_always(0); 5402 break; 5403 } 5404 } 5405 5406 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT 5407 void dp_mon_ops_free(struct dp_soc *soc) 5408 { 5409 struct cdp_ops *ops = soc->cdp_soc.ops; 5410 struct cdp_mon_ops *cdp_mon_ops = ops->mon_ops; 5411 struct dp_mon_soc *mon_soc = soc->monitor_soc; 5412 struct dp_mon_ops *mon_ops = mon_soc->mon_ops; 5413 5414 if (cdp_mon_ops) 5415 qdf_mem_free(cdp_mon_ops); 5416 5417 if (mon_ops) 5418 qdf_mem_free(mon_ops); 5419 } 5420 #else 5421 void dp_mon_ops_free(struct dp_soc *soc) 5422 { 5423 } 5424 #endif 5425 5426 void dp_mon_cdp_ops_register(struct dp_soc *soc) 5427 { 5428 struct cdp_ops *ops = soc->cdp_soc.ops; 5429 uint32_t target_type; 5430 5431 if (!ops) { 5432 dp_mon_err("cdp_ops is NULL"); 5433 return; 5434 } 5435 5436 target_type = hal_get_target_type(soc->hal_soc); 5437 switch (target_type) { 5438 case TARGET_TYPE_QCA6290: 5439 case TARGET_TYPE_QCA6390: 5440 case TARGET_TYPE_QCA6490: 5441 case TARGET_TYPE_QCA6750: 5442 case TARGET_TYPE_KIWI: 5443 case TARGET_TYPE_QCA8074: 5444 case TARGET_TYPE_QCA8074V2: 5445 case TARGET_TYPE_QCA6018: 5446 case TARGET_TYPE_QCA9574: 5447 case TARGET_TYPE_QCN9000: 5448 case TARGET_TYPE_QCA5018: 5449 case TARGET_TYPE_QCN6122: 5450 dp_mon_cdp_ops_register_1_0(ops); 5451 break; 5452 case TARGET_TYPE_QCN9224: 5453 #ifdef QCA_MONITOR_2_0_SUPPORT 5454 dp_mon_cdp_ops_register_2_0(ops); 5455 #endif 5456 break; 5457 default: 5458 dp_mon_err("%s: Unknown tgt type %d", __func__, target_type); 5459 qdf_assert_always(0); 5460 break; 5461 } 5462 5463 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) 5464 ops->cfr_ops->txrx_cfr_filter = dp_cfr_filter; 5465 #endif 5466 ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode; 5467 ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = 5468 dp_get_mon_vdev_from_pdev_wifi3; 5469 #ifdef DP_PEER_EXTENDED_API 5470 ops->misc_ops->pkt_log_init = dp_pkt_log_init; 5471 ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service; 5472 ops->misc_ops->pkt_log_exit = dp_pkt_log_exit; 5473 #endif 5474 #ifdef ATH_SUPPORT_NAC_RSSI 5475 ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi; 5476 
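/*
 * As with the other cdp callbacks registered in this function, the NAC
 * RSSI ops are cleared again in dp_mon_cdp_ops_deregister() so that
 * feature-gated hooks are torn down symmetrically.
 */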
ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = 5477 dp_vdev_get_neighbour_rssi; 5478 #endif 5479 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) 5480 ops->ctrl_ops->txrx_update_filter_neighbour_peers = 5481 dp_update_filter_neighbour_peers; 5482 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ 5483 ops->ctrl_ops->enable_peer_based_pktlog = 5484 dp_enable_peer_based_pktlog; 5485 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) 5486 ops->ctrl_ops->txrx_update_peer_pkt_capture_params = 5487 dp_peer_update_pkt_capture_params; 5488 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */ 5489 #ifdef QCA_ENHANCED_STATS_SUPPORT 5490 ops->host_stats_ops->txrx_enable_enhanced_stats = 5491 dp_enable_enhanced_stats; 5492 ops->host_stats_ops->txrx_disable_enhanced_stats = 5493 dp_disable_enhanced_stats; 5494 #endif /* QCA_ENHANCED_STATS_SUPPORT */ 5495 #ifdef WDI_EVENT_ENABLE 5496 ops->ctrl_ops->txrx_get_pldev = dp_get_pldev; 5497 #endif 5498 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS 5499 ops->host_stats_ops->txrx_get_scan_spcl_vap_stats = 5500 dp_get_scan_spcl_vap_stats; 5501 #endif 5502 return; 5503 } 5504 5505 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT 5506 static inline void 5507 dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops) 5508 { 5509 if (ops->mon_ops) { 5510 qdf_mem_free(ops->mon_ops); 5511 ops->mon_ops = NULL; 5512 } 5513 } 5514 #else 5515 static inline void 5516 dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops) 5517 { 5518 ops->mon_ops = NULL; 5519 } 5520 #endif 5521 5522 void dp_mon_cdp_ops_deregister(struct dp_soc *soc) 5523 { 5524 struct cdp_ops *ops = soc->cdp_soc.ops; 5525 5526 if (!ops) { 5527 dp_mon_err("cdp_ops is NULL"); 5528 return; 5529 } 5530 5531 dp_mon_cdp_mon_ops_deregister(ops); 5532 5533 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) 5534 ops->cfr_ops->txrx_cfr_filter = NULL; 5535 #endif 5536 ops->cmn_drv_ops->txrx_set_monitor_mode = NULL; 5537 ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL; 5538 #ifdef DP_PEER_EXTENDED_API 5539 ops->misc_ops->pkt_log_init = NULL; 5540 ops->misc_ops->pkt_log_con_service = NULL; 5541 ops->misc_ops->pkt_log_exit = NULL; 5542 #endif 5543 #ifdef ATH_SUPPORT_NAC_RSSI 5544 ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = NULL; 5545 ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = NULL; 5546 #endif 5547 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) 5548 ops->ctrl_ops->txrx_update_filter_neighbour_peers = NULL; 5549 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ 5550 ops->ctrl_ops->enable_peer_based_pktlog = NULL; 5551 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) 5552 ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL; 5553 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */ 5554 #ifdef FEATURE_PERPKT_INFO 5555 ops->host_stats_ops->txrx_enable_enhanced_stats = NULL; 5556 ops->host_stats_ops->txrx_disable_enhanced_stats = NULL; 5557 #endif /* FEATURE_PERPKT_INFO */ 5558 #ifdef WDI_EVENT_ENABLE 5559 ops->ctrl_ops->txrx_get_pldev = NULL; 5560 #endif 5561 return; 5562 } 5563 5564 void dp_mon_intr_ops_deregister(struct dp_soc *soc) 5565 { 5566 struct dp_mon_soc *mon_soc = soc->monitor_soc; 5567 5568 mon_soc->mon_rx_process = NULL; 5569 } 5570 5571 void dp_mon_feature_ops_deregister(struct dp_soc *soc) 5572 { 5573 struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc); 5574 5575 if (!mon_ops) { 5576 dp_err("mon_ops is NULL"); 5577 return; 5578 } 5579 5580 mon_ops->mon_config_debug_sniffer = NULL; 5581 mon_ops->mon_peer_tx_init = 
void dp_mon_feature_ops_deregister(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);

	if (!mon_ops) {
		dp_err("mon_ops is NULL");
		return;
	}

	mon_ops->mon_config_debug_sniffer = NULL;
	mon_ops->mon_peer_tx_init = NULL;
	mon_ops->mon_peer_tx_cleanup = NULL;
	mon_ops->mon_htt_ppdu_stats_attach = NULL;
	mon_ops->mon_htt_ppdu_stats_detach = NULL;
	mon_ops->mon_print_pdev_rx_mon_stats = NULL;
	mon_ops->mon_set_bsscolor = NULL;
	mon_ops->mon_pdev_get_filter_ucast_data = NULL;
	mon_ops->mon_pdev_get_filter_mcast_data = NULL;
	mon_ops->mon_pdev_get_filter_non_data = NULL;
	mon_ops->mon_neighbour_peer_add_ast = NULL;
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_ppdu_stats_attach = NULL;
	mon_ops->mon_tx_ppdu_stats_detach = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_peer_tx_capture_filter_check = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
	mon_ops->mon_config_enh_tx_capture = NULL;
#endif
#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
	mon_ops->mon_ppdu_stats_ind_handler = NULL;
	mon_ops->mon_ppdu_desc_deliver = NULL;
	mon_ops->mon_ppdu_desc_notify = NULL;
	mon_ops->mon_ppdu_stats_feat_enable_check = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_config_enh_rx_capture = NULL;
#endif
#ifdef QCA_SUPPORT_BPR
	mon_ops->mon_set_bpr_enable = NULL;
#endif
#ifdef ATH_SUPPORT_NAC
	mon_ops->mon_set_filter_neigh_peers = NULL;
#endif
#ifdef WLAN_ATF_ENABLE
	mon_ops->mon_set_atf_stats_enable = NULL;
#endif
#ifdef FEATURE_NAC_RSSI
	mon_ops->mon_filter_neighbour_peer = NULL;
#endif
#ifdef QCA_MCOPY_SUPPORT
	mon_ops->mon_filter_setup_mcopy_mode = NULL;
	mon_ops->mon_filter_reset_mcopy_mode = NULL;
	mon_ops->mon_mcopy_check_deliver = NULL;
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_filter_setup_enhanced_stats = NULL;
	mon_ops->mon_tx_enable_enhanced_stats = NULL;
	mon_ops->mon_tx_disable_enhanced_stats = NULL;
#ifdef WLAN_FEATURE_11BE
	mon_ops->mon_tx_stats_update = NULL;
#endif
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	mon_ops->mon_filter_setup_smart_monitor = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
#endif
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_set_pktlog_wifi3 = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_full = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_full = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_lite = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_lite = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_cbf = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_cbf = NULL;
#ifdef BE_PKTLOG_SUPPORT
	mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
	mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
#endif
#endif
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
	mon_ops->mon_pktlogmod_exit = NULL;
#endif
	mon_ops->rx_packet_length_set = NULL;
	mon_ops->rx_wmask_subscribe = NULL;
	mon_ops->rx_enable_mpdu_logging = NULL;
	mon_ops->mon_neighbour_peers_detach = NULL;
	mon_ops->mon_vdev_set_monitor_mode_buf_rings = NULL;
	mon_ops->mon_vdev_set_monitor_mode_rings = NULL;
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_rx_stats_update = NULL;
	mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
	mon_ops->mon_rx_populate_ppdu_info = NULL;
#endif
}

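/**
 * dp_mon_soc_attach() - Allocate the monitor soc context and register the
 * monitor, interrupt, cdp and feature ops
 * @soc: Datapath soc handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM or
 * QDF_STATUS_E_FAILURE on error
 */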
QDF_STATUS
dp_mon_soc_attach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
	if (!mon_soc) {
		dp_mon_err("%pK: mem allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	/* register monitor ops */
	soc->monitor_soc = mon_soc;
	dp_mon_ops_register(soc);
	dp_mon_register_intr_ops(soc);

	dp_mon_cdp_ops_register(soc);
	dp_mon_register_feature_ops(soc);
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = soc->monitor_soc;
	dp_monitor_vdev_timer_deinit(soc);
	dp_mon_cdp_ops_deregister(soc);
	soc->monitor_soc = NULL;
	qdf_mem_free(mon_soc);
	return QDF_STATUS_SUCCESS;
}
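/*
 * Usage sketch (an illustration only; the actual call sites are in the dp
 * soc init and deinit paths, which are not part of this file):
 *
 *	if (dp_mon_soc_attach(soc) != QDF_STATUS_SUCCESS)
 *		goto fail;
 *	...
 *	dp_mon_soc_detach(soc);
 */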