/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"
#include "dp_htt.h"

#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

#define DP_PEER_WDS_COUNT_INVALID UINT_MAX

#define DP_BLOCKMEM_SIZE 4096
#define WBM2_SW_PPE_REL_RING_ID 6
#define WBM2_SW_PPE_REL_MAP_ID 11

/* Alignment for consistent memory for DP rings */
#define DP_RING_BASE_ALIGN 32

#define DP_RSSI_INVAL 0x80
#define DP_RSSI_AVG_WEIGHT 2

/*
 * Formula to derive avg_rssi is taken from wifi2.o firmware.
 * It is an exponential moving average with weight 1/4, i.e.
 * avg_rssi = avg_rssi - avg_rssi / 4 + last_rssi / 4.
 */
#define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
        (((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
        + ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))

/* Macro for the NYSM value received in the VHT TLV */
#define VHT_SGI_NYSM 3

#define INVALID_WBM_RING_NUM 0xF

#ifdef FEATURE_DIRECT_LINK
#define DIRECT_LINK_REFILL_RING_ENTRIES 64
#ifdef IPA_OFFLOAD
#ifdef IPA_WDI3_VLAN_SUPPORT
#define DIRECT_LINK_REFILL_RING_IDX 4
#else
#define DIRECT_LINK_REFILL_RING_IDX 3
#endif
#else
#define DIRECT_LINK_REFILL_RING_IDX 2
#endif
#endif

/**
 * struct htt_dbgfs_cfg - structure to maintain required htt data
 * @msg_word: htt msg sent to upper layer
 * @m: qdf debugfs file pointer
 */
struct htt_dbgfs_cfg {
        uint32_t *msg_word;
        qdf_debugfs_file_t m;
};

/*
 * Cookie MSB bits assigned for different use cases.
 * Note: callers must not use the last 3 bits; they are reserved for the
 * pdev_id, which leaves room for more than 3 pdevs in the future.
 */

/* Reserved for the default case */
#define DBG_STATS_COOKIE_DEFAULT 0x0

/* Reserved for DP stats: 3rd bit */
#define DBG_STATS_COOKIE_DP_STATS BIT(3)

/* Reserved for HTT stats debugfs support: 4th bit */
#define DBG_STATS_COOKIE_HTT_DBGFS BIT(4)

/* Reserved for HTT stats sysfs support: 5th bit */
#define DBG_SYSFS_STATS_COOKIE BIT(5)

/* Reserved for HTT stats OBSS PD support: 6th bit */
#define DBG_STATS_COOKIE_HTT_OBSS BIT(6)
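/*
 * Example (illustrative only, not part of the driver API): a stats cookie
 * is composed by OR-ing one of the DBG_STATS_COOKIE_* flags above with the
 * pdev_id in the reserved low 3 bits. The helper below is a hypothetical
 * sketch of that layout.
 */
#if 0
static inline uint32_t dp_example_build_stats_cookie(uint32_t dbg_flag,
                                                     uint8_t pdev_id)
{
        /* low 3 bits carry the pdev_id; the higher bits carry the flag */
        return dbg_flag | (pdev_id & 0x7);
}
#endif
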
/* Bitmap of HTT PPDU TLV types for Default mode */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
        ((1 << HTT_PPDU_STATS_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
        (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/* PPDU STATS CFG */
#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS \
        (HTT_PPDU_DEFAULT_TLV_BITMAP | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
        (1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support the debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER \
        (HTT_PPDU_DEFAULT_TLV_BITMAP | \
        (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
        (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
        (1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
        (1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support the BPR feature */
#define DP_PPDU_STATS_CFG_BPR \
        ((1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
        (1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR and enhanced stats */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
                                   DP_PPDU_STATS_CFG_ENH_STATS)

/* PPDU stats mask sent to FW to support BPR and pktlog stats */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
                                      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
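/*
 * Example (illustrative only): each DP_PPDU_STATS_CFG_* mask above is a
 * bitmap keyed by HTT_PPDU_STATS_*_TLV type, so deciding whether the FW
 * was asked for a given TLV is a single bit test. The helper name below
 * is hypothetical.
 */
#if 0
static inline bool dp_example_ppdu_tlv_requested(uint32_t cfg_bitmap,
                                                 uint32_t tlv_type)
{
        /* e.g. true for (DP_PPDU_STATS_CFG_ENH_STATS,
         * HTT_PPDU_STATS_COMMON_TLV)
         */
        return !!(cfg_bitmap & (1 << tlv_type));
}
#endif
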
/* Bitmap of HTT PPDU delayed ba TLV types for Default mode */
#define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
        ((1 << HTT_PPDU_STATS_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_RATE_TLV))

/* Bitmap of HTT PPDU TLV types for Delayed BA status */
#define HTT_PPDU_STATUS_TLV_BITMAP \
        ((1 << HTT_PPDU_STATS_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/* Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
        ((1 << HTT_PPDU_STATS_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
        (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
        (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))

/* Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
        ((1 << HTT_PPDU_STATS_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
        (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
        (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
        (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))

static const enum cdp_packet_type hal_2_dp_pkt_type_map[HAL_DOT11_MAX] = {
        [HAL_DOT11A] = DOT11_A,
        [HAL_DOT11B] = DOT11_B,
        [HAL_DOT11N_MM] = DOT11_N,
        [HAL_DOT11AC] = DOT11_AC,
        [HAL_DOT11AX] = DOT11_AX,
        [HAL_DOT11BA] = DOT11_MAX,
#ifdef WLAN_FEATURE_11BE
        [HAL_DOT11BE] = DOT11_BE,
#else
        [HAL_DOT11BE] = DOT11_MAX,
#endif
        [HAL_DOT11AZ] = DOT11_MAX,
        [HAL_DOT11N_GF] = DOT11_MAX,
};

#ifdef WLAN_FEATURE_11BE
/**
 * dp_get_mcs_array_index_by_pkt_type_mcs() - get the destination mcs index
 *                                            in the mcs array
 * @pkt_type: host SW pkt type
 * @mcs: mcs value for TX/RX rate
 *
 * Return: valid index in the mcs array on success,
 *         MCS_INVALID_ARRAY_INDEX otherwise
 */
static inline uint8_t
dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs)
{
        uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX;

        switch (pkt_type) {
        case DOT11_A:
                dst_mcs_idx = mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs;
                break;
        case DOT11_B:
                dst_mcs_idx = mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs;
                break;
        case DOT11_N:
                dst_mcs_idx = mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs;
                break;
        case DOT11_AC:
                dst_mcs_idx = mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs;
                break;
        case DOT11_AX:
                dst_mcs_idx = mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs;
                break;
        case DOT11_BE:
                dst_mcs_idx = mcs >= MAX_MCS_11BE ? (MAX_MCS - 1) : mcs;
                break;
        default:
                break;
        }

        return dst_mcs_idx;
}
#else
static inline uint8_t
dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs)
{
        uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX;

        switch (pkt_type) {
        case DOT11_A:
                dst_mcs_idx = mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs;
                break;
        case DOT11_B:
                dst_mcs_idx = mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs;
                break;
        case DOT11_N:
                dst_mcs_idx = mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs;
                break;
        case DOT11_AC:
                dst_mcs_idx = mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs;
                break;
        case DOT11_AX:
                dst_mcs_idx = mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs;
                break;
        default:
                break;
        }

        return dst_mcs_idx;
}
#endif
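/*
 * Example (illustrative only): a typical rate-stats caller translates the
 * HAL preamble through hal_2_dp_pkt_type_map[] and clamps the reported MCS
 * via dp_get_mcs_array_index_by_pkt_type_mcs(), skipping the update when
 * the index is invalid. dp_example_update_mcs_count() is a hypothetical
 * name.
 */
#if 0
static inline void
dp_example_update_mcs_count(uint32_t *mcs_count, uint32_t hal_preamble,
                            uint32_t mcs)
{
        enum cdp_packet_type pkt_type = hal_2_dp_pkt_type_map[hal_preamble];
        uint8_t dst_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type,
                                                                 mcs);

        if (dst_idx != MCS_INVALID_ARRAY_INDEX)
                mcs_count[dst_idx]++;
}
#endif
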
#ifdef WIFI_MONITOR_SUPPORT
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc);
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc);
#else
static inline
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
        return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
        return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_rx_err_match_dhost() - check whether the dest-mac is correct
 * @eh: Ethernet header of the incoming packet
 * @vdev: dp_vdev object of the VAP on which this data packet is received
 *
 * Return: 1 if the destination mac is correct,
 *         0 if this frame is not correctly destined to this VAP/MLD
 */
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev);

#ifdef MONITOR_MODULARIZED_ENABLE
static inline bool dp_monitor_modularized_enable(void)
{
        return TRUE;
}

static inline QDF_STATUS
dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }

static inline QDF_STATUS
dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }
#else
static inline bool dp_monitor_modularized_enable(void)
{
        return FALSE;
}

static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc)
{
        return dp_mon_soc_attach(soc);
}

static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc)
{
        return dp_mon_soc_detach(soc);
}
#endif
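/*
 * Example (illustrative only): soc bring-up is expected to call the
 * wrappers above rather than dp_mon_soc_attach()/dp_mon_soc_detach()
 * directly, so that a modularized monitor build (where the wrappers are
 * no-ops) can attach the monitor module on its own later. The helper name
 * below is hypothetical.
 */
#if 0
static inline QDF_STATUS dp_example_soc_attach_monitor(struct dp_soc *soc)
{
        /* no-op when MONITOR_MODULARIZED_ENABLE is defined */
        return dp_mon_soc_attach_wrapper(soc);
}
#endif
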
#ifndef WIFI_MONITOR_SUPPORT
#define MON_BUF_MIN_ENTRIES 64

static inline QDF_STATUS dp_monitor_pdev_attach(struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_pdev_detach(struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_vdev_attach(struct dp_vdev *vdev)
{
        return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_vdev_detach(struct dp_vdev *vdev)
{
        return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_peer_attach(struct dp_soc *soc,
                                                struct dp_peer *peer)
{
        return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc,
                                                struct dp_peer *peer)
{
        return QDF_STATUS_E_FAILURE;
}

static inline struct cdp_peer_rate_stats_ctx*
dp_monitor_peer_get_peerstats_ctx(struct dp_soc *soc, struct dp_peer *peer)
{
        return NULL;
}

static inline
void dp_monitor_peer_reset_stats(struct dp_soc *soc, struct dp_peer *peer)
{
}

static inline
void dp_monitor_peer_get_stats(struct dp_soc *soc, struct dp_peer *peer,
                               void *arg, enum cdp_stat_update_type type)
{
}

static inline
void dp_monitor_invalid_peer_update_pdev_stats(struct dp_soc *soc,
                                               struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_peer_get_stats_param(struct dp_soc *soc,
                                           struct dp_peer *peer,
                                           enum cdp_peer_stats_type type,
                                           cdp_peer_stats_param_t *buf)
{
        return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_pdev_deinit(struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_soc_cfg_init(struct dp_soc *soc)
{
        return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_config_debug_sniffer(struct dp_pdev *pdev,
                                                         int val)
{
        return QDF_STATUS_E_FAILURE;
}

static inline void dp_monitor_flush_rings(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_monitor_htt_srng_setup(struct dp_soc *soc,
                                                   struct dp_pdev *pdev,
                                                   int mac_id,
                                                   int mac_for_pdev)
{
        return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_service_mon_rings(struct dp_soc *soc,
                                                uint32_t quota)
{
}

static inline
uint32_t dp_monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx,
                            uint32_t mac_id, uint32_t quota)
{
        return 0;
}

static inline
uint32_t dp_monitor_drop_packets_for_mac(struct dp_pdev *pdev,
                                         uint32_t mac_id, uint32_t quota)
{
        return 0;
}

static inline void dp_monitor_peer_tx_init(struct dp_pdev *pdev,
                                           struct dp_peer *peer)
{
}

static inline void dp_monitor_peer_tx_cleanup(struct dp_vdev *vdev,
                                              struct dp_peer *peer)
{
}

static inline
void dp_monitor_peer_tid_peer_id_update(struct dp_soc *soc,
                                        struct dp_peer *peer,
                                        uint16_t peer_id)
{
}

static inline void dp_monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev)
{
}

static inline void dp_monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_tx_capture_debugfs_init(struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev,
                                                           struct dp_peer *peer)
{
}

static inline
QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc,
                                           struct dp_tx_desc_s *desc,
                                           struct hal_tx_completion_status *ts,
                                           uint16_t peer_id)
{
        return QDF_STATUS_E_FAILURE;
}

static inline
QDF_STATUS monitor_update_msdu_to_list(struct dp_soc *soc,
                                       struct dp_pdev *pdev,
                                       struct dp_peer *peer,
                                       struct hal_tx_completion_status *ts,
                                       qdf_nbuf_t netbuf)
{
        return QDF_STATUS_E_FAILURE;
}

static inline bool dp_monitor_ppdu_stats_ind_handler(struct htt_soc *soc,
                                                     uint32_t *msg_word,
                                                     qdf_nbuf_t htt_t2h_msg)
{
        return true;
}

static inline QDF_STATUS dp_monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
}

static inline void dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS dp_monitor_config_enh_tx_capture(struct dp_pdev *pdev,
                                                          uint32_t val)
{
        return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS dp_monitor_tx_peer_filter(struct dp_pdev *pdev,
                                                   struct dp_peer *peer,
                                                   uint8_t is_tx_pkt_cap_enable,
                                                   uint8_t *peer_mac)
{
        return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS dp_monitor_config_enh_rx_capture(struct dp_pdev *pdev,
                                                          uint32_t val)
{
        return QDF_STATUS_E_INVAL;
}

static inline
QDF_STATUS dp_monitor_set_bpr_enable(struct dp_pdev *pdev, uint32_t val)
{
        return QDF_STATUS_E_FAILURE;
}

static inline
int dp_monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val)
{
        return 0;
}
static inline
void dp_monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
{
}

static inline
void dp_monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
{
}

static inline
bool dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
{
        return false;
}

static inline
bool dp_monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
{
        return false;
}

static inline
bool dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
{
        return false;
}

static inline
int dp_monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
                                bool enable)
{
        return 0;
}

static inline void dp_monitor_pktlogmod_exit(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
{
        return QDF_STATUS_E_FAILURE;
}

static inline
void dp_monitor_neighbour_peers_detach(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS dp_monitor_filter_neighbour_peer(struct dp_pdev *pdev,
                                                          uint8_t *rx_pkt_hdr)
{
        return QDF_STATUS_E_FAILURE;
}

static inline void dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
{
}

static inline
void dp_monitor_reap_timer_init(struct dp_soc *soc)
{
}

static inline
void dp_monitor_reap_timer_deinit(struct dp_soc *soc)
{
}

static inline
bool dp_monitor_reap_timer_start(struct dp_soc *soc,
                                 enum cdp_mon_reap_source source)
{
        return false;
}

static inline
bool dp_monitor_reap_timer_stop(struct dp_soc *soc,
                                enum cdp_mon_reap_source source)
{
        return false;
}

static inline void
dp_monitor_reap_timer_suspend(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_init(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_deinit(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_start(struct dp_soc *soc)
{
}

static inline
bool dp_monitor_vdev_timer_stop(struct dp_soc *soc)
{
        return false;
}

static inline struct qdf_mem_multi_page_t*
dp_monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id)
{
        return NULL;
}

static inline uint32_t *
dp_monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id)
{
        return NULL;
}

static inline QDF_STATUS dp_monitor_drop_inv_peer_pkts(struct dp_vdev *vdev)
{
        return QDF_STATUS_E_FAILURE;
}

static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
{
        return false;
}

static inline void dp_monitor_vdev_register_osif(struct dp_vdev *vdev,
                                                 struct ol_txrx_ops *txrx_ops)
{
}

static inline bool dp_monitor_is_vdev_timer_running(struct dp_soc *soc)
{
        return false;
}

static inline
void dp_monitor_pdev_set_mon_vdev(struct dp_vdev *vdev)
{
}

static inline void dp_monitor_vdev_delete(struct dp_soc *soc,
                                          struct dp_vdev *vdev)
{
}

static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
{
}

static inline void dp_monitor_neighbour_peer_add_ast(struct dp_pdev *pdev,
                                                     struct dp_peer *ta_peer,
                                                     uint8_t *mac_addr,
                                                     qdf_nbuf_t nbuf,
                                                     uint32_t flags)
{
}
static inline void
dp_monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band chan_band)
{
}

static inline void
dp_monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq)
{
}

static inline void dp_monitor_set_chan_num(struct dp_pdev *pdev, int chan_num)
{
}

static inline bool dp_monitor_is_enable_mcopy_mode(struct dp_pdev *pdev)
{
        return false;
}

static inline
void dp_monitor_neighbour_peer_list_remove(struct dp_pdev *pdev,
                                           struct dp_vdev *vdev,
                                           struct dp_neighbour_peer *peer)
{
}

static inline bool dp_monitor_is_chan_band_known(struct dp_pdev *pdev)
{
        return false;
}

static inline enum reg_wifi_band
dp_monitor_get_chan_band(struct dp_pdev *pdev)
{
        return 0;
}

static inline int
dp_monitor_get_chan_num(struct dp_pdev *pdev)
{
        return 0;
}

static inline qdf_freq_t
dp_monitor_get_chan_freq(struct dp_pdev *pdev)
{
        return 0;
}

static inline void dp_monitor_get_mpdu_status(struct dp_pdev *pdev,
                                              struct dp_soc *soc,
                                              uint8_t *rx_tlv_hdr)
{
}

static inline void dp_monitor_print_tx_stats(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_mcopy_check_deliver(struct dp_pdev *pdev,
                                          uint16_t peer_id, uint32_t ppdu_id,
                                          uint8_t first_msdu)
{
        return QDF_STATUS_SUCCESS;
}

static inline bool dp_monitor_is_enable_tx_sniffer(struct dp_pdev *pdev)
{
        return false;
}

static inline struct dp_vdev*
dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev)
{
        return NULL;
}

static inline QDF_STATUS dp_monitor_check_com_info_ppdu_id(struct dp_pdev *pdev,
                                                           void *rx_desc)
{
        return QDF_STATUS_E_FAILURE;
}

static inline struct mon_rx_status*
dp_monitor_get_rx_status(struct dp_pdev *pdev)
{
        return NULL;
}

static inline
void dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev *pdev, bool val)
{
}

static inline
void dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev *pdev,
                                                      bool val)
{
}

static inline QDF_STATUS
dp_monitor_peer_tx_capture_get_stats(struct dp_soc *soc, struct dp_peer *peer,
                                     struct cdp_peer_tx_capture_stats *stats)
{
        return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS
dp_monitor_pdev_tx_capture_get_stats(struct dp_soc *soc, struct dp_pdev *pdev,
                                     struct cdp_pdev_tx_capture_stats *stats)
{
        return QDF_STATUS_E_FAILURE;
}

#ifdef DP_POWER_SAVE
static inline
void dp_monitor_pktlog_reap_pending_frames(struct dp_pdev *pdev)
{
}

static inline
void dp_monitor_pktlog_start_reap_timer(struct dp_pdev *pdev)
{
}
#endif

static inline bool dp_monitor_is_configured(struct dp_pdev *pdev)
{
        return false;
}

static inline void
dp_mon_rx_hdr_length_set(struct dp_soc *soc, uint32_t *msg_word,
                         struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void dp_monitor_soc_init(struct dp_soc *soc)
{
}

static inline void dp_monitor_soc_deinit(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_monitor_config_undecoded_metadata_capture(struct dp_pdev *pdev,
                                                        int val)
{
        return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_monitor_config_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev,
                                                      int mask1, int mask2)
{
        return QDF_STATUS_SUCCESS;
}
static inline QDF_STATUS
dp_monitor_get_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev,
                                                   int *mask, int *mask_cont)
{
        return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_soc_htt_srng_setup(struct dp_soc *soc)
{
        return QDF_STATUS_E_FAILURE;
}

static inline bool dp_is_monitor_mode_using_poll(struct dp_soc *soc)
{
        return false;
}

static inline
uint32_t dp_tx_mon_buf_refill(struct dp_intr *int_ctx)
{
        return 0;
}

static inline uint32_t
dp_tx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
                  uint32_t mac_id, uint32_t quota)
{
        return 0;
}

static inline uint32_t
dp_print_txmon_ring_stat_from_hal(struct dp_pdev *pdev)
{
        return 0;
}

static inline
uint32_t dp_rx_mon_buf_refill(struct dp_intr *int_ctx)
{
        return 0;
}

static inline bool dp_monitor_is_tx_cap_enabled(struct dp_peer *peer)
{
        return false;
}

static inline bool dp_monitor_is_rx_cap_enabled(struct dp_peer *peer)
{
        return false;
}

static inline void
dp_rx_mon_enable(struct dp_soc *soc, uint32_t *msg_word,
                 struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_packet_length_set(struct dp_soc *soc, uint32_t *msg_word,
                            struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_mpdu_logging(struct dp_soc *soc, uint32_t *msg_word,
                              struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_wmask_subscribe(struct dp_soc *soc, uint32_t *msg_word,
                          struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_mac_filter_set(struct dp_soc *soc, uint32_t *msg_word,
                         struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_pkt_tlv_offset(struct dp_soc *soc, uint32_t *msg_word,
                                struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

#ifdef WLAN_TELEMETRY_STATS_SUPPORT
static inline
void dp_monitor_peer_telemetry_stats(struct dp_peer *peer,
                                     struct cdp_peer_telemetry_stats *stats)
{
}

static inline
void dp_monitor_peer_deter_stats(struct dp_peer *peer,
                                 struct cdp_peer_telemetry_stats *stats)
{
}
#endif /* WLAN_TELEMETRY_STATS_SUPPORT */
#endif /* !WIFI_MONITOR_SUPPORT */

/**
 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to dp soc handle
 * @psoc: CDP psoc handle
 *
 * Return: struct dp_soc pointer
 */
static inline
struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc)
{
        return (struct dp_soc *)psoc;
}

/* Maximum time a timer handler may run before yielding:
 * 10 us * 100 * 20 = 20 ms worth of log-timestamp ticks.
 */
#define DP_MAX_TIMER_EXEC_TIME_TICKS \
                (QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20)

/**
 * enum timer_yield_status - yield status code used in monitor mode timer.
 * @DP_TIMER_NO_YIELD: do not yield
 * @DP_TIMER_WORK_DONE: yield because work is done
 * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
 * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
 */
enum timer_yield_status {
        DP_TIMER_NO_YIELD,
        DP_TIMER_WORK_DONE,
        DP_TIMER_WORK_EXHAUST,
        DP_TIMER_TIME_EXHAUST,
};
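/*
 * Example (illustrative only): a monitor-mode timer handler is expected to
 * bound each run by both a work quota and the DP_MAX_TIMER_EXEC_TIME_TICKS
 * budget, yielding with one of the codes above. The helper below is a
 * hypothetical sketch; qdf_get_log_timestamp() supplies the tick source.
 */
#if 0
static inline enum timer_yield_status
dp_example_timer_should_yield(uint64_t start_ticks, uint32_t work_done,
                              uint32_t quota)
{
        if (qdf_get_log_timestamp() - start_ticks >
            DP_MAX_TIMER_EXEC_TIME_TICKS)
                return DP_TIMER_TIME_EXHAUST;

        if (work_done >= quota)
                return DP_TIMER_WORK_EXHAUST;

        return DP_TIMER_NO_YIELD;
}
#endif
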
#if DP_PRINT_ENABLE
#include <qdf_types.h> /* qdf_vprint */
#include <cdp_txrx_handle.h>

enum {
        /* FATAL_ERR - print only irrecoverable error messages */
        DP_PRINT_LEVEL_FATAL_ERR,

        /* ERR - include non-fatal err messages */
        DP_PRINT_LEVEL_ERR,

        /* WARN - include warnings */
        DP_PRINT_LEVEL_WARN,

        /* INFO1 - include fundamental, infrequent events */
        DP_PRINT_LEVEL_INFO1,

        /* INFO2 - include non-fundamental but infrequent events */
        DP_PRINT_LEVEL_INFO2,
};

#define dp_print(level, fmt, ...) do { \
        if (level <= g_txrx_print_level) \
                qdf_print(fmt, ## __VA_ARGS__); \
} while (0)

#define DP_PRINT(level, fmt, ...) do { \
        dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */

#define DP_TRACE(LVL, fmt, args ...) \
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
                  fmt, ## args)

#ifdef WLAN_SYSFS_DP_STATS
void DP_PRINT_STATS(const char *fmt, ...);
#else /* WLAN_SYSFS_DP_STATS */
#ifdef DP_PRINT_NO_CONSOLE
/* Stat prints should not go to console or kernel logs. */
#define DP_PRINT_STATS(fmt, args ...) \
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, \
                  fmt, ## args)
#else
#define DP_PRINT_STATS(fmt, args ...) \
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, \
                  fmt, ## args)
#endif
#endif /* WLAN_SYSFS_DP_STATS */

#define DP_STATS_INIT(_handle) \
        qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_STATS_CLR(_handle) \
        qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#ifndef DISABLE_DP_STATS
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
        if (likely(_handle)) \
                _handle->stats._field += _delta; \
}

#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) \
{ \
        if (likely(_handle)) \
                _handle->_field += _delta; \
}

#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
        if (_cond && likely(_handle)) \
                _handle->stats._field += _delta; \
}

#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
        if (likely(_handle)) \
                _handle->stats._field -= _delta; \
}

#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) \
{ \
        if (likely(_handle)) \
                _handle->_field -= _delta; \
}

#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
        if (likely(_handle)) \
                _handle->stats._field = _delta; \
}

#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
        DP_STATS_INC(_handle, _field.num, _count); \
        DP_STATS_INC(_handle, _field.bytes, _bytes) \
}

#define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \
{ \
        DP_PEER_STATS_FLAT_INC(_handle, _field.num, _count); \
        DP_PEER_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \
}
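/*
 * Example (illustrative only): the helpers above expand to plain field
 * updates on the handle's embedded stats structure, e.g. on a TX
 * completion path (field names below are illustrative):
 *
 *      DP_STATS_INC(pdev, tx.retries, 1);
 *      DP_STATS_INC_PKT(pdev, tx.comp_pkt, 1, length);
 *      DP_STATS_INCC(pdev, tx.tx_failed, 1, !acked);
 *
 * With DISABLE_DP_STATS defined, all of these compile out to nothing.
 */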
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
        DP_STATS_INCC(_handle, _field.num, _count, _cond); \
        DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}

#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
{ \
        _handle_a->stats._field += _handle_b->stats._field; \
}

#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
{ \
        DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
        DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes); \
}

#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
{ \
        _handle_a->stats._field = _handle_b->stats._field; \
}

#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#endif /* !DISABLE_DP_STATS */

#define DP_PEER_PER_PKT_STATS_INC(_handle, _field, _delta) \
{ \
        DP_STATS_INC(_handle, per_pkt_stats._field, _delta); \
}

#define DP_PEER_PER_PKT_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
        DP_STATS_INCC(_handle, per_pkt_stats._field, _delta, _cond); \
}

#define DP_PEER_PER_PKT_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
        DP_PEER_PER_PKT_STATS_INC(_handle, _field.num, _count); \
        DP_PEER_PER_PKT_STATS_INC(_handle, _field.bytes, _bytes) \
}

#define DP_PEER_PER_PKT_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
        DP_PEER_PER_PKT_STATS_INCC(_handle, _field.num, _count, _cond); \
        DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}

#define DP_PEER_PER_PKT_STATS_UPD(_handle, _field, _delta) \
{ \
        DP_STATS_UPD(_handle, per_pkt_stats._field, _delta); \
}

#ifndef QCA_ENHANCED_STATS_SUPPORT
#define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta) \
{ \
        DP_STATS_INC(_handle, extd_stats._field, _delta); \
}

#define DP_PEER_EXTD_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
        DP_STATS_INCC(_handle, extd_stats._field, _delta, _cond); \
}

#define DP_PEER_EXTD_STATS_UPD(_handle, _field, _delta) \
{ \
        DP_STATS_UPD(_handle, extd_stats._field, _delta); \
}
#endif /* !QCA_ENHANCED_STATS_SUPPORT */

#if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
        defined(QCA_ENHANCED_STATS_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
        if (_cond || !(_handle->hw_txrx_stats_en)) \
                DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
        if (_cond || !(_handle->hw_txrx_stats_en)) \
                DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
        if (_cond || !(_handle->hw_txrx_stats_en)) \
                DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \
}
#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
        if (_cond || !(_handle->hw_txrx_stats_en)) \
                DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \
}
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
        if (!(_handle->hw_txrx_stats_en)) \
                DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
        if (!(_handle->hw_txrx_stats_en)) \
                DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
        if (!(_handle->hw_txrx_stats_en)) \
                DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \
}

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
        if (!(_handle->hw_txrx_stats_en)) \
                DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \
}
#else
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
        DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes);

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
        DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count);

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
        DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes);

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
        DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes);
#endif

#ifdef ENABLE_DP_HIST_STATS
#define DP_HIST_INIT() \
        uint32_t num_of_packets[MAX_PDEV_CNT] = {0};

#define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
{ \
        ++num_of_packets[_pdev_id]; \
}

#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
        do { \
                if (_p_cntrs == 1) { \
                        DP_STATS_INC(_pdev, \
                                     tx_comp_histogram.pkts_1, 1); \
                } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \
                        DP_STATS_INC(_pdev, \
                                     tx_comp_histogram.pkts_2_20, 1); \
                } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \
                        DP_STATS_INC(_pdev, \
                                     tx_comp_histogram.pkts_21_40, 1); \
                } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \
                        DP_STATS_INC(_pdev, \
                                     tx_comp_histogram.pkts_41_60, 1); \
                } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \
                        DP_STATS_INC(_pdev, \
                                     tx_comp_histogram.pkts_61_80, 1); \
                } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \
                        DP_STATS_INC(_pdev, \
                                     tx_comp_histogram.pkts_81_100, 1); \
                } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \
                        DP_STATS_INC(_pdev, \
                                     tx_comp_histogram.pkts_101_200, 1); \
                } else if (_p_cntrs > 200) { \
                        DP_STATS_INC(_pdev, \
                                     tx_comp_histogram.pkts_201_plus, 1); \
                } \
        } while (0)
#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
        do { \
                if (_p_cntrs == 1) { \
                        DP_STATS_INC(_pdev, \
                                     rx_ind_histogram.pkts_1, 1); \
                } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \
                        DP_STATS_INC(_pdev, \
                                     rx_ind_histogram.pkts_2_20, 1); \
                } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \
                        DP_STATS_INC(_pdev, \
                                     rx_ind_histogram.pkts_21_40, 1); \
                } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \
                        DP_STATS_INC(_pdev, \
                                     rx_ind_histogram.pkts_41_60, 1); \
                } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \
                        DP_STATS_INC(_pdev, \
                                     rx_ind_histogram.pkts_61_80, 1); \
                } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \
                        DP_STATS_INC(_pdev, \
                                     rx_ind_histogram.pkts_81_100, 1); \
                } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \
                        DP_STATS_INC(_pdev, \
                                     rx_ind_histogram.pkts_101_200, 1); \
                } else if (_p_cntrs > 200) { \
                        DP_STATS_INC(_pdev, \
                                     rx_ind_histogram.pkts_201_plus, 1); \
                } \
        } while (0)

#define DP_TX_HIST_STATS_PER_PDEV() \
        do { \
                uint8_t hist_stats = 0; \
                for (hist_stats = 0; hist_stats < soc->pdev_count; \
                     hist_stats++) { \
                        DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
                                               num_of_packets[hist_stats]); \
                } \
        } while (0)

#define DP_RX_HIST_STATS_PER_PDEV() \
        do { \
                uint8_t hist_stats = 0; \
                for (hist_stats = 0; hist_stats < soc->pdev_count; \
                     hist_stats++) { \
                        DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
                                               num_of_packets[hist_stats]); \
                } \
        } while (0)

#else
#define DP_HIST_INIT()
#define DP_HIST_PACKET_COUNT_INC(_pdev_id)
#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HIST_STATS_PER_PDEV()
#define DP_TX_HIST_STATS_PER_PDEV()
#endif /* ENABLE_DP_HIST_STATS */
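/*
 * Example (illustrative only): DP_HIST_INIT() declares a local per-pdev
 * packet counter array, so the intended calling pattern in a TX/RX loop
 * is roughly:
 *
 *      DP_HIST_INIT();
 *      for each reaped packet:
 *              DP_HIST_PACKET_COUNT_INC(pdev->pdev_id);
 *      DP_TX_HIST_STATS_PER_PDEV();
 *
 * Note that DP_TX_HIST_STATS_PER_PDEV() and DP_RX_HIST_STATS_PER_PDEV()
 * additionally expect a local variable named soc to be in scope.
 */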
#define FRAME_MASK_IPV4_ARP 1
#define FRAME_MASK_IPV4_DHCP 2
#define FRAME_MASK_IPV4_EAPOL 4
#define FRAME_MASK_IPV6_DHCP 8

/* Return ceil(log2(value)), e.g. dp_log2_ceil(1000) = 10 */
static inline int dp_log2_ceil(unsigned int value)
{
        unsigned int tmp = value;
        int log2 = -1;

        while (tmp) {
                log2++;
                tmp >>= 1;
        }
        if ((1 << log2) != value)
                log2++;
        return log2;
}

#ifdef QCA_SUPPORT_PEER_ISOLATION
#define dp_get_peer_isolation(_peer) ((_peer)->isolation)

static inline void dp_set_peer_isolation(struct dp_txrx_peer *txrx_peer,
                                         bool val)
{
        txrx_peer->isolation = val;
}

#else
#define dp_get_peer_isolation(_peer) (0)

static inline void dp_set_peer_isolation(struct dp_txrx_peer *peer, bool val)
{
}
#endif /* QCA_SUPPORT_PEER_ISOLATION */

bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev);

#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer)
{
        txrx_peer->wds_ext.init = 0;
}
#else
static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer)
{
}
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#ifdef QCA_HOST2FW_RXBUF_RING
static inline
struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
        return &pdev->rx_mac_buf_ring[lmac_id];
}
#else
static inline
struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
        return &pdev->soc->rx_refill_buf_ring[lmac_id];
}
#endif

/*
 * The lmac ID for a particular channel band is fixed.
 * 2.4 GHz band uses lmac_id = 1
 * 5 GHz/6 GHz band uses lmac_id = 0
 */
#define DP_INVALID_LMAC_ID (-1)
#define DP_MON_INVALID_LMAC_ID (-1)
#define DP_MAC0_LMAC_ID 0
#define DP_MAC1_LMAC_ID 1

#ifdef FEATURE_TSO_STATS
/**
 * dp_init_tso_stats() - Clear tso stats
 * @pdev: pdev handle
 *
 * Return: None
 */
static inline
void dp_init_tso_stats(struct dp_pdev *pdev)
{
        if (pdev) {
                qdf_mem_zero(&((pdev)->stats.tso_stats),
                             sizeof((pdev)->stats.tso_stats));
                qdf_atomic_init(&pdev->tso_idx);
        }
}

/**
 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram
 * @pdev: pdev handle
 * @_p_cntrs: number of tso segments for a tso packet
 *
 * Return: None
 */
void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
                                           uint8_t _p_cntrs);

/**
 * dp_tso_segment_update() - Collect tso segment information
 * @pdev: pdev handle
 * @stats_idx: tso packet number
 * @idx: tso segment number
 * @seg: tso segment
 *
 * Return: None
 */
void dp_tso_segment_update(struct dp_pdev *pdev,
                           uint32_t stats_idx,
                           uint8_t idx,
                           struct qdf_tso_seg_t seg);

/**
 * dp_tso_packet_update() - TSO Packet information
 * @pdev: pdev handle
 * @stats_idx: tso packet number
 * @msdu: nbuf handle
 * @num_segs: tso segments
 *
 * Return: None
 */
void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
                          qdf_nbuf_t msdu, uint16_t num_segs);

/**
 * dp_tso_segment_stats_update() - TSO Segment stats
 * @pdev: pdev handle
 * @stats_seg: tso segment list
 * @stats_idx: tso packet number
 *
 * Return: None
 */
void dp_tso_segment_stats_update(struct dp_pdev *pdev,
                                 struct qdf_tso_seg_elem_t *stats_seg,
                                 uint32_t stats_idx);

/**
 * dp_print_tso_stats() - dump tso statistics
 * @soc: soc handle
 * @level: verbosity level
 *
 * Return: None
 */
void dp_print_tso_stats(struct dp_soc *soc,
                        enum qdf_stats_verbosity_level level);

/**
 * dp_txrx_clear_tso_stats() - clear tso stats
 * @soc: soc handle
 *
 * Return: None
 */
void dp_txrx_clear_tso_stats(struct dp_soc *soc);
#else
static inline
void dp_init_tso_stats(struct dp_pdev *pdev)
{
}

static inline
void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
                                           uint8_t _p_cntrs)
{
}

static inline
void dp_tso_segment_update(struct dp_pdev *pdev,
                           uint32_t stats_idx,
                           uint8_t idx,
                           struct qdf_tso_seg_t seg)
{
}

static inline
void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
                          qdf_nbuf_t msdu, uint16_t num_segs)
{
}

static inline
void dp_tso_segment_stats_update(struct dp_pdev *pdev,
                                 struct qdf_tso_seg_elem_t *stats_seg,
                                 uint32_t stats_idx)
{
}

static inline
void dp_print_tso_stats(struct dp_soc *soc,
                        enum qdf_stats_verbosity_level level)
{
}

static inline
void dp_txrx_clear_tso_stats(struct dp_soc *soc)
{
}
#endif /* FEATURE_TSO_STATS */
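/*
 * Example (illustrative only): the expected TSO stats flow is
 * dp_init_tso_stats() once at pdev init, then per TSO packet on the TX
 * path something like the sketch below. The stats_idx slot selection via
 * the pdev tso_idx counter and the helper name are assumptions.
 */
#if 0
static inline void
dp_example_record_tso_packet(struct dp_pdev *pdev, qdf_nbuf_t msdu,
                             uint32_t stats_idx,
                             struct qdf_tso_seg_elem_t *seg_list,
                             uint16_t num_segs)
{
        dp_tso_packet_update(pdev, stats_idx, msdu, num_segs);
        dp_tso_segment_stats_update(pdev, seg_list, stats_idx);
        dp_stats_tso_segment_histogram_update(pdev, (uint8_t)num_segs);
}
#endif
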
success/failure 1566 */ 1567 QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer, 1568 enum cdp_peer_stats_type type, 1569 cdp_peer_stats_param_t *buf); 1570 1571 /* dp_txrx_get_peer_extd_stats_param() - Get peer extd stats param 1572 * @peer: DP peer handle 1573 * @type: Requested stats type 1574 * @ buf: Buffer to hold the value 1575 * 1576 * Return: status success/failure 1577 */ 1578 QDF_STATUS dp_txrx_get_peer_extd_stats_param(struct dp_peer *peer, 1579 enum cdp_peer_stats_type type, 1580 cdp_peer_stats_param_t *buf); 1581 1582 #define DP_HTT_T2H_HP_PIPE 5 1583 /** 1584 * dp_update_pdev_stats(): Update the pdev stats 1585 * @tgtobj: pdev handle 1586 * @srcobj: vdev stats structure 1587 * 1588 * Update the pdev stats from the specified vdev stats 1589 * 1590 * return: None 1591 */ 1592 void dp_update_pdev_stats(struct dp_pdev *tgtobj, 1593 struct cdp_vdev_stats *srcobj); 1594 1595 /** 1596 * dp_update_vdev_ingress_stats(): Update the vdev ingress stats 1597 * @tgtobj: vdev handle 1598 * 1599 * Update the vdev ingress stats 1600 * 1601 * return: None 1602 */ 1603 void dp_update_vdev_ingress_stats(struct dp_vdev *tgtobj); 1604 1605 /** 1606 * dp_update_vdev_rate_stats() - Update the vdev rate stats 1607 * @tgtobj: tgt buffer for vdev stats 1608 * @srcobj: srcobj vdev stats 1609 * 1610 * Return: None 1611 */ 1612 void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj, 1613 struct cdp_vdev_stats *srcobj); 1614 1615 /** 1616 * dp_update_pdev_ingress_stats(): Update the pdev ingress stats 1617 * @tgtobj: pdev handle 1618 * @srcobj: vdev stats structure 1619 * 1620 * Update the pdev ingress stats from the specified vdev stats 1621 * 1622 * return: None 1623 */ 1624 void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj, 1625 struct dp_vdev *srcobj); 1626 1627 /** 1628 * dp_update_vdev_stats(): Update the vdev stats 1629 * @soc: soc handle 1630 * @srcobj: DP_PEER object 1631 * @arg: point to vdev stats structure 1632 * 1633 * Update the vdev stats from the specified peer stats 1634 * 1635 * return: None 1636 */ 1637 void dp_update_vdev_stats(struct dp_soc *soc, 1638 struct dp_peer *srcobj, 1639 void *arg); 1640 1641 /** 1642 * dp_update_vdev_stats_on_peer_unmap() - Update the vdev stats on peer unmap 1643 * @vdev: DP_VDEV handle 1644 * @peer: DP_PEER handle 1645 * 1646 * Return: None 1647 */ 1648 void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev, 1649 struct dp_peer *peer); 1650 1651 #ifdef IPA_OFFLOAD 1652 #define DP_IPA_UPDATE_RX_STATS(__tgtobj, __srcobj) \ 1653 { \ 1654 DP_STATS_AGGR_PKT(__tgtobj, __srcobj, rx.rx_total); \ 1655 } 1656 1657 #define DP_IPA_UPDATE_PER_PKT_RX_STATS(__tgtobj, __srcobj) \ 1658 { \ 1659 (__tgtobj)->rx.rx_total.num += (__srcobj)->rx.rx_total.num; \ 1660 (__tgtobj)->rx.rx_total.bytes += (__srcobj)->rx.rx_total.bytes; \ 1661 } 1662 #else 1663 #define DP_IPA_UPDATE_PER_PKT_RX_STATS(tgtobj, srcobj) \ 1664 1665 #define DP_IPA_UPDATE_RX_STATS(tgtobj, srcobj) 1666 #endif 1667 1668 #define DP_UPDATE_STATS(_tgtobj, _srcobj) \ 1669 do { \ 1670 uint8_t i; \ 1671 uint8_t pream_type; \ 1672 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 1673 for (i = 0; i < MAX_MCS; i++) { \ 1674 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1675 tx.pkt_type[pream_type].mcs_count[i]); \ 1676 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1677 rx.pkt_type[pream_type].mcs_count[i]); \ 1678 } \ 1679 } \ 1680 \ 1681 for (i = 0; i < MAX_BW; i++) { \ 1682 DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \ 1683 DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \ 1684 } \ 1685 \ 1686 for 
#define DP_UPDATE_STATS(_tgtobj, _srcobj) \
        do { \
                uint8_t i; \
                uint8_t pream_type; \
                for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
                        for (i = 0; i < MAX_MCS; i++) { \
                                DP_STATS_AGGR(_tgtobj, _srcobj, \
                                              tx.pkt_type[pream_type].mcs_count[i]); \
                                DP_STATS_AGGR(_tgtobj, _srcobj, \
                                              rx.pkt_type[pream_type].mcs_count[i]); \
                        } \
                } \
                \
                for (i = 0; i < MAX_BW; i++) { \
                        DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
                        DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
                } \
                \
                for (i = 0; i < SS_COUNT; i++) { \
                        DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
                        DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \
                } \
                for (i = 0; i < WME_AC_MAX; i++) { \
                        DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
                        DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
                        DP_STATS_AGGR(_tgtobj, _srcobj, \
                                      tx.wme_ac_type_bytes[i]); \
                        DP_STATS_AGGR(_tgtobj, _srcobj, \
                                      rx.wme_ac_type_bytes[i]); \
                        DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \
                } \
                \
                for (i = 0; i < MAX_GI; i++) { \
                        DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
                        DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
                } \
                \
                for (i = 0; i < MAX_RECEPTION_TYPES; i++) \
                        DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \
                \
                if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) { \
                        DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
                        DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
                } \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_queue_disable); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_no_match); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_threshold); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_link_desc_na); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_drop); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.mcast_vdev_drop); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_rr); \
                DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_total); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_success); \
                \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.fcserr); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.pn_err); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.oor_err); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.jump_2k_err); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.rxdma_wifi_parse_err); \
                if (_srcobj->stats.rx.snr != 0) \
                        DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \
                DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
                \
                for (i = 0; i < CDP_MAX_RX_RINGS; i++) \
                        DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
                \
                for (i = 0; i < CDP_MAX_LMACS; i++) \
                        DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rx_lmac[i]); \
                \
                _srcobj->stats.rx.unicast.num = \
                        _srcobj->stats.rx.to_stack.num - \
                        _srcobj->stats.rx.multicast.num; \
                _srcobj->stats.rx.unicast.bytes = \
                        _srcobj->stats.rx.to_stack.bytes - \
                        _srcobj->stats.rx.multicast.bytes; \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
                DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \
                \
                _tgtobj->stats.tx.last_ack_rssi = \
                        _srcobj->stats.tx.last_ack_rssi; \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.peer_unauth_rx_pkt_drop); \
                DP_STATS_AGGR(_tgtobj, _srcobj, rx.policy_check_drop); \
                DP_IPA_UPDATE_RX_STATS(_tgtobj, _srcobj); \
        } while (0)

#ifdef VDEV_PEER_PROTOCOL_COUNT
#define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) \
{ \
        uint8_t j; \
        for (j = 0; j < CDP_TRACE_MAX; j++) { \
                _tgtobj->tx.protocol_trace_cnt[j].egress_cnt += \
                        _srcobj->tx.protocol_trace_cnt[j].egress_cnt; \
                _tgtobj->tx.protocol_trace_cnt[j].ingress_cnt += \
                        _srcobj->tx.protocol_trace_cnt[j].ingress_cnt; \
                _tgtobj->rx.protocol_trace_cnt[j].egress_cnt += \
                        _srcobj->rx.protocol_trace_cnt[j].egress_cnt; \
                _tgtobj->rx.protocol_trace_cnt[j].ingress_cnt += \
                        _srcobj->rx.protocol_trace_cnt[j].ingress_cnt; \
        } \
}
#else
#define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj)
#endif

#ifdef WLAN_FEATURE_11BE
#define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) \
        do { \
                uint8_t i, mu_type; \
                for (i = 0; i < MAX_MCS; i++) { \
                        _tgtobj->tx.su_be_ppdu_cnt.mcs_count[i] += \
                                _srcobj->tx.su_be_ppdu_cnt.mcs_count[i]; \
                        _tgtobj->rx.su_be_ppdu_cnt.mcs_count[i] += \
                                _srcobj->rx.su_be_ppdu_cnt.mcs_count[i]; \
                } \
                for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \
                        for (i = 0; i < MAX_MCS; i++) { \
                                _tgtobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \
                                        _srcobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \
                                _tgtobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \
                                        _srcobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \
                        } \
                } \
                for (i = 0; i < MAX_PUNCTURED_MODE; i++) { \
                        _tgtobj->tx.punc_bw[i] += _srcobj->tx.punc_bw[i]; \
                        _tgtobj->rx.punc_bw[i] += _srcobj->rx.punc_bw[i]; \
                } \
        } while (0)
#else
#define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj)
#endif

#define DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj) \
        do { \
                uint8_t i; \
                _tgtobj->tx.ucast.num += _srcobj->tx.ucast.num; \
                _tgtobj->tx.ucast.bytes += _srcobj->tx.ucast.bytes; \
                _tgtobj->tx.mcast.num += _srcobj->tx.mcast.num; \
                _tgtobj->tx.mcast.bytes += _srcobj->tx.mcast.bytes; \
                _tgtobj->tx.bcast.num += _srcobj->tx.bcast.num; \
                _tgtobj->tx.bcast.bytes += _srcobj->tx.bcast.bytes; \
                _tgtobj->tx.nawds_mcast.num += _srcobj->tx.nawds_mcast.num; \
                _tgtobj->tx.nawds_mcast.bytes += \
                        _srcobj->tx.nawds_mcast.bytes; \
                _tgtobj->tx.tx_success.num += _srcobj->tx.tx_success.num; \
                _tgtobj->tx.tx_success.bytes += _srcobj->tx.tx_success.bytes; \
                _tgtobj->tx.nawds_mcast_drop += _srcobj->tx.nawds_mcast_drop; \
                _tgtobj->tx.ofdma += _srcobj->tx.ofdma; \
                _tgtobj->tx.non_amsdu_cnt += _srcobj->tx.non_amsdu_cnt; \
                _tgtobj->tx.amsdu_cnt += _srcobj->tx.amsdu_cnt; \
                _tgtobj->tx.dropped.fw_rem.num += \
                        _srcobj->tx.dropped.fw_rem.num; \
                _tgtobj->tx.dropped.fw_rem.bytes += \
                        _srcobj->tx.dropped.fw_rem.bytes; \
                _tgtobj->tx.dropped.fw_rem_notx += \
                        _srcobj->tx.dropped.fw_rem_notx; \
                _tgtobj->tx.dropped.fw_rem_tx += \
                        _srcobj->tx.dropped.fw_rem_tx; \
                _tgtobj->tx.dropped.age_out += _srcobj->tx.dropped.age_out; \
                _tgtobj->tx.dropped.fw_reason1 += \
                        _srcobj->tx.dropped.fw_reason1; \
                _tgtobj->tx.dropped.fw_reason2 += \
                        _srcobj->tx.dropped.fw_reason2; \
                _tgtobj->tx.dropped.fw_reason3 += \
                        _srcobj->tx.dropped.fw_reason3; \
                _tgtobj->tx.dropped.fw_rem_queue_disable += \
                        _srcobj->tx.dropped.fw_rem_queue_disable; \
                _tgtobj->tx.dropped.fw_rem_no_match += \
                        _srcobj->tx.dropped.fw_rem_no_match; \
                _tgtobj->tx.dropped.drop_threshold += \
                        _srcobj->tx.dropped.drop_threshold; \
                _tgtobj->tx.dropped.drop_link_desc_na += \
                        _srcobj->tx.dropped.drop_link_desc_na; \
                _tgtobj->tx.dropped.invalid_drop += \
                        _srcobj->tx.dropped.invalid_drop; \
                _tgtobj->tx.dropped.mcast_vdev_drop += \
                        _srcobj->tx.dropped.mcast_vdev_drop; \
                _tgtobj->tx.dropped.invalid_rr += \
                        _srcobj->tx.dropped.invalid_rr; \
                _tgtobj->tx.failed_retry_count += \
                        _srcobj->tx.failed_retry_count; \
                _tgtobj->tx.retry_count += _srcobj->tx.retry_count; \
                _tgtobj->tx.multiple_retry_count += \
                        _srcobj->tx.multiple_retry_count; \
                _tgtobj->tx.tx_success_twt.num += \
                        _srcobj->tx.tx_success_twt.num; \
                _tgtobj->tx.tx_success_twt.bytes += \
                        _srcobj->tx.tx_success_twt.bytes; \
                _tgtobj->tx.last_tx_ts = _srcobj->tx.last_tx_ts; \
                _tgtobj->tx.release_src_not_tqm += \
                        _srcobj->tx.release_src_not_tqm; \
                for (i = 0; i < QDF_PROTO_SUBTYPE_MAX; i++) { \
                        _tgtobj->tx.no_ack_count[i] += \
                                _srcobj->tx.no_ack_count[i]; \
                } \
                \
                _tgtobj->rx.multicast.num += _srcobj->rx.multicast.num; \
                _tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \
                _tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \
                _tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \
                if (_tgtobj->rx.to_stack.num >= _tgtobj->rx.multicast.num) \
                        _tgtobj->rx.unicast.num = \
                                _tgtobj->rx.to_stack.num - _tgtobj->rx.multicast.num; \
                if (_tgtobj->rx.to_stack.bytes >= _tgtobj->rx.multicast.bytes) \
                        _tgtobj->rx.unicast.bytes = \
                                _tgtobj->rx.to_stack.bytes - _tgtobj->rx.multicast.bytes; \
                _tgtobj->rx.raw.num += _srcobj->rx.raw.num; \
                _tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \
                _tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \
                _tgtobj->rx.mcast_3addr_drop += _srcobj->rx.mcast_3addr_drop; \
                _tgtobj->rx.mec_drop.num += _srcobj->rx.mec_drop.num; \
                _tgtobj->rx.mec_drop.bytes += _srcobj->rx.mec_drop.bytes; \
		_tgtobj->rx.intra_bss.pkts.num += \
					_srcobj->rx.intra_bss.pkts.num; \
		_tgtobj->rx.intra_bss.pkts.bytes += \
					_srcobj->rx.intra_bss.pkts.bytes; \
		_tgtobj->rx.intra_bss.fail.num += \
					_srcobj->rx.intra_bss.fail.num; \
		_tgtobj->rx.intra_bss.fail.bytes += \
					_srcobj->rx.intra_bss.fail.bytes; \
		_tgtobj->rx.intra_bss.mdns_no_fwd += \
					_srcobj->rx.intra_bss.mdns_no_fwd; \
		_tgtobj->rx.err.mic_err += _srcobj->rx.err.mic_err; \
		_tgtobj->rx.err.decrypt_err += _srcobj->rx.err.decrypt_err; \
		_tgtobj->rx.err.fcserr += _srcobj->rx.err.fcserr; \
		_tgtobj->rx.err.pn_err += _srcobj->rx.err.pn_err; \
		_tgtobj->rx.err.oor_err += _srcobj->rx.err.oor_err; \
		_tgtobj->rx.err.jump_2k_err += _srcobj->rx.err.jump_2k_err; \
		_tgtobj->rx.err.rxdma_wifi_parse_err += \
					_srcobj->rx.err.rxdma_wifi_parse_err; \
		_tgtobj->rx.non_amsdu_cnt += _srcobj->rx.non_amsdu_cnt; \
		_tgtobj->rx.amsdu_cnt += _srcobj->rx.amsdu_cnt; \
		_tgtobj->rx.rx_retries += _srcobj->rx.rx_retries; \
		_tgtobj->rx.multipass_rx_pkt_drop += \
					_srcobj->rx.multipass_rx_pkt_drop; \
		_tgtobj->rx.peer_unauth_rx_pkt_drop += \
					_srcobj->rx.peer_unauth_rx_pkt_drop; \
		_tgtobj->rx.policy_check_drop += \
					_srcobj->rx.policy_check_drop; \
		_tgtobj->rx.to_stack_twt.num += _srcobj->rx.to_stack_twt.num; \
		_tgtobj->rx.to_stack_twt.bytes += \
					_srcobj->rx.to_stack_twt.bytes; \
		_tgtobj->rx.last_rx_ts = _srcobj->rx.last_rx_ts; \
		for (i = 0; i < CDP_MAX_RX_RINGS; i++) { \
			_tgtobj->rx.rcvd_reo[i].num += \
					_srcobj->rx.rcvd_reo[i].num; \
			_tgtobj->rx.rcvd_reo[i].bytes += \
					_srcobj->rx.rcvd_reo[i].bytes; \
		} \
		for (i = 0; i < CDP_MAX_LMACS; i++) { \
			_tgtobj->rx.rx_lmac[i].num += \
					_srcobj->rx.rx_lmac[i].num; \
			_tgtobj->rx.rx_lmac[i].bytes += \
					_srcobj->rx.rx_lmac[i].bytes; \
		} \
		DP_IPA_UPDATE_PER_PKT_RX_STATS(_tgtobj, _srcobj); \
		DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj); \
	} while (0)
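
/*
 * Illustrative sketch (not part of this header's API): the per-packet
 * stats macro that closes above folds one stats object into another,
 * e.g. when rolling per-peer counters up into an aggregate. Both
 * arguments are assumed to share the same tx/rx stats layout (the
 * cdp_peer_stats name below is an assumption), and the index variable
 * "i" is declared at the top of the macro, outside this excerpt.
 *
 *	struct cdp_peer_stats aggr = { 0 };
 *	struct cdp_peer_stats *src = per_peer_stats;
 *
 *	DP_UPDATE_PER_PKT_STATS(&aggr, src);
 */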

#define DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj) \
	do { \
		uint8_t i, pream_type, mu_type; \
		_tgtobj->tx.stbc += _srcobj->tx.stbc; \
		_tgtobj->tx.ldpc += _srcobj->tx.ldpc; \
		_tgtobj->tx.retries += _srcobj->tx.retries; \
		_tgtobj->tx.ampdu_cnt += _srcobj->tx.ampdu_cnt; \
		_tgtobj->tx.non_ampdu_cnt += _srcobj->tx.non_ampdu_cnt; \
		_tgtobj->tx.num_ppdu_cookie_valid += \
					_srcobj->tx.num_ppdu_cookie_valid; \
		_tgtobj->tx.tx_ppdus += _srcobj->tx.tx_ppdus; \
		_tgtobj->tx.tx_mpdus_success += _srcobj->tx.tx_mpdus_success; \
		_tgtobj->tx.tx_mpdus_tried += _srcobj->tx.tx_mpdus_tried; \
		_tgtobj->tx.tx_rate = _srcobj->tx.tx_rate; \
		_tgtobj->tx.last_tx_rate = _srcobj->tx.last_tx_rate; \
		_tgtobj->tx.last_tx_rate_mcs = _srcobj->tx.last_tx_rate_mcs; \
		_tgtobj->tx.mcast_last_tx_rate = \
					_srcobj->tx.mcast_last_tx_rate; \
		_tgtobj->tx.mcast_last_tx_rate_mcs = \
					_srcobj->tx.mcast_last_tx_rate_mcs; \
		_tgtobj->tx.rnd_avg_tx_rate = _srcobj->tx.rnd_avg_tx_rate; \
		_tgtobj->tx.avg_tx_rate = _srcobj->tx.avg_tx_rate; \
		_tgtobj->tx.tx_ratecode = _srcobj->tx.tx_ratecode; \
		_tgtobj->tx.pream_punct_cnt += _srcobj->tx.pream_punct_cnt; \
		_tgtobj->tx.ru_start = _srcobj->tx.ru_start; \
		_tgtobj->tx.ru_tones = _srcobj->tx.ru_tones; \
		_tgtobj->tx.last_ack_rssi = _srcobj->tx.last_ack_rssi; \
		_tgtobj->tx.nss_info = _srcobj->tx.nss_info; \
		_tgtobj->tx.mcs_info = _srcobj->tx.mcs_info; \
		_tgtobj->tx.bw_info = _srcobj->tx.bw_info; \
		_tgtobj->tx.gi_info = _srcobj->tx.gi_info; \
		_tgtobj->tx.preamble_info = _srcobj->tx.preamble_info; \
		_tgtobj->tx.retries_mpdu += _srcobj->tx.retries_mpdu; \
		_tgtobj->tx.mpdu_success_with_retries += \
					_srcobj->tx.mpdu_success_with_retries; \
		_tgtobj->tx.rts_success = _srcobj->tx.rts_success; \
		_tgtobj->tx.rts_failure = _srcobj->tx.rts_failure; \
		_tgtobj->tx.bar_cnt = _srcobj->tx.bar_cnt; \
		_tgtobj->tx.ndpa_cnt = _srcobj->tx.ndpa_cnt; \
		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
			for (i = 0; i < MAX_MCS; i++) \
				_tgtobj->tx.pkt_type[pream_type].mcs_count[i] += \
					_srcobj->tx.pkt_type[pream_type].mcs_count[i]; \
		} \
		for (i = 0; i < WME_AC_MAX; i++) { \
			_tgtobj->tx.wme_ac_type[i] += _srcobj->tx.wme_ac_type[i]; \
			_tgtobj->tx.wme_ac_type_bytes[i] += \
					_srcobj->tx.wme_ac_type_bytes[i]; \
			_tgtobj->tx.excess_retries_per_ac[i] += \
					_srcobj->tx.excess_retries_per_ac[i]; \
		} \
		for (i = 0; i < MAX_GI; i++) { \
			_tgtobj->tx.sgi_count[i] += _srcobj->tx.sgi_count[i]; \
		} \
		for (i = 0; i < SS_COUNT; i++) { \
			_tgtobj->tx.nss[i] += _srcobj->tx.nss[i]; \
		} \
		for (i = 0; i < MAX_BW; i++) { \
			_tgtobj->tx.bw[i] += _srcobj->tx.bw[i]; \
		} \
		for (i = 0; i < MAX_RU_LOCATIONS; i++) { \
			_tgtobj->tx.ru_loc[i].num_msdu += \
					_srcobj->tx.ru_loc[i].num_msdu; \
			_tgtobj->tx.ru_loc[i].num_mpdu += \
					_srcobj->tx.ru_loc[i].num_mpdu; \
			_tgtobj->tx.ru_loc[i].mpdu_tried += \
					_srcobj->tx.ru_loc[i].mpdu_tried; \
		} \
		for (i = 0; i < MAX_TRANSMIT_TYPES; i++) { \
			_tgtobj->tx.transmit_type[i].num_msdu += \
					_srcobj->tx.transmit_type[i].num_msdu; \
			_tgtobj->tx.transmit_type[i].num_mpdu += \
					_srcobj->tx.transmit_type[i].num_mpdu; \
			_tgtobj->tx.transmit_type[i].mpdu_tried += \
					_srcobj->tx.transmit_type[i].mpdu_tried; \
		} \
		for (i = 0; i < MAX_MU_GROUP_ID; i++) { \
			_tgtobj->tx.mu_group_id[i] = _srcobj->tx.mu_group_id[i]; \
		} \
		_tgtobj->tx.tx_ucast_total.num += \
					_srcobj->tx.tx_ucast_total.num; \
		_tgtobj->tx.tx_ucast_total.bytes += \
					_srcobj->tx.tx_ucast_total.bytes; \
		_tgtobj->tx.tx_ucast_success.num += \
					_srcobj->tx.tx_ucast_success.num; \
		_tgtobj->tx.tx_ucast_success.bytes += \
					_srcobj->tx.tx_ucast_success.bytes; \
		\
		_tgtobj->rx.mpdu_cnt_fcs_ok += _srcobj->rx.mpdu_cnt_fcs_ok; \
		_tgtobj->rx.mpdu_cnt_fcs_err += _srcobj->rx.mpdu_cnt_fcs_err; \
		_tgtobj->rx.non_ampdu_cnt += _srcobj->rx.non_ampdu_cnt; \
		_tgtobj->rx.ampdu_cnt += _srcobj->rx.ampdu_cnt; \
		_tgtobj->rx.rx_mpdus += _srcobj->rx.rx_mpdus; \
		_tgtobj->rx.rx_ppdus += _srcobj->rx.rx_ppdus; \
		_tgtobj->rx.rx_rate = _srcobj->rx.rx_rate; \
		_tgtobj->rx.last_rx_rate = _srcobj->rx.last_rx_rate; \
		_tgtobj->rx.rnd_avg_rx_rate = _srcobj->rx.rnd_avg_rx_rate; \
		_tgtobj->rx.avg_rx_rate = _srcobj->rx.avg_rx_rate; \
		_tgtobj->rx.rx_ratecode = _srcobj->rx.rx_ratecode; \
		_tgtobj->rx.avg_snr = _srcobj->rx.avg_snr; \
		_tgtobj->rx.rx_snr_measured_time = \
					_srcobj->rx.rx_snr_measured_time; \
		_tgtobj->rx.snr = _srcobj->rx.snr; \
		_tgtobj->rx.last_snr = _srcobj->rx.last_snr; \
		_tgtobj->rx.nss_info = _srcobj->rx.nss_info; \
		_tgtobj->rx.mcs_info = _srcobj->rx.mcs_info; \
		_tgtobj->rx.bw_info = _srcobj->rx.bw_info; \
		_tgtobj->rx.gi_info = _srcobj->rx.gi_info; \
		_tgtobj->rx.preamble_info = _srcobj->rx.preamble_info; \
		_tgtobj->rx.mpdu_retry_cnt += _srcobj->rx.mpdu_retry_cnt; \
		_tgtobj->rx.bar_cnt = _srcobj->rx.bar_cnt; \
		_tgtobj->rx.ndpa_cnt = _srcobj->rx.ndpa_cnt; \
		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
			for (i = 0; i < MAX_MCS; i++) { \
				_tgtobj->rx.pkt_type[pream_type].mcs_count[i] += \
					_srcobj->rx.pkt_type[pream_type].mcs_count[i]; \
			} \
		} \
		for (i = 0; i < WME_AC_MAX; i++) { \
			_tgtobj->rx.wme_ac_type[i] += _srcobj->rx.wme_ac_type[i]; \
			_tgtobj->rx.wme_ac_type_bytes[i] += \
					_srcobj->rx.wme_ac_type_bytes[i]; \
		} \
		for (i = 0; i < MAX_MCS; i++) { \
			_tgtobj->rx.su_ax_ppdu_cnt.mcs_count[i] += \
					_srcobj->rx.su_ax_ppdu_cnt.mcs_count[i]; \
			_tgtobj->rx.rx_mpdu_cnt[i] += _srcobj->rx.rx_mpdu_cnt[i]; \
		} \
		for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \
			_tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok += \
				_srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok; \
			_tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err += \
				_srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err; \
			for (i = 0; i < SS_COUNT; i++) \
				_tgtobj->rx.rx_mu[mu_type].ppdu_nss[i] += \
					_srcobj->rx.rx_mu[mu_type].ppdu_nss[i]; \
			for (i = 0; i < MAX_MCS; i++) \
				_tgtobj->rx.rx_mu[mu_type].ppdu.mcs_count[i] += \
					_srcobj->rx.rx_mu[mu_type].ppdu.mcs_count[i]; \
		} \
		for (i = 0; i < MAX_RECEPTION_TYPES; i++) { \
			_tgtobj->rx.reception_type[i] += \
					_srcobj->rx.reception_type[i]; \
			_tgtobj->rx.ppdu_cnt[i] += _srcobj->rx.ppdu_cnt[i]; \
		} \
		for (i = 0; i < MAX_GI; i++) { \
			_tgtobj->rx.sgi_count[i] += _srcobj->rx.sgi_count[i]; \
		} \
		for (i = 0; i < SS_COUNT; i++) { \
			_tgtobj->rx.nss[i] += _srcobj->rx.nss[i]; \
			_tgtobj->rx.ppdu_nss[i] += _srcobj->rx.ppdu_nss[i]; \
		} \
		for (i = 0; i < MAX_BW; i++) { \
			_tgtobj->rx.bw[i] += _srcobj->rx.bw[i]; \
		} \
		DP_UPDATE_11BE_STATS(_tgtobj, _srcobj); \
	} while (0)

/**
 * dp_peer_find_attach() - Allocates memory for peer objects
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_find_attach(struct dp_soc *soc);
extern void dp_peer_find_detach(struct dp_soc *soc);
extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
extern void dp_peer_find_hash_erase(struct dp_soc *soc);
void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_peer *peer);
void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
			      struct dp_peer *peer);
void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
				struct dp_peer *peer,
				uint16_t peer_id);
void dp_txrx_peer_attach_add(struct dp_soc *soc,
			     struct dp_peer *peer,
			     struct dp_txrx_peer *txrx_peer);
void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
				   uint16_t peer_id);
void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
			  enum dp_mod_id mod_id);

/**
 * dp_peer_ppdu_delayed_ba_cleanup() - free the ppdu allocated in peer
 * @peer: Datapath peer
 *
 * Return: void
 */
void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer);

extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
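
/*
 * Illustrative ordering sketch (assumptions, not a contract): the peer
 * find primitives above are typically paired as below across peer
 * creation and teardown; the authoritative call sites live in dp_peer.c.
 *
 *	dp_peer_find_hash_add(soc, peer);		// MAC-based lookup
 *	dp_peer_vdev_list_add(soc, vdev, peer);		// per-vdev peer list
 *	dp_peer_find_id_to_obj_add(soc, peer, peer_id);	// on peer-map event
 *	...
 *	dp_peer_find_id_to_obj_remove(soc, peer_id);
 *	dp_peer_vdev_list_remove(soc, vdev, peer);
 *	dp_peer_find_hash_remove(soc, peer);
 */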

#ifdef DP_PEER_EXTENDED_API
/**
 * dp_register_peer() - Register peer into physical device
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @sta_desc: peer description
 *
 * Register peer into physical device
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			    struct ol_txrx_desc_type *sta_desc);

/**
 * dp_clear_peer() - remove peer from physical device
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @peer_addr: peer mac address
 *
 * Remove peer from physical device
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			 struct qdf_mac_addr peer_addr);

/**
 * dp_find_peer_exist_on_vdev() - find if peer exists on the given vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev instance id
 * @peer_addr: peer mac address
 *
 * Return: true or false
 */
bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				uint8_t *peer_addr);

/**
 * dp_find_peer_exist_on_other_vdev() - find if peer exists
 *					on other than the given vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev instance id
 * @peer_addr: peer mac address
 * @max_bssid: max number of bssids
 *
 * Return: true or false
 */
bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, uint8_t *peer_addr,
				      uint16_t max_bssid);

/**
 * dp_peer_state_update() - update peer local state
 * @soc: data path soc handle
 * @peer_mac: peer mac address
 * @state: new peer local state
 *
 * Update peer local state
 *
 * Return: QDF_STATUS_SUCCESS registration success
 */
QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac,
				enum ol_txrx_peer_state state);

/**
 * dp_get_vdevid() - Get virtual interface id which peer registered
 * @soc_hdl: datapath soc handle
 * @peer_mac: peer mac address
 * @vdev_id: virtual interface id which peer registered
 *
 * Get virtual interface id which peer registered
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
			 uint8_t *vdev_id);
struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
					  struct qdf_mac_addr peer_addr);
struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
uint8_t *dp_peer_get_peer_mac_addr(void *peer);

/**
 * dp_get_peer_state() - Get local peer state
 * @soc: datapath soc handle
 * @vdev_id: vdev id
 * @peer_mac: peer mac addr
 *
 * Get local peer state
 *
 * Return: peer status
 */
int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id,
		      uint8_t *peer_mac);
void dp_local_peer_id_pool_init(struct dp_pdev *pdev);
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer);
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer);
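
/*
 * Illustrative sketch (hypothetical call site): updating then querying a
 * peer's local state with the helpers above. OL_TXRX_PEER_STATE_CONN is
 * assumed from the ol_txrx headers.
 *
 *	if (dp_peer_state_update(soc_hdl, peer_mac,
 *				 OL_TXRX_PEER_STATE_CONN) !=
 *	    QDF_STATUS_SUCCESS)
 *		return;
 *
 *	state = dp_get_peer_state(soc_hdl, vdev_id, peer_mac);
 */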

/**
 * dp_set_peer_as_tdls_peer() - set tdls peer flag to peer
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev id
 * @peer_mac: peer mac addr
 * @val: tdls peer flag
 *
 * Return: none
 */
void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      uint8_t *peer_mac, bool val);
#else
/**
 * dp_get_vdevid() - Get virtual interface id which peer registered
 * @soc_hdl: datapath soc handle
 * @peer_mac: peer mac address
 * @vdev_id: virtual interface id which peer registered
 *
 * Get virtual interface id which peer registered
 *
 * Return: QDF_STATUS_E_NOSUPPORT when the extended peer API is not enabled
 */
static inline
QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
			 uint8_t *vdev_id)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
}

static inline
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
}

static inline
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
}

static inline
void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      uint8_t *peer_mac, bool val)
{
}
#endif

/**
 * dp_find_peer_exist() - find peer if already exists
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device instance id
 * @peer_addr: peer mac address
 *
 * Return: true or false
 */
bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			uint8_t *peer_addr);

int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
				      uint8_t *peer_mac, uint16_t vdev_id,
				      uint8_t tid,
				      int status);
int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
				  uint8_t *peer_mac, uint16_t vdev_id,
				  uint8_t dialogtoken, uint16_t tid,
				  uint16_t batimeout,
				  uint16_t buffersize,
				  uint16_t startseqnum);
QDF_STATUS dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc,
					uint8_t *peer_mac, uint16_t vdev_id,
					uint8_t tid, uint8_t *dialogtoken,
					uint16_t *statuscode,
					uint16_t *buffersize,
					uint16_t *batimeout);
QDF_STATUS dp_set_addba_response(struct cdp_soc_t *cdp_soc,
				 uint8_t *peer_mac,
				 uint16_t vdev_id, uint8_t tid,
				 uint16_t statuscode);
int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			   uint16_t vdev_id, int tid,
			   uint16_t reasoncode);

/**
 * dp_rx_tid_update_ba_win_size() - Update the DP tid BA window size
 * @cdp_soc: soc handle
 * @peer_mac: mac address of peer handle
 * @vdev_id: id of vdev handle
 * @tid: tid
 * @buffersize: BA window size
 *
 * Return: success/failure of tid update
 */
QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
					uint8_t *peer_mac, uint16_t vdev_id,
					uint8_t tid, uint16_t buffersize);
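
/*
 * Illustrative sketch of how the block-ack helpers above are typically
 * sequenced when an ADDBA request is received (assumed flow; the
 * authoritative call sites are in the peer/TID management code):
 *
 *	dp_addba_requestprocess_wifi3(soc, peer_mac, vdev_id, dialogtoken,
 *				      tid, batimeout, buffersize, ssn);
 *	// fetch the negotiated parameters to build the ADDBA response
 *	dp_addba_responsesetup_wifi3(soc, peer_mac, vdev_id, tid,
 *				     &dialogtoken, &statuscode,
 *				     &buffersize, &batimeout);
 *	// once the response frame completes on air:
 *	dp_addba_resp_tx_completion_wifi3(soc, peer_mac, vdev_id, tid,
 *					  status);
 */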

/**
 * dp_delba_tx_completion_wifi3() - Handle delba tx completion
 * @cdp_soc: soc handle
 * @peer_mac: peer mac address
 * @vdev_id: id of the vdev handle
 * @tid: Tid number
 * @status: Tx completion status
 *
 * Indicate status of delba Tx to DP for stats update and retry
 * delba if tx failed.
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
				 uint16_t vdev_id, uint8_t tid,
				 int status);
extern QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
					uint32_t ba_window_size,
					uint32_t start_seq);

#ifdef DP_UMAC_HW_RESET_SUPPORT
void dp_pause_reo_send_cmd(struct dp_soc *soc);

void dp_resume_reo_send_cmd(struct dp_soc *soc);
void dp_cleanup_reo_cmd_module(struct dp_soc *soc);
void dp_reo_desc_freelist_destroy(struct dp_soc *soc);
void dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr,
			       uint32_t size);
#endif

extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
	enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params,
	void (*callback_fn), void *data);

extern void dp_reo_cmdlist_destroy(struct dp_soc *soc);

/**
 * dp_reo_status_ring_handler() - Handler for REO Status ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP Soc handle
 *
 * Return: Number of descriptors reaped
 */
uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx,
				    struct dp_soc *soc);
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
			     struct cdp_vdev_stats *vdev_stats);
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			union hal_reo_status *reo_status);
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			union hal_reo_status *reo_status);
uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id,
				     qdf_nbuf_t nbuf,
				     uint8_t newmac[][QDF_MAC_ADDR_SIZE],
				     uint8_t new_mac_cnt, uint8_t tid,
				     bool is_igmp, bool is_dms_pkt);
void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);

void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie, int cookie_msb,
		uint8_t mac_id);
void dp_htt_stats_print_tag(struct dp_pdev *pdev,
			    uint8_t tag_type, uint32_t *tag_buf);
void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type,
			   uint32_t *tag_buf);
QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask,
				     uint8_t mac_id);

/**
 * dp_rxtid_stats_cmd_cb - function pointer for peer
 *			   rx tid stats cmd callback
 */
typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt,
				      union hal_reo_status *reo_status);
int dp_peer_rxtid_stats(struct dp_peer *peer,
			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
			void *cb_ctxt);
#ifdef IPA_OFFLOAD
void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
				       union hal_reo_status *reo_status);
int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb);
#ifdef QCA_ENHANCED_STATS_SUPPORT
void dp_peer_aggregate_tid_stats(struct dp_peer *peer);
#endif
#else
static inline void dp_peer_aggregate_tid_stats(struct dp_peer *peer)
{
}
#endif
QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
		      uint32_t *rx_pn);

QDF_STATUS
dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
			  bool is_unicast);
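
/*
 * Illustrative sketch (hypothetical callback): dp_peer_rxtid_stats()
 * above takes a dp_rxtid_stats_cmd_cb. A minimal implementation might
 * just forward the REO queue status to the caller's context; the
 * queue_status member name is an assumption from the hal_reo headers.
 *
 *	static void ex_rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
 *				      union hal_reo_status *reo_status)
 *	{
 *		struct hal_reo_queue_status *qstats =
 *					&reo_status->queue_status;
 *		// consume qstats / cb_ctxt here
 *	}
 *
 *	(void)dp_peer_rxtid_stats(peer, ex_rxtid_stats_cb, my_ctxt);
 */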

void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id);

QDF_STATUS
dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id,
		   uint8_t *peer_mac,
		   bool is_unicast, uint32_t *key);

/**
 * dp_check_pdev_exists() - Validate pdev before use
 * @soc: dp soc handle
 * @data: pdev handle
 *
 * Return: true when the pdev is valid, false otherwise
 */
bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data);

/**
 * dp_update_delay_stats() - Update delay statistics in structure
 *			     and fill min, max and avg delay
 * @tstats: tid tx stats
 * @rstats: tid rx stats
 * @delay: delay in ms
 * @tid: tid value
 * @mode: type of tx delay mode
 * @ring_id: ring number
 * @delay_in_us: flag to indicate whether the delay is in ms or us
 *
 * Return: none
 */
void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
			   uint8_t tid, uint8_t mode, uint8_t ring_id,
			   bool delay_in_us);

/**
 * dp_print_ring_stats() - Print tail and head pointer
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
void dp_print_ring_stats(struct dp_pdev *pdev);

/**
 * dp_print_ring_stat_from_hal() - Print tail and head pointer through hal
 * @soc: soc handle
 * @srng: srng handle
 * @ring_type: ring type
 *
 * Return: void
 */
void
dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
			    enum hal_ring_type ring_type);

/**
 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
 * @pdev: DP pdev handle
 *
 * Return: void
 */
void dp_print_pdev_cfg_params(struct dp_pdev *pdev);

/**
 * dp_print_soc_cfg_params() - Dump soc wlan config parameters
 * @soc: Soc handle
 *
 * Return: void
 */
void dp_print_soc_cfg_params(struct dp_soc *soc);

/**
 * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
 * @ring_type: Ring type
 *
 * Return: pointer to a constant string holding the ring name
 */
const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type);

/**
 * dp_txrx_path_stats() - Function to dump datapath TX/RX path stats
 * @soc: soc handle
 *
 * Return: none
 */
void dp_txrx_path_stats(struct dp_soc *soc);

/**
 * dp_print_per_ring_stats() - Packet count per ring
 * @soc: soc handle
 *
 * Return: None
 */
void dp_print_per_ring_stats(struct dp_soc *soc);

/**
 * dp_aggregate_pdev_stats() - Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
void dp_aggregate_pdev_stats(struct dp_pdev *pdev);

/**
 * dp_print_rx_rates() - Print Rx rate stats
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
void dp_print_rx_rates(struct dp_vdev *vdev);

/**
 * dp_print_tx_rates() - Print tx rates
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
void dp_print_tx_rates(struct dp_vdev *vdev);

/**
 * dp_print_peer_stats() - print peer stats
 * @peer: DP_PEER handle
 * @peer_stats: buffer holding peer stats
 *
 * Return: void
 */
void dp_print_peer_stats(struct dp_peer *peer,
			 struct cdp_peer_stats *peer_stats);
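
/*
 * Illustrative sketch (assumed names): dp_update_delay_stats() above is
 * fed from the per-ring TID stats context. The stats location and the
 * mode identifier CDP_DELAY_STATS_FW_HW_TRANSMIT are assumptions taken
 * from the cdp headers; delay is in ms when delay_in_us is false.
 *
 *	dp_update_delay_stats(
 *		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid],
 *		NULL, tx_delay_ms, tid,
 *		CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id, false);
 */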

/**
 * dp_print_pdev_tx_stats() - Print Pdev level TX stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
void
dp_print_pdev_tx_stats(struct dp_pdev *pdev);

/**
 * dp_print_pdev_rx_stats() - Print Pdev level RX stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
void
dp_print_pdev_rx_stats(struct dp_pdev *pdev);

/**
 * dp_print_soc_tx_stats() - Print SOC level stats
 * @soc: DP_SOC Handle
 *
 * Return: void
 */
void dp_print_soc_tx_stats(struct dp_soc *soc);

#ifdef QCA_SUPPORT_GLOBAL_DESC
/**
 * dp_print_global_desc_count() - Print global desc in use
 *
 * Return: void
 */
void dp_print_global_desc_count(void);
#else
static inline
void dp_print_global_desc_count(void)
{
}
#endif

/**
 * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc
 * @soc: dp_soc handle
 *
 * Return: None
 */
void dp_print_soc_interrupt_stats(struct dp_soc *soc);

/**
 * dp_print_tx_ppeds_stats() - Print Tx in use stats for the soc in DS
 * @soc: dp_soc handle
 *
 * Return: None
 */
void dp_print_tx_ppeds_stats(struct dp_soc *soc);

#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
/**
 * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats
 *				  for all SRNGs
 * @soc: DP soc handle
 * @srng_mask: SRNGs mask for dumping usage watermark stats
 *
 * Return: None
 */
void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask);
#else
static inline
void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask)
{
}
#endif

/**
 * dp_print_soc_rx_stats() - Print SOC level Rx stats
 * @soc: DP_SOC Handle
 *
 * Return: void
 */
void dp_print_soc_rx_stats(struct dp_soc *soc);

/**
 * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac
 * @mac_id: MAC id
 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
 *
 * Single pdev using both MACs will operate on both MAC rings,
 * which is the case for MCL.
 * For WIN each PDEV will operate one ring, so index is zero.
 *
 * Return: ring index (mac_id + pdev_id)
 */
static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id)
{
	if (mac_id && pdev_id) {
		qdf_print("Both mac_id and pdev_id cannot be non zero");
		QDF_BUG(0);
		return 0;
	}
	return (mac_id + pdev_id);
}
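
/*
 * Worked example for dp_get_mac_id_for_pdev() above: on MCL a single
 * pdev drives both MAC rings, so (mac_id = 1, pdev_id = 0) maps to ring
 * index 1; on WIN each pdev owns one ring, so (mac_id = 0, pdev_id = 2)
 * maps to index 2. Passing both arguments as non-zero is a programming
 * error and asserts.
 *
 *	ring = dp_get_mac_id_for_pdev(1, 0);	// 1 on MCL
 *	ring = dp_get_mac_id_for_pdev(0, 2);	// 2 (pdev 2) on WIN
 */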

/**
 * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id
 * @soc: soc pointer
 * @mac_id: MAC id
 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
 *
 * For MCL, a single pdev using both MACs will operate on both MAC rings.
 *
 * For WIN, each PDEV will operate one ring.
 *
 * Return: lmac id
 */
static inline int
dp_get_lmac_id_for_pdev_id
	(struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id)
{
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
		if (mac_id && pdev_id) {
			qdf_print("Both mac_id and pdev_id cannot be non zero");
			QDF_BUG(0);
			return 0;
		}
		return (mac_id + pdev_id);
	}

	return soc->pdev_list[pdev_id]->lmac_id;
}

/**
 * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id
 * @soc: soc pointer
 * @lmac_id: LMAC id
 *
 * For MCL, a single pdev exists
 *
 * For WIN, each PDEV will operate one ring.
 *
 * Return: pdev pointer, or NULL when the lmac has no pdev mapping
 */
static inline struct dp_pdev *
dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id)
{
	uint8_t i = 0;

	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
		i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id);
		return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL);
	}

	/* Typically for MCL, as there is only one pdev */
	return soc->pdev_list[0];
}

/**
 * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev
 *	corresponding to host pdev id
 * @soc: soc pointer
 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
 *
 * Returns the target pdev_id for a host pdev id. For WIN, this is derived
 * through a two step process:
 * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change
 *    during mode switch)
 * 2. Get target pdev_id (set up during WMI ready) from lmac_id
 *
 * For MCL, return the offset-1 translated mac_id
 *
 * Return: target pdev_id
 */
static inline int
dp_calculate_target_pdev_id_from_host_pdev_id
	(struct dp_soc *soc, uint32_t mac_for_pdev)
{
	struct dp_pdev *pdev;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return DP_SW2HW_MACID(mac_for_pdev);

	pdev = soc->pdev_list[mac_for_pdev];

	/* non-MCL case, get original target_pdev mapping */
	return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id);
}

/**
 * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding
 *	to host pdev id
 * @soc: soc pointer
 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
 *
 * Returns the target pdev_id for a host pdev id.
 * For WIN, return the value stored in the pdev object.
 * For MCL, return the offset-1 translated mac_id.
 *
 * Return: target pdev_id
 */
static inline int
dp_get_target_pdev_id_for_host_pdev_id
	(struct dp_soc *soc, uint32_t mac_for_pdev)
{
	struct dp_pdev *pdev;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return DP_SW2HW_MACID(mac_for_pdev);

	pdev = soc->pdev_list[mac_for_pdev];

	return pdev->target_pdev_id;
}
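
/*
 * Illustrative sketch: the mapping helpers above are inverses over the
 * lmac <-> pdev relationship when per-pdev lmac rings are enabled.
 *
 *	lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, pdev_id);
 *	pdev = dp_get_pdev_for_lmac_id(soc, lmac_id);
 *	// pdev->pdev_id == pdev_id when the cfg mapping is consistent
 */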

/**
 * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding
 *	to target pdev id
 * @soc: soc pointer
 * @pdev_id: pdev_id corresponding to target pdev
 *
 * Returns the host pdev_id for a target pdev id. For WIN, this is derived
 * through a two step process:
 * 1. Get lmac_id corresponding to target pdev_id
 * 2. Get host pdev_id (set up during WMI ready) from lmac_id
 *
 * For MCL, return the 0-offset pdev_id
 *
 * Return: host pdev_id, or INVALID_PDEV_ID when no pdev maps to the lmac
 */
static inline int
dp_get_host_pdev_id_for_target_pdev_id
	(struct dp_soc *soc, uint32_t pdev_id)
{
	struct dp_pdev *pdev;
	int lmac_id;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return DP_HW2SW_MACID(pdev_id);

	/* non-MCL case, get original target_lmac mapping from target pdev */
	lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx,
					  DP_HW2SW_MACID(pdev_id));

	/* Get host pdev from lmac */
	pdev = dp_get_pdev_for_lmac_id(soc, lmac_id);

	return pdev ? pdev->pdev_id : INVALID_PDEV_ID;
}

/**
 * dp_get_mac_id_for_mac() - Return mac corresponding WIN and MCL mac_ids
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Single pdev using both MACs will operate on both MAC rings,
 * which is the case for MCL.
 * For WIN each PDEV will operate one ring, so index is zero.
 *
 * Return: mac_id for MCL, 0 for WIN
 */
static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
{
	/*
	 * Single pdev using both MACs will operate on both MAC rings,
	 * which is the case for MCL.
	 */
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return mac_id;

	/* For WIN each PDEV will operate one ring, so index is zero. */
	return 0;
}

/**
 * dp_is_subtype_data() - check if the frame subtype is data
 * @frame_ctrl: Frame control field
 *
 * Check the frame control field and verify if the packet
 * is a data packet.
 *
 * Return: true or false
 */
static inline bool dp_is_subtype_data(uint16_t frame_ctrl)
{
	if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) ==
	    QDF_IEEE80211_FC0_TYPE_DATA) &&
	    (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
	    QDF_IEEE80211_FC0_SUBTYPE_DATA) ||
	    ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
	    QDF_IEEE80211_FC0_SUBTYPE_QOS))) {
		return true;
	}

	return false;
}
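
/*
 * Worked example for dp_is_subtype_data() above: in the 802.11 frame
 * control word the low byte carries type (mask 0x0c) and subtype
 * (mask 0xf0); data frames have type 0x08 and QoS data adds subtype
 * 0x80, i.e. fc0 = 0x88.
 *
 *	dp_is_subtype_data(0x0088);	// true: QoS data
 *	dp_is_subtype_data(0x0008);	// true: plain data
 *	dp_is_subtype_data(0x0080);	// false: beacon (mgmt)
 */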

#ifdef WDI_EVENT_ENABLE
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
				     uint32_t stats_type_upload_mask,
				     uint8_t mac_id);

int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
		       wdi_event_subscribe *event_cb_sub_handle,
		       uint32_t event);

int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
		     wdi_event_subscribe *event_cb_sub_handle,
		     uint32_t event);

void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc,
			  void *data, u_int16_t peer_id,
			  int status, u_int8_t pdev_id);

int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);
int dp_wdi_event_detach(struct dp_pdev *txrx_pdev);

static inline void
dp_hif_update_pipe_callback(struct dp_soc *dp_soc,
			    void *cb_context,
			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
			    uint8_t pipe_id)
{
	struct hif_msg_callbacks hif_pipe_callbacks = { 0 };

	/* TODO: Temporary change to bypass HTC connection for this new
	 * HIF pipe, which will be used for packet log and other high-
	 * priority HTT messages. Proper HTC connection to be added
	 * later once the required FW changes are available
	 */
	hif_pipe_callbacks.rxCompletionHandler = callback;
	hif_pipe_callbacks.Context = cb_context;
	hif_update_pipe_callback(dp_soc->hif_handle,
		DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks);
}
#else
static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
				     wdi_event_subscribe *event_cb_sub_handle,
				     uint32_t event)
{
	return 0;
}

static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
				   wdi_event_subscribe *event_cb_sub_handle,
				   uint32_t event)
{
	return 0;
}

static inline
void dp_wdi_event_handler(enum WDI_EVENT event,
			  struct dp_soc *soc,
			  void *data, u_int16_t peer_id,
			  int status, u_int8_t pdev_id)
{
}

static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
{
	return 0;
}

static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
{
	return 0;
}

static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	return 0;
}

static inline void
dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context,
			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
			    uint8_t pipe_id)
{
}
#endif /* WDI_EVENT_ENABLE */

#ifdef VDEV_PEER_PROTOCOL_COUNT
/**
 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters
 * @vdev: VDEV DP object
 * @nbuf: data packet
 * @txrx_peer: DP TXRX Peer object
 * @is_egress: whether egress or ingress
 * @is_rx: whether rx or tx
 *
 * This function updates the per-peer protocol counters
 *
 * Return: void
 */
void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
					    qdf_nbuf_t nbuf,
					    struct dp_txrx_peer *txrx_peer,
					    bool is_egress,
					    bool is_rx);

/**
 * dp_peer_stats_update_protocol_cnt() - update per-peer protocol counters
 * @soc: SOC DP object
 * @vdev_id: vdev_id
 * @nbuf: data packet
 * @is_egress: whether egress or ingress
 * @is_rx: whether rx or tx
 *
 * This function updates the per-peer protocol counters
 *
 * Return: void
 */
void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc,
				       int8_t vdev_id,
				       qdf_nbuf_t nbuf,
				       bool is_egress,
				       bool is_rx);

void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
					       qdf_nbuf_t nbuf);

#else
#define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, txrx_peer, \
					       is_egress, is_rx)

static inline
void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
					       qdf_nbuf_t nbuf)
{
}

#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl);

/**
 * dp_tx_dump_flow_pool_info_compact() - dump flow pool info
 * @soc: DP soc context
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc);
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
			   bool force);
#else
static inline void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
{
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
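
/*
 * Illustrative sketch (assumed struct layout): subscribing to a WDI
 * event with the helpers above. The .callback/.context member names of
 * wdi_event_subscribe are assumptions from the wdi_event headers, and
 * my_rx_desc_cb/my_ctx are hypothetical.
 *
 *	static wdi_event_subscribe ex_sub = { 0 };
 *
 *	ex_sub.callback = my_rx_desc_cb;
 *	ex_sub.context = my_ctx;
 *	dp_wdi_event_sub(soc_hdl, pdev_id, &ex_sub, WDI_EVENT_RX_DESC);
 *	...
 *	dp_wdi_event_unsub(soc_hdl, pdev_id, &ex_sub, WDI_EVENT_RX_DESC);
 */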

#ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
static inline int
dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc, hal_ring_hdl);
}

static inline void
dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc, hal_ring_hdl);
}

#else
static inline int
dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc, hal_ring_hdl);
}

static inline void
dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc, hal_ring_hdl);
}
#endif

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * dp_srng_access_start() - Wrapper function to log access start of a hal ring
 * @int_ctx: pointer to DP interrupt context. This should not be NULL
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL ring which will be serviced
 *
 * Return: 0 on success; error on failure
 */
int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			 hal_ring_handle_t hal_ring_hdl);

/**
 * dp_srng_access_end() - Wrapper function to log access end of a hal ring
 * @int_ctx: pointer to DP interrupt context. This should not be NULL
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL ring which will be serviced
 *
 * Return: void
 */
void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			hal_ring_handle_t hal_ring_hdl);

#else
static inline int dp_srng_access_start(struct dp_intr *int_ctx,
				       struct dp_soc *dp_soc,
				       hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
}

static inline void dp_srng_access_end(struct dp_intr *int_ctx,
				      struct dp_soc *dp_soc,
				      hal_ring_handle_t hal_ring_hdl)
{
	return dp_hal_srng_access_end(hal_soc := dp_soc->hal_soc, hal_ring_hdl);
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
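
/*
 * Illustrative sketch: the canonical reap loop built on the wrappers
 * above (and on dp_srng_dst_get_next(), defined just below); error
 * handling and quota accounting trimmed for brevity, process_entry()
 * is hypothetical.
 *
 *	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl)))
 *		return 0;
 *
 *	while ((ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl)))
 *		quota -= process_entry(soc, ring_desc);
 *
 *	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */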

#ifdef QCA_CACHED_RING_DESC
/**
 * dp_srng_dst_get_next() - Wrapper function to get next ring desc
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL Destination Ring
 *
 * Return: HAL ring descriptor
 */
static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
					 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl);
}

/**
 * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached
 *				    descriptors
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring
 * @num_entries: Entry count
 *
 * Return: None
 */
static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
						hal_ring_handle_t hal_ring_hdl,
						uint32_t num_entries)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries);
}
#else
static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
					 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
}

static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
						hal_ring_handle_t hal_ring_hdl,
						uint32_t num_entries)
{
}
#endif /* QCA_CACHED_RING_DESC */

#if defined(QCA_CACHED_RING_DESC) && \
	(defined(QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH) || \
	 defined(QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH))
/**
 * dp_srng_dst_prefetch() - Wrapper function to prefetch descs from dest ring
 * @hal_soc: HAL SOC handle
 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring
 * @num_entries: Entry count
 *
 * Return: pointer to the prefetched descriptor, or NULL
 */
static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc,
					 hal_ring_handle_t hal_ring_hdl,
					 uint32_t num_entries)
{
	return hal_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_entries);
}

/**
 * dp_srng_dst_prefetch_32_byte_desc() - Wrapper function to prefetch
 *					 32 byte descriptor starting at
 *					 64 byte offset
 * @hal_soc: HAL SOC handle
 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring
 * @num_entries: Entry count
 *
 * Return: pointer to the prefetched descriptor, or NULL
 */
static inline
void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc,
					hal_ring_handle_t hal_ring_hdl,
					uint32_t num_entries)
{
	return hal_srng_dst_prefetch_32_byte_desc(hal_soc, hal_ring_hdl,
						  num_entries);
}
#else
static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc,
					 hal_ring_handle_t hal_ring_hdl,
					 uint32_t num_entries)
{
	return NULL;
}

static inline
void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc,
					hal_ring_handle_t hal_ring_hdl,
					uint32_t num_entries)
{
	return NULL;
}
#endif
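
/*
 * Illustrative sketch for the cached-descriptor helpers above: with
 * QCA_CACHED_RING_DESC enabled, the reap path invalidates the CPU cache
 * for the descriptors it is about to read, optionally prefetching ahead;
 * with the feature disabled these calls collapse to the plain HAL
 * accessors or no-ops.
 *
 *	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
 *	(void)dp_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_pending);
 *	while ((desc = dp_srng_dst_get_next(soc, hal_ring_hdl))) {
 *		// process desc
 *	}
 */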

#ifdef QCA_ENH_V3_STATS_SUPPORT
/**
 * dp_pdev_print_delay_stats() - Print pdev level delay stats
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
void dp_pdev_print_delay_stats(struct dp_pdev *pdev);

/**
 * dp_pdev_print_tid_stats() - Print pdev level tid stats
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
void dp_pdev_print_tid_stats(struct dp_pdev *pdev);

/**
 * dp_pdev_print_rx_error_stats() - Print pdev level rx error stats
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev);
#endif /* QCA_ENH_V3_STATS_SUPPORT */

/**
 * dp_pdev_get_tid_stats() - Get accumulated pdev level tid_stats
 * @soc_hdl: soc handle
 * @pdev_id: id of dp_pdev handle
 * @tid_stats: Pointer for cdp_tid_stats_intf
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_INVAL
 */
QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				 struct cdp_tid_stats_intf *tid_stats);

void dp_soc_set_txrx_ring_map(struct dp_soc *soc);

/**
 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev
 * @vdev: DP vdev handle
 *
 * Return: struct cdp_vdev pointer
 */
static inline
struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev)
{
	return (struct cdp_vdev *)vdev;
}

/**
 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev
 * @pdev: DP pdev handle
 *
 * Return: struct cdp_pdev pointer
 */
static inline
struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev)
{
	return (struct cdp_pdev *)pdev;
}

/**
 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc
 * @psoc: DP psoc handle
 *
 * Return: struct cdp_soc pointer
 */
static inline
struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc)
{
	return (struct cdp_soc *)psoc;
}

/**
 * dp_soc_to_cdp_soc_t() - typecast dp psoc to ol txrx soc handle
 * @psoc: DP psoc handle
 *
 * Return: struct cdp_soc_t pointer
 */
static inline
struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc)
{
	return (struct cdp_soc_t *)psoc;
}

#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
/**
 * dp_rx_flow_get_fse_stats() - Get a flow's statistics
 * @pdev: pdev handle
 * @rx_flow_info: DP flow parameters
 * @stats: output buffer for the flow's statistics
 *
 * Return: Success when flow statistics are updated, error on failure
 */
QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev,
				    struct cdp_rx_flow_info *rx_flow_info,
				    struct cdp_flow_stats *stats);

/**
 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table
 * @pdev: pdev handle
 * @rx_flow_info: DP flow parameters
 *
 * Return: Success when flow is deleted, error on failure
 */
QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev,
				   struct cdp_rx_flow_info *rx_flow_info);

/**
 * dp_rx_flow_add_entry() - Add a flow entry to flow search table
 * @pdev: DP pdev instance
 * @rx_flow_info: DP flow parameters
 *
 * Return: Success when flow is added, no-memory or already exists on error
 */
QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev,
				struct cdp_rx_flow_info *rx_flow_info);

/**
 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_rx_fst_detach() - De-initialize Rx FST
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: None
 */
void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: Success when fst parameters are programmed in FW, error otherwise
 */
QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc,
					struct dp_pdev *pdev);

/**
 * dp_mon_rx_update_rx_flow_tag_stats() - Update a mon flow's statistics
 * @pdev: pdev handle
 * @flow_id: flow index (truncated hash) in the Rx FST
 *
 * Return: Success when flow statistics are updated, error on failure
 */
QDF_STATUS
dp_mon_rx_update_rx_flow_tag_stats(struct dp_pdev *pdev, uint32_t flow_id);

#else /* !(defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)) */

/**
 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_fst_detach() - De-initialize Rx FST
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: None
 */
static inline
void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif
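
/*
 * Illustrative sketch (assumed field usage): steering a flow through the
 * FST APIs above. The cdp_rx_flow_info tuple members are populated by
 * the caller from the flow's 5-tuple and are deliberately not spelled
 * out here.
 *
 *	struct cdp_rx_flow_info info = { 0 };
 *
 *	// fill the flow tuple members of info (assumed layout)
 *	if (dp_rx_flow_add_entry(pdev, &info) != QDF_STATUS_SUCCESS)
 *		return;
 *	...
 *	dp_rx_flow_delete_entry(pdev, &info);
 */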

/**
 * dp_rx_fst_attach_wrapper() - wrapper API for dp_rx_fst_attach
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: QDF_STATUS
 */
extern QDF_STATUS
dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_rx_fst_detach_wrapper() - wrapper API for dp_rx_fst_detach
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: None
 */
extern void
dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_vdev_get_ref() - API to take a reference for VDEV object
 * @soc: core DP soc context
 * @vdev: DP vdev
 * @mod_id: module id
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *         else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt))
		return QDF_STATUS_E_INVAL;

	qdf_atomic_inc(&vdev->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id
 * @soc: core DP soc context
 * @vdev_id: vdev id by which the vdev object can be retrieved
 * @mod_id: module id which is requesting the reference
 *
 * Return: struct dp_vdev*: Pointer to DP vdev object
 */
static inline struct dp_vdev *
dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id,
		      enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return NULL;

	qdf_spin_lock_bh(&soc->vdev_map_lock);
	vdev = soc->vdev_id_map[vdev_id];

	if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&soc->vdev_map_lock);
		return NULL;
	}
	qdf_spin_unlock_bh(&soc->vdev_map_lock);

	return vdev;
}

/**
 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id
 * @soc: core DP soc context
 * @pdev_id: pdev id by which the pdev object can be retrieved
 *
 * Return: struct dp_pdev*: Pointer to DP pdev object
 */
static inline struct dp_pdev *
dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc,
				   uint8_t pdev_id)
{
	if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT))
		return NULL;

	return soc->pdev_list[pdev_id];
}
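
/*
 * Illustrative sketch: the canonical get/put pairing for vdev references
 * using the helpers above. DP_MOD_ID_CDP is assumed to be a valid
 * dp_mod_id enumerator; dp_vdev_unref_delete(), declared earlier in this
 * file, releases the reference.
 *
 *	struct dp_vdev *vdev =
 *		dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
 *
 *	if (!vdev)
 *		return QDF_STATUS_E_INVAL;
 *	// ... use vdev under the held reference ...
 *	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 */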

/**
 * dp_rx_tid_update_wifi3() - Update receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @bar_update: BAR update triggered
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid,
				  uint32_t ba_window_size, uint32_t start_seq,
				  bool bar_update);

/**
 * dp_get_peer_mac_list() - function to get peer mac list of vdev
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @newmac: Table of the clients mac
 * @mac_cnt: No. of MACs required
 * @limit: Limit the number of clients
 *
 * Return: no of clients
 */
uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
			      u_int16_t mac_cnt, bool limit);

/**
 * dp_update_num_mac_rings_for_dbs() - Update No of MAC rings based on
 *				       DBS check
 * @soc: DP SoC context
 * @max_mac_rings: Pointer to variable for No of MAC rings
 *
 * Return: None
 */
void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
				     int *max_mac_rings);

#if defined(WLAN_SUPPORT_RX_FISA)
void dp_rx_dump_fisa_table(struct dp_soc *soc);

/**
 * dp_print_fisa_stats() - Print FISA stats
 * @soc: DP soc handle
 *
 * Return: None
 */
void dp_print_fisa_stats(struct dp_soc *soc);

/**
 * dp_rx_fst_update_cmem_params() - Update CMEM FST params
 * @soc: DP SoC context
 * @num_entries: Number of flow search entries
 * @cmem_ba_lo: CMEM base address low
 * @cmem_ba_hi: CMEM base address high
 *
 * Return: None
 */
void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
				  uint32_t cmem_ba_lo, uint32_t cmem_ba_hi);

void
dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended);

/**
 * dp_rx_fst_requeue_wq() - Re-queue pending work queue tasks
 * @soc: DP SoC context
 *
 * Return: None
 */
void dp_rx_fst_requeue_wq(struct dp_soc *soc);
#else
static inline void
dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
			     uint32_t cmem_ba_lo, uint32_t cmem_ba_hi)
{
}

static inline void
dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended)
{
}

static inline void
dp_rx_fst_requeue_wq(struct dp_soc *soc)
{
}

static inline void dp_print_fisa_stats(struct dp_soc *soc)
{
}
#endif /* WLAN_SUPPORT_RX_FISA */

#ifdef MAX_ALLOC_PAGE_SIZE
/**
 * dp_set_max_page_size() - Set the max page size for hw link desc.
 * @pages: link desc page handle
 * @max_alloc_size: max_alloc_size
 *
 * For MCL the page size is set to the OS defined value and for WIN
 * the page size is set to the max_alloc_size cfg ini param.
 * This is to ensure that WIN gets contiguous memory allocations
 * as per requirement.
 *
 * Return: None
 */
static inline
void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
			  uint32_t max_alloc_size)
{
	pages->page_size = qdf_page_size;
}

#else
static inline
void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
			  uint32_t max_alloc_size)
{
	pages->page_size = max_alloc_size;
}
#endif /* MAX_ALLOC_PAGE_SIZE */
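
/*
 * Illustrative usage of dp_get_peer_mac_list() above: the caller owns
 * the MAC table and bounds the walk with mac_cnt.
 *
 *	u_int8_t macs[16][QDF_MAC_ADDR_SIZE];
 *	uint16_t n;
 *
 *	n = dp_get_peer_mac_list(soc, vdev_id, macs, 16, false);
 *	// macs[0..n-1] now hold the client MAC addresses
 */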

/**
 * dp_history_get_next_index() - get the next entry to record an entry
 *				 in the history.
 * @curr_idx: Current index where the last entry is written.
 * @max_entries: Max number of entries in the history
 *
 * This function assumes that the max number of entries is a power of 2.
 *
 * Return: The index where the next entry is to be written.
 */
static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx,
						 uint32_t max_entries)
{
	uint32_t idx = qdf_atomic_inc_return(curr_idx);

	return idx & (max_entries - 1);
}

/**
 * dp_rx_skip_tlvs() - Skip TLVs len + L2 hdr_offset, save in nbuf->cb
 * @soc: Datapath soc handle
 * @nbuf: nbuf cb to be updated
 * @l3_padding: l3_padding
 *
 * Return: None
 */
void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding);

#ifndef FEATURE_WDS
static inline void
dp_hmwds_ast_add_notify(struct dp_peer *peer,
			uint8_t *mac_addr,
			enum cdp_txrx_ast_entry_type type,
			QDF_STATUS err,
			bool is_peer_map)
{
}
#endif

#ifdef HTT_STATS_DEBUGFS_SUPPORT
/**
 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
 *				    debugfs for HTT stats
 * @pdev: dp pdev handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev);

/**
 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
 *				      HTT stats
 * @pdev: dp pdev handle
 *
 * Return: none
 */
void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev);
#else
static inline QDF_STATUS
dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev)
{
}
#endif /* HTT_STATS_DEBUGFS_SUPPORT */

#ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_soc_swlm_attach() - attach the software latency manager resources
 * @soc: Datapath global soc handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_swlm_detach() - detach the software latency manager resources
 * @soc: Datapath global soc handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */

/**
 * dp_get_peer_id() - function to get peer id by mac
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @mac: Peer mac address
 *
 * Return: valid peer id on success
 *         HTT_INVALID_PEER on failure
 */
uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac);

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_set_peer_rx() - function to set the rx function pointer of a
 *			      WDS ext peer
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @mac: Peer mac address
 * @rx: rx function pointer
 * @osif_peer: OSIF peer handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *         QDF_STATUS_E_INVAL if peer is not found
 *         QDF_STATUS_E_ALREADY if rx is already set/unset
 */
QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
				  uint8_t vdev_id,
				  uint8_t *mac,
				  ol_txrx_rx_fp rx,
				  ol_osif_peer_handle osif_peer);
#endif /* QCA_SUPPORT_WDS_EXTENDED */
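
/*
 * Illustrative sketch for dp_history_get_next_index() above: histories
 * keep a power-of-2 entry count so the returned index simply wraps. The
 * hist/entry names below are hypothetical.
 *
 *	idx = dp_history_get_next_index(&hist->index, MAX_HIST_ENTRIES);
 *	hist->entry[idx] = event;	// record in the ring buffer
 */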
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_set_peer_rx() - function to set the rx state of a WDS ext peer
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @mac: Peer mac address
 * @rx: rx function pointer
 * @osif_peer: OSIF peer handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_INVAL if peer is not found
 *	   QDF_STATUS_E_ALREADY if rx is already set/unset
 */
QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
				  uint8_t vdev_id,
				  uint8_t *mac,
				  ol_txrx_rx_fp rx,
				  ol_osif_peer_handle osif_peer);
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#ifdef DP_MEM_PRE_ALLOC
/**
 * dp_context_alloc_mem() - allocate memory for DP context
 * @soc: datapath soc handle
 * @ctxt_type: DP context type
 * @ctxt_size: DP context size
 *
 * Return: DP context address
 */
void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			   size_t ctxt_size);

/**
 * dp_context_free_mem() - Free memory of DP context
 * @soc: datapath soc handle
 * @ctxt_type: DP context type
 * @vaddr: Address of context memory
 *
 * Return: None
 */
void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			 void *vaddr);

/**
 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages
 * @soc: datapath soc handle
 * @desc_type: memory request source type
 * @pages: multi page information storage
 * @element_size: each element size
 * @element_num: total number of elements to be allocated
 * @memctxt: memory context
 * @cacheable: coherent memory or cacheable memory
 *
 * This function is a wrapper for memory allocation over multiple pages.
 * If a DP prealloc method is registered, prealloc is attempted first;
 * if that fails, it falls back to a regular allocation via
 * qdf_mem_multi_pages_alloc().
 *
 * Return: None
 */
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint32_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable);

/**
 * dp_desc_multi_pages_mem_free() - free multiple pages memory
 * @soc: datapath soc handle
 * @desc_type: memory request source type
 * @pages: multi page information storage
 * @memctxt: memory context
 * @cacheable: coherent memory or cacheable memory
 *
 * This function is a wrapper for freeing memory spanning multiple pages.
 * If the memory came from the prealloc pool, it is returned to the pool;
 * otherwise it is freed via qdf_mem_multi_pages_free().
 *
 * Return: None
 */
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable);

#else
static inline
void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			   size_t ctxt_size)
{
	return qdf_mem_malloc(ctxt_size);
}

static inline
void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			 void *vaddr)
{
	qdf_mem_free(vaddr);
}

static inline
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint32_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable)
{
	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
				  element_num, memctxt, cacheable);
}

static inline
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable)
{
	qdf_mem_multi_pages_free(soc->osdev, pages,
				 memctxt, cacheable);
}
#endif /* DP_MEM_PRE_ALLOC */
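
/*
 * Usage sketch (illustrative only; DP_MY_CTXT_TYPE stands in for a real
 * enum dp_ctxt_type value): allocations made through
 * dp_context_alloc_mem() must be released with dp_context_free_mem()
 * using the same context type, so a prealloc-pool buffer is returned to
 * its pool rather than handed to qdf_mem_free().
 *
 *	ctxt = dp_context_alloc_mem(soc, DP_MY_CTXT_TYPE, sizeof(*ctxt));
 *	if (!ctxt)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	dp_context_free_mem(soc, DP_MY_CTXT_TYPE, ctxt);
 */
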
/**
 * struct dp_frag_history_opaque_atomic - Opaque struct for a fragmented
 *					  history with an atomic index
 * @index: atomic index
 * @num_entries_per_slot: Number of entries per slot
 * @allocated: is allocated or not
 * @entry: pointers to array of records
 */
struct dp_frag_history_opaque_atomic {
	qdf_atomic_t index;
	uint16_t num_entries_per_slot;
	uint16_t allocated;
	void *entry[0];
};

/**
 * dp_soc_frag_history_attach() - Allocate the slots of a fragmented history
 * @soc: DP SoC handle
 * @history_hdl: opaque handle to the history
 *		 (struct dp_frag_history_opaque_atomic)
 * @max_slots: number of slots in the history
 * @max_entries_per_slot: number of entries per slot
 * @entry_size: size of one entry
 * @attempt_prealloc: whether to allocate from the DP prealloc pool
 * @ctxt_type: DP context type, used when @attempt_prealloc is true
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on allocation
 *	   failure (already-allocated slots are freed)
 */
static inline QDF_STATUS
dp_soc_frag_history_attach(struct dp_soc *soc, void *history_hdl,
			   uint32_t max_slots, uint32_t max_entries_per_slot,
			   uint32_t entry_size,
			   bool attempt_prealloc, enum dp_ctxt_type ctxt_type)
{
	struct dp_frag_history_opaque_atomic *history =
			(struct dp_frag_history_opaque_atomic *)history_hdl;
	size_t alloc_size = max_entries_per_slot * entry_size;
	int i;

	for (i = 0; i < max_slots; i++) {
		if (attempt_prealloc)
			history->entry[i] = dp_context_alloc_mem(soc, ctxt_type,
								 alloc_size);
		else
			history->entry[i] = qdf_mem_malloc(alloc_size);

		if (!history->entry[i])
			goto exit;
	}

	qdf_atomic_init(&history->index);
	history->allocated = 1;
	history->num_entries_per_slot = max_entries_per_slot;

	return QDF_STATUS_SUCCESS;
exit:
	for (i = i - 1; i >= 0; i--) {
		if (attempt_prealloc)
			dp_context_free_mem(soc, ctxt_type, history->entry[i]);
		else
			qdf_mem_free(history->entry[i]);
	}

	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_soc_frag_history_detach() - Free the slots of a fragmented history
 * @soc: DP SoC handle
 * @history_hdl: opaque handle to the history
 * @max_slots: number of slots in the history
 * @attempt_prealloc: whether the slots were allocated from the DP prealloc
 *		      pool
 * @ctxt_type: DP context type, used when @attempt_prealloc is true
 *
 * Return: None
 */
static inline
void dp_soc_frag_history_detach(struct dp_soc *soc,
				void *history_hdl, uint32_t max_slots,
				bool attempt_prealloc,
				enum dp_ctxt_type ctxt_type)
{
	struct dp_frag_history_opaque_atomic *history =
			(struct dp_frag_history_opaque_atomic *)history_hdl;
	int i;

	for (i = 0; i < max_slots; i++) {
		if (attempt_prealloc)
			dp_context_free_mem(soc, ctxt_type, history->entry[i]);
		else
			qdf_mem_free(history->entry[i]);
	}

	history->allocated = 0;
}

/**
 * dp_get_frag_hist_next_atomic_idx() - get the next entry index to record an
 *					entry in a fragmented history with
 *					the index being atomic.
 * @curr_idx: address of the current index where the last entry was written
 * @next_idx: pointer to update the next index
 * @slot: pointer to update the history slot to be selected
 * @slot_shift: bitwise shift to extract the slot from the index
 * @max_entries_per_slot: Max number of entries in a slot of the history
 * @max_entries: Total number of entries in the history (sum of all slots)
 *
 * This function assumes that "max_entries_per_slot" and "max_entries"
 * are both a power of 2.
 *
 * Return: None
 */
static inline void
dp_get_frag_hist_next_atomic_idx(qdf_atomic_t *curr_idx, uint32_t *next_idx,
				 uint16_t *slot, uint32_t slot_shift,
				 uint32_t max_entries_per_slot,
				 uint32_t max_entries)
{
	uint32_t idx;

	idx = qdf_do_div_rem(qdf_atomic_inc_return(curr_idx), max_entries);

	*slot = idx >> slot_shift;
	*next_idx = idx & (max_entries_per_slot - 1);
}
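
/*
 * Worked example (values illustrative): with max_entries_per_slot = 1024,
 * slot_shift = 10 and max_entries = 4096, an atomic counter value of 1025
 * yields idx = 1025 % 4096 = 1025, so slot = 1025 >> 10 = 1 and
 * next_idx = 1025 & 1023 = 1, i.e. the second entry of the second slot.
 */
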
#ifdef FEATURE_RUNTIME_PM
/**
 * dp_runtime_get() - Get dp runtime refcount
 * @soc: Datapath soc handle
 *
 * Get the dp runtime refcount by incrementing an atomic variable; a non-zero
 * refcount makes dp runtime suspend wait until pending tx is flushed.
 *
 * Return: Current refcount
 */
static inline int32_t dp_runtime_get(struct dp_soc *soc)
{
	return qdf_atomic_inc_return(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_put() - Return dp runtime refcount
 * @soc: Datapath soc handle
 *
 * Return the dp runtime refcount by decrementing the atomic variable taken
 * by dp_runtime_get(), allowing dp runtime suspend to proceed.
 *
 * Return: Current refcount
 */
static inline int32_t dp_runtime_put(struct dp_soc *soc)
{
	return qdf_atomic_dec_return(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_get_refcount() - Get dp runtime refcount
 * @soc: Datapath soc handle
 *
 * Get dp runtime refcount by reading the atomic variable
 *
 * Return: Current refcount
 */
static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc)
{
	return qdf_atomic_read(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_init() - Init DP related runtime PM clients and runtime refcount
 * @soc: Datapath soc handle
 *
 * Return: None
 */
static inline void dp_runtime_init(struct dp_soc *soc)
{
	hif_rtpm_register(HIF_RTPM_ID_DP, NULL);
	hif_rtpm_register(HIF_RTPM_ID_DP_RING_STATS, NULL);
	qdf_atomic_init(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_deinit() - Deinit DP related runtime PM clients
 *
 * Return: None
 */
static inline void dp_runtime_deinit(void)
{
	hif_rtpm_deregister(HIF_RTPM_ID_DP);
	hif_rtpm_deregister(HIF_RTPM_ID_DP_RING_STATS);
}

/**
 * dp_runtime_pm_mark_last_busy() - Mark last busy when rx path in use
 * @soc: Datapath soc handle
 *
 * Return: None
 */
static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc)
{
	soc->rx_last_busy = qdf_get_log_timestamp_usecs();

	hif_rtpm_mark_last_busy(HIF_RTPM_ID_DP);
}
#else
static inline int32_t dp_runtime_get(struct dp_soc *soc)
{
	return 0;
}

static inline int32_t dp_runtime_put(struct dp_soc *soc)
{
	return 0;
}

static inline void dp_runtime_init(struct dp_soc *soc)
{
}

static inline void dp_runtime_deinit(void)
{
}

static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc)
{
}
#endif /* FEATURE_RUNTIME_PM */
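
/*
 * Usage sketch (illustrative only): a TX path that must not race with
 * runtime suspend brackets its work with the refcount helpers, so the
 * suspend handler sees a non-zero refcount and backs off.
 *
 *	dp_runtime_get(soc);
 *	... enqueue frame and ring the doorbell ...
 *	dp_runtime_put(soc);
 */
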
/**
 * dp_soc_get_con_mode() - Get the current connection/driver mode
 * @soc: DP SoC context
 *
 * Return: global mode from the registered ol_ops callback, or
 *	   QDF_GLOBAL_MAX_MODE if the callback is not registered
 */
static inline enum QDF_GLOBAL_MODE dp_soc_get_con_mode(struct dp_soc *soc)
{
	if (soc->cdp_soc.ol_ops->get_con_mode)
		return soc->cdp_soc.ol_ops->get_con_mode();

	return QDF_GLOBAL_MAX_MODE;
}

/**
 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
 *				processing
 * @pdev: Datapath PDEV handle
 *
 * Return: None
 */
void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev);

/**
 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
 *				processing
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_NOMEM: Error
 */
QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev);

/**
 * dp_peer_flush_frags() - Flush all fragments for a particular peer
 * @soc_hdl: data path soc handle
 * @vdev_id: vdev id
 * @peer_mac: peer mac address
 *
 * Return: None
 */
void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac);

/**
 * dp_soc_reset_mon_intr_mask() - reset mon intr mask
 * @soc: pointer to dp_soc handle
 *
 * Return: None
 */
void dp_soc_reset_mon_intr_mask(struct dp_soc *soc);

/**
 * dp_txrx_get_soc_stats() - fetch cdp_soc_stats
 * @soc_hdl: soc handle
 * @soc_stats: buffer to hold the values
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl,
				 struct cdp_soc_stats *soc_stats);

/**
 * dp_txrx_get_peer_delay_stats() - get peer delay stats per TIDs
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 * @delay_stats: pointer to delay stats array
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			     uint8_t *peer_mac,
			     struct cdp_delay_tid_stats *delay_stats);
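
/*
 * Usage sketch (illustrative only; the per-data-TID sizing via
 * CDP_MAX_DATA_TIDS is assumed from the cdp stats definitions, not
 * mandated here): fetching per-TID delay stats for a peer.
 *
 *	struct cdp_delay_tid_stats delay[CDP_MAX_DATA_TIDS];
 *
 *	if (QDF_IS_STATUS_ERROR(dp_txrx_get_peer_delay_stats(soc_hdl,
 *			vdev_id, peer_mac, delay)))
 *		return;
 */
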
/**
 * dp_txrx_get_peer_jitter_stats() - get peer jitter stats per TIDs
 * @soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 * @tid_stats: pointer to jitter stats array
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_txrx_get_peer_jitter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			      uint8_t vdev_id, uint8_t *peer_mac,
			      struct cdp_peer_tid_stats *tid_stats);

/**
 * dp_peer_get_tx_capture_stats() - get peer Tx Capture stats
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 * @stats: pointer to peer tx capture stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_peer_get_tx_capture_stats(struct cdp_soc_t *soc_hdl,
			     uint8_t vdev_id, uint8_t *peer_mac,
			     struct cdp_peer_tx_capture_stats *stats);

/**
 * dp_pdev_get_tx_capture_stats() - get pdev Tx Capture stats
 * @soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @stats: pointer to pdev tx capture stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_pdev_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			     struct cdp_pdev_tx_capture_stats *stats);

#ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_is_vdev_tx_delay_stats_enabled() - Check if tx delay stats are enabled
 *					 for a vdev
 * @vdev: dp vdev
 *
 * Return: true if tx delay stats are enabled for the vdev, else false
 */
static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev)
{
	return vdev->hw_tx_delay_stats_enabled;
}

/**
 * dp_pdev_print_tx_delay_stats() - Print vdev tx delay stats for the pdev
 * @soc: dp soc
 *
 * Return: None
 */
void dp_pdev_print_tx_delay_stats(struct dp_soc *soc);

/**
 * dp_pdev_clear_tx_delay_stats() - clear tx delay stats
 * @soc: soc handle
 *
 * Return: None
 */
void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc);
#else
static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev)
{
	return 0;
}

static inline void dp_pdev_print_tx_delay_stats(struct dp_soc *soc)
{
}

static inline void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc)
{
}
#endif /* HW_TX_DELAY_STATS_ENABLE */

/**
 * dp_get_rx_hash_key_bytes() - Fill the LRO Toeplitz hash seeds with random
 *				bytes
 * @lro_hash: LRO hash config to be filled
 *
 * Return: None
 */
static inline void
dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config *lro_hash)
{
	qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv4,
			     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv6,
			     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));
}

#ifdef WLAN_TELEMETRY_STATS_SUPPORT
/**
 * dp_get_pdev_telemetry_stats() - API to get pdev telemetry stats
 * @soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @stats: pointer to pdev telemetry stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_get_pdev_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			    struct cdp_pdev_telemetry_stats *stats);

/**
 * dp_get_peer_telemetry_stats() - API to get peer telemetry stats
 * @soc_hdl: soc handle
 * @addr: peer mac
 * @stats: pointer to peer telemetry stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_get_peer_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t *addr,
			    struct cdp_peer_telemetry_stats *stats);

/**
 * dp_get_peer_deter_stats() - API to get peer deterministic stats
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @addr: peer mac
 * @stats: pointer to peer deterministic stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_get_peer_deter_stats(struct cdp_soc_t *soc_hdl,
			uint8_t vdev_id,
			uint8_t *addr,
			struct cdp_peer_deter_stats *stats);

/**
 * dp_get_pdev_deter_stats() - API to get pdev deterministic stats
 * @soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @stats: pointer to pdev deterministic stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_get_pdev_deter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			struct cdp_pdev_deter_stats *stats);

/**
 * dp_update_pdev_chan_util_stats() - API to update channel utilization stats
 * @soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @ch_util: Pointer to channel util stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_update_pdev_chan_util_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			       struct cdp_pdev_chan_util_stats *ch_util);
#endif /* WLAN_TELEMETRY_STATS_SUPPORT */

#ifdef CONNECTIVITY_PKTLOG
/**
 * dp_tx_send_pktlog() - send tx packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @tx_desc: TX software descriptor
 * @nbuf: nbuf
 * @status: status of tx packet
 *
 * This function is used to send tx packet for logging
 *
 * Return: None
 */
static inline
void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_tx_packetdump_cb;

	if (qdf_unlikely(packetdump_cb) &&
	    dp_tx_frm_std == tx_desc->frm_type) {
		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
			      tx_desc->vdev_id, nbuf, status, QDF_TX_DATA_PKT);
	}
}
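
/*
 * Usage sketch (illustrative only): a TX completion handler would forward
 * the completed frame to the dump callback via dp_tx_send_pktlog(); note
 * that only standard data frames (dp_tx_frm_std) are passed on.
 *
 *	dp_tx_send_pktlog(soc, pdev, tx_desc, tx_desc->nbuf,
 *			  QDF_TX_RX_STATUS_OK);
 */
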
/**
 * dp_rx_send_pktlog() - send rx packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @nbuf: nbuf
 * @status: status of rx packet
 *
 * This function is used to send rx packet for logging
 *
 * Return: None
 */
static inline
void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb;

	if (qdf_unlikely(packetdump_cb)) {
		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
			      QDF_NBUF_CB_RX_VDEV_ID(nbuf),
			      nbuf, status, QDF_RX_DATA_PKT);
	}
}

/**
 * dp_rx_err_send_pktlog() - send rx error packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @mpdu_desc_info: MPDU descriptor info
 * @nbuf: nbuf
 * @status: status of rx packet
 * @set_pktlen: whether to set the packet length
 *
 * This API should only be called when the Rx TLV has not been removed
 * from the head, i.e. the head is still pointing to rx_tlv.
 *
 * This function is used to send an rx packet from the error path for
 * logging, for which the rx packet tlv has not been removed.
 *
 * Return: None
 */
static inline
void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			   qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status,
			   bool set_pktlen)
{
	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb;
	qdf_size_t skip_size;
	uint16_t msdu_len, nbuf_len;
	uint8_t *rx_tlv_hdr;
	struct hal_rx_msdu_metadata msdu_metadata;

	if (qdf_unlikely(packetdump_cb)) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		nbuf_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
							  rx_tlv_hdr);
		hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr,
					 &msdu_metadata);

		if (mpdu_desc_info->bar_frame ||
		    (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_FRAGMENT))
			skip_size = soc->rx_pkt_tlv_size;
		else
			skip_size = soc->rx_pkt_tlv_size +
					msdu_metadata.l3_hdr_pad;

		if (set_pktlen) {
			msdu_len = nbuf_len + skip_size;
			qdf_nbuf_set_pktlen(nbuf, qdf_min(msdu_len,
					    (uint16_t)RX_DATA_BUFFER_SIZE));
		}

		qdf_nbuf_pull_head(nbuf, skip_size);
		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
			      QDF_NBUF_CB_RX_VDEV_ID(nbuf),
			      nbuf, status, QDF_RX_DATA_PKT);
		qdf_nbuf_push_head(nbuf, skip_size);
	}
}
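
/*
 * Worked example (values illustrative) for the skip logic above: with
 * rx_pkt_tlv_size = 128 and l3_hdr_pad = 2, a regular MSDU skips
 * 130 bytes of TLV + pad before the dump callback runs, while a BAR
 * frame or fragment skips only the 128 TLV bytes; the head pointer is
 * restored afterwards via qdf_nbuf_push_head().
 */
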
#else
static inline
void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
}

static inline
void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
}

static inline
void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			   qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status,
			   bool set_pktlen)
{
}
#endif /* CONNECTIVITY_PKTLOG */

/**
 * dp_pdev_update_fast_rx_flag() - Update Fast rx flag for a PDEV
 * @soc: Data path soc handle
 * @pdev: PDEV handle
 *
 * Return: None
 */
void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev);

#ifdef FEATURE_DIRECT_LINK
/**
 * dp_setup_direct_link_refill_ring() - Setup direct link refill ring for pdev
 * @soc_hdl: DP SOC handle
 * @pdev_id: pdev id
 *
 * Return: Handle to SRNG
 */
struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
						 uint8_t pdev_id);

/**
 * dp_destroy_direct_link_refill_ring() - Destroy direct link refill ring for
 *					  pdev
 * @soc_hdl: DP SOC handle
 * @pdev_id: pdev id
 *
 * Return: None
 */
void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id);
#else
static inline
struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
						 uint8_t pdev_id)
{
	return NULL;
}

static inline
void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id)
{
}
#endif /* FEATURE_DIRECT_LINK */

#ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
/**
 * dp_cfg_event_record() - Record a DP config event in the config event
 *			   history
 * @soc: DP SoC handle
 * @event: config event type
 * @cfg_event_desc: event descriptor to be recorded
 *
 * Return: None
 */
static inline
void dp_cfg_event_record(struct dp_soc *soc,
			 enum dp_cfg_event_type event,
			 union dp_cfg_event_desc *cfg_event_desc)
{
	struct dp_cfg_event_history *cfg_event_history =
					&soc->cfg_event_history;
	struct dp_cfg_event *entry;
	uint32_t idx;
	uint16_t slot;

	dp_get_frag_hist_next_atomic_idx(&cfg_event_history->index, &idx,
					 &slot,
					 DP_CFG_EVT_HIST_SLOT_SHIFT,
					 DP_CFG_EVT_HIST_PER_SLOT_MAX,
					 DP_CFG_EVT_HISTORY_SIZE);

	entry = &cfg_event_history->entry[slot][idx];

	entry->timestamp = qdf_get_log_timestamp();
	entry->type = event;
	qdf_mem_copy(&entry->event_desc, cfg_event_desc,
		     sizeof(entry->event_desc));
}

/**
 * dp_cfg_event_record_vdev_evt() - Record a vdev attach/detach config event
 * @soc: DP SoC handle
 * @event: config event type (attach, unref-delete or detach)
 * @vdev: DP vdev handle
 *
 * Return: None
 */
static inline void
dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event,
			     struct dp_vdev *vdev)
{
	union dp_cfg_event_desc cfg_evt_desc = {0};
	struct dp_vdev_attach_detach_desc *vdev_evt =
						&cfg_evt_desc.vdev_evt;

	if (qdf_unlikely(event != DP_CFG_EVENT_VDEV_ATTACH &&
			 event != DP_CFG_EVENT_VDEV_UNREF_DEL &&
			 event != DP_CFG_EVENT_VDEV_DETACH)) {
		qdf_assert_always(0);
		return;
	}

	vdev_evt->vdev = vdev;
	vdev_evt->vdev_id = vdev->vdev_id;
	vdev_evt->ref_count = qdf_atomic_read(&vdev->ref_cnt);
	vdev_evt->mac_addr = vdev->mac_addr;

	dp_cfg_event_record(soc, event, &cfg_evt_desc);
}

/**
 * dp_cfg_event_record_peer_evt() - Record a peer create/delete config event
 * @soc: DP SoC handle
 * @event: config event type (create, delete or unref-delete)
 * @peer: DP peer handle
 * @vdev: DP vdev handle of the peer
 * @is_reuse: whether the peer is being reused
 *
 * Return: None
 */
static inline void
dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event,
			     struct dp_peer *peer, struct dp_vdev *vdev,
			     uint8_t is_reuse)
{
	union dp_cfg_event_desc cfg_evt_desc = {0};
	struct dp_peer_cmn_ops_desc *peer_evt = &cfg_evt_desc.peer_cmn_evt;

	if (qdf_unlikely(event != DP_CFG_EVENT_PEER_CREATE &&
			 event != DP_CFG_EVENT_PEER_DELETE &&
			 event != DP_CFG_EVENT_PEER_UNREF_DEL)) {
		qdf_assert_always(0);
		return;
	}

	peer_evt->peer = peer;
	peer_evt->vdev = vdev;
	peer_evt->vdev_id = vdev->vdev_id;
	peer_evt->is_reuse = is_reuse;
	peer_evt->peer_ref_count = qdf_atomic_read(&peer->ref_cnt);
	peer_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt);
	peer_evt->mac_addr = peer->mac_addr;
	peer_evt->vdev_mac_addr = vdev->mac_addr;

	dp_cfg_event_record(soc, event, &cfg_evt_desc);
}
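
/*
 * Usage sketch (illustrative only): recording a vdev attach in the
 * config event history from the attach path.
 *
 *	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_ATTACH, vdev);
 */
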
/**
 * dp_cfg_event_record_mlo_link_delink_evt() - Record an MLO link add/delete
 *					       config event
 * @soc: DP SoC handle
 * @event: config event type (MLO add link or MLO delete link)
 * @mld_peer: DP MLD peer handle
 * @link_peer: DP link peer handle
 * @idx: link index
 * @result: result of the add/delete action
 *
 * Return: None
 */
static inline void
dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc,
					enum dp_cfg_event_type event,
					struct dp_peer *mld_peer,
					struct dp_peer *link_peer,
					uint8_t idx, uint8_t result)
{
	union dp_cfg_event_desc cfg_evt_desc = {0};
	struct dp_mlo_add_del_link_desc *mlo_link_delink_evt =
					&cfg_evt_desc.mlo_link_delink_evt;

	if (qdf_unlikely(event != DP_CFG_EVENT_MLO_ADD_LINK &&
			 event != DP_CFG_EVENT_MLO_DEL_LINK)) {
		qdf_assert_always(0);
		return;
	}

	mlo_link_delink_evt->link_peer = link_peer;
	mlo_link_delink_evt->mld_peer = mld_peer;
	mlo_link_delink_evt->link_mac_addr = link_peer->mac_addr;
	mlo_link_delink_evt->mld_mac_addr = mld_peer->mac_addr;
	mlo_link_delink_evt->num_links = mld_peer->num_links;
	mlo_link_delink_evt->action_result = result;
	mlo_link_delink_evt->idx = idx;

	dp_cfg_event_record(soc, event, &cfg_evt_desc);
}

/**
 * dp_cfg_event_record_mlo_setup_vdev_update_evt() - Record an MLO setup vdev
 *						     update config event
 * @soc: DP SoC handle
 * @mld_peer: DP MLD peer handle
 * @prev_vdev: previous vdev of the MLD peer
 * @new_vdev: new vdev of the MLD peer
 *
 * Return: None
 */
static inline void
dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc,
					      struct dp_peer *mld_peer,
					      struct dp_vdev *prev_vdev,
					      struct dp_vdev *new_vdev)
{
	union dp_cfg_event_desc cfg_evt_desc = {0};
	struct dp_mlo_setup_vdev_update_desc *vdev_update_evt =
					&cfg_evt_desc.mlo_setup_vdev_update;

	vdev_update_evt->mld_peer = mld_peer;
	vdev_update_evt->prev_vdev = prev_vdev;
	vdev_update_evt->new_vdev = new_vdev;

	dp_cfg_event_record(soc, DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE,
			    &cfg_evt_desc);
}

/**
 * dp_cfg_event_record_peer_map_unmap_evt() - Record a peer map/unmap config
 *					      event
 * @soc: DP SoC handle
 * @event: config event type (peer or MLO peer map/unmap)
 * @peer: DP peer handle, may be NULL
 * @mac_addr: peer mac address
 * @is_ml_peer: whether the peer is an ML peer
 * @peer_id: peer id
 * @ml_peer_id: ML peer id
 * @hw_peer_id: HW peer id
 * @vdev_id: vdev id
 *
 * Return: None
 */
static inline void
dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc,
				       enum dp_cfg_event_type event,
				       struct dp_peer *peer,
				       uint8_t *mac_addr,
				       uint8_t is_ml_peer,
				       uint16_t peer_id, uint16_t ml_peer_id,
				       uint16_t hw_peer_id, uint8_t vdev_id)
{
	union dp_cfg_event_desc cfg_evt_desc = {0};
	struct dp_rx_peer_map_unmap_desc *peer_map_unmap_evt =
					&cfg_evt_desc.peer_map_unmap_evt;

	if (qdf_unlikely(event != DP_CFG_EVENT_PEER_MAP &&
			 event != DP_CFG_EVENT_PEER_UNMAP &&
			 event != DP_CFG_EVENT_MLO_PEER_MAP &&
			 event != DP_CFG_EVENT_MLO_PEER_UNMAP)) {
		qdf_assert_always(0);
		return;
	}

	peer_map_unmap_evt->peer_id = peer_id;
	peer_map_unmap_evt->ml_peer_id = ml_peer_id;
	peer_map_unmap_evt->hw_peer_id = hw_peer_id;
	peer_map_unmap_evt->vdev_id = vdev_id;
	/* Peer may be NULL at times, but it's not an issue. */
	peer_map_unmap_evt->peer = peer;
	peer_map_unmap_evt->is_ml_peer = is_ml_peer;
	qdf_mem_copy(&peer_map_unmap_evt->mac_addr.raw, mac_addr,
		     QDF_MAC_ADDR_SIZE);

	dp_cfg_event_record(soc, event, &cfg_evt_desc);
}

/**
 * dp_cfg_event_record_peer_setup_evt() - Record a peer setup config event
 * @soc: DP SoC handle
 * @event: config event type (peer setup or MLO setup)
 * @peer: DP peer handle
 * @vdev: DP vdev handle, may be NULL
 * @vdev_id: vdev id
 * @peer_setup_info: peer setup info, valid for MLO setup
 *
 * Return: None
 */
static inline void
dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc,
				   enum dp_cfg_event_type event,
				   struct dp_peer *peer,
				   struct dp_vdev *vdev,
				   uint8_t vdev_id,
				   struct cdp_peer_setup_info *peer_setup_info)
{
	union dp_cfg_event_desc cfg_evt_desc = {0};
	struct dp_peer_setup_desc *peer_setup_evt =
					&cfg_evt_desc.peer_setup_evt;

	if (qdf_unlikely(event != DP_CFG_EVENT_PEER_SETUP &&
			 event != DP_CFG_EVENT_MLO_SETUP)) {
		qdf_assert_always(0);
		return;
	}

	peer_setup_evt->peer = peer;
	peer_setup_evt->vdev = vdev;
	if (vdev)
		peer_setup_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt);
	peer_setup_evt->mac_addr = peer->mac_addr;
	peer_setup_evt->vdev_id = vdev_id;
	if (peer_setup_info) {
		peer_setup_evt->is_first_link = peer_setup_info->is_first_link;
		peer_setup_evt->is_primary_link = peer_setup_info->is_primary_link;
		qdf_mem_copy(peer_setup_evt->mld_mac_addr.raw,
			     peer_setup_info->mld_peer_mac,
			     QDF_MAC_ADDR_SIZE);
	}

	dp_cfg_event_record(soc, event, &cfg_evt_desc);
}
#else

static inline void
dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event,
			     struct dp_vdev *vdev)
{
}

static inline void
dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event,
			     struct dp_peer *peer, struct dp_vdev *vdev,
			     uint8_t is_reuse)
{
}

static inline void
dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc,
					enum dp_cfg_event_type event,
					struct dp_peer *mld_peer,
					struct dp_peer *link_peer,
					uint8_t idx, uint8_t result)
{
}

static inline void
dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc,
					      struct dp_peer *mld_peer,
					      struct dp_vdev *prev_vdev,
					      struct dp_vdev *new_vdev)
{
}

static inline void
dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc,
				       enum dp_cfg_event_type event,
				       struct dp_peer *peer,
				       uint8_t *mac_addr,
				       uint8_t is_ml_peer,
				       uint16_t peer_id, uint16_t ml_peer_id,
				       uint16_t hw_peer_id, uint8_t vdev_id)
{
}

static inline void
dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc,
				   enum dp_cfg_event_type event,
				   struct dp_peer *peer,
				   struct dp_vdev *vdev,
				   uint8_t vdev_id,
				   struct cdp_peer_setup_info *peer_setup_info)
{
}
#endif /* WLAN_FEATURE_DP_CFG_EVENT_HISTORY */

/**
 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
 * @txrx_soc: DP SOC handle
 *
 * Return: None
 */
void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc);

/**
 * dp_get_peer_stats() - Get peer stats
 * @peer: Datapath peer
 * @peer_stats: buffer to hold the peer stats
 *
 * Return: None
 */
void dp_get_peer_stats(struct dp_peer *peer,
		       struct cdp_peer_stats *peer_stats);

#endif /* #ifndef _DP_INTERNAL_H_ */