/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"
#include "dp_htt.h"
#include "dp_rx_tid.h"

#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

#define DP_PEER_WDS_COUNT_INVALID UINT_MAX

#define DP_BLOCKMEM_SIZE 4096
#define WBM2_SW_PPE_REL_RING_ID 6
#define WBM2_SW_PPE_REL_MAP_ID 11
#define DP_TX_PPEDS_POOL_ID 0xF

/* Alignment for consistent memory for DP rings */
#define DP_RING_BASE_ALIGN 32

#define DP_RSSI_INVAL 0x80
#define DP_RSSI_AVG_WEIGHT 2
/*
 * Formula to derive avg_rssi is taken from wifi2.o firmware
 */
#define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))

/* Macro For NYSM value received in VHT TLV */
#define VHT_SGI_NYSM 3

#define INVALID_WBM_RING_NUM 0xF

#ifdef FEATURE_DIRECT_LINK
#define DIRECT_LINK_REFILL_RING_ENTRIES 64
#ifdef IPA_OFFLOAD
#ifdef IPA_WDI3_VLAN_SUPPORT
#define DIRECT_LINK_REFILL_RING_IDX 4
#else
#define DIRECT_LINK_REFILL_RING_IDX 3
#endif
#else
#define DIRECT_LINK_REFILL_RING_IDX 2
#endif
#endif

#define DP_MAX_VLAN_IDS 4096
#define DP_VLAN_UNTAGGED 0
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2

/**
 * struct htt_dbgfs_cfg - structure to maintain required htt data
 * @msg_word: htt msg sent to upper layer
 * @m: qdf debugfs file pointer
 */
struct htt_dbgfs_cfg {
	uint32_t *msg_word;
	qdf_debugfs_file_t m;
};

/* Cookie MSB bits are assigned per use case.
 * Note: the user can't use the last 3 bits, as they are reserved for the
 * pdev_id. If, in future, the number of pdevs is more than 3, this
 * assignment may need to be revisited.
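 *
 * Illustrative composition (assumed from the definitions below, not an
 * additional requirement of the interface): a stats request tagged for HTT
 * debugfs handling on pdev_id 1 could carry a cookie of
 * (DBG_STATS_COOKIE_HTT_DBGFS | 1), i.e. bit 4 selecting the use case and
 * the low 3 bits carrying the pdev_id.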
 */
/* Reserve for default case */
#define DBG_STATS_COOKIE_DEFAULT 0x0

/* Reserve for DP Stats: 3rd bit */
#define DBG_STATS_COOKIE_DP_STATS BIT(3)

/* Reserve for HTT Stats debugfs support: 4th bit */
#define DBG_STATS_COOKIE_HTT_DBGFS BIT(4)

/* Reserve for HTT stats sysfs support: 5th bit */
#define DBG_SYSFS_STATS_COOKIE BIT(5)

/* Reserve for HTT Stats OBSS PD support: 6th bit */
#define DBG_STATS_COOKIE_HTT_OBSS BIT(6)

/*
 * Bitmap of HTT PPDU TLV types for Default mode
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	(1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)

/* PPDU STATS CFG */
#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS \
	(HTT_PPDU_DEFAULT_TLV_BITMAP) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV)

/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER \
	(HTT_PPDU_DEFAULT_TLV_BITMAP) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV)

/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR \
	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV)

/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
				   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)

/*
 * Bitmap of HTT PPDU delayed ba TLV types for Default mode
 */
#define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
	(1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV)

/*
 * Bitmap of HTT PPDU TLV types for Delayed BA
 */
#define HTT_PPDU_STATUS_TLV_BITMAP \
	(1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)

/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
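 * (This 256-window variant differs from HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64
 * above only in requesting the 256-entry BA and MPDU-enqueue bitmap TLVs;
 * noted here for orientation, derived from the two definitions themselves.)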
177 */ 178 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \ 179 ((1 << HTT_PPDU_STATS_COMMON_TLV) | \ 180 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 181 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 182 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 183 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 184 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ 185 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \ 186 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV)) 187 188 static const enum cdp_packet_type hal_2_dp_pkt_type_map[HAL_DOT11_MAX] = { 189 [HAL_DOT11A] = DOT11_A, 190 [HAL_DOT11B] = DOT11_B, 191 [HAL_DOT11N_MM] = DOT11_N, 192 [HAL_DOT11AC] = DOT11_AC, 193 [HAL_DOT11AX] = DOT11_AX, 194 [HAL_DOT11BA] = DOT11_MAX, 195 #ifdef WLAN_FEATURE_11BE 196 [HAL_DOT11BE] = DOT11_BE, 197 #else 198 [HAL_DOT11BE] = DOT11_MAX, 199 #endif 200 [HAL_DOT11AZ] = DOT11_MAX, 201 [HAL_DOT11N_GF] = DOT11_MAX, 202 }; 203 204 #ifdef GLOBAL_ASSERT_AVOIDANCE 205 #define dp_assert_always_internal_stat(_expr, _handle, _field) \ 206 (qdf_unlikely(!(_expr)) ? ((_handle)->stats._field++, true) : false) 207 208 #define dp_assert_always_internal_ds_stat(_expr, _handle, _field) \ 209 ((_handle)->ppeds_stats._field++) 210 211 static inline bool dp_assert_always_internal(bool expr) 212 { 213 return !expr; 214 } 215 #else 216 static inline bool __dp_assert_always_internal(bool expr) 217 { 218 qdf_assert_always(expr); 219 220 return false; 221 } 222 223 #define dp_assert_always_internal(_expr) __dp_assert_always_internal(_expr) 224 225 #define dp_assert_always_internal_stat(_expr, _handle, _field) \ 226 dp_assert_always_internal(_expr) 227 228 #define dp_assert_always_internal_ds_stat(_expr, _handle, _field) \ 229 dp_assert_always_internal(_expr) 230 #endif 231 232 #ifdef WLAN_FEATURE_11BE 233 /** 234 * dp_get_mcs_array_index_by_pkt_type_mcs() - get the destination mcs index 235 * in array 236 * @pkt_type: host SW pkt type 237 * @mcs: mcs value for TX/RX rate 238 * 239 * Return: succeeded - valid index in mcs array 240 * fail - same value as MCS_MAX 241 */ 242 static inline uint8_t 243 dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs) 244 { 245 uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX; 246 247 switch (pkt_type) { 248 case DOT11_A: 249 dst_mcs_idx = 250 mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs; 251 break; 252 case DOT11_B: 253 dst_mcs_idx = 254 mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs; 255 break; 256 case DOT11_N: 257 dst_mcs_idx = 258 mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs; 259 break; 260 case DOT11_AC: 261 dst_mcs_idx = 262 mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs; 263 break; 264 case DOT11_AX: 265 dst_mcs_idx = 266 mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs; 267 break; 268 case DOT11_BE: 269 dst_mcs_idx = 270 mcs >= MAX_MCS_11BE ? (MAX_MCS - 1) : mcs; 271 break; 272 default: 273 break; 274 } 275 276 return dst_mcs_idx; 277 } 278 #else 279 static inline uint8_t 280 dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs) 281 { 282 uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX; 283 284 switch (pkt_type) { 285 case DOT11_A: 286 dst_mcs_idx = 287 mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs; 288 break; 289 case DOT11_B: 290 dst_mcs_idx = 291 mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs; 292 break; 293 case DOT11_N: 294 dst_mcs_idx = 295 mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs; 296 break; 297 case DOT11_AC: 298 dst_mcs_idx = 299 mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs; 300 break; 301 case DOT11_AX: 302 dst_mcs_idx = 303 mcs >= MAX_MCS_11AX ? 
(MAX_MCS - 1) : mcs; 304 break; 305 default: 306 break; 307 } 308 309 return dst_mcs_idx; 310 } 311 #endif 312 313 #ifdef WIFI_MONITOR_SUPPORT 314 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc); 315 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc); 316 #else 317 static inline 318 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc) 319 { 320 return QDF_STATUS_SUCCESS; 321 } 322 323 static inline 324 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc) 325 { 326 return QDF_STATUS_SUCCESS; 327 } 328 #endif 329 330 /** 331 * dp_rx_err_match_dhost() - function to check whether dest-mac is correct 332 * @eh: Ethernet header of incoming packet 333 * @vdev: dp_vdev object of the VAP on which this data packet is received 334 * 335 * Return: 1 if the destination mac is correct, 336 * 0 if this frame is not correctly destined to this VAP/MLD 337 */ 338 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev); 339 340 #ifdef MONITOR_MODULARIZED_ENABLE 341 static inline bool dp_monitor_modularized_enable(void) 342 { 343 return TRUE; 344 } 345 346 static inline QDF_STATUS 347 dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; } 348 349 static inline QDF_STATUS 350 dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; } 351 #else 352 static inline bool dp_monitor_modularized_enable(void) 353 { 354 return FALSE; 355 } 356 357 static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc) 358 { 359 return dp_mon_soc_attach(soc); 360 } 361 362 static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc) 363 { 364 return dp_mon_soc_detach(soc); 365 } 366 #endif 367 368 #ifndef WIFI_MONITOR_SUPPORT 369 #define MON_BUF_MIN_ENTRIES 64 370 371 static inline QDF_STATUS dp_monitor_pdev_attach(struct dp_pdev *pdev) 372 { 373 return QDF_STATUS_SUCCESS; 374 } 375 376 static inline QDF_STATUS dp_monitor_pdev_detach(struct dp_pdev *pdev) 377 { 378 return QDF_STATUS_SUCCESS; 379 } 380 381 static inline QDF_STATUS dp_monitor_vdev_attach(struct dp_vdev *vdev) 382 { 383 return QDF_STATUS_E_FAILURE; 384 } 385 386 static inline QDF_STATUS dp_monitor_vdev_detach(struct dp_vdev *vdev) 387 { 388 return QDF_STATUS_E_FAILURE; 389 } 390 391 static inline QDF_STATUS dp_monitor_peer_attach(struct dp_soc *soc, 392 struct dp_peer *peer) 393 { 394 return QDF_STATUS_SUCCESS; 395 } 396 397 static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc, 398 struct dp_peer *peer) 399 { 400 return QDF_STATUS_E_FAILURE; 401 } 402 403 static inline struct cdp_peer_rate_stats_ctx* 404 dp_monitor_peer_get_peerstats_ctx(struct dp_soc *soc, struct dp_peer *peer) 405 { 406 return NULL; 407 } 408 409 static inline 410 void dp_monitor_peer_reset_stats(struct dp_soc *soc, struct dp_peer *peer) 411 { 412 } 413 414 static inline 415 void dp_monitor_peer_get_stats(struct dp_soc *soc, struct dp_peer *peer, 416 void *arg, enum cdp_stat_update_type type) 417 { 418 } 419 420 static inline 421 void dp_monitor_invalid_peer_update_pdev_stats(struct dp_soc *soc, 422 struct dp_pdev *pdev) 423 { 424 } 425 426 static inline 427 QDF_STATUS dp_monitor_peer_get_stats_param(struct dp_soc *soc, 428 struct dp_peer *peer, 429 enum cdp_peer_stats_type type, 430 cdp_peer_stats_param_t *buf) 431 { 432 return QDF_STATUS_E_FAILURE; 433 } 434 435 static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev) 436 { 437 return QDF_STATUS_SUCCESS; 438 } 439 440 static inline QDF_STATUS dp_monitor_pdev_deinit(struct dp_pdev *pdev) 441 { 442 return QDF_STATUS_SUCCESS; 443 } 444 445 static inline 
QDF_STATUS dp_monitor_soc_cfg_init(struct dp_soc *soc) 446 { 447 return QDF_STATUS_SUCCESS; 448 } 449 450 static inline QDF_STATUS dp_monitor_config_debug_sniffer(struct dp_pdev *pdev, 451 int val) 452 { 453 return QDF_STATUS_E_FAILURE; 454 } 455 456 static inline void dp_monitor_flush_rings(struct dp_soc *soc) 457 { 458 } 459 460 static inline QDF_STATUS dp_monitor_htt_srng_setup(struct dp_soc *soc, 461 struct dp_pdev *pdev, 462 int mac_id, 463 int mac_for_pdev) 464 { 465 return QDF_STATUS_SUCCESS; 466 } 467 468 static inline void dp_monitor_service_mon_rings(struct dp_soc *soc, 469 uint32_t quota) 470 { 471 } 472 473 static inline 474 uint32_t dp_monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx, 475 uint32_t mac_id, uint32_t quota) 476 { 477 return 0; 478 } 479 480 static inline 481 uint32_t dp_monitor_drop_packets_for_mac(struct dp_pdev *pdev, 482 uint32_t mac_id, uint32_t quota) 483 { 484 return 0; 485 } 486 487 static inline void dp_monitor_peer_tx_init(struct dp_pdev *pdev, 488 struct dp_peer *peer) 489 { 490 } 491 492 static inline void dp_monitor_peer_tx_cleanup(struct dp_vdev *vdev, 493 struct dp_peer *peer) 494 { 495 } 496 497 static inline 498 void dp_monitor_peer_tid_peer_id_update(struct dp_soc *soc, 499 struct dp_peer *peer, 500 uint16_t peer_id) 501 { 502 } 503 504 static inline void dp_monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev) 505 { 506 } 507 508 static inline void dp_monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev) 509 { 510 } 511 512 static inline 513 QDF_STATUS dp_monitor_tx_capture_debugfs_init(struct dp_pdev *pdev) 514 { 515 return QDF_STATUS_SUCCESS; 516 } 517 518 static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev, 519 struct dp_peer *peer) 520 { 521 } 522 523 static inline 524 QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc, 525 struct dp_tx_desc_s *desc, 526 struct hal_tx_completion_status *ts, 527 uint16_t peer_id) 528 { 529 return QDF_STATUS_E_FAILURE; 530 } 531 532 static inline 533 QDF_STATUS monitor_update_msdu_to_list(struct dp_soc *soc, 534 struct dp_pdev *pdev, 535 struct dp_peer *peer, 536 struct hal_tx_completion_status *ts, 537 qdf_nbuf_t netbuf) 538 { 539 return QDF_STATUS_E_FAILURE; 540 } 541 542 static inline bool dp_monitor_ppdu_stats_ind_handler(struct htt_soc *soc, 543 uint32_t *msg_word, 544 qdf_nbuf_t htt_t2h_msg) 545 { 546 return true; 547 } 548 549 static inline QDF_STATUS dp_monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev) 550 { 551 return QDF_STATUS_SUCCESS; 552 } 553 554 static inline void dp_monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev) 555 { 556 } 557 558 static inline void dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev) 559 { 560 } 561 562 static inline QDF_STATUS dp_monitor_config_enh_tx_capture(struct dp_pdev *pdev, 563 uint32_t val) 564 { 565 return QDF_STATUS_E_INVAL; 566 } 567 568 static inline QDF_STATUS dp_monitor_tx_peer_filter(struct dp_pdev *pdev, 569 struct dp_peer *peer, 570 uint8_t is_tx_pkt_cap_enable, 571 uint8_t *peer_mac) 572 { 573 return QDF_STATUS_E_INVAL; 574 } 575 576 static inline QDF_STATUS dp_monitor_config_enh_rx_capture(struct dp_pdev *pdev, 577 uint32_t val) 578 { 579 return QDF_STATUS_E_INVAL; 580 } 581 582 static inline 583 QDF_STATUS dp_monitor_set_bpr_enable(struct dp_pdev *pdev, uint32_t val) 584 { 585 return QDF_STATUS_E_FAILURE; 586 } 587 588 static inline 589 int dp_monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val) 590 { 591 return 0; 592 } 593 594 static inline 595 void dp_monitor_set_atf_stats_enable(struct 
dp_pdev *pdev, bool value) 596 { 597 } 598 599 static inline 600 void dp_monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) 601 { 602 } 603 604 static inline 605 bool dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) 606 { 607 return false; 608 } 609 610 static inline 611 bool dp_monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) 612 { 613 return false; 614 } 615 616 static inline 617 bool dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) 618 { 619 return false; 620 } 621 622 static inline 623 int dp_monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, 624 bool enable) 625 { 626 return 0; 627 } 628 629 static inline void dp_monitor_pktlogmod_exit(struct dp_pdev *pdev) 630 { 631 } 632 633 static inline 634 QDF_STATUS dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev) 635 { 636 return QDF_STATUS_E_FAILURE; 637 } 638 639 static inline 640 void dp_monitor_neighbour_peers_detach(struct dp_pdev *pdev) 641 { 642 } 643 644 static inline QDF_STATUS dp_monitor_filter_neighbour_peer(struct dp_pdev *pdev, 645 uint8_t *rx_pkt_hdr) 646 { 647 return QDF_STATUS_E_FAILURE; 648 } 649 650 static inline void dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev) 651 { 652 } 653 654 static inline 655 void dp_monitor_reap_timer_init(struct dp_soc *soc) 656 { 657 } 658 659 static inline 660 void dp_monitor_reap_timer_deinit(struct dp_soc *soc) 661 { 662 } 663 664 static inline 665 bool dp_monitor_reap_timer_start(struct dp_soc *soc, 666 enum cdp_mon_reap_source source) 667 { 668 return false; 669 } 670 671 static inline 672 bool dp_monitor_reap_timer_stop(struct dp_soc *soc, 673 enum cdp_mon_reap_source source) 674 { 675 return false; 676 } 677 678 static inline void 679 dp_monitor_reap_timer_suspend(struct dp_soc *soc) 680 { 681 } 682 683 static inline 684 void dp_monitor_vdev_timer_init(struct dp_soc *soc) 685 { 686 } 687 688 static inline 689 void dp_monitor_vdev_timer_deinit(struct dp_soc *soc) 690 { 691 } 692 693 static inline 694 void dp_monitor_vdev_timer_start(struct dp_soc *soc) 695 { 696 } 697 698 static inline 699 bool dp_monitor_vdev_timer_stop(struct dp_soc *soc) 700 { 701 return false; 702 } 703 704 static inline struct qdf_mem_multi_page_t* 705 dp_monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id) 706 { 707 return NULL; 708 } 709 710 static inline struct dp_srng* 711 dp_monitor_get_link_desc_ring(struct dp_soc *soc, uint32_t mac_id) 712 { 713 return NULL; 714 } 715 716 static inline uint32_t 717 dp_monitor_get_num_link_desc_ring_entries(struct dp_soc *soc) 718 { 719 return 0; 720 } 721 722 static inline uint32_t * 723 dp_monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id) 724 { 725 return NULL; 726 } 727 728 static inline QDF_STATUS dp_monitor_drop_inv_peer_pkts(struct dp_vdev *vdev) 729 { 730 return QDF_STATUS_E_FAILURE; 731 } 732 733 static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev) 734 { 735 return false; 736 } 737 738 static inline void dp_monitor_vdev_register_osif(struct dp_vdev *vdev, 739 struct ol_txrx_ops *txrx_ops) 740 { 741 } 742 743 static inline bool dp_monitor_is_vdev_timer_running(struct dp_soc *soc) 744 { 745 return false; 746 } 747 748 static inline 749 void dp_monitor_pdev_set_mon_vdev(struct dp_vdev *vdev) 750 { 751 } 752 753 static inline void dp_monitor_vdev_delete(struct dp_soc *soc, 754 struct dp_vdev *vdev) 755 { 756 } 757 758 static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer) 759 { 760 } 761 762 static inline void 
dp_monitor_neighbour_peer_add_ast(struct dp_pdev *pdev, 763 struct dp_peer *ta_peer, 764 uint8_t *mac_addr, 765 qdf_nbuf_t nbuf, 766 uint32_t flags) 767 { 768 } 769 770 static inline void 771 dp_monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band chan_band) 772 { 773 } 774 775 static inline void 776 dp_monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq) 777 { 778 } 779 780 static inline void dp_monitor_set_chan_num(struct dp_pdev *pdev, int chan_num) 781 { 782 } 783 784 static inline bool dp_monitor_is_enable_mcopy_mode(struct dp_pdev *pdev) 785 { 786 return false; 787 } 788 789 static inline 790 void dp_monitor_neighbour_peer_list_remove(struct dp_pdev *pdev, 791 struct dp_vdev *vdev, 792 struct dp_neighbour_peer *peer) 793 { 794 } 795 796 static inline bool dp_monitor_is_chan_band_known(struct dp_pdev *pdev) 797 { 798 return false; 799 } 800 801 static inline enum reg_wifi_band 802 dp_monitor_get_chan_band(struct dp_pdev *pdev) 803 { 804 return 0; 805 } 806 807 static inline int 808 dp_monitor_get_chan_num(struct dp_pdev *pdev) 809 { 810 return 0; 811 } 812 813 static inline qdf_freq_t 814 dp_monitor_get_chan_freq(struct dp_pdev *pdev) 815 { 816 return 0; 817 } 818 819 static inline void dp_monitor_get_mpdu_status(struct dp_pdev *pdev, 820 struct dp_soc *soc, 821 uint8_t *rx_tlv_hdr) 822 { 823 } 824 825 static inline void dp_monitor_print_tx_stats(struct dp_pdev *pdev) 826 { 827 } 828 829 static inline 830 QDF_STATUS dp_monitor_mcopy_check_deliver(struct dp_pdev *pdev, 831 uint16_t peer_id, uint32_t ppdu_id, 832 uint8_t first_msdu) 833 { 834 return QDF_STATUS_SUCCESS; 835 } 836 837 static inline bool dp_monitor_is_enable_tx_sniffer(struct dp_pdev *pdev) 838 { 839 return false; 840 } 841 842 static inline struct dp_vdev* 843 dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev) 844 { 845 return NULL; 846 } 847 848 static inline QDF_STATUS dp_monitor_check_com_info_ppdu_id(struct dp_pdev *pdev, 849 void *rx_desc) 850 { 851 return QDF_STATUS_E_FAILURE; 852 } 853 854 static inline struct mon_rx_status* 855 dp_monitor_get_rx_status(struct dp_pdev *pdev) 856 { 857 return NULL; 858 } 859 860 static inline 861 void dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev *pdev, bool val) 862 { 863 } 864 865 static inline 866 void dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev *pdev, 867 bool val) 868 { 869 } 870 871 static inline QDF_STATUS 872 dp_monitor_peer_tx_capture_get_stats(struct dp_soc *soc, struct dp_peer *peer, 873 struct cdp_peer_tx_capture_stats *stats) 874 { 875 return QDF_STATUS_E_FAILURE; 876 } 877 878 static inline QDF_STATUS 879 dp_monitor_pdev_tx_capture_get_stats(struct dp_soc *soc, struct dp_pdev *pdev, 880 struct cdp_pdev_tx_capture_stats *stats) 881 { 882 return QDF_STATUS_E_FAILURE; 883 } 884 885 #ifdef DP_POWER_SAVE 886 static inline 887 void dp_monitor_pktlog_reap_pending_frames(struct dp_pdev *pdev) 888 { 889 } 890 891 static inline 892 void dp_monitor_pktlog_start_reap_timer(struct dp_pdev *pdev) 893 { 894 } 895 #endif 896 897 static inline bool dp_monitor_is_configured(struct dp_pdev *pdev) 898 { 899 return false; 900 } 901 902 static inline void 903 dp_mon_rx_hdr_length_set(struct dp_soc *soc, uint32_t *msg_word, 904 struct htt_rx_ring_tlv_filter *tlv_filter) 905 { 906 } 907 908 static inline void dp_monitor_soc_init(struct dp_soc *soc) 909 { 910 } 911 912 static inline void dp_monitor_soc_deinit(struct dp_soc *soc) 913 { 914 } 915 916 static inline 917 QDF_STATUS dp_monitor_config_undecoded_metadata_capture(struct dp_pdev 
*pdev, 918 int val) 919 { 920 return QDF_STATUS_SUCCESS; 921 } 922 923 static inline QDF_STATUS 924 dp_monitor_config_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev, 925 int mask1, int mask2) 926 { 927 return QDF_STATUS_SUCCESS; 928 } 929 930 static inline QDF_STATUS 931 dp_monitor_get_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev, 932 int *mask, int *mask_cont) 933 { 934 return QDF_STATUS_SUCCESS; 935 } 936 937 static inline QDF_STATUS dp_monitor_soc_htt_srng_setup(struct dp_soc *soc) 938 { 939 return QDF_STATUS_E_FAILURE; 940 } 941 942 static inline bool dp_is_monitor_mode_using_poll(struct dp_soc *soc) 943 { 944 return false; 945 } 946 947 static inline 948 uint32_t dp_tx_mon_buf_refill(struct dp_intr *int_ctx) 949 { 950 return 0; 951 } 952 953 static inline uint32_t 954 dp_tx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx, 955 uint32_t mac_id, uint32_t quota) 956 { 957 return 0; 958 } 959 960 static inline uint32_t 961 dp_print_txmon_ring_stat_from_hal(struct dp_pdev *pdev) 962 { 963 return 0; 964 } 965 966 static inline 967 uint32_t dp_rx_mon_buf_refill(struct dp_intr *int_ctx) 968 { 969 return 0; 970 } 971 972 static inline bool dp_monitor_is_tx_cap_enabled(struct dp_peer *peer) 973 { 974 return 0; 975 } 976 977 static inline bool dp_monitor_is_rx_cap_enabled(struct dp_peer *peer) 978 { 979 return 0; 980 } 981 982 static inline void 983 dp_rx_mon_enable(struct dp_soc *soc, uint32_t *msg_word, 984 struct htt_rx_ring_tlv_filter *tlv_filter) 985 { 986 } 987 988 static inline void 989 dp_mon_rx_packet_length_set(struct dp_soc *soc, uint32_t *msg_word, 990 struct htt_rx_ring_tlv_filter *tlv_filter) 991 { 992 } 993 994 static inline void 995 dp_mon_rx_enable_mpdu_logging(struct dp_soc *soc, uint32_t *msg_word, 996 struct htt_rx_ring_tlv_filter *tlv_filter) 997 { 998 } 999 1000 static inline void 1001 dp_mon_rx_wmask_subscribe(struct dp_soc *soc, 1002 uint32_t *msg_word, int pdev_id, 1003 struct htt_rx_ring_tlv_filter *tlv_filter) 1004 { 1005 } 1006 1007 static inline void 1008 dp_mon_rx_mac_filter_set(struct dp_soc *soc, uint32_t *msg_word, 1009 struct htt_rx_ring_tlv_filter *tlv_filter) 1010 { 1011 } 1012 1013 static inline void 1014 dp_mon_rx_enable_pkt_tlv_offset(struct dp_soc *soc, uint32_t *msg_word, 1015 struct htt_rx_ring_tlv_filter *tlv_filter) 1016 { 1017 } 1018 1019 static inline void 1020 dp_mon_rx_enable_fpmo(struct dp_soc *soc, uint32_t *msg_word, 1021 struct htt_rx_ring_tlv_filter *tlv_filter) 1022 { 1023 } 1024 1025 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 1026 static inline 1027 void dp_monitor_peer_telemetry_stats(struct dp_peer *peer, 1028 struct cdp_peer_telemetry_stats *stats) 1029 { 1030 } 1031 1032 static inline 1033 void dp_monitor_peer_deter_stats(struct dp_peer *peer, 1034 struct cdp_peer_telemetry_stats *stats) 1035 { 1036 } 1037 #endif /* WLAN_CONFIG_TELEMETRY_AGENT */ 1038 #endif /* !WIFI_MONITOR_SUPPORT */ 1039 1040 /** 1041 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to 1042 * dp soc handle 1043 * @psoc: CDP psoc handle 1044 * 1045 * Return: struct dp_soc pointer 1046 */ 1047 static inline 1048 struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc) 1049 { 1050 return (struct dp_soc *)psoc; 1051 } 1052 1053 #define DP_MAX_TIMER_EXEC_TIME_TICKS \ 1054 (QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20) 1055 1056 /** 1057 * enum timer_yield_status - yield status code used in monitor mode timer. 
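 *
 * (Illustrative note on the neighbouring definition, not an added contract:
 * DP_MAX_TIMER_EXEC_TIME_TICKS above amounts to a 20 ms budget, since
 * QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 covers 1 ms and the trailing
 * factor of 20 scales that to 20 ms; a timer handler that runs past this
 * budget would presumably yield with DP_TIMER_TIME_EXHAUST.)
 *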
 * @DP_TIMER_NO_YIELD: do not yield
 * @DP_TIMER_WORK_DONE: yield because work is done
 * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
 * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
 */
enum timer_yield_status {
	DP_TIMER_NO_YIELD,
	DP_TIMER_WORK_DONE,
	DP_TIMER_WORK_EXHAUST,
	DP_TIMER_TIME_EXHAUST,
};

#if DP_PRINT_ENABLE
#include <qdf_types.h> /* qdf_vprint */
#include <cdp_txrx_handle.h>

enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	DP_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	DP_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	DP_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	DP_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	DP_PRINT_LEVEL_INFO2,
};

#define dp_print(level, fmt, ...) do { \
	if (level <= g_txrx_print_level) \
		qdf_print(fmt, ## __VA_ARGS__); \
} while (0)
#define DP_PRINT(level, fmt, ...) do { \
	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */

#define DP_TRACE(LVL, fmt, args ...) \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
		  fmt, ## args)

#ifdef WLAN_SYSFS_DP_STATS
void DP_PRINT_STATS(const char *fmt, ...);
#else /* WLAN_SYSFS_DP_STATS */
#ifdef DP_PRINT_NO_CONSOLE
/* Stat prints should not go to console or kernel logs. */
#define DP_PRINT_STATS(fmt, args ...) \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, \
		  fmt, ## args)
#else
#define DP_PRINT_STATS(fmt, args ...) \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, \
		  fmt, ## args)
#endif
#endif /* WLAN_SYSFS_DP_STATS */

#define DP_STATS_INIT(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_TXRX_PEER_STATS_INIT(_handle, size) \
	qdf_mem_zero(&((_handle)->stats[0]), size)

#define DP_STATS_CLR(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_TXRX_PEER_STATS_CLR(_handle, size) \
	qdf_mem_zero(&((_handle)->stats[0]), size)

#ifndef DISABLE_DP_STATS
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link) \
{ \
	if (likely(_handle)) \
		_handle->stats[_link]._field += _delta; \
}

#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field += _delta; \
}

#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats[_link]._field += _delta; \
}

#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field -= _delta; \
}

#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field -= _delta; \
}

#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
	if
(likely(_handle)) \ 1179 _handle->stats._field = _delta; \ 1180 } 1181 1182 #define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link) \ 1183 { \ 1184 if (likely(_handle)) \ 1185 _handle->stats[_link]._field = _delta; \ 1186 } 1187 1188 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \ 1189 { \ 1190 DP_STATS_INC(_handle, _field.num, _count); \ 1191 DP_STATS_INC(_handle, _field.bytes, _bytes) \ 1192 } 1193 1194 #define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \ 1195 { \ 1196 DP_PEER_STATS_FLAT_INC(_handle, _field.num, _count); \ 1197 DP_PEER_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \ 1198 } 1199 1200 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \ 1201 { \ 1202 DP_STATS_INCC(_handle, _field.num, _count, _cond); \ 1203 DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \ 1204 } 1205 1206 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) \ 1207 { \ 1208 _handle_a->stats._field += _handle_b->stats._field; \ 1209 } 1210 1211 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \ 1212 { \ 1213 DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \ 1214 DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\ 1215 } 1216 1217 #define DP_STATS_AGGR_IDX(_handle_a, _handle_b, _arr, _field, _idx) \ 1218 { \ 1219 _handle_a->stats._arr._field += _handle_b->stats._arr[_idx]._field; \ 1220 } 1221 1222 #define DP_STATS_AGGR_PKT_IDX(_handle_a, _handle_b, _arr, _field, _idx)\ 1223 { \ 1224 DP_STATS_AGGR_IDX(_handle_a, _handle_b, _arr, _field.num, _idx); \ 1225 DP_STATS_AGGR_IDX(_handle_a, _handle_b, _arr, _field.bytes, _idx);\ 1226 } 1227 1228 #define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \ 1229 { \ 1230 _handle_a->stats._field = _handle_b->stats._field; \ 1231 } 1232 1233 #else 1234 #define DP_STATS_INC(_handle, _field, _delta) 1235 #define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link) 1236 #define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) 1237 #define DP_STATS_INCC(_handle, _field, _delta, _cond) 1238 #define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link) 1239 #define DP_STATS_DEC(_handle, _field, _delta) 1240 #define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) 1241 #define DP_STATS_UPD(_handle, _field, _delta) 1242 #define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link) 1243 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) 1244 #define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) 1245 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) 1246 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) 1247 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) 1248 #define DP_STATS_AGGR_IDX(_handle_a, _handle_b, _arr, _field, _idx) 1249 #define DP_STATS_AGGR_PKT_IDX(_handle_a, _handle_b, _arr, _field, _idx) 1250 #endif 1251 1252 #define DP_PEER_PER_PKT_STATS_INC(_handle, _field, _delta, _link) \ 1253 { \ 1254 DP_PEER_LINK_STATS_INC(_handle, per_pkt_stats._field, _delta, _link); \ 1255 } 1256 1257 #define DP_PEER_PER_PKT_STATS_INCC(_handle, _field, _delta, _cond, _link) \ 1258 { \ 1259 DP_PEER_LINK_STATS_INCC(_handle, per_pkt_stats._field, _delta, _cond, _link); \ 1260 } 1261 1262 #define DP_PEER_PER_PKT_STATS_INC_PKT(_handle, _field, _count, _bytes, _link) \ 1263 { \ 1264 DP_PEER_PER_PKT_STATS_INC(_handle, _field.num, _count, _link); \ 1265 DP_PEER_PER_PKT_STATS_INC(_handle, _field.bytes, _bytes, _link) \ 1266 } 1267 1268 #define DP_PEER_PER_PKT_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond, _link) \ 1269 { \ 1270 DP_PEER_PER_PKT_STATS_INCC(_handle, _field.num, 
_count, _cond, _link); \ 1271 DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond, _link) \ 1272 } 1273 1274 #define DP_PEER_PER_PKT_STATS_UPD(_handle, _field, _delta, _link) \ 1275 { \ 1276 DP_PEER_LINK_STATS_UPD(_handle, per_pkt_stats._field, _delta, _link); \ 1277 } 1278 1279 #ifndef QCA_ENHANCED_STATS_SUPPORT 1280 #define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta, _link) \ 1281 { \ 1282 DP_PEER_LINK_STATS_INC(_handle, extd_stats._field, _delta, _link); \ 1283 } 1284 1285 #define DP_PEER_EXTD_STATS_INCC(_handle, _field, _delta, _cond, _link) \ 1286 { \ 1287 DP_PEER_LINK_STATS_INCC(_handle, extd_stats._field, _delta, _cond, _link); \ 1288 } 1289 1290 #define DP_PEER_EXTD_STATS_UPD(_handle, _field, _delta, _link) \ 1291 { \ 1292 DP_PEER_LINK_STATS_UPD(_handle, extd_stats._field, _delta, _link); \ 1293 } 1294 #endif 1295 1296 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \ 1297 defined(QCA_ENHANCED_STATS_SUPPORT) 1298 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1299 { \ 1300 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1301 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ 1302 } 1303 1304 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1305 { \ 1306 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1307 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ 1308 } 1309 1310 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1311 { \ 1312 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1313 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \ 1314 } 1315 1316 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1317 { \ 1318 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1319 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \ 1320 } 1321 1322 #define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1323 { \ 1324 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1325 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \ 1326 } 1327 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) 1328 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1329 { \ 1330 if (!(_handle->hw_txrx_stats_en)) \ 1331 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ 1332 } 1333 1334 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1335 { \ 1336 if (!(_handle->hw_txrx_stats_en)) \ 1337 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ 1338 } 1339 1340 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1341 { \ 1342 if (!(_handle->hw_txrx_stats_en)) \ 1343 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \ 1344 } 1345 1346 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1347 { \ 1348 if (!(_handle->hw_txrx_stats_en)) \ 1349 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \ 1350 } 1351 1352 #define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1353 { \ 1354 if (!(_handle->hw_txrx_stats_en)) \ 1355 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \ 1356 } 1357 #else 1358 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1359 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); 1360 1361 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1362 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); 1363 1364 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1365 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, 
_bytes, _link); 1366 1367 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1368 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); 1369 1370 #define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1371 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); 1372 #endif 1373 1374 #ifdef ENABLE_DP_HIST_STATS 1375 #define DP_HIST_INIT() \ 1376 uint32_t num_of_packets[MAX_PDEV_CNT] = {0}; 1377 1378 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) \ 1379 { \ 1380 ++num_of_packets[_pdev_id]; \ 1381 } 1382 1383 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 1384 do { \ 1385 if (_p_cntrs == 1) { \ 1386 DP_STATS_INC(_pdev, \ 1387 tx_comp_histogram.pkts_1, 1); \ 1388 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 1389 DP_STATS_INC(_pdev, \ 1390 tx_comp_histogram.pkts_2_20, 1); \ 1391 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 1392 DP_STATS_INC(_pdev, \ 1393 tx_comp_histogram.pkts_21_40, 1); \ 1394 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 1395 DP_STATS_INC(_pdev, \ 1396 tx_comp_histogram.pkts_41_60, 1); \ 1397 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 1398 DP_STATS_INC(_pdev, \ 1399 tx_comp_histogram.pkts_61_80, 1); \ 1400 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 1401 DP_STATS_INC(_pdev, \ 1402 tx_comp_histogram.pkts_81_100, 1); \ 1403 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 1404 DP_STATS_INC(_pdev, \ 1405 tx_comp_histogram.pkts_101_200, 1); \ 1406 } else if (_p_cntrs > 200) { \ 1407 DP_STATS_INC(_pdev, \ 1408 tx_comp_histogram.pkts_201_plus, 1); \ 1409 } \ 1410 } while (0) 1411 1412 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 1413 do { \ 1414 if (_p_cntrs == 1) { \ 1415 DP_STATS_INC(_pdev, \ 1416 rx_ind_histogram.pkts_1, 1); \ 1417 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 1418 DP_STATS_INC(_pdev, \ 1419 rx_ind_histogram.pkts_2_20, 1); \ 1420 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 1421 DP_STATS_INC(_pdev, \ 1422 rx_ind_histogram.pkts_21_40, 1); \ 1423 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 1424 DP_STATS_INC(_pdev, \ 1425 rx_ind_histogram.pkts_41_60, 1); \ 1426 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 1427 DP_STATS_INC(_pdev, \ 1428 rx_ind_histogram.pkts_61_80, 1); \ 1429 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 1430 DP_STATS_INC(_pdev, \ 1431 rx_ind_histogram.pkts_81_100, 1); \ 1432 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 1433 DP_STATS_INC(_pdev, \ 1434 rx_ind_histogram.pkts_101_200, 1); \ 1435 } else if (_p_cntrs > 200) { \ 1436 DP_STATS_INC(_pdev, \ 1437 rx_ind_histogram.pkts_201_plus, 1); \ 1438 } \ 1439 } while (0) 1440 1441 #define DP_TX_HIST_STATS_PER_PDEV() \ 1442 do { \ 1443 uint8_t hist_stats = 0; \ 1444 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 1445 hist_stats++) { \ 1446 DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 1447 num_of_packets[hist_stats]); \ 1448 } \ 1449 } while (0) 1450 1451 1452 #define DP_RX_HIST_STATS_PER_PDEV() \ 1453 do { \ 1454 uint8_t hist_stats = 0; \ 1455 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 1456 hist_stats++) { \ 1457 DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 1458 num_of_packets[hist_stats]); \ 1459 } \ 1460 } while (0) 1461 1462 #else 1463 #define DP_HIST_INIT() 1464 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) 1465 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 1466 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 1467 #define DP_RX_HIST_STATS_PER_PDEV() 1468 #define DP_TX_HIST_STATS_PER_PDEV() 1469 #endif /* DISABLE_DP_STATS */ 1470 1471 #define 
FRAME_MASK_IPV4_ARP 1 1472 #define FRAME_MASK_IPV4_DHCP 2 1473 #define FRAME_MASK_IPV4_EAPOL 4 1474 #define FRAME_MASK_IPV6_DHCP 8 1475 1476 static inline int dp_log2_ceil(unsigned int value) 1477 { 1478 unsigned int tmp = value; 1479 int log2 = -1; 1480 1481 if (qdf_unlikely(value == 0)) 1482 return 0; 1483 while (tmp) { 1484 log2++; 1485 tmp >>= 1; 1486 } 1487 if (1 << log2 != value) 1488 log2++; 1489 return log2; 1490 } 1491 1492 #ifdef QCA_SUPPORT_PEER_ISOLATION 1493 #define dp_get_peer_isolation(_peer) ((_peer)->isolation) 1494 1495 static inline void dp_set_peer_isolation(struct dp_txrx_peer *txrx_peer, 1496 bool val) 1497 { 1498 txrx_peer->isolation = val; 1499 } 1500 1501 #else 1502 #define dp_get_peer_isolation(_peer) (0) 1503 1504 static inline void dp_set_peer_isolation(struct dp_txrx_peer *peer, bool val) 1505 { 1506 } 1507 #endif /* QCA_SUPPORT_PEER_ISOLATION */ 1508 1509 bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev); 1510 1511 #ifdef QCA_SUPPORT_WDS_EXTENDED 1512 static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer) 1513 { 1514 txrx_peer->wds_ext.osif_peer = NULL; 1515 txrx_peer->wds_ext.init = 0; 1516 } 1517 #else 1518 static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer) 1519 { 1520 } 1521 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 1522 1523 #ifdef QCA_HOST2FW_RXBUF_RING 1524 static inline 1525 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id) 1526 { 1527 return &pdev->rx_mac_buf_ring[lmac_id]; 1528 } 1529 #else 1530 static inline 1531 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id) 1532 { 1533 return &pdev->soc->rx_refill_buf_ring[lmac_id]; 1534 } 1535 #endif 1536 1537 /* 1538 * The lmac ID for a particular channel band is fixed. 1539 * 2.4GHz band uses lmac_id = 1 1540 * 5GHz/6GHz band uses lmac_id=0 1541 */ 1542 #define DP_INVALID_LMAC_ID (-1) 1543 #define DP_MON_INVALID_LMAC_ID (-1) 1544 #define DP_MAC0_LMAC_ID 0 1545 #define DP_MAC1_LMAC_ID 1 1546 1547 #ifdef FEATURE_TSO_STATS 1548 /** 1549 * dp_init_tso_stats() - Clear tso stats 1550 * @pdev: pdev handle 1551 * 1552 * Return: None 1553 */ 1554 static inline 1555 void dp_init_tso_stats(struct dp_pdev *pdev) 1556 { 1557 if (pdev) { 1558 qdf_mem_zero(&((pdev)->stats.tso_stats), 1559 sizeof((pdev)->stats.tso_stats)); 1560 qdf_atomic_init(&pdev->tso_idx); 1561 } 1562 } 1563 1564 /** 1565 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram 1566 * @pdev: pdev handle 1567 * @_p_cntrs: number of tso segments for a tso packet 1568 * 1569 * Return: None 1570 */ 1571 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 1572 uint8_t _p_cntrs); 1573 1574 /** 1575 * dp_tso_segment_update() - Collect tso segment information 1576 * @pdev: pdev handle 1577 * @stats_idx: tso packet number 1578 * @idx: tso segment number 1579 * @seg: tso segment 1580 * 1581 * Return: None 1582 */ 1583 void dp_tso_segment_update(struct dp_pdev *pdev, 1584 uint32_t stats_idx, 1585 uint8_t idx, 1586 struct qdf_tso_seg_t seg); 1587 1588 /** 1589 * dp_tso_packet_update() - TSO Packet information 1590 * @pdev: pdev handle 1591 * @stats_idx: tso packet number 1592 * @msdu: nbuf handle 1593 * @num_segs: tso segments 1594 * 1595 * Return: None 1596 */ 1597 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 1598 qdf_nbuf_t msdu, uint16_t num_segs); 1599 1600 /** 1601 * dp_tso_segment_stats_update() - TSO Segment stats 1602 * @pdev: pdev handle 1603 * @stats_seg: tso segment list 1604 * @stats_idx: tso packet number 1605 * 1606 * Return: None 
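 *
 * Illustrative call sequence from a TX path (a sketch inferred from the
 * prototypes in this group; stats_idx, num_segs, tso_seg and stats_seg_list
 * are the caller's own bookkeeping, not names defined here):
 *
 *	dp_tso_packet_update(pdev, stats_idx, msdu, num_segs);
 *	for (idx = 0; idx < num_segs; idx++)
 *		dp_tso_segment_update(pdev, stats_idx, idx, tso_seg[idx]);
 *	dp_tso_segment_stats_update(pdev, stats_seg_list, stats_idx);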
1607 */ 1608 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 1609 struct qdf_tso_seg_elem_t *stats_seg, 1610 uint32_t stats_idx); 1611 1612 /** 1613 * dp_print_tso_stats() - dump tso statistics 1614 * @soc:soc handle 1615 * @level: verbosity level 1616 * 1617 * Return: None 1618 */ 1619 void dp_print_tso_stats(struct dp_soc *soc, 1620 enum qdf_stats_verbosity_level level); 1621 1622 /** 1623 * dp_txrx_clear_tso_stats() - clear tso stats 1624 * @soc: soc handle 1625 * 1626 * Return: None 1627 */ 1628 void dp_txrx_clear_tso_stats(struct dp_soc *soc); 1629 #else 1630 static inline 1631 void dp_init_tso_stats(struct dp_pdev *pdev) 1632 { 1633 } 1634 1635 static inline 1636 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 1637 uint8_t _p_cntrs) 1638 { 1639 } 1640 1641 static inline 1642 void dp_tso_segment_update(struct dp_pdev *pdev, 1643 uint32_t stats_idx, 1644 uint32_t idx, 1645 struct qdf_tso_seg_t seg) 1646 { 1647 } 1648 1649 static inline 1650 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 1651 qdf_nbuf_t msdu, uint16_t num_segs) 1652 { 1653 } 1654 1655 static inline 1656 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 1657 struct qdf_tso_seg_elem_t *stats_seg, 1658 uint32_t stats_idx) 1659 { 1660 } 1661 1662 static inline 1663 void dp_print_tso_stats(struct dp_soc *soc, 1664 enum qdf_stats_verbosity_level level) 1665 { 1666 } 1667 1668 static inline 1669 void dp_txrx_clear_tso_stats(struct dp_soc *soc) 1670 { 1671 } 1672 #endif /* FEATURE_TSO_STATS */ 1673 1674 /** 1675 * dp_txrx_get_peer_per_pkt_stats_param() - Get peer per pkt stats param 1676 * @peer: DP peer handle 1677 * @type: Requested stats type 1678 * @buf: Buffer to hold the value 1679 * 1680 * Return: status success/failure 1681 */ 1682 QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer, 1683 enum cdp_peer_stats_type type, 1684 cdp_peer_stats_param_t *buf); 1685 1686 /** 1687 * dp_txrx_get_peer_extd_stats_param() - Get peer extd stats param 1688 * @peer: DP peer handle 1689 * @type: Requested stats type 1690 * @buf: Buffer to hold the value 1691 * 1692 * Return: status success/failure 1693 */ 1694 QDF_STATUS dp_txrx_get_peer_extd_stats_param(struct dp_peer *peer, 1695 enum cdp_peer_stats_type type, 1696 cdp_peer_stats_param_t *buf); 1697 1698 #define DP_HTT_T2H_HP_PIPE 5 1699 /** 1700 * dp_update_pdev_stats(): Update the pdev stats 1701 * @tgtobj: pdev handle 1702 * @srcobj: vdev stats structure 1703 * 1704 * Update the pdev stats from the specified vdev stats 1705 * 1706 * Return: None 1707 */ 1708 void dp_update_pdev_stats(struct dp_pdev *tgtobj, 1709 struct cdp_vdev_stats *srcobj); 1710 1711 /** 1712 * dp_update_vdev_ingress_stats(): Update the vdev ingress stats 1713 * @tgtobj: vdev handle 1714 * 1715 * Update the vdev ingress stats 1716 * 1717 * Return: None 1718 */ 1719 void dp_update_vdev_ingress_stats(struct dp_vdev *tgtobj); 1720 1721 /** 1722 * dp_update_vdev_rate_stats() - Update the vdev rate stats 1723 * @tgtobj: tgt buffer for cdp vdev stats 1724 * @srcobj: srcobj dp vdev stats 1725 * 1726 * Return: None 1727 */ 1728 void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj, 1729 struct dp_vdev_stats *srcobj); 1730 1731 /** 1732 * dp_update_pdev_ingress_stats(): Update the pdev ingress stats 1733 * @tgtobj: pdev handle 1734 * @srcobj: vdev stats structure 1735 * 1736 * Update the pdev ingress stats from the specified vdev stats 1737 * 1738 * Return: None 1739 */ 1740 void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj, 1741 struct 
dp_vdev *srcobj); 1742 1743 /** 1744 * dp_copy_vdev_stats_to_tgt_buf(): Update the cdp vdev ingress stats from 1745 * dp vdev ingress stats 1746 * @vdev_stats: cdp vdev stats structure 1747 * @stats: dp vdev stats structure 1748 * @xmit_type: xmit type of packet - MLD/Link 1749 * 1750 * Update the cdp vdev ingress stats from dp vdev ingress stats 1751 * 1752 * Return: None 1753 */ 1754 1755 void dp_copy_vdev_stats_to_tgt_buf(struct cdp_vdev_stats *vdev_stats, 1756 struct dp_vdev_stats *stats, 1757 enum dp_pkt_xmit_type xmit_type); 1758 1759 /** 1760 * dp_update_vdev_stats(): Update the vdev stats 1761 * @soc: soc handle 1762 * @srcobj: DP_PEER object 1763 * @arg: point to vdev stats structure 1764 * 1765 * Update the vdev stats from the specified peer stats 1766 * 1767 * Return: None 1768 */ 1769 void dp_update_vdev_stats(struct dp_soc *soc, 1770 struct dp_peer *srcobj, 1771 void *arg); 1772 1773 /** 1774 * dp_update_vdev_stats_on_peer_unmap() - Update the vdev stats on peer unmap 1775 * @vdev: DP_VDEV handle 1776 * @peer: DP_PEER handle 1777 * 1778 * Return: None 1779 */ 1780 void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev, 1781 struct dp_peer *peer); 1782 1783 #ifdef IPA_OFFLOAD 1784 #define DP_IPA_UPDATE_RX_STATS(__tgtobj, __srcobj) \ 1785 { \ 1786 DP_STATS_AGGR_PKT(__tgtobj, __srcobj, rx.rx_total); \ 1787 } 1788 1789 #define DP_IPA_UPDATE_PER_PKT_RX_STATS(__tgtobj, __srcobj) \ 1790 { \ 1791 (__tgtobj)->rx.rx_total.num += (__srcobj)->rx.rx_total.num; \ 1792 (__tgtobj)->rx.rx_total.bytes += (__srcobj)->rx.rx_total.bytes; \ 1793 } 1794 #else 1795 #define DP_IPA_UPDATE_PER_PKT_RX_STATS(tgtobj, srcobj) \ 1796 1797 #define DP_IPA_UPDATE_RX_STATS(tgtobj, srcobj) 1798 #endif 1799 1800 #define DP_UPDATE_STATS(_tgtobj, _srcobj) \ 1801 do { \ 1802 uint8_t i; \ 1803 uint8_t pream_type; \ 1804 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 1805 for (i = 0; i < MAX_MCS; i++) { \ 1806 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1807 tx.pkt_type[pream_type].mcs_count[i]); \ 1808 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1809 rx.pkt_type[pream_type].mcs_count[i]); \ 1810 } \ 1811 } \ 1812 \ 1813 for (i = 0; i < MAX_BW; i++) { \ 1814 DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \ 1815 DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \ 1816 } \ 1817 \ 1818 for (i = 0; i < SS_COUNT; i++) { \ 1819 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \ 1820 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \ 1821 } \ 1822 for (i = 0; i < WME_AC_MAX; i++) { \ 1823 DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \ 1824 DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \ 1825 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1826 tx.wme_ac_type_bytes[i]); \ 1827 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1828 rx.wme_ac_type_bytes[i]); \ 1829 DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \ 1830 \ 1831 } \ 1832 \ 1833 for (i = 0; i < MAX_GI; i++) { \ 1834 DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \ 1835 DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \ 1836 } \ 1837 \ 1838 for (i = 0; i < MAX_RECEPTION_TYPES; i++) \ 1839 DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \ 1840 \ 1841 if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) { \ 1842 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \ 1843 DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \ 1844 } \ 1845 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \ 1846 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \ 1847 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \ 1848 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \ 1849 
DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \ 1850 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \ 1851 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \ 1852 DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \ 1853 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \ 1854 DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \ 1855 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \ 1856 DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \ 1857 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \ 1858 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \ 1859 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \ 1860 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \ 1861 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \ 1862 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \ 1863 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \ 1864 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \ 1865 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_queue_disable); \ 1866 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_no_match); \ 1867 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_threshold); \ 1868 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_link_desc_na); \ 1869 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_drop); \ 1870 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.mcast_vdev_drop); \ 1871 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_rr); \ 1872 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \ 1873 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_total); \ 1874 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_success); \ 1875 \ 1876 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \ 1877 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \ 1878 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.fcserr); \ 1879 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.pn_err); \ 1880 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.oor_err); \ 1881 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.jump_2k_err); \ 1882 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.rxdma_wifi_parse_err); \ 1883 if (_srcobj->stats.rx.snr != 0) \ 1884 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \ 1885 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \ 1886 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \ 1887 DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \ 1888 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \ 1889 DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \ 1890 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \ 1891 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \ 1892 \ 1893 for (i = 0; i < CDP_MAX_RX_RINGS; i++) \ 1894 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \ 1895 \ 1896 for (i = 0; i < CDP_MAX_LMACS; i++) \ 1897 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rx_lmac[i]); \ 1898 \ 1899 _srcobj->stats.rx.unicast.num = \ 1900 _srcobj->stats.rx.to_stack.num - \ 1901 _srcobj->stats.rx.multicast.num; \ 1902 _srcobj->stats.rx.unicast.bytes = \ 1903 _srcobj->stats.rx.to_stack.bytes - \ 1904 _srcobj->stats.rx.multicast.bytes; \ 1905 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \ 1906 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \ 1907 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \ 1908 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \ 1909 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \ 1910 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \ 1911 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \ 1912 \ 1913 _tgtobj->stats.tx.last_ack_rssi = \ 1914 _srcobj->stats.tx.last_ack_rssi; \ 1915 DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \ 1916 DP_STATS_AGGR(_tgtobj, 
_srcobj, rx.peer_unauth_rx_pkt_drop); \ 1917 DP_STATS_AGGR(_tgtobj, _srcobj, rx.policy_check_drop); \ 1918 DP_IPA_UPDATE_RX_STATS(_tgtobj, _srcobj); \ 1919 } while (0) 1920 1921 #ifdef VDEV_PEER_PROTOCOL_COUNT 1922 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) \ 1923 { \ 1924 uint8_t j; \ 1925 for (j = 0; j < CDP_TRACE_MAX; j++) { \ 1926 _tgtobj->tx.protocol_trace_cnt[j].egress_cnt += \ 1927 _srcobj->tx.protocol_trace_cnt[j].egress_cnt; \ 1928 _tgtobj->tx.protocol_trace_cnt[j].ingress_cnt += \ 1929 _srcobj->tx.protocol_trace_cnt[j].ingress_cnt; \ 1930 _tgtobj->rx.protocol_trace_cnt[j].egress_cnt += \ 1931 _srcobj->rx.protocol_trace_cnt[j].egress_cnt; \ 1932 _tgtobj->rx.protocol_trace_cnt[j].ingress_cnt += \ 1933 _srcobj->rx.protocol_trace_cnt[j].ingress_cnt; \ 1934 } \ 1935 } 1936 #else 1937 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) 1938 #endif 1939 1940 #ifdef WLAN_FEATURE_11BE 1941 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) \ 1942 do { \ 1943 uint8_t i, mu_type; \ 1944 for (i = 0; i < MAX_MCS; i++) { \ 1945 _tgtobj->tx.su_be_ppdu_cnt.mcs_count[i] += \ 1946 _srcobj->tx.su_be_ppdu_cnt.mcs_count[i]; \ 1947 _tgtobj->rx.su_be_ppdu_cnt.mcs_count[i] += \ 1948 _srcobj->rx.su_be_ppdu_cnt.mcs_count[i]; \ 1949 } \ 1950 for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 1951 for (i = 0; i < MAX_MCS; i++) { \ 1952 _tgtobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \ 1953 _srcobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \ 1954 _tgtobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \ 1955 _srcobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \ 1956 } \ 1957 } \ 1958 for (i = 0; i < MAX_PUNCTURED_MODE; i++) { \ 1959 _tgtobj->tx.punc_bw[i] += _srcobj->tx.punc_bw[i]; \ 1960 _tgtobj->rx.punc_bw[i] += _srcobj->rx.punc_bw[i]; \ 1961 } \ 1962 } while (0) 1963 #else 1964 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) 1965 #endif 1966 1967 #define DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj) \ 1968 do { \ 1969 _tgtobj->tx.comp_pkt.num += _srcobj->tx.comp_pkt.num; \ 1970 _tgtobj->tx.comp_pkt.bytes += _srcobj->tx.comp_pkt.bytes; \ 1971 _tgtobj->tx.tx_failed += _srcobj->tx.tx_failed; \ 1972 _tgtobj->rx.to_stack.num += _srcobj->rx.to_stack.num; \ 1973 _tgtobj->rx.to_stack.bytes += _srcobj->rx.to_stack.bytes; \ 1974 } while (0) 1975 1976 #define DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj) \ 1977 do { \ 1978 uint8_t i; \ 1979 _tgtobj->tx.ucast.num += _srcobj->tx.ucast.num; \ 1980 _tgtobj->tx.ucast.bytes += _srcobj->tx.ucast.bytes; \ 1981 _tgtobj->tx.mcast.num += _srcobj->tx.mcast.num; \ 1982 _tgtobj->tx.mcast.bytes += _srcobj->tx.mcast.bytes; \ 1983 _tgtobj->tx.bcast.num += _srcobj->tx.bcast.num; \ 1984 _tgtobj->tx.bcast.bytes += _srcobj->tx.bcast.bytes; \ 1985 _tgtobj->tx.nawds_mcast.num += _srcobj->tx.nawds_mcast.num; \ 1986 _tgtobj->tx.nawds_mcast.bytes += \ 1987 _srcobj->tx.nawds_mcast.bytes; \ 1988 _tgtobj->tx.tx_success.num += _srcobj->tx.tx_success.num; \ 1989 _tgtobj->tx.tx_success.bytes += _srcobj->tx.tx_success.bytes; \ 1990 _tgtobj->tx.nawds_mcast_drop += _srcobj->tx.nawds_mcast_drop; \ 1991 _tgtobj->tx.ofdma += _srcobj->tx.ofdma; \ 1992 _tgtobj->tx.non_amsdu_cnt += _srcobj->tx.non_amsdu_cnt; \ 1993 _tgtobj->tx.amsdu_cnt += _srcobj->tx.amsdu_cnt; \ 1994 _tgtobj->tx.dropped.fw_rem.num += \ 1995 _srcobj->tx.dropped.fw_rem.num; \ 1996 _tgtobj->tx.dropped.fw_rem.bytes += \ 1997 _srcobj->tx.dropped.fw_rem.bytes; \ 1998 _tgtobj->tx.dropped.fw_rem_notx += \ 1999 _srcobj->tx.dropped.fw_rem_notx; \ 2000 _tgtobj->tx.dropped.fw_rem_tx += \ 2001 _srcobj->tx.dropped.fw_rem_tx; \ 2002 
_tgtobj->tx.dropped.age_out += _srcobj->tx.dropped.age_out; \ 2003 _tgtobj->tx.dropped.fw_reason1 += \ 2004 _srcobj->tx.dropped.fw_reason1; \ 2005 _tgtobj->tx.dropped.fw_reason2 += \ 2006 _srcobj->tx.dropped.fw_reason2; \ 2007 _tgtobj->tx.dropped.fw_reason3 += \ 2008 _srcobj->tx.dropped.fw_reason3; \ 2009 _tgtobj->tx.dropped.fw_rem_queue_disable += \ 2010 _srcobj->tx.dropped.fw_rem_queue_disable; \ 2011 _tgtobj->tx.dropped.fw_rem_no_match += \ 2012 _srcobj->tx.dropped.fw_rem_no_match; \ 2013 _tgtobj->tx.dropped.drop_threshold += \ 2014 _srcobj->tx.dropped.drop_threshold; \ 2015 _tgtobj->tx.dropped.drop_link_desc_na += \ 2016 _srcobj->tx.dropped.drop_link_desc_na; \ 2017 _tgtobj->tx.dropped.invalid_drop += \ 2018 _srcobj->tx.dropped.invalid_drop; \ 2019 _tgtobj->tx.dropped.mcast_vdev_drop += \ 2020 _srcobj->tx.dropped.mcast_vdev_drop; \ 2021 _tgtobj->tx.dropped.invalid_rr += \ 2022 _srcobj->tx.dropped.invalid_rr; \ 2023 _tgtobj->tx.failed_retry_count += \ 2024 _srcobj->tx.failed_retry_count; \ 2025 _tgtobj->tx.retry_count += _srcobj->tx.retry_count; \ 2026 _tgtobj->tx.multiple_retry_count += \ 2027 _srcobj->tx.multiple_retry_count; \ 2028 _tgtobj->tx.tx_success_twt.num += \ 2029 _srcobj->tx.tx_success_twt.num; \ 2030 _tgtobj->tx.tx_success_twt.bytes += \ 2031 _srcobj->tx.tx_success_twt.bytes; \ 2032 _tgtobj->tx.last_tx_ts = _srcobj->tx.last_tx_ts; \ 2033 _tgtobj->tx.release_src_not_tqm += \ 2034 _srcobj->tx.release_src_not_tqm; \ 2035 for (i = 0; i < QDF_PROTO_SUBTYPE_MAX; i++) { \ 2036 _tgtobj->tx.no_ack_count[i] += \ 2037 _srcobj->tx.no_ack_count[i];\ 2038 } \ 2039 \ 2040 _tgtobj->rx.multicast.num += _srcobj->rx.multicast.num; \ 2041 _tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \ 2042 _tgtobj->rx.rx_success.num += _srcobj->rx.rx_success.num;\ 2043 _tgtobj->rx.rx_success.bytes += _srcobj->rx.rx_success.bytes;\ 2044 _tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \ 2045 _tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \ 2046 _tgtobj->rx.unicast.num += _srcobj->rx.unicast.num; \ 2047 _tgtobj->rx.unicast.bytes += _srcobj->rx.unicast.bytes; \ 2048 _tgtobj->rx.raw.num += _srcobj->rx.raw.num; \ 2049 _tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \ 2050 _tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \ 2051 _tgtobj->rx.mcast_3addr_drop += _srcobj->rx.mcast_3addr_drop; \ 2052 _tgtobj->rx.mec_drop.num += _srcobj->rx.mec_drop.num; \ 2053 _tgtobj->rx.mec_drop.bytes += _srcobj->rx.mec_drop.bytes; \ 2054 _tgtobj->rx.ppeds_drop.num += _srcobj->rx.ppeds_drop.num; \ 2055 _tgtobj->rx.ppeds_drop.bytes += _srcobj->rx.ppeds_drop.bytes; \ 2056 _tgtobj->rx.intra_bss.pkts.num += \ 2057 _srcobj->rx.intra_bss.pkts.num; \ 2058 _tgtobj->rx.intra_bss.pkts.bytes += \ 2059 _srcobj->rx.intra_bss.pkts.bytes; \ 2060 _tgtobj->rx.intra_bss.fail.num += \ 2061 _srcobj->rx.intra_bss.fail.num; \ 2062 _tgtobj->rx.intra_bss.fail.bytes += \ 2063 _srcobj->rx.intra_bss.fail.bytes; \ 2064 _tgtobj->rx.intra_bss.mdns_no_fwd += \ 2065 _srcobj->rx.intra_bss.mdns_no_fwd; \ 2066 _tgtobj->rx.err.mic_err += _srcobj->rx.err.mic_err; \ 2067 _tgtobj->rx.err.decrypt_err += _srcobj->rx.err.decrypt_err; \ 2068 _tgtobj->rx.err.fcserr += _srcobj->rx.err.fcserr; \ 2069 _tgtobj->rx.err.pn_err += _srcobj->rx.err.pn_err; \ 2070 _tgtobj->rx.err.oor_err += _srcobj->rx.err.oor_err; \ 2071 _tgtobj->rx.err.jump_2k_err += _srcobj->rx.err.jump_2k_err; \ 2072 _tgtobj->rx.err.rxdma_wifi_parse_err += \ 2073 _srcobj->rx.err.rxdma_wifi_parse_err; \ 2074 _tgtobj->rx.non_amsdu_cnt += _srcobj->rx.non_amsdu_cnt; \ 2075 
_tgtobj->rx.amsdu_cnt += _srcobj->rx.amsdu_cnt; \ 2076 _tgtobj->rx.rx_retries += _srcobj->rx.rx_retries; \ 2077 _tgtobj->rx.multipass_rx_pkt_drop += \ 2078 _srcobj->rx.multipass_rx_pkt_drop; \ 2079 _tgtobj->rx.peer_unauth_rx_pkt_drop += \ 2080 _srcobj->rx.peer_unauth_rx_pkt_drop; \ 2081 _tgtobj->rx.policy_check_drop += \ 2082 _srcobj->rx.policy_check_drop; \ 2083 _tgtobj->rx.to_stack_twt.num += _srcobj->rx.to_stack_twt.num; \ 2084 _tgtobj->rx.to_stack_twt.bytes += \ 2085 _srcobj->rx.to_stack_twt.bytes; \ 2086 _tgtobj->rx.last_rx_ts = _srcobj->rx.last_rx_ts; \ 2087 for (i = 0; i < CDP_MAX_RX_RINGS; i++) { \ 2088 _tgtobj->rx.rcvd_reo[i].num += \ 2089 _srcobj->rx.rcvd_reo[i].num; \ 2090 _tgtobj->rx.rcvd_reo[i].bytes += \ 2091 _srcobj->rx.rcvd_reo[i].bytes; \ 2092 _tgtobj->rx.rcvd.num += \ 2093 _srcobj->rx.rcvd_reo[i].num; \ 2094 _tgtobj->rx.rcvd.bytes += \ 2095 _srcobj->rx.rcvd_reo[i].bytes; \ 2096 } \ 2097 for (i = 0; i < CDP_MAX_LMACS; i++) { \ 2098 _tgtobj->rx.rx_lmac[i].num += \ 2099 _srcobj->rx.rx_lmac[i].num; \ 2100 _tgtobj->rx.rx_lmac[i].bytes += \ 2101 _srcobj->rx.rx_lmac[i].bytes; \ 2102 } \ 2103 DP_IPA_UPDATE_PER_PKT_RX_STATS(_tgtobj, _srcobj); \ 2104 DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj); \ 2105 } while (0) 2106 2107 #define DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj) \ 2108 do { \ 2109 uint8_t i, pream_type, mu_type; \ 2110 _tgtobj->tx.stbc += _srcobj->tx.stbc; \ 2111 _tgtobj->tx.ldpc += _srcobj->tx.ldpc; \ 2112 _tgtobj->tx.retries += _srcobj->tx.retries; \ 2113 _tgtobj->tx.ampdu_cnt += _srcobj->tx.ampdu_cnt; \ 2114 _tgtobj->tx.non_ampdu_cnt += _srcobj->tx.non_ampdu_cnt; \ 2115 _tgtobj->tx.num_ppdu_cookie_valid += \ 2116 _srcobj->tx.num_ppdu_cookie_valid; \ 2117 _tgtobj->tx.tx_ppdus += _srcobj->tx.tx_ppdus; \ 2118 _tgtobj->tx.tx_mpdus_success += _srcobj->tx.tx_mpdus_success; \ 2119 _tgtobj->tx.tx_mpdus_tried += _srcobj->tx.tx_mpdus_tried; \ 2120 _tgtobj->tx.tx_rate = _srcobj->tx.tx_rate; \ 2121 _tgtobj->tx.last_tx_rate = _srcobj->tx.last_tx_rate; \ 2122 _tgtobj->tx.last_tx_rate_mcs = _srcobj->tx.last_tx_rate_mcs; \ 2123 _tgtobj->tx.mcast_last_tx_rate = \ 2124 _srcobj->tx.mcast_last_tx_rate; \ 2125 _tgtobj->tx.mcast_last_tx_rate_mcs = \ 2126 _srcobj->tx.mcast_last_tx_rate_mcs; \ 2127 _tgtobj->tx.rnd_avg_tx_rate = _srcobj->tx.rnd_avg_tx_rate; \ 2128 _tgtobj->tx.avg_tx_rate = _srcobj->tx.avg_tx_rate; \ 2129 _tgtobj->tx.tx_ratecode = _srcobj->tx.tx_ratecode; \ 2130 _tgtobj->tx.pream_punct_cnt += _srcobj->tx.pream_punct_cnt; \ 2131 _tgtobj->tx.ru_start = _srcobj->tx.ru_start; \ 2132 _tgtobj->tx.ru_tones = _srcobj->tx.ru_tones; \ 2133 _tgtobj->tx.last_ack_rssi = _srcobj->tx.last_ack_rssi; \ 2134 _tgtobj->tx.nss_info = _srcobj->tx.nss_info; \ 2135 _tgtobj->tx.mcs_info = _srcobj->tx.mcs_info; \ 2136 _tgtobj->tx.bw_info = _srcobj->tx.bw_info; \ 2137 _tgtobj->tx.gi_info = _srcobj->tx.gi_info; \ 2138 _tgtobj->tx.preamble_info = _srcobj->tx.preamble_info; \ 2139 _tgtobj->tx.retries_mpdu += _srcobj->tx.retries_mpdu; \ 2140 _tgtobj->tx.mpdu_success_with_retries += \ 2141 _srcobj->tx.mpdu_success_with_retries; \ 2142 _tgtobj->tx.rts_success = _srcobj->tx.rts_success; \ 2143 _tgtobj->tx.rts_failure = _srcobj->tx.rts_failure; \ 2144 _tgtobj->tx.bar_cnt = _srcobj->tx.bar_cnt; \ 2145 _tgtobj->tx.ndpa_cnt = _srcobj->tx.ndpa_cnt; \ 2146 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 2147 for (i = 0; i < MAX_MCS; i++) \ 2148 _tgtobj->tx.pkt_type[pream_type].mcs_count[i] += \ 2149 _srcobj->tx.pkt_type[pream_type].mcs_count[i]; \ 2150 } \ 2151 for (i = 0; i < WME_AC_MAX; i++) { \ 
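		/* per WME access category: aggregate MSDU counts, byte counts and excess retries */ \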
2152 _tgtobj->tx.wme_ac_type[i] += _srcobj->tx.wme_ac_type[i]; \ 2153 _tgtobj->tx.wme_ac_type_bytes[i] += \ 2154 _srcobj->tx.wme_ac_type_bytes[i]; \ 2155 _tgtobj->tx.excess_retries_per_ac[i] += \ 2156 _srcobj->tx.excess_retries_per_ac[i]; \ 2157 } \ 2158 for (i = 0; i < MAX_GI; i++) { \ 2159 _tgtobj->tx.sgi_count[i] += _srcobj->tx.sgi_count[i]; \ 2160 } \ 2161 for (i = 0; i < SS_COUNT; i++) { \ 2162 _tgtobj->tx.nss[i] += _srcobj->tx.nss[i]; \ 2163 } \ 2164 for (i = 0; i < MAX_BW; i++) { \ 2165 _tgtobj->tx.bw[i] += _srcobj->tx.bw[i]; \ 2166 } \ 2167 for (i = 0; i < MAX_RU_LOCATIONS; i++) { \ 2168 _tgtobj->tx.ru_loc[i].num_msdu += \ 2169 _srcobj->tx.ru_loc[i].num_msdu; \ 2170 _tgtobj->tx.ru_loc[i].num_mpdu += \ 2171 _srcobj->tx.ru_loc[i].num_mpdu; \ 2172 _tgtobj->tx.ru_loc[i].mpdu_tried += \ 2173 _srcobj->tx.ru_loc[i].mpdu_tried; \ 2174 } \ 2175 for (i = 0; i < MAX_TRANSMIT_TYPES; i++) { \ 2176 _tgtobj->tx.transmit_type[i].num_msdu += \ 2177 _srcobj->tx.transmit_type[i].num_msdu; \ 2178 _tgtobj->tx.transmit_type[i].num_mpdu += \ 2179 _srcobj->tx.transmit_type[i].num_mpdu; \ 2180 _tgtobj->tx.transmit_type[i].mpdu_tried += \ 2181 _srcobj->tx.transmit_type[i].mpdu_tried; \ 2182 } \ 2183 for (i = 0; i < MAX_MU_GROUP_ID; i++) { \ 2184 _tgtobj->tx.mu_group_id[i] = _srcobj->tx.mu_group_id[i]; \ 2185 } \ 2186 _tgtobj->tx.tx_ucast_total.num += \ 2187 _srcobj->tx.tx_ucast_total.num;\ 2188 _tgtobj->tx.tx_ucast_total.bytes += \ 2189 _srcobj->tx.tx_ucast_total.bytes;\ 2190 _tgtobj->tx.tx_ucast_success.num += \ 2191 _srcobj->tx.tx_ucast_success.num; \ 2192 _tgtobj->tx.tx_ucast_success.bytes += \ 2193 _srcobj->tx.tx_ucast_success.bytes; \ 2194 \ 2195 for (i = 0; i < CDP_RSSI_CHAIN_LEN; i++) \ 2196 _tgtobj->tx.rssi_chain[i] = _srcobj->tx.rssi_chain[i]; \ 2197 _tgtobj->rx.mpdu_cnt_fcs_ok += _srcobj->rx.mpdu_cnt_fcs_ok; \ 2198 _tgtobj->rx.mpdu_cnt_fcs_err += _srcobj->rx.mpdu_cnt_fcs_err; \ 2199 _tgtobj->rx.non_ampdu_cnt += _srcobj->rx.non_ampdu_cnt; \ 2200 _tgtobj->rx.ampdu_cnt += _srcobj->rx.ampdu_cnt; \ 2201 _tgtobj->rx.rx_mpdus += _srcobj->rx.rx_mpdus; \ 2202 _tgtobj->rx.rx_ppdus += _srcobj->rx.rx_ppdus; \ 2203 _tgtobj->rx.rx_rate = _srcobj->rx.rx_rate; \ 2204 _tgtobj->rx.last_rx_rate = _srcobj->rx.last_rx_rate; \ 2205 _tgtobj->rx.rnd_avg_rx_rate = _srcobj->rx.rnd_avg_rx_rate; \ 2206 _tgtobj->rx.avg_rx_rate = _srcobj->rx.avg_rx_rate; \ 2207 _tgtobj->rx.rx_ratecode = _srcobj->rx.rx_ratecode; \ 2208 _tgtobj->rx.avg_snr = _srcobj->rx.avg_snr; \ 2209 _tgtobj->rx.rx_snr_measured_time = \ 2210 _srcobj->rx.rx_snr_measured_time; \ 2211 _tgtobj->rx.snr = _srcobj->rx.snr; \ 2212 _tgtobj->rx.last_snr = _srcobj->rx.last_snr; \ 2213 _tgtobj->rx.nss_info = _srcobj->rx.nss_info; \ 2214 _tgtobj->rx.mcs_info = _srcobj->rx.mcs_info; \ 2215 _tgtobj->rx.bw_info = _srcobj->rx.bw_info; \ 2216 _tgtobj->rx.gi_info = _srcobj->rx.gi_info; \ 2217 _tgtobj->rx.preamble_info = _srcobj->rx.preamble_info; \ 2218 _tgtobj->rx.mpdu_retry_cnt += _srcobj->rx.mpdu_retry_cnt; \ 2219 _tgtobj->rx.bar_cnt = _srcobj->rx.bar_cnt; \ 2220 _tgtobj->rx.ndpa_cnt = _srcobj->rx.ndpa_cnt; \ 2221 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 2222 for (i = 0; i < MAX_MCS; i++) { \ 2223 _tgtobj->rx.pkt_type[pream_type].mcs_count[i] += \ 2224 _srcobj->rx.pkt_type[pream_type].mcs_count[i]; \ 2225 } \ 2226 } \ 2227 for (i = 0; i < WME_AC_MAX; i++) { \ 2228 _tgtobj->rx.wme_ac_type[i] += _srcobj->rx.wme_ac_type[i]; \ 2229 _tgtobj->rx.wme_ac_type_bytes[i] += \ 2230 _srcobj->rx.wme_ac_type_bytes[i]; \ 2231 } \ 2232 for (i = 0; i < MAX_MCS; i++) { \ 
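		/* per MCS index: aggregate SU 11ax PPDU counts and received MPDU counts */ \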
2233 _tgtobj->rx.su_ax_ppdu_cnt.mcs_count[i] += \ 2234 _srcobj->rx.su_ax_ppdu_cnt.mcs_count[i]; \ 2235 _tgtobj->rx.rx_mpdu_cnt[i] += _srcobj->rx.rx_mpdu_cnt[i]; \ 2236 } \ 2237 for (mu_type = 0 ; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 2238 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok += \ 2239 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok; \ 2240 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err += \ 2241 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err; \ 2242 for (i = 0; i < SS_COUNT; i++) \ 2243 _tgtobj->rx.rx_mu[mu_type].ppdu_nss[i] += \ 2244 _srcobj->rx.rx_mu[mu_type].ppdu_nss[i]; \ 2245 for (i = 0; i < MAX_MCS; i++) \ 2246 _tgtobj->rx.rx_mu[mu_type].ppdu.mcs_count[i] += \ 2247 _srcobj->rx.rx_mu[mu_type].ppdu.mcs_count[i]; \ 2248 } \ 2249 for (i = 0; i < MAX_RECEPTION_TYPES; i++) { \ 2250 _tgtobj->rx.reception_type[i] += \ 2251 _srcobj->rx.reception_type[i]; \ 2252 _tgtobj->rx.ppdu_cnt[i] += _srcobj->rx.ppdu_cnt[i]; \ 2253 } \ 2254 for (i = 0; i < MAX_GI; i++) { \ 2255 _tgtobj->rx.sgi_count[i] += _srcobj->rx.sgi_count[i]; \ 2256 } \ 2257 for (i = 0; i < SS_COUNT; i++) { \ 2258 _tgtobj->rx.nss[i] += _srcobj->rx.nss[i]; \ 2259 _tgtobj->rx.ppdu_nss[i] += _srcobj->rx.ppdu_nss[i]; \ 2260 } \ 2261 for (i = 0; i < MAX_BW; i++) { \ 2262 _tgtobj->rx.bw[i] += _srcobj->rx.bw[i]; \ 2263 } \ 2264 DP_UPDATE_11BE_STATS(_tgtobj, _srcobj); \ 2265 } while (0) 2266 2267 #define DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj) \ 2268 do { \ 2269 DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj); \ 2270 DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj); \ 2271 DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj); \ 2272 } while (0) 2273 2274 #define DP_UPDATE_RX_INGRESS_STATS(_tgtobj, _srcobj) \ 2275 do { \ 2276 _tgtobj->rx_i.reo_rcvd_pkt.num += \ 2277 _srcobj->rx_i.reo_rcvd_pkt.num; \ 2278 _tgtobj->rx_i.reo_rcvd_pkt.bytes += \ 2279 _srcobj->rx_i.reo_rcvd_pkt.bytes; \ 2280 _tgtobj->rx_i.null_q_desc_pkt.num += \ 2281 _srcobj->rx_i.null_q_desc_pkt.num; \ 2282 _tgtobj->rx_i.null_q_desc_pkt.bytes += \ 2283 _srcobj->rx_i.null_q_desc_pkt.bytes; \ 2284 _tgtobj->rx_i.routed_eapol_pkt.num += \ 2285 _srcobj->rx_i.routed_eapol_pkt.num; \ 2286 _tgtobj->rx_i.routed_eapol_pkt.bytes += \ 2287 _srcobj->rx_i.routed_eapol_pkt.bytes; \ 2288 } while (0) 2289 2290 #define DP_UPDATE_LINK_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type) \ 2291 do { \ 2292 uint8_t i = 0; \ 2293 uint8_t idx = 0; \ 2294 enum dp_pkt_xmit_type temp_xmit_type = _xmit_type; \ 2295 if (temp_xmit_type == DP_XMIT_MLD) { \ 2296 idx = DP_VDEV_XMIT_TYPE; \ 2297 temp_xmit_type = DP_VDEV_XMIT_TYPE; \ 2298 } else if (temp_xmit_type == DP_XMIT_TOTAL) { \ 2299 temp_xmit_type = DP_VDEV_XMIT_TYPE; \ 2300 } \ 2301 for (; idx <= temp_xmit_type; idx++) { \ 2302 _tgtobj->tx_i.rcvd.num += _srcobj->tx_i[idx].rcvd.num; \ 2303 _tgtobj->tx_i.rcvd.bytes += \ 2304 _srcobj->tx_i[idx].rcvd.bytes; \ 2305 _tgtobj->tx_i.rcvd_in_fast_xmit_flow += \ 2306 _srcobj->tx_i[idx].rcvd_in_fast_xmit_flow; \ 2307 for (i = 0; i < CDP_MAX_TX_DATA_RINGS; i++) { \ 2308 _tgtobj->tx_i.rcvd_per_core[i] += \ 2309 _srcobj->tx_i[idx].rcvd_per_core[i]; \ 2310 } \ 2311 _tgtobj->tx_i.processed.num += \ 2312 _srcobj->tx_i[idx].processed.num; \ 2313 _tgtobj->tx_i.processed.bytes += \ 2314 _srcobj->tx_i[idx].processed.bytes; \ 2315 _tgtobj->tx_i.reinject_pkts.num += \ 2316 _srcobj->tx_i[idx].reinject_pkts.num; \ 2317 _tgtobj->tx_i.reinject_pkts.bytes += \ 2318 _srcobj->tx_i[idx].reinject_pkts.bytes; \ 2319 _tgtobj->tx_i.inspect_pkts.num += \ 2320 _srcobj->tx_i[idx].inspect_pkts.num; \ 2321 _tgtobj->tx_i.inspect_pkts.bytes += \ 2322 
_srcobj->tx_i[idx].inspect_pkts.bytes; \ 2323 _tgtobj->tx_i.nawds_mcast.num += \ 2324 _srcobj->tx_i[idx].nawds_mcast.num; \ 2325 _tgtobj->tx_i.nawds_mcast.bytes += \ 2326 _srcobj->tx_i[idx].nawds_mcast.bytes; \ 2327 _tgtobj->tx_i.bcast.num += \ 2328 _srcobj->tx_i[idx].bcast.num; \ 2329 _tgtobj->tx_i.bcast.bytes += \ 2330 _srcobj->tx_i[idx].bcast.bytes; \ 2331 _tgtobj->tx_i.raw.raw_pkt.num += \ 2332 _srcobj->tx_i[idx].raw.raw_pkt.num; \ 2333 _tgtobj->tx_i.raw.raw_pkt.bytes += \ 2334 _srcobj->tx_i[idx].raw.raw_pkt.bytes; \ 2335 _tgtobj->tx_i.raw.dma_map_error += \ 2336 _srcobj->tx_i[idx].raw.dma_map_error; \ 2337 _tgtobj->tx_i.raw.invalid_raw_pkt_datatype += \ 2338 _srcobj->tx_i[idx].raw.invalid_raw_pkt_datatype; \ 2339 _tgtobj->tx_i.raw.num_frags_overflow_err += \ 2340 _srcobj->tx_i[idx].raw.num_frags_overflow_err; \ 2341 _tgtobj->tx_i.sg.sg_pkt.num += \ 2342 _srcobj->tx_i[idx].sg.sg_pkt.num; \ 2343 _tgtobj->tx_i.sg.sg_pkt.bytes += \ 2344 _srcobj->tx_i[idx].sg.sg_pkt.bytes; \ 2345 _tgtobj->tx_i.sg.non_sg_pkts.num += \ 2346 _srcobj->tx_i[idx].sg.non_sg_pkts.num; \ 2347 _tgtobj->tx_i.sg.non_sg_pkts.bytes += \ 2348 _srcobj->tx_i[idx].sg.non_sg_pkts.bytes; \ 2349 _tgtobj->tx_i.sg.dropped_host.num += \ 2350 _srcobj->tx_i[idx].sg.dropped_host.num; \ 2351 _tgtobj->tx_i.sg.dropped_host.bytes += \ 2352 _srcobj->tx_i[idx].sg.dropped_host.bytes; \ 2353 _tgtobj->tx_i.sg.dropped_target += \ 2354 _srcobj->tx_i[idx].sg.dropped_target; \ 2355 _tgtobj->tx_i.sg.dma_map_error += \ 2356 _srcobj->tx_i[idx].sg.dma_map_error; \ 2357 _tgtobj->tx_i.mcast_en.mcast_pkt.num += \ 2358 _srcobj->tx_i[idx].mcast_en.mcast_pkt.num; \ 2359 _tgtobj->tx_i.mcast_en.mcast_pkt.bytes += \ 2360 _srcobj->tx_i[idx].mcast_en.mcast_pkt.bytes; \ 2361 _tgtobj->tx_i.mcast_en.dropped_map_error += \ 2362 _srcobj->tx_i[idx].mcast_en.dropped_map_error; \ 2363 _tgtobj->tx_i.mcast_en.dropped_self_mac += \ 2364 _srcobj->tx_i[idx].mcast_en.dropped_self_mac; \ 2365 _tgtobj->tx_i.mcast_en.dropped_send_fail += \ 2366 _srcobj->tx_i[idx].mcast_en.dropped_send_fail; \ 2367 _tgtobj->tx_i.mcast_en.ucast += \ 2368 _srcobj->tx_i[idx].mcast_en.ucast; \ 2369 _tgtobj->tx_i.mcast_en.fail_seg_alloc += \ 2370 _srcobj->tx_i[idx].mcast_en.fail_seg_alloc; \ 2371 _tgtobj->tx_i.mcast_en.clone_fail += \ 2372 _srcobj->tx_i[idx].mcast_en.clone_fail; \ 2373 _tgtobj->tx_i.igmp_mcast_en.igmp_rcvd += \ 2374 _srcobj->tx_i[idx].igmp_mcast_en.igmp_rcvd; \ 2375 _tgtobj->tx_i.igmp_mcast_en.igmp_ucast_converted += \ 2376 _srcobj->tx_i[idx].igmp_mcast_en.igmp_ucast_converted; \ 2377 _tgtobj->tx_i.dropped.desc_na.num += \ 2378 _srcobj->tx_i[idx].dropped.desc_na.num; \ 2379 _tgtobj->tx_i.dropped.desc_na.bytes += \ 2380 _srcobj->tx_i[idx].dropped.desc_na.bytes; \ 2381 _tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.num += \ 2382 _srcobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.num; \ 2383 _tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes += \ 2384 _srcobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.bytes; \ 2385 _tgtobj->tx_i.dropped.desc_na_exc_outstand.num += \ 2386 _srcobj->tx_i[idx].dropped.desc_na_exc_outstand.num; \ 2387 _tgtobj->tx_i.dropped.desc_na_exc_outstand.bytes += \ 2388 _srcobj->tx_i[idx].dropped.desc_na_exc_outstand.bytes; \ 2389 _tgtobj->tx_i.dropped.exc_desc_na.num += \ 2390 _srcobj->tx_i[idx].dropped.exc_desc_na.num; \ 2391 _tgtobj->tx_i.dropped.exc_desc_na.bytes += \ 2392 _srcobj->tx_i[idx].dropped.exc_desc_na.bytes; \ 2393 _tgtobj->tx_i.dropped.ring_full += \ 2394 _srcobj->tx_i[idx].dropped.ring_full; \ 2395 _tgtobj->tx_i.dropped.enqueue_fail += \ 2396 
_srcobj->tx_i[idx].dropped.enqueue_fail; \ 2397 _tgtobj->tx_i.dropped.dma_error += \ 2398 _srcobj->tx_i[idx].dropped.dma_error; \ 2399 _tgtobj->tx_i.dropped.res_full += \ 2400 _srcobj->tx_i[idx].dropped.res_full; \ 2401 _tgtobj->tx_i.dropped.headroom_insufficient += \ 2402 _srcobj->tx_i[idx].dropped.headroom_insufficient; \ 2403 _tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check += \ 2404 _srcobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check; \ 2405 _tgtobj->tx_i.dropped.drop_ingress += \ 2406 _srcobj->tx_i[idx].dropped.drop_ingress; \ 2407 _tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path += \ 2408 _srcobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path; \ 2409 _tgtobj->tx_i.dropped.tx_mcast_drop += \ 2410 _srcobj->tx_i[idx].dropped.tx_mcast_drop; \ 2411 _tgtobj->tx_i.dropped.fw2wbm_tx_drop += \ 2412 _srcobj->tx_i[idx].dropped.fw2wbm_tx_drop; \ 2413 _tgtobj->tx_i.dropped.dropped_pkt.bytes += \ 2414 _srcobj->tx_i[idx].dropped.dropped_pkt.bytes; \ 2415 _tgtobj->tx_i.mesh.exception_fw += \ 2416 _srcobj->tx_i[idx].mesh.exception_fw; \ 2417 _tgtobj->tx_i.mesh.completion_fw += \ 2418 _srcobj->tx_i[idx].mesh.completion_fw; \ 2419 _tgtobj->tx_i.cce_classified += \ 2420 _srcobj->tx_i[idx].cce_classified; \ 2421 _tgtobj->tx_i.cce_classified_raw += \ 2422 _srcobj->tx_i[idx].cce_classified_raw; \ 2423 _tgtobj->tx_i.sniffer_rcvd.num += \ 2424 _srcobj->tx_i[idx].sniffer_rcvd.num; \ 2425 _tgtobj->tx_i.sniffer_rcvd.bytes += \ 2426 _srcobj->tx_i[idx].sniffer_rcvd.bytes; \ 2427 } \ 2428 _tgtobj->tx_i.dropped.dropped_pkt.num = \ 2429 _tgtobj->tx_i.dropped.dma_error + \ 2430 _tgtobj->tx_i.dropped.ring_full + \ 2431 _tgtobj->tx_i.dropped.enqueue_fail + \ 2432 _tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check + \ 2433 _tgtobj->tx_i.dropped.desc_na.num + \ 2434 _tgtobj->tx_i.dropped.res_full + \ 2435 _tgtobj->tx_i.dropped.drop_ingress + \ 2436 _tgtobj->tx_i.dropped.headroom_insufficient + \ 2437 _tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path + \ 2438 _tgtobj->tx_i.dropped.tx_mcast_drop + \ 2439 _tgtobj->tx_i.dropped.fw2wbm_tx_drop; \ 2440 DP_UPDATE_RX_INGRESS_STATS(_tgtobj, _srcobj); \ 2441 } while (0) 2442 2443 #define DP_UPDATE_MLD_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type) \ 2444 do { \ 2445 uint8_t i = 0; \ 2446 uint8_t idx = 0; \ 2447 enum dp_pkt_xmit_type temp_xmit_type = _xmit_type; \ 2448 if (temp_xmit_type == DP_XMIT_MLD) { \ 2449 idx = DP_VDEV_XMIT_TYPE; \ 2450 temp_xmit_type = DP_VDEV_XMIT_TYPE; \ 2451 } else if (temp_xmit_type == DP_XMIT_TOTAL) { \ 2452 temp_xmit_type = DP_VDEV_XMIT_TYPE; \ 2453 } \ 2454 for (; idx <= temp_xmit_type; idx++) { \ 2455 _tgtobj->tx_i[idx].rcvd.num += _srcobj->tx_i[idx].rcvd.num; \ 2456 _tgtobj->tx_i[idx].rcvd.bytes += \ 2457 _srcobj->tx_i[idx].rcvd.bytes; \ 2458 _tgtobj->tx_i[idx].rcvd_in_fast_xmit_flow += \ 2459 _srcobj->tx_i[idx].rcvd_in_fast_xmit_flow; \ 2460 for (i = 0; i < CDP_MAX_TX_DATA_RINGS; i++) { \ 2461 _tgtobj->tx_i[idx].rcvd_per_core[i] += \ 2462 _srcobj->tx_i[idx].rcvd_per_core[i]; \ 2463 } \ 2464 _tgtobj->tx_i[idx].processed.num += \ 2465 _srcobj->tx_i[idx].processed.num; \ 2466 _tgtobj->tx_i[idx].processed.bytes += \ 2467 _srcobj->tx_i[idx].processed.bytes; \ 2468 _tgtobj->tx_i[idx].reinject_pkts.num += \ 2469 _srcobj->tx_i[idx].reinject_pkts.num; \ 2470 _tgtobj->tx_i[idx].reinject_pkts.bytes += \ 2471 _srcobj->tx_i[idx].reinject_pkts.bytes; \ 2472 _tgtobj->tx_i[idx].inspect_pkts.num += \ 2473 _srcobj->tx_i[idx].inspect_pkts.num; \ 2474 _tgtobj->tx_i[idx].inspect_pkts.bytes += \ 2475 _srcobj->tx_i[idx].inspect_pkts.bytes; \ 2476 
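		/* NAWDS, broadcast, raw, scatter-gather and mcast-enhancement ingress counters */ \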
_tgtobj->tx_i[idx].nawds_mcast.num += \ 2477 _srcobj->tx_i[idx].nawds_mcast.num; \ 2478 _tgtobj->tx_i[idx].nawds_mcast.bytes += \ 2479 _srcobj->tx_i[idx].nawds_mcast.bytes; \ 2480 _tgtobj->tx_i[idx].bcast.num += \ 2481 _srcobj->tx_i[idx].bcast.num; \ 2482 _tgtobj->tx_i[idx].bcast.bytes += \ 2483 _srcobj->tx_i[idx].bcast.bytes; \ 2484 _tgtobj->tx_i[idx].raw.raw_pkt.num += \ 2485 _srcobj->tx_i[idx].raw.raw_pkt.num; \ 2486 _tgtobj->tx_i[idx].raw.raw_pkt.bytes += \ 2487 _srcobj->tx_i[idx].raw.raw_pkt.bytes; \ 2488 _tgtobj->tx_i[idx].raw.dma_map_error += \ 2489 _srcobj->tx_i[idx].raw.dma_map_error; \ 2490 _tgtobj->tx_i[idx].raw.invalid_raw_pkt_datatype += \ 2491 _srcobj->tx_i[idx].raw.invalid_raw_pkt_datatype; \ 2492 _tgtobj->tx_i[idx].raw.num_frags_overflow_err += \ 2493 _srcobj->tx_i[idx].raw.num_frags_overflow_err; \ 2494 _tgtobj->tx_i[idx].sg.sg_pkt.num += \ 2495 _srcobj->tx_i[idx].sg.sg_pkt.num; \ 2496 _tgtobj->tx_i[idx].sg.sg_pkt.bytes += \ 2497 _srcobj->tx_i[idx].sg.sg_pkt.bytes; \ 2498 _tgtobj->tx_i[idx].sg.non_sg_pkts.num += \ 2499 _srcobj->tx_i[idx].sg.non_sg_pkts.num; \ 2500 _tgtobj->tx_i[idx].sg.non_sg_pkts.bytes += \ 2501 _srcobj->tx_i[idx].sg.non_sg_pkts.bytes; \ 2502 _tgtobj->tx_i[idx].sg.dropped_host.num += \ 2503 _srcobj->tx_i[idx].sg.dropped_host.num; \ 2504 _tgtobj->tx_i[idx].sg.dropped_host.bytes += \ 2505 _srcobj->tx_i[idx].sg.dropped_host.bytes; \ 2506 _tgtobj->tx_i[idx].sg.dropped_target += \ 2507 _srcobj->tx_i[idx].sg.dropped_target; \ 2508 _tgtobj->tx_i[idx].sg.dma_map_error += \ 2509 _srcobj->tx_i[idx].sg.dma_map_error; \ 2510 _tgtobj->tx_i[idx].mcast_en.mcast_pkt.num += \ 2511 _srcobj->tx_i[idx].mcast_en.mcast_pkt.num; \ 2512 _tgtobj->tx_i[idx].mcast_en.mcast_pkt.bytes += \ 2513 _srcobj->tx_i[idx].mcast_en.mcast_pkt.bytes; \ 2514 _tgtobj->tx_i[idx].mcast_en.dropped_map_error += \ 2515 _srcobj->tx_i[idx].mcast_en.dropped_map_error; \ 2516 _tgtobj->tx_i[idx].mcast_en.dropped_self_mac += \ 2517 _srcobj->tx_i[idx].mcast_en.dropped_self_mac; \ 2518 _tgtobj->tx_i[idx].mcast_en.dropped_send_fail += \ 2519 _srcobj->tx_i[idx].mcast_en.dropped_send_fail; \ 2520 _tgtobj->tx_i[idx].mcast_en.ucast += \ 2521 _srcobj->tx_i[idx].mcast_en.ucast; \ 2522 _tgtobj->tx_i[idx].mcast_en.fail_seg_alloc += \ 2523 _srcobj->tx_i[idx].mcast_en.fail_seg_alloc; \ 2524 _tgtobj->tx_i[idx].mcast_en.clone_fail += \ 2525 _srcobj->tx_i[idx].mcast_en.clone_fail; \ 2526 _tgtobj->tx_i[idx].igmp_mcast_en.igmp_rcvd += \ 2527 _srcobj->tx_i[idx].igmp_mcast_en.igmp_rcvd; \ 2528 _tgtobj->tx_i[idx].igmp_mcast_en.igmp_ucast_converted += \ 2529 _srcobj->tx_i[idx].igmp_mcast_en.igmp_ucast_converted; \ 2530 _tgtobj->tx_i[idx].dropped.desc_na.num += \ 2531 _srcobj->tx_i[idx].dropped.desc_na.num; \ 2532 _tgtobj->tx_i[idx].dropped.desc_na.bytes += \ 2533 _srcobj->tx_i[idx].dropped.desc_na.bytes; \ 2534 _tgtobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.num += \ 2535 _srcobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.num; \ 2536 _tgtobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.bytes += \ 2537 _srcobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.bytes; \ 2538 _tgtobj->tx_i[idx].dropped.desc_na_exc_outstand.num += \ 2539 _srcobj->tx_i[idx].dropped.desc_na_exc_outstand.num; \ 2540 _tgtobj->tx_i[idx].dropped.desc_na_exc_outstand.bytes += \ 2541 _srcobj->tx_i[idx].dropped.desc_na_exc_outstand.bytes; \ 2542 _tgtobj->tx_i[idx].dropped.exc_desc_na.num += \ 2543 _srcobj->tx_i[idx].dropped.exc_desc_na.num; \ 2544 _tgtobj->tx_i[idx].dropped.exc_desc_na.bytes += \ 2545 _srcobj->tx_i[idx].dropped.exc_desc_na.bytes; \ 2546 
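		/* ring and descriptor level drop reasons; these also feed the derived */ \
		/* dropped_pkt.num total computed at the end of each loop iteration */ \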
_tgtobj->tx_i[idx].dropped.ring_full += \ 2547 _srcobj->tx_i[idx].dropped.ring_full; \ 2548 _tgtobj->tx_i[idx].dropped.enqueue_fail += \ 2549 _srcobj->tx_i[idx].dropped.enqueue_fail; \ 2550 _tgtobj->tx_i[idx].dropped.dma_error += \ 2551 _srcobj->tx_i[idx].dropped.dma_error; \ 2552 _tgtobj->tx_i[idx].dropped.res_full += \ 2553 _srcobj->tx_i[idx].dropped.res_full; \ 2554 _tgtobj->tx_i[idx].dropped.headroom_insufficient += \ 2555 _srcobj->tx_i[idx].dropped.headroom_insufficient; \ 2556 _tgtobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check += \ 2557 _srcobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check; \ 2558 _tgtobj->tx_i[idx].dropped.drop_ingress += \ 2559 _srcobj->tx_i[idx].dropped.drop_ingress; \ 2560 _tgtobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path += \ 2561 _srcobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path; \ 2562 _tgtobj->tx_i[idx].dropped.tx_mcast_drop += \ 2563 _srcobj->tx_i[idx].dropped.tx_mcast_drop; \ 2564 _tgtobj->tx_i[idx].dropped.fw2wbm_tx_drop += \ 2565 _srcobj->tx_i[idx].dropped.fw2wbm_tx_drop; \ 2566 _tgtobj->tx_i[idx].dropped.dropped_pkt.bytes += \ 2567 _srcobj->tx_i[idx].dropped.dropped_pkt.bytes; \ 2568 _tgtobj->tx_i[idx].mesh.exception_fw += \ 2569 _srcobj->tx_i[idx].mesh.exception_fw; \ 2570 _tgtobj->tx_i[idx].mesh.completion_fw += \ 2571 _srcobj->tx_i[idx].mesh.completion_fw; \ 2572 _tgtobj->tx_i[idx].cce_classified += \ 2573 _srcobj->tx_i[idx].cce_classified; \ 2574 _tgtobj->tx_i[idx].cce_classified_raw += \ 2575 _srcobj->tx_i[idx].cce_classified_raw; \ 2576 _tgtobj->tx_i[idx].sniffer_rcvd.num += \ 2577 _srcobj->tx_i[idx].sniffer_rcvd.num; \ 2578 _tgtobj->tx_i[idx].sniffer_rcvd.bytes += \ 2579 _srcobj->tx_i[idx].sniffer_rcvd.bytes; \ 2580 _tgtobj->tx_i[idx].dropped.dropped_pkt.num = \ 2581 _tgtobj->tx_i[idx].dropped.dma_error + \ 2582 _tgtobj->tx_i[idx].dropped.ring_full + \ 2583 _tgtobj->tx_i[idx].dropped.enqueue_fail + \ 2584 _tgtobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check + \ 2585 _tgtobj->tx_i[idx].dropped.desc_na.num + \ 2586 _tgtobj->tx_i[idx].dropped.res_full + \ 2587 _tgtobj->tx_i[idx].dropped.drop_ingress + \ 2588 _tgtobj->tx_i[idx].dropped.headroom_insufficient + \ 2589 _tgtobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path + \ 2590 _tgtobj->tx_i[idx].dropped.tx_mcast_drop + \ 2591 _tgtobj->tx_i[idx].dropped.fw2wbm_tx_drop; \ 2592 } \ 2593 DP_UPDATE_RX_INGRESS_STATS(_tgtobj, _srcobj); \ 2594 } while (0) 2595 2596 #define DP_UPDATE_TO_MLD_VDEV_STATS(_tgtobj, _srcobj, _xmit_type) \ 2597 do { \ 2598 DP_UPDATE_MLD_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type); \ 2599 DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj); \ 2600 } while (0) 2601 2602 #define DP_UPDATE_TO_LINK_VDEV_STATS(_tgtobj, _srcobj, _xmit_type) \ 2603 do { \ 2604 DP_UPDATE_LINK_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type); \ 2605 DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj); \ 2606 } while (0) 2607 /** 2608 * dp_peer_find_attach() - Allocates memory for peer objects 2609 * @soc: SoC handle 2610 * 2611 * Return: QDF_STATUS 2612 */ 2613 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc); 2614 2615 /** 2616 * dp_peer_find_detach() - Frees memory for peer objects 2617 * @soc: SoC handle 2618 * 2619 * Return: none 2620 */ 2621 void dp_peer_find_detach(struct dp_soc *soc); 2622 2623 /** 2624 * dp_peer_find_hash_add() - add peer to peer_hash_table 2625 * @soc: soc handle 2626 * @peer: peer handle 2627 * 2628 * Return: none 2629 */ 2630 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer); 2631 2632 /** 2633 * dp_peer_find_hash_remove() - 
remove peer from peer_hash_table 2634 * @soc: soc handle 2635 * @peer: peer handle 2636 * 2637 * Return: none 2638 */ 2639 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer); 2640 2641 /* unused?? */ 2642 void dp_peer_find_hash_erase(struct dp_soc *soc); 2643 2644 /** 2645 * dp_peer_vdev_list_add() - add peer into vdev's peer list 2646 * @soc: soc handle 2647 * @vdev: vdev handle 2648 * @peer: peer handle 2649 * 2650 * Return: none 2651 */ 2652 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev, 2653 struct dp_peer *peer); 2654 2655 /** 2656 * dp_peer_vdev_list_remove() - remove peer from vdev's peer list 2657 * @soc: SoC handle 2658 * @vdev: VDEV handle 2659 * @peer: peer handle 2660 * 2661 * Return: none 2662 */ 2663 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev, 2664 struct dp_peer *peer); 2665 2666 /** 2667 * dp_peer_find_id_to_obj_add() - Add peer into peer_id table 2668 * @soc: SoC handle 2669 * @peer: peer handle 2670 * @peer_id: peer_id 2671 * 2672 * Return: None 2673 */ 2674 void dp_peer_find_id_to_obj_add(struct dp_soc *soc, 2675 struct dp_peer *peer, 2676 uint16_t peer_id); 2677 2678 /** 2679 * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table 2680 * @soc: SoC handle 2681 * @peer: peer handle 2682 * @txrx_peer: txrx peer handle 2683 * 2684 * Return: None 2685 */ 2686 void dp_txrx_peer_attach_add(struct dp_soc *soc, 2687 struct dp_peer *peer, 2688 struct dp_txrx_peer *txrx_peer); 2689 2690 /** 2691 * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table 2692 * @soc: SoC handle 2693 * @peer_id: peer_id 2694 * 2695 * Return: None 2696 */ 2697 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc, 2698 uint16_t peer_id); 2699 2700 /** 2701 * dp_vdev_unref_delete() - check and process vdev delete 2702 * @soc: DP specific soc pointer 2703 * @vdev: DP specific vdev pointer 2704 * @mod_id: module id 2705 * 2706 */ 2707 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev, 2708 enum dp_mod_id mod_id); 2709 2710 /** 2711 * dp_peer_ppdu_delayed_ba_cleanup() - free ppdu allocated in peer 2712 * @peer: Datapath peer 2713 * 2714 * Return: void 2715 */ 2716 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer); 2717 2718 /** 2719 * dp_peer_rx_init() - Initialize receive TID state 2720 * @pdev: Datapath pdev 2721 * @peer: Datapath peer 2722 * 2723 */ 2724 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer); 2725 2726 /** 2727 * dp_peer_cleanup() - Cleanup peer information 2728 * @vdev: Datapath vdev 2729 * @peer: Datapath peer 2730 * 2731 */ 2732 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); 2733 2734 #ifdef DP_PEER_EXTENDED_API 2735 /** 2736 * dp_register_peer() - Register peer into physical device 2737 * @soc_hdl: data path soc handle 2738 * @pdev_id: device instance id 2739 * @sta_desc: peer description 2740 * 2741 * Register peer into physical device 2742 * 2743 * Return: QDF_STATUS_SUCCESS registration success 2744 * QDF_STATUS_E_FAULT peer not found 2745 */ 2746 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2747 struct ol_txrx_desc_type *sta_desc); 2748 2749 /** 2750 * dp_clear_peer() - remove peer from physical device 2751 * @soc_hdl: data path soc handle 2752 * @pdev_id: device instance id 2753 * @peer_addr: peer mac address 2754 * 2755 * remove peer from physical device 2756 * 2757 * Return: QDF_STATUS_SUCCESS registration success 2758 * QDF_STATUS_E_FAULT peer not found 2759 */ 2760 QDF_STATUS 
dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2761 struct qdf_mac_addr peer_addr); 2762 2763 /** 2764 * dp_find_peer_exist_on_vdev - find if peer exists on the given vdev 2765 * @soc_hdl: datapath soc handle 2766 * @vdev_id: vdev instance id 2767 * @peer_addr: peer mac address 2768 * 2769 * Return: true or false 2770 */ 2771 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2772 uint8_t *peer_addr); 2773 2774 /** 2775 * dp_find_peer_exist_on_other_vdev - find if peer exists 2776 * on other than the given vdev 2777 * @soc_hdl: datapath soc handle 2778 * @vdev_id: vdev instance id 2779 * @peer_addr: peer mac address 2780 * @max_bssid: max number of bssids 2781 * 2782 * Return: true or false 2783 */ 2784 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl, 2785 uint8_t vdev_id, uint8_t *peer_addr, 2786 uint16_t max_bssid); 2787 2788 /** 2789 * dp_peer_state_update() - update peer local state 2790 * @soc: datapath soc handle 2791 * @peer_mac: peer mac address 2792 * @state: new peer local state 2793 * 2794 * update peer local state 2795 * 2796 * Return: QDF_STATUS_SUCCESS registration success 2797 */ 2798 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac, 2799 enum ol_txrx_peer_state state); 2800 2801 /** 2802 * dp_get_vdevid() - Get virtual interface id which peer registered 2803 * @soc_hdl: datapath soc handle 2804 * @peer_mac: peer mac address 2805 * @vdev_id: virtual interface id which peer registered 2806 * 2807 * Get virtual interface id which peer registered 2808 * 2809 * Return: QDF_STATUS_SUCCESS registration success 2810 */ 2811 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2812 uint8_t *vdev_id); 2813 2814 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle, 2815 struct qdf_mac_addr peer_addr); 2816 2817 /** 2818 * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs 2819 * @peer: peer instance 2820 * 2821 * Get virtual interface instance which peer belongs 2822 * 2823 * Return: virtual interface instance pointer 2824 * NULL in case cannot find 2825 */ 2826 struct cdp_vdev *dp_get_vdev_for_peer(void *peer); 2827 2828 /** 2829 * dp_peer_get_peer_mac_addr() - Get peer mac address 2830 * @peer: peer instance 2831 * 2832 * Get peer mac address 2833 * 2834 * Return: peer mac address pointer 2835 * NULL in case cannot find 2836 */ 2837 uint8_t *dp_peer_get_peer_mac_addr(void *peer); 2838 2839 /** 2840 * dp_get_peer_state() - Get local peer state 2841 * @soc: datapath soc handle 2842 * @vdev_id: vdev id 2843 * @peer_mac: peer mac addr 2844 * 2845 * Get local peer state 2846 * 2847 * Return: peer status 2848 */ 2849 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id, 2850 uint8_t *peer_mac); 2851 2852 /** 2853 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device 2854 * @pdev: data path device instance 2855 * 2856 * local peer id pool alloc for physical device 2857 * 2858 * Return: none 2859 */ 2860 void dp_local_peer_id_pool_init(struct dp_pdev *pdev); 2861 2862 /** 2863 * dp_local_peer_id_alloc() - allocate local peer id 2864 * @pdev: data path device instance 2865 * @peer: new peer instance 2866 * 2867 * allocate local peer id 2868 * 2869 * Return: none 2870 */ 2871 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer); 2872 2873 /** 2874 * dp_local_peer_id_free() - remove local peer id 2875 * @pdev: data path device instance 2876 * @peer: peer instance should be removed 2877 * 2878 * remove local 
peer id 2879 * 2880 * Return: none 2881 */ 2882 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer); 2883 2884 /** 2885 * dp_set_peer_as_tdls_peer() - set tdls peer flag to peer 2886 * @soc_hdl: datapath soc handle 2887 * @vdev_id: vdev_id 2888 * @peer_mac: peer mac addr 2889 * @val: tdls peer flag 2890 * 2891 * Return: none 2892 */ 2893 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2894 uint8_t *peer_mac, bool val); 2895 #else 2896 static inline 2897 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2898 uint8_t *vdev_id) 2899 { 2900 return QDF_STATUS_E_NOSUPPORT; 2901 } 2902 2903 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev) 2904 { 2905 } 2906 2907 static inline 2908 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) 2909 { 2910 } 2911 2912 static inline 2913 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) 2914 { 2915 } 2916 2917 static inline 2918 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2919 uint8_t *peer_mac, bool val) 2920 { 2921 } 2922 #endif 2923 2924 /** 2925 * dp_find_peer_exist - find peer if already exists 2926 * @soc_hdl: datapath soc handle 2927 * @pdev_id: physical device instance id 2928 * @peer_addr: peer mac address 2929 * 2930 * Return: true or false 2931 */ 2932 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2933 uint8_t *peer_addr); 2934 2935 #ifdef DP_UMAC_HW_RESET_SUPPORT 2936 /** 2937 * dp_pause_reo_send_cmd() - Pause Reo send commands. 2938 * @soc: dp soc 2939 * 2940 * Return: none 2941 */ 2942 void dp_pause_reo_send_cmd(struct dp_soc *soc); 2943 2944 /** 2945 * dp_resume_reo_send_cmd() - Resume Reo send commands. 2946 * @soc: dp soc 2947 * 2948 * Return: none 2949 */ 2950 void dp_resume_reo_send_cmd(struct dp_soc *soc); 2951 2952 /** 2953 * dp_cleanup_reo_cmd_module - Clean up the reo cmd module 2954 * @soc: DP SoC handle 2955 * 2956 * Return: none 2957 */ 2958 void dp_cleanup_reo_cmd_module(struct dp_soc *soc); 2959 2960 /** 2961 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist 2962 * @soc: DP SOC handle 2963 * 2964 * Return: none 2965 */ 2966 void dp_reo_desc_freelist_destroy(struct dp_soc *soc); 2967 2968 /** 2969 * dp_reset_rx_reo_tid_queue() - Reset the reo tid queues 2970 * @soc: dp soc 2971 * @hw_qdesc_vaddr: starting address of the tid queues 2972 * @size: size of the memory pointed to by hw_qdesc_vaddr 2973 * 2974 * Return: none 2975 */ 2976 void dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr, 2977 uint32_t size); 2978 2979 2980 static inline void dp_umac_reset_trigger_pre_reset_notify_cb(struct dp_soc *soc) 2981 { 2982 notify_pre_reset_fw_callback callback = soc->notify_fw_callback; 2983 2984 if (callback) 2985 callback(soc); 2986 } 2987 2988 /** 2989 * dp_reset_global_tx_desc_cleanup_flag() - Reset cleanup needed flag 2990 * @soc: dp soc handle 2991 * 2992 * Return: None 2993 */ 2994 void dp_reset_global_tx_desc_cleanup_flag(struct dp_soc *soc); 2995 2996 /** 2997 * dp_get_global_tx_desc_cleanup_flag() - Get cleanup needed flag 2998 * @soc: dp soc handle 2999 * 3000 * Return: cleanup needed/ not needed 3001 */ 3002 bool dp_get_global_tx_desc_cleanup_flag(struct dp_soc *soc); 3003 3004 3005 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 3006 /** 3007 * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session 3008 * @soc: dp soc handle 3009 * 3010 * Return: void 3011 */ 3012 void 
dp_umac_reset_complete_umac_recovery(struct dp_soc *soc); 3013 3014 /** 3015 * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session 3016 * @soc: dp soc handle 3017 * @umac_reset_ctx: Umac reset context 3018 * @rx_event: Rx event received 3019 * @is_target_recovery: Flag to indicate if it is triggered for target recovery 3020 * 3021 * Return: status 3022 */ 3023 QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc, 3024 struct dp_soc_umac_reset_ctx *umac_reset_ctx, 3025 enum umac_reset_rx_event rx_event, 3026 bool is_target_recovery); 3027 3028 /** 3029 * dp_umac_reset_handle_action_cb() - Function to call action callback 3030 * @soc: dp soc handle 3031 * @umac_reset_ctx: Umac reset context 3032 * @action: Action to call the callback for 3033 * 3034 * Return: QDF_STATUS status 3035 */ 3036 QDF_STATUS dp_umac_reset_handle_action_cb(struct dp_soc *soc, 3037 struct dp_soc_umac_reset_ctx *umac_reset_ctx, 3038 enum umac_reset_action action); 3039 3040 /** 3041 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command 3042 * @umac_reset_ctx: UMAC reset context 3043 * @tx_cmd: Tx command to be posted 3044 * 3045 * Return: QDF status of operation 3046 */ 3047 QDF_STATUS 3048 dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx, 3049 enum umac_reset_tx_cmd tx_cmd); 3050 3051 /** 3052 * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator 3053 * @soc: dp soc handle 3054 * 3055 * Return: true if the soc is initiator or false otherwise 3056 */ 3057 bool dp_umac_reset_initiator_check(struct dp_soc *soc); 3058 3059 /** 3060 * dp_umac_reset_target_recovery_check() - Check if this is for target recovery 3061 * @soc: dp soc handle 3062 * 3063 * Return: true if the session is for target recovery or false otherwise 3064 */ 3065 bool dp_umac_reset_target_recovery_check(struct dp_soc *soc); 3066 3067 /** 3068 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored 3069 * @soc: dp soc handle 3070 * 3071 * Return: true if the soc is ignored or false otherwise 3072 */ 3073 bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc); 3074 3075 /** 3076 * dp_mlo_umac_reset_stats_print() - API to print MLO umac reset stats 3077 * @soc: dp soc handle 3078 * 3079 * Return: QDF_STATUS 3080 */ 3081 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc); 3082 #else 3083 static inline 3084 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc) 3085 { 3086 return QDF_STATUS_SUCCESS; 3087 } 3088 #endif 3089 #else 3090 static inline void dp_umac_reset_trigger_pre_reset_notify_cb(struct dp_soc *soc) 3091 { 3092 } 3093 #endif 3094 3095 #if defined(DP_UMAC_HW_RESET_SUPPORT) && defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 3096 /** 3097 * dp_umac_reset_notify_asserted_soc() - API to notify the asserted SOC 3098 * @soc: dp soc 3099 * 3100 * Return: QDF_STATUS 3101 */ 3102 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc); 3103 3104 /** 3105 * dp_get_umac_reset_in_progress_state() - API to check umac reset in progress 3106 * state 3107 * @psoc: dp soc handle 3108 * 3109 * Return: umac reset state 3110 */ 3111 enum cdp_umac_reset_state 3112 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc); 3113 #else 3114 static inline 3115 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc) 3116 { 3117 return QDF_STATUS_SUCCESS; 3118 } 3119 3120 static inline enum cdp_umac_reset_state 3121 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc) 3122 { 3123 return 
CDP_UMAC_RESET_NOT_IN_PROGRESS; 3124 } 3125 #endif 3126 3127 #ifndef WLAN_SOFTUMAC_SUPPORT 3128 QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type, 3129 struct hal_reo_cmd_params *params, 3130 void (*callback_fn), void *data); 3131 3132 /** 3133 * dp_reo_cmdlist_destroy() - Free REO commands in the queue 3134 * @soc: DP SoC handle 3135 * 3136 * Return: none 3137 */ 3138 void dp_reo_cmdlist_destroy(struct dp_soc *soc); 3139 3140 /** 3141 * dp_reo_status_ring_handler() - Handler for REO Status ring 3142 * @int_ctx: pointer to DP interrupt context 3143 * @soc: DP Soc handle 3144 * 3145 * Return: Number of descriptors reaped 3146 */ 3147 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, 3148 struct dp_soc *soc); 3149 #endif 3150 3151 /** 3152 * dp_aggregate_vdev_stats() - Consolidate stats at VDEV level 3153 * @vdev: DP VDEV handle 3154 * @vdev_stats: aggregate statistics 3155 * @xmit_type: xmit type of packet - MLD/Link 3156 * return: void 3157 */ 3158 void dp_aggregate_vdev_stats(struct dp_vdev *vdev, 3159 struct cdp_vdev_stats *vdev_stats, 3160 enum dp_pkt_xmit_type xmit_type); 3161 3162 /** 3163 * dp_txrx_get_vdev_stats() - Update buffer with cdp_vdev_stats 3164 * @soc_hdl: CDP SoC handle 3165 * @vdev_id: vdev Id 3166 * @buf: buffer for vdev stats 3167 * @is_aggregate: are aggregate stats being collected 3168 * 3169 * Return: QDF_STATUS 3170 */ 3171 QDF_STATUS 3172 dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3173 void *buf, bool is_aggregate); 3174 3175 /** 3176 * dp_rx_bar_stats_cb() - BAR received stats callback 3177 * @soc: SOC handle 3178 * @cb_ctxt: Call back context 3179 * @reo_status: Reo status 3180 * 3181 * Return: void 3182 */ 3183 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, 3184 union hal_reo_status *reo_status); 3185 3186 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id, 3187 qdf_nbuf_t nbuf, 3188 uint8_t newmac[][QDF_MAC_ADDR_SIZE], 3189 uint8_t new_mac_cnt, uint8_t tid, 3190 bool is_igmp, bool is_dms_pkt); 3191 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 3192 3193 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 3194 3195 /** 3196 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW 3197 * @pdev: DP PDEV handle 3198 * @stats_type_upload_mask: stats type requested by user 3199 * @config_param_0: extra configuration parameters 3200 * @config_param_1: extra configuration parameters 3201 * @config_param_2: extra configuration parameters 3202 * @config_param_3: extra configuration parameters 3203 * @cookie: 3204 * @cookie_msb: 3205 * @mac_id: mac number 3206 * 3207 * Return: QDF STATUS 3208 */ 3209 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, 3210 uint32_t stats_type_upload_mask, uint32_t config_param_0, 3211 uint32_t config_param_1, uint32_t config_param_2, 3212 uint32_t config_param_3, int cookie, int cookie_msb, 3213 uint8_t mac_id); 3214 3215 /** 3216 * dp_htt_stats_print_tag() - function to select the tag type and 3217 * print the corresponding tag structure 3218 * @pdev: pdev pointer 3219 * @tag_type: tag type that is to be printed 3220 * @tag_buf: pointer to the tag structure 3221 * 3222 * Return: void 3223 */ 3224 void dp_htt_stats_print_tag(struct dp_pdev *pdev, 3225 uint8_t tag_type, uint32_t *tag_buf); 3226 3227 /** 3228 * dp_htt_stats_copy_tag() - function to select the tag type and 3229 * copy the corresponding tag structure 3230 * @pdev: DP_PDEV handle 3231 * @tag_type: tag type 
that is to be printed 3232 * @tag_buf: pointer to the tag structure 3233 * 3234 * Return: void 3235 */ 3236 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf); 3237 3238 /** 3239 * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration 3240 * HTT message to pass to FW 3241 * @pdev: DP PDEV handle 3242 * @tuple_mask: tuple configuration to report 3 tuple hash value in either 3243 * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV. 3244 * 3245 * tuple_mask[1:0]: 3246 * 00 - Do not report 3 tuple hash value 3247 * 10 - Report 3 tuple hash value in toeplitz_2_or_4 3248 * 01 - Report 3 tuple hash value in flow_id_toeplitz 3249 * 11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz 3250 * @mac_id: MAC ID 3251 * 3252 * Return: QDF STATUS 3253 */ 3254 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask, 3255 uint8_t mac_id); 3256 3257 #ifdef IPA_OFFLOAD 3258 /** 3259 * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo 3260 * @soc: soc handle 3261 * @cb_ctxt: combination of peer_id and tid 3262 * @reo_status: reo status 3263 * 3264 * Return: void 3265 */ 3266 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt, 3267 union hal_reo_status *reo_status); 3268 3269 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer, 3270 dp_rxtid_stats_cmd_cb dp_stats_cmd_cb); 3271 #ifdef IPA_OPT_WIFI_DP 3272 void dp_ipa_wdi_opt_dpath_notify_flt_rlsd(int flt0_rslt, 3273 int flt1_rslt); 3274 void dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(int flt0_rslt, int flt1_rslt); 3275 void dp_ipa_wdi_opt_dpath_notify_flt_rsvd(bool is_success); 3276 #endif 3277 #ifdef QCA_ENHANCED_STATS_SUPPORT 3278 /** 3279 * dp_peer_aggregate_tid_stats - aggregate rx tid stats 3280 * @peer: Data Path peer 3281 * 3282 * Return: void 3283 */ 3284 void dp_peer_aggregate_tid_stats(struct dp_peer *peer); 3285 #endif 3286 #else 3287 static inline void dp_peer_aggregate_tid_stats(struct dp_peer *peer) 3288 { 3289 } 3290 #endif 3291 3292 /** 3293 * dp_set_key_sec_type_wifi3() - set security mode of key 3294 * @soc: Datapath soc handle 3295 * @vdev_id: id of atapath vdev 3296 * @peer_mac: Datapath peer mac address 3297 * @sec_type: security type 3298 * @is_unicast: key type 3299 * 3300 */ 3301 QDF_STATUS 3302 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, 3303 uint8_t *peer_mac, enum cdp_sec_type sec_type, 3304 bool is_unicast); 3305 3306 /** 3307 * dp_get_pdev_for_mac_id() - Return pdev for mac_id 3308 * @soc: handle to DP soc 3309 * @mac_id: MAC id 3310 * 3311 * Return: Return pdev corresponding to MAC 3312 */ 3313 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id); 3314 3315 QDF_STATUS 3316 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id, 3317 uint8_t *peer_mac, 3318 bool is_unicast, uint32_t *key); 3319 3320 /** 3321 * dp_check_pdev_exists() - Validate pdev before use 3322 * @soc: dp soc handle 3323 * @data: pdev handle 3324 * 3325 * Return: 0 - success/invalid - failure 3326 */ 3327 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data); 3328 3329 /** 3330 * dp_update_delay_stats() - Update delay statistics in structure 3331 * and fill min, max and avg delay 3332 * @tstats: tid tx stats 3333 * @rstats: tid rx stats 3334 * @delay: delay in ms 3335 * @tid: tid value 3336 * @mode: type of tx delay mode 3337 * @ring_id: ring number 3338 * @delay_in_us: flag to indicate whether the delay is in ms or us 3339 * 3340 * Return: none 3341 */ 3342 void 
dp_update_delay_stats(struct cdp_tid_tx_stats *tstats, 3343 struct cdp_tid_rx_stats *rstats, uint32_t delay, 3344 uint8_t tid, uint8_t mode, uint8_t ring_id, 3345 bool delay_in_us); 3346 3347 /** 3348 * dp_print_ring_stats(): Print tail and head pointer 3349 * @pdev: DP_PDEV handle 3350 * 3351 * Return: void 3352 */ 3353 void dp_print_ring_stats(struct dp_pdev *pdev); 3354 3355 /** 3356 * dp_print_ring_stat_from_hal(): Print tail and head pointer through hal 3357 * @soc: soc handle 3358 * @srng: srng handle 3359 * @ring_type: ring type 3360 * 3361 * Return: void 3362 */ 3363 void 3364 dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng, 3365 enum hal_ring_type ring_type); 3366 3367 /** 3368 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters 3369 * @pdev: DP pdev handle 3370 * 3371 * Return: void 3372 */ 3373 void dp_print_pdev_cfg_params(struct dp_pdev *pdev); 3374 3375 /** 3376 * dp_print_soc_cfg_params()- Dump soc wlan config parameters 3377 * @soc: Soc handle 3378 * 3379 * Return: void 3380 */ 3381 void dp_print_soc_cfg_params(struct dp_soc *soc); 3382 3383 /** 3384 * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring 3385 * @ring_type: Ring 3386 * 3387 * Return: char const pointer 3388 */ 3389 const 3390 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type); 3391 3392 /** 3393 * dp_txrx_path_stats() - Function to display dump stats 3394 * @soc: soc handle 3395 * 3396 * Return: none 3397 */ 3398 void dp_txrx_path_stats(struct dp_soc *soc); 3399 3400 /** 3401 * dp_print_per_ring_stats(): Packet count per ring 3402 * @soc: soc handle 3403 * 3404 * Return: None 3405 */ 3406 void dp_print_per_ring_stats(struct dp_soc *soc); 3407 3408 /** 3409 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level 3410 * @pdev: DP PDEV handle 3411 * 3412 * Return: void 3413 */ 3414 void dp_aggregate_pdev_stats(struct dp_pdev *pdev); 3415 3416 /** 3417 * dp_print_rx_rates(): Print Rx rate stats 3418 * @vdev: DP_VDEV handle 3419 * 3420 * Return:void 3421 */ 3422 void dp_print_rx_rates(struct dp_vdev *vdev); 3423 3424 /** 3425 * dp_print_tx_rates(): Print tx rates 3426 * @vdev: DP_VDEV handle 3427 * 3428 * Return:void 3429 */ 3430 void dp_print_tx_rates(struct dp_vdev *vdev); 3431 3432 /** 3433 * dp_print_peer_stats():print peer stats 3434 * @peer: DP_PEER handle 3435 * @peer_stats: buffer holding peer stats 3436 * 3437 * return void 3438 */ 3439 void dp_print_peer_stats(struct dp_peer *peer, 3440 struct cdp_peer_stats *peer_stats); 3441 3442 /** 3443 * dp_print_pdev_tx_stats(): Print Pdev level TX stats 3444 * @pdev: DP_PDEV Handle 3445 * 3446 * Return:void 3447 */ 3448 void 3449 dp_print_pdev_tx_stats(struct dp_pdev *pdev); 3450 3451 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MCAST_MLO) 3452 /** 3453 * dp_print_vdev_mlo_mcast_tx_stats(): Print vdev level mlo mcast tx stats 3454 * @vdev: DP_VDEV Handle 3455 * 3456 * Return:void 3457 */ 3458 void 3459 dp_print_vdev_mlo_mcast_tx_stats(struct dp_vdev *vdev); 3460 #else 3461 /** 3462 * dp_print_vdev_mlo_mcast_tx_stats(): Print vdev level mlo mcast tx stats 3463 * @vdev: DP_VDEV Handle 3464 * 3465 * Return:void 3466 */ 3467 static inline 3468 void dp_print_vdev_mlo_mcast_tx_stats(struct dp_vdev *vdev) 3469 { 3470 } 3471 #endif 3472 3473 /** 3474 * dp_print_pdev_rx_stats(): Print Pdev level RX stats 3475 * @pdev: DP_PDEV Handle 3476 * 3477 * Return: void 3478 */ 3479 void 3480 dp_print_pdev_rx_stats(struct dp_pdev *pdev); 3481 3482 /** 3483 * dp_print_soc_tx_stats(): Print SOC level stats 
3484 * @soc: DP_SOC Handle 3485 * 3486 * Return: void 3487 */ 3488 void dp_print_soc_tx_stats(struct dp_soc *soc); 3489 3490 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX 3491 /** 3492 * dp_print_global_desc_count(): Print global desc in use 3493 * 3494 * Return: void 3495 */ 3496 void dp_print_global_desc_count(void); 3497 #else 3498 /** 3499 * dp_print_global_desc_count(): Print global desc in use 3500 * 3501 * Return: void 3502 */ 3503 static inline 3504 void dp_print_global_desc_count(void) 3505 { 3506 } 3507 #endif 3508 3509 /** 3510 * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc 3511 * @soc: dp_soc handle 3512 * 3513 * Return: None 3514 */ 3515 void dp_print_soc_interrupt_stats(struct dp_soc *soc); 3516 3517 /** 3518 * dp_print_tx_ppeds_stats() - Print Tx in use stats for the soc in DS 3519 * @soc: dp_soc handle 3520 * 3521 * Return: None 3522 */ 3523 3524 void dp_print_tx_ppeds_stats(struct dp_soc *soc); 3525 3526 /* REO destination ring's watermark mask */ 3527 #define DP_SRNG_WM_MASK_REO_DST BIT(REO_DST) 3528 /* TX completion ring's watermark mask */ 3529 #define DP_SRNG_WM_MASK_TX_COMP BIT(WBM2SW_RELEASE) 3530 /* All srng's watermark mask */ 3531 #define DP_SRNG_WM_MASK_ALL 0xFFFFFFFF 3532 3533 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING 3534 /** 3535 * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats 3536 * for all SRNGs 3537 * @soc: DP soc handle 3538 * @srng_mask: SRNGs mask for dumping usage watermark stats 3539 * 3540 * Return: None 3541 */ 3542 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask); 3543 #else 3544 static inline 3545 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask) 3546 { 3547 } 3548 #endif 3549 3550 /** 3551 * dp_print_soc_rx_stats() - Print SOC level Rx stats 3552 * @soc: DP_SOC Handle 3553 * 3554 * Return: void 3555 */ 3556 void dp_print_soc_rx_stats(struct dp_soc *soc); 3557 3558 /** 3559 * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac 3560 * 3561 * @mac_id: MAC id 3562 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 3563 * 3564 * Single pdev using both MACs will operate on both MAC rings, 3565 * which is the case for MCL. 3566 * For WIN each PDEV will operate one ring, so index is zero. 3567 * 3568 */ 3569 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id) 3570 { 3571 if (mac_id && pdev_id) { 3572 qdf_print("Both mac_id and pdev_id cannot be non zero"); 3573 QDF_BUG(0); 3574 return 0; 3575 } 3576 return (mac_id + pdev_id); 3577 } 3578 3579 /** 3580 * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id 3581 * @soc: soc pointer 3582 * @mac_id: MAC id 3583 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 3584 * 3585 * For MCL, Single pdev using both MACs will operate on both MAC rings. 3586 * 3587 * For WIN, each PDEV will operate one ring. 3588 * 3589 */ 3590 static inline int 3591 dp_get_lmac_id_for_pdev_id 3592 (struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id) 3593 { 3594 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3595 if (mac_id && pdev_id) { 3596 qdf_print("Both mac_id and pdev_id cannot be non zero"); 3597 QDF_BUG(0); 3598 return 0; 3599 } 3600 return (mac_id + pdev_id); 3601 } 3602 3603 return soc->pdev_list[pdev_id]->lmac_id; 3604 } 3605 3606 /** 3607 * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id 3608 * @soc: soc pointer 3609 * @lmac_id: LMAC id 3610 * 3611 * For MCL, Single pdev exists 3612 * 3613 * For WIN, each PDEV will operate one ring. 
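 * Return: handle to the dp pdev mapped to the lmac_id; NULL if no pdev is found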
3614 * 3615 */ 3616 static inline struct dp_pdev * 3617 dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id) 3618 { 3619 uint8_t i = 0; 3620 3621 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3622 i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id); 3623 return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL); 3624 } 3625 3626 /* Typically for MCL as there only 1 PDEV*/ 3627 return soc->pdev_list[0]; 3628 } 3629 3630 /** 3631 * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev 3632 * corresponding to host pdev id 3633 * @soc: soc pointer 3634 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 3635 * 3636 * Return: target pdev_id for host pdev id. For WIN, this is derived through 3637 * a two step process: 3638 * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change 3639 * during mode switch) 3640 * 2. Get target pdev_id (set up during WMI ready) from lmac_id 3641 * 3642 * For MCL, return the offset-1 translated mac_id 3643 */ 3644 static inline int 3645 dp_calculate_target_pdev_id_from_host_pdev_id 3646 (struct dp_soc *soc, uint32_t mac_for_pdev) 3647 { 3648 struct dp_pdev *pdev; 3649 3650 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3651 return DP_SW2HW_MACID(mac_for_pdev); 3652 3653 pdev = soc->pdev_list[mac_for_pdev]; 3654 3655 /*non-MCL case, get original target_pdev mapping*/ 3656 return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id); 3657 } 3658 3659 /** 3660 * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding 3661 * to host pdev id 3662 * @soc: soc pointer 3663 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 3664 * 3665 * Return: target pdev_id for host pdev id. 3666 * For WIN, return the value stored in pdev object. 3667 * For MCL, return the offset-1 translated mac_id. 3668 */ 3669 static inline int 3670 dp_get_target_pdev_id_for_host_pdev_id 3671 (struct dp_soc *soc, uint32_t mac_for_pdev) 3672 { 3673 struct dp_pdev *pdev; 3674 3675 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3676 return DP_SW2HW_MACID(mac_for_pdev); 3677 3678 pdev = soc->pdev_list[mac_for_pdev]; 3679 3680 return pdev->target_pdev_id; 3681 } 3682 3683 /** 3684 * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding 3685 * to target pdev id 3686 * @soc: soc pointer 3687 * @pdev_id: pdev_id corresponding to target pdev 3688 * 3689 * Return: host pdev_id for target pdev id. For WIN, this is derived through 3690 * a two step process: 3691 * 1. Get lmac_id corresponding to target pdev_id 3692 * 2. Get host pdev_id (set up during WMI ready) from lmac_id 3693 * 3694 * For MCL, return the 0-offset pdev_id 3695 */ 3696 static inline int 3697 dp_get_host_pdev_id_for_target_pdev_id 3698 (struct dp_soc *soc, uint32_t pdev_id) 3699 { 3700 struct dp_pdev *pdev; 3701 int lmac_id; 3702 3703 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3704 return DP_HW2SW_MACID(pdev_id); 3705 3706 /*non-MCL case, get original target_lmac mapping from target pdev*/ 3707 lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, 3708 DP_HW2SW_MACID(pdev_id)); 3709 3710 /*Get host pdev from lmac*/ 3711 pdev = dp_get_pdev_for_lmac_id(soc, lmac_id); 3712 3713 return pdev ? pdev->pdev_id : INVALID_PDEV_ID; 3714 } 3715 3716 /** 3717 * dp_get_mac_id_for_mac() - Return mac corresponding WIN and MCL mac_ids 3718 * 3719 * @soc: handle to DP soc 3720 * @mac_id: MAC id 3721 * 3722 * Single pdev using both MACs will operate on both MAC rings, 3723 * which is the case for MCL. 
3724 * For WIN each PDEV will operate one ring, so index is zero. 3725 * 3726 */ 3727 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id) 3728 { 3729 /* 3730 * Single pdev using both MACs will operate on both MAC rings, 3731 * which is the case for MCL. 3732 */ 3733 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3734 return mac_id; 3735 3736 /* For WIN each PDEV will operate one ring, so index is zero. */ 3737 return 0; 3738 } 3739 3740 /** 3741 * dp_is_subtype_data() - check if the frame subtype is data 3742 * 3743 * @frame_ctrl: Frame control field 3744 * 3745 * Check the frame control field and verify if the packet 3746 * is a data packet. 3747 * 3748 * Return: true or false 3749 */ 3750 static inline bool dp_is_subtype_data(uint16_t frame_ctrl) 3751 { 3752 if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) == 3753 QDF_IEEE80211_FC0_TYPE_DATA) && 3754 (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 3755 QDF_IEEE80211_FC0_SUBTYPE_DATA) || 3756 ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 3757 QDF_IEEE80211_FC0_SUBTYPE_QOS))) { 3758 return true; 3759 } 3760 3761 return false; 3762 } 3763 3764 #ifdef WDI_EVENT_ENABLE 3765 /** 3766 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW 3767 * @pdev: DP PDEV handle 3768 * @stats_type_upload_mask: stats type requested by user 3769 * @mac_id: Mac id number 3770 * 3771 * Return: QDF_STATUS 3772 */ 3773 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 3774 uint32_t stats_type_upload_mask, 3775 uint8_t mac_id); 3776 3777 /** 3778 * dp_wdi_event_unsub() - WDI event unsubscribe 3779 * @soc: soc handle 3780 * @pdev_id: id of pdev 3781 * @event_cb_sub_handle: subscribed event handle 3782 * @event: Event to be unsubscribed 3783 * 3784 * Return: 0 for success. nonzero for failure. 3785 */ 3786 int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 3787 wdi_event_subscribe *event_cb_sub_handle, 3788 uint32_t event); 3789 3790 /** 3791 * dp_wdi_event_sub() - Subscribe WDI event 3792 * @soc: soc handle 3793 * @pdev_id: id of pdev 3794 * @event_cb_sub_handle: event subscription handle 3795 * @event: Event to be subscribed 3796 * 3797 * Return: 0 for success. nonzero for failure. 3798 */ 3799 int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 3800 wdi_event_subscribe *event_cb_sub_handle, 3801 uint32_t event); 3802 3803 /** 3804 * dp_wdi_event_handler() - Event handler for WDI event 3805 * @event: wdi event number 3806 * @soc: soc handle 3807 * @data: pointer to data 3808 * @peer_id: peer id number 3809 * @status: HTT rx status 3810 * @pdev_id: id of pdev 3811 * 3812 * Called when a WDI event is raised, to invoke the callbacks registered for that event 3813 * 3814 * Return: None 3815 */ 3816 void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc, 3817 void *data, u_int16_t peer_id, 3818 int status, u_int8_t pdev_id); 3819 3820 /** 3821 * dp_wdi_event_attach() - Attach wdi event 3822 * @txrx_pdev: DP pdev handle 3823 * 3824 * Return: 0 for success. nonzero for failure. 3825 */ 3826 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev); 3827 3828 /** 3829 * dp_wdi_event_detach() - Detach WDI event 3830 * @txrx_pdev: DP pdev handle 3831 * 3832 * Return: 0 for success. nonzero for failure.
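 *
 * Illustrative attach/subscribe lifecycle (a sketch only; the subscription
 * object and the event id shown are example values, not mandated by this
 * header):
 *
 *	dp_wdi_event_attach(pdev);
 *	dp_wdi_event_sub(cdp_soc, pdev_id, &my_sub, WDI_EVENT_RX_DESC);
 *	// datapath invokes dp_wdi_event_handler() as events are raised
 *	dp_wdi_event_unsub(cdp_soc, pdev_id, &my_sub, WDI_EVENT_RX_DESC);
 *	dp_wdi_event_detach(pdev);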
3833 */ 3834 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev); 3835 3836 static inline void 3837 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, 3838 void *cb_context, 3839 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 3840 uint8_t pipe_id) 3841 { 3842 struct hif_msg_callbacks hif_pipe_callbacks = { 0 }; 3843 3844 /* TODO: Temporary change to bypass HTC connection for this new 3845 * HIF pipe, which will be used for packet log and other high- 3846 * priority HTT messages. Proper HTC connection to be added 3847 * later once required FW changes are available 3848 */ 3849 hif_pipe_callbacks.rxCompletionHandler = callback; 3850 hif_pipe_callbacks.Context = cb_context; 3851 hif_update_pipe_callback(dp_soc->hif_handle, 3852 DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks); 3853 } 3854 #else 3855 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 3856 wdi_event_subscribe *event_cb_sub_handle, 3857 uint32_t event) 3858 { 3859 return 0; 3860 } 3861 3862 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 3863 wdi_event_subscribe *event_cb_sub_handle, 3864 uint32_t event) 3865 { 3866 return 0; 3867 } 3868 3869 static inline 3870 void dp_wdi_event_handler(enum WDI_EVENT event, 3871 struct dp_soc *soc, 3872 void *data, u_int16_t peer_id, 3873 int status, u_int8_t pdev_id) 3874 { 3875 } 3876 3877 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev) 3878 { 3879 return 0; 3880 } 3881 3882 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev) 3883 { 3884 return 0; 3885 } 3886 3887 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 3888 uint32_t stats_type_upload_mask, uint8_t mac_id) 3889 { 3890 return 0; 3891 } 3892 3893 static inline void 3894 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, 3895 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 3896 uint8_t pipe_id) 3897 { 3898 } 3899 #endif 3900 3901 #ifdef VDEV_PEER_PROTOCOL_COUNT 3902 /** 3903 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 3904 * @vdev: VDEV DP object 3905 * @nbuf: data packet 3906 * @txrx_peer: DP TXRX Peer object 3907 * @is_egress: whether egress or ingress 3908 * @is_rx: whether rx or tx 3909 * 3910 * This function updates the per-peer protocol counters 3911 * Return: void 3912 */ 3913 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, 3914 qdf_nbuf_t nbuf, 3915 struct dp_txrx_peer *txrx_peer, 3916 bool is_egress, 3917 bool is_rx); 3918 3919 /** 3920 * dp_peer_stats_update_protocol_cnt() - update per-peer protocol counters 3921 * @soc: SOC DP object 3922 * @vdev_id: vdev_id 3923 * @nbuf: data packet 3924 * @is_egress: whether egress or ingress 3925 * @is_rx: whether rx or tx 3926 * 3927 * This function updates the per-peer protocol counters 3928 * 3929 * Return: void 3930 */ 3931 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc, 3932 int8_t vdev_id, 3933 qdf_nbuf_t nbuf, 3934 bool is_egress, 3935 bool is_rx); 3936 3937 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 3938 qdf_nbuf_t nbuf); 3939 3940 #else 3941 #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, txrx_peer, \ 3942 is_egress, is_rx) 3943 3944 static inline 3945 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 3946 qdf_nbuf_t nbuf) 3947 { 3948 } 3949 3950 #endif 3951 3952 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3953 /** 3954 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info 3955 * @soc_hdl: Handle to struct cdp_soc 3956 * 
3957 * Return: none 3958 */ 3959 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl); 3960 3961 /** 3962 * dp_tx_dump_flow_pool_info_compact() - dump flow pool info 3963 * @soc: DP soc context 3964 * 3965 * Return: none 3966 */ 3967 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc); 3968 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, 3969 bool force); 3970 #else 3971 static inline void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc) 3972 { 3973 } 3974 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 3975 3976 #ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS 3977 static inline int 3978 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3979 { 3980 return hal_srng_access_start_unlocked(soc, hal_ring_hdl); 3981 } 3982 3983 static inline void 3984 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3985 { 3986 hal_srng_access_end_unlocked(soc, hal_ring_hdl); 3987 } 3988 3989 #else 3990 static inline int 3991 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3992 { 3993 return hal_srng_access_start(soc, hal_ring_hdl); 3994 } 3995 3996 static inline void 3997 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3998 { 3999 hal_srng_access_end(soc, hal_ring_hdl); 4000 } 4001 #endif 4002 4003 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 4004 /** 4005 * dp_srng_access_start() - Wrapper function to log access start of a hal ring 4006 * @int_ctx: pointer to DP interrupt context. This should not be NULL 4007 * @dp_soc: DP Soc handle 4008 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be 4009 * serviced 4010 * 4011 * Return: 0 on success; error on failure 4012 */ 4013 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 4014 hal_ring_handle_t hal_ring_hdl); 4015 4016 /** 4017 * dp_srng_access_end() - Wrapper function to log access end of a hal ring 4018 * @int_ctx: pointer to DP interrupt context. 
This should not be NULL 4019 * @dp_soc: DP Soc handle 4020 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be 4021 * serviced 4022 * 4023 * Return: void 4024 */ 4025 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 4026 hal_ring_handle_t hal_ring_hdl); 4027 4028 #else 4029 static inline int dp_srng_access_start(struct dp_intr *int_ctx, 4030 struct dp_soc *dp_soc, 4031 hal_ring_handle_t hal_ring_hdl) 4032 { 4033 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 4034 4035 return dp_hal_srng_access_start(hal_soc, hal_ring_hdl); 4036 } 4037 4038 static inline void dp_srng_access_end(struct dp_intr *int_ctx, 4039 struct dp_soc *dp_soc, 4040 hal_ring_handle_t hal_ring_hdl) 4041 { 4042 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 4043 4044 return dp_hal_srng_access_end(hal_soc, hal_ring_hdl); 4045 } 4046 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 4047 4048 #ifdef QCA_CACHED_RING_DESC 4049 /** 4050 * dp_srng_dst_get_next() - Wrapper function to get next ring desc 4051 * @dp_soc: DP Soc handle 4052 * @hal_ring_hdl: opaque pointer to the HAL Destination Ring 4053 * 4054 * Return: HAL ring descriptor 4055 */ 4056 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 4057 hal_ring_handle_t hal_ring_hdl) 4058 { 4059 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 4060 4061 return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl); 4062 } 4063 4064 /** 4065 * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached 4066 * descriptors 4067 * @dp_soc: DP Soc handle 4068 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 4069 * @num_entries: Entry count 4070 * 4071 * Return: None 4072 */ 4073 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 4074 hal_ring_handle_t hal_ring_hdl, 4075 uint32_t num_entries) 4076 { 4077 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 4078 4079 hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries); 4080 } 4081 #else 4082 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 4083 hal_ring_handle_t hal_ring_hdl) 4084 { 4085 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 4086 4087 return hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 4088 } 4089 4090 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 4091 hal_ring_handle_t hal_ring_hdl, 4092 uint32_t num_entries) 4093 { 4094 } 4095 #endif /* QCA_CACHED_RING_DESC */ 4096 4097 #if defined(QCA_CACHED_RING_DESC) && \ 4098 (defined(QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH) || \ 4099 defined(QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH)) 4100 /** 4101 * dp_srng_dst_prefetch() - Wrapper function to prefetch descs from dest ring 4102 * @hal_soc: HAL SOC handle 4103 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 4104 * @num_entries: Entry count 4105 * 4106 * Return: None 4107 */ 4108 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 4109 hal_ring_handle_t hal_ring_hdl, 4110 uint32_t num_entries) 4111 { 4112 return hal_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_entries); 4113 } 4114 4115 /** 4116 * dp_srng_dst_prefetch_32_byte_desc() - Wrapper function to prefetch 4117 * 32 byte descriptor starting at 4118 * 64 byte offset 4119 * @hal_soc: HAL SOC handle 4120 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 4121 * @num_entries: Entry count 4122 * 4123 * Return: None 4124 */ 4125 static inline 4126 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 4127 hal_ring_handle_t hal_ring_hdl, 4128 uint32_t num_entries) 4129 { 4130 return 
hal_srng_dst_prefetch_32_byte_desc(hal_soc, hal_ring_hdl, 4131 num_entries); 4132 } 4133 #else 4134 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 4135 hal_ring_handle_t hal_ring_hdl, 4136 uint32_t num_entries) 4137 { 4138 return NULL; 4139 } 4140 4141 static inline 4142 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 4143 hal_ring_handle_t hal_ring_hdl, 4144 uint32_t num_entries) 4145 { 4146 return NULL; 4147 } 4148 #endif 4149 4150 #ifdef QCA_ENH_V3_STATS_SUPPORT 4151 /** 4152 * dp_pdev_print_delay_stats(): Print pdev level delay stats 4153 * @pdev: DP_PDEV handle 4154 * 4155 * Return:void 4156 */ 4157 void dp_pdev_print_delay_stats(struct dp_pdev *pdev); 4158 4159 /** 4160 * dp_pdev_print_tid_stats(): Print pdev level tid stats 4161 * @pdev: DP_PDEV handle 4162 * 4163 * Return:void 4164 */ 4165 void dp_pdev_print_tid_stats(struct dp_pdev *pdev); 4166 4167 /** 4168 * dp_pdev_print_rx_error_stats(): Print pdev level rx error stats 4169 * @pdev: DP_PDEV handle 4170 * 4171 * Return:void 4172 */ 4173 void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev); 4174 #endif /* QCA_ENH_V3_STATS_SUPPORT */ 4175 4176 /** 4177 * dp_pdev_get_tid_stats(): Get accumulated pdev level tid_stats 4178 * @soc_hdl: soc handle 4179 * @pdev_id: id of dp_pdev handle 4180 * @tid_stats: Pointer for cdp_tid_stats_intf 4181 * 4182 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_INVAL 4183 */ 4184 QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4185 struct cdp_tid_stats_intf *tid_stats); 4186 4187 /** 4188 * dp_soc_set_txrx_ring_map() 4189 * @soc: DP handler for soc 4190 * 4191 * Return: Void 4192 */ 4193 void dp_soc_set_txrx_ring_map(struct dp_soc *soc); 4194 4195 /** 4196 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev 4197 * @vdev: DP vdev handle 4198 * 4199 * Return: struct cdp_vdev pointer 4200 */ 4201 static inline 4202 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev) 4203 { 4204 return (struct cdp_vdev *)vdev; 4205 } 4206 4207 /** 4208 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev 4209 * @pdev: DP pdev handle 4210 * 4211 * Return: struct cdp_pdev pointer 4212 */ 4213 static inline 4214 struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev) 4215 { 4216 return (struct cdp_pdev *)pdev; 4217 } 4218 4219 /** 4220 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc 4221 * @psoc: DP psoc handle 4222 * 4223 * Return: struct cdp_soc pointer 4224 */ 4225 static inline 4226 struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc) 4227 { 4228 return (struct cdp_soc *)psoc; 4229 } 4230 4231 /** 4232 * dp_soc_to_cdp_soc_t() - typecast dp psoc to ol txrx soc handle 4233 * @psoc: DP psoc handle 4234 * 4235 * Return: struct cdp_soc_t pointer 4236 */ 4237 static inline 4238 struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc) 4239 { 4240 return (struct cdp_soc_t *)psoc; 4241 } 4242 4243 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) 4244 /** 4245 * dp_rx_flow_get_fse_stats() - Retrieve a flow's statistics 4246 * @pdev: pdev handle 4247 * @rx_flow_info: flow information in the Rx FST 4248 * @stats: stats to update 4249 * 4250 * Return: Success when flow statistcs is updated, error on failure 4251 */ 4252 QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev, 4253 struct cdp_rx_flow_info *rx_flow_info, 4254 struct cdp_flow_stats *stats); 4255 4256 /** 4257 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table 4258 * @pdev: pdev handle 4259 * @rx_flow_info: DP flow parameters 4260 * 4261 * Return: Success when flow is 
deleted, error on failure 4262 */ 4263 QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev, 4264 struct cdp_rx_flow_info *rx_flow_info); 4265 4266 /** 4267 * dp_rx_flow_add_entry() - Add a flow entry to flow search table 4268 * @pdev: DP pdev instance 4269 * @rx_flow_info: DP flow parameters 4270 * 4271 * Return: Success when flow is added, no-memory or already exists on error 4272 */ 4273 QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev, 4274 struct cdp_rx_flow_info *rx_flow_info); 4275 4276 /** 4277 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters 4278 * @soc: SoC handle 4279 * @pdev: Pdev handle 4280 * 4281 * Return: Success when the Rx FST is attached, error on failure 4282 */ 4283 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev); 4284 4285 /** 4286 * dp_rx_fst_detach() - De-initialize Rx FST 4287 * @soc: SoC handle 4288 * @pdev: Pdev handle 4289 * 4290 * Return: None 4291 */ 4292 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev); 4293 4294 /** 4295 * dp_mon_rx_update_rx_flow_tag_stats() - Update a mon flow's statistics 4296 * @pdev: pdev handle 4297 * @flow_id: flow index (truncated hash) in the Rx FST 4298 * 4299 * Return: Success when flow statistics is updated, error on failure 4300 */ 4301 QDF_STATUS 4302 dp_mon_rx_update_rx_flow_tag_stats(struct dp_pdev *pdev, uint32_t flow_id); 4303 #endif 4304 4305 #ifdef WLAN_SUPPORT_RX_FLOW_TAG 4306 /** 4307 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach 4308 * @soc: SoC handle 4309 * @pdev: Pdev handle 4310 * 4311 * Return: Success when fst parameters are programmed in FW, error otherwise 4312 */ 4313 QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc, 4314 struct dp_pdev *pdev); 4315 #endif 4316 4317 /** 4318 * dp_rx_fst_attach_wrapper() - wrapper API for dp_rx_fst_attach 4319 * @soc: SoC handle 4320 * @pdev: Pdev handle 4321 * 4322 * Return: Success when the Rx FST is attached, error on failure 4323 */ 4324 extern QDF_STATUS 4325 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev); 4326 4327 /** 4328 * dp_rx_fst_detach_wrapper() - wrapper API for dp_rx_fst_detach 4329 * @soc: SoC handle 4330 * @pdev: Pdev handle 4331 * 4332 * Return: None 4333 */ 4334 extern void 4335 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev); 4336 4337 /** 4338 * dp_vdev_get_ref() - API to take a reference for VDEV object 4339 * 4340 * @soc: core DP soc context 4341 * @vdev: DP vdev 4342 * @mod_id: module id 4343 * 4344 * Return: QDF_STATUS_SUCCESS if reference held successfully 4345 * else QDF_STATUS_E_INVAL 4346 */ 4347 static inline 4348 QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev, 4349 enum dp_mod_id mod_id) 4350 { 4351 if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt)) 4352 return QDF_STATUS_E_INVAL; 4353 4354 qdf_atomic_inc(&vdev->mod_refs[mod_id]); 4355 4356 return QDF_STATUS_SUCCESS; 4357 } 4358 4359 /** 4360 * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id 4361 * @soc: core DP soc context 4362 * @vdev_id: vdev id from which the vdev object can be retrieved 4363 * @mod_id: module id which is requesting the reference 4364 * 4365 * Return: struct dp_vdev*: Pointer to DP vdev object 4366 */ 4367 static inline struct dp_vdev * 4368 dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id, 4369 enum dp_mod_id mod_id) 4370 { 4371 struct dp_vdev *vdev = NULL; 4372 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) 4373 return NULL; 4374 4375 qdf_spin_lock_bh(&soc->vdev_map_lock); 4376 vdev = soc->vdev_id_map[vdev_id]; 4377 4378 if (!vdev ||
dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) { 4379 qdf_spin_unlock_bh(&soc->vdev_map_lock); 4380 return NULL; 4381 } 4382 qdf_spin_unlock_bh(&soc->vdev_map_lock); 4383 4384 return vdev; 4385 } 4386 4387 /** 4388 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id 4389 * @soc: core DP soc context 4390 * @pdev_id: pdev id from pdev object can be retrieved 4391 * 4392 * Return: struct dp_pdev*: Pointer to DP pdev object 4393 */ 4394 static inline struct dp_pdev * 4395 dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc, 4396 uint8_t pdev_id) 4397 { 4398 if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT)) 4399 return NULL; 4400 4401 return soc->pdev_list[pdev_id]; 4402 } 4403 4404 /** 4405 * dp_get_peer_mac_list(): function to get peer mac list of vdev 4406 * @soc: Datapath soc handle 4407 * @vdev_id: vdev id 4408 * @newmac: Table of the clients mac 4409 * @mac_cnt: No. of MACs required 4410 * @limit: Limit the number of clients 4411 * 4412 * Return: no of clients 4413 */ 4414 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id, 4415 u_int8_t newmac[][QDF_MAC_ADDR_SIZE], 4416 u_int16_t mac_cnt, bool limit); 4417 4418 /** 4419 * dp_update_num_mac_rings_for_dbs() - Update No of MAC rings based on 4420 * DBS check 4421 * @soc: DP SoC context 4422 * @max_mac_rings: Pointer to variable for No of MAC rings 4423 * 4424 * Return: None 4425 */ 4426 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc, 4427 int *max_mac_rings); 4428 4429 4430 #if defined(WLAN_SUPPORT_RX_FISA) 4431 /** 4432 * dp_rx_fst_update_cmem_params() - Update CMEM FST params 4433 * @soc: DP SoC context 4434 * @num_entries: Number of flow search entries 4435 * @cmem_ba_lo: CMEM base address low 4436 * @cmem_ba_hi: CMEM base address high 4437 * 4438 * Return: None 4439 */ 4440 void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries, 4441 uint32_t cmem_ba_lo, uint32_t cmem_ba_hi); 4442 4443 /** 4444 * dp_fisa_config() - FISA config handler 4445 * @cdp_soc: CDP SoC handle 4446 * @pdev_id: PDEV ID 4447 * @config_id: FISA config ID 4448 * @cfg: FISA config msg data 4449 */ 4450 QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id, 4451 enum cdp_fisa_config_id config_id, 4452 union cdp_fisa_config *cfg); 4453 #else 4454 static inline void 4455 dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries, 4456 uint32_t cmem_ba_lo, uint32_t cmem_ba_hi) 4457 { 4458 } 4459 #endif /* WLAN_SUPPORT_RX_FISA */ 4460 4461 #ifdef MAX_ALLOC_PAGE_SIZE 4462 /** 4463 * dp_set_max_page_size() - Set the max page size for hw link desc. 4464 * @pages: link desc page handle 4465 * @max_alloc_size: max_alloc_size 4466 * 4467 * For MCL the page size is set to OS defined value and for WIN 4468 * the page size is set to the max_alloc_size cfg ini 4469 * param. 4470 * This is to ensure that WIN gets contiguous memory allocations 4471 * as per requirement. 4472 * 4473 * Return: None 4474 */ 4475 static inline 4476 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages, 4477 uint32_t max_alloc_size) 4478 { 4479 pages->page_size = qdf_page_size; 4480 } 4481 4482 #else 4483 static inline 4484 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages, 4485 uint32_t max_alloc_size) 4486 { 4487 pages->page_size = max_alloc_size; 4488 } 4489 #endif /* MAX_ALLOC_PAGE_SIZE */ 4490 4491 /** 4492 * dp_history_get_next_index() - get the next entry to record an entry 4493 * in the history. 4494 * @curr_idx: Current index where the last entry is written. 
4495 * @max_entries: Max number of entries in the history 4496 * 4497 * This function assumes that the max number os entries is a power of 2. 4498 * 4499 * Return: The index where the next entry is to be written. 4500 */ 4501 static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx, 4502 uint32_t max_entries) 4503 { 4504 uint32_t idx = qdf_atomic_inc_return(curr_idx); 4505 4506 return idx & (max_entries - 1); 4507 } 4508 4509 /** 4510 * dp_rx_skip_tlvs() - Skip TLVs len + L3 padding, save in nbuf->cb 4511 * @soc: Datapath soc handle 4512 * @nbuf: nbuf cb to be updated 4513 * @l3_padding: L3 padding 4514 * 4515 * Return: None 4516 */ 4517 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding); 4518 4519 #ifndef FEATURE_WDS 4520 static inline void 4521 dp_hmwds_ast_add_notify(struct dp_peer *peer, 4522 uint8_t *mac_addr, 4523 enum cdp_txrx_ast_entry_type type, 4524 QDF_STATUS err, 4525 bool is_peer_map) 4526 { 4527 } 4528 #endif 4529 4530 #ifdef HTT_STATS_DEBUGFS_SUPPORT 4531 /** 4532 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 4533 * debugfs for HTT stats 4534 * @pdev: dp pdev handle 4535 * 4536 * Return: QDF_STATUS 4537 */ 4538 QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev); 4539 4540 /** 4541 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 4542 * HTT stats 4543 * @pdev: dp pdev handle 4544 * 4545 * Return: none 4546 */ 4547 void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev); 4548 #else 4549 4550 /** 4551 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 4552 * debugfs for HTT stats 4553 * @pdev: dp pdev handle 4554 * 4555 * Return: QDF_STATUS 4556 */ 4557 static inline QDF_STATUS 4558 dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev) 4559 { 4560 return QDF_STATUS_SUCCESS; 4561 } 4562 4563 /** 4564 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 4565 * HTT stats 4566 * @pdev: dp pdev handle 4567 * 4568 * Return: none 4569 */ 4570 static inline void 4571 dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev) 4572 { 4573 } 4574 #endif /* HTT_STATS_DEBUGFS_SUPPORT */ 4575 4576 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR 4577 /** 4578 * dp_soc_swlm_attach() - attach the software latency manager resources 4579 * @soc: Datapath global soc handle 4580 * 4581 * Return: QDF_STATUS 4582 */ 4583 static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc) 4584 { 4585 return QDF_STATUS_SUCCESS; 4586 } 4587 4588 /** 4589 * dp_soc_swlm_detach() - detach the software latency manager resources 4590 * @soc: Datapath global soc handle 4591 * 4592 * Return: QDF_STATUS 4593 */ 4594 static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc) 4595 { 4596 return QDF_STATUS_SUCCESS; 4597 } 4598 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */ 4599 4600 #ifndef WLAN_DP_PROFILE_SUPPORT 4601 static inline void wlan_dp_soc_cfg_sync_profile(struct cdp_soc_t *cdp_soc) {} 4602 4603 static inline void wlan_dp_pdev_cfg_sync_profile(struct cdp_soc_t *cdp_soc, 4604 uint8_t pdev_id) {} 4605 #endif 4606 4607 /** 4608 * dp_get_peer_id(): function to get peer id by mac 4609 * @soc: Datapath soc handle 4610 * @vdev_id: vdev id 4611 * @mac: Peer mac address 4612 * 4613 * Return: valid peer id on success 4614 * HTT_INVALID_PEER on failure 4615 */ 4616 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac); 4617 4618 #ifdef QCA_SUPPORT_WDS_EXTENDED 4619 /** 4620 * dp_wds_ext_set_peer_rx(): function to set peer rx handler 4621 * @soc: 
Datapath soc handle 4622 * @vdev_id: vdev id 4623 * @mac: Peer mac address 4624 * @rx: rx function pointer 4625 * @osif_peer: OSIF peer handle 4626 * 4627 * Return: QDF_STATUS_SUCCESS on success 4628 * QDF_STATUS_E_INVAL if peer is not found 4629 * QDF_STATUS_E_ALREADY if rx is already set/unset 4630 */ 4631 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc, 4632 uint8_t vdev_id, 4633 uint8_t *mac, 4634 ol_txrx_rx_fp rx, 4635 ol_osif_peer_handle osif_peer); 4636 4637 /** 4638 * dp_wds_ext_get_peer_osif_handle(): function to get peer osif handle 4639 * @soc: Datapath soc handle 4640 * @vdev_id: vdev id 4641 * @mac: Peer mac address 4642 * @osif_peer: OSIF peer handle 4643 * 4644 * Return: QDF_STATUS_SUCCESS on success 4645 * QDF_STATUS_E_INVAL if peer is not found 4646 */ 4647 QDF_STATUS dp_wds_ext_get_peer_osif_handle( 4648 ol_txrx_soc_handle soc, 4649 uint8_t vdev_id, 4650 uint8_t *mac, 4651 ol_osif_peer_handle *osif_peer); 4652 4653 /** 4654 * dp_wds_ext_set_peer_bit(): function to set wds-ext peer bit 4655 * @soc: Datapath soc handle 4656 * @mac: Peer mac address 4657 * 4658 * Return: QDF_STATUS_SUCCESS on success 4659 * QDF_STATUS_E_INVAL if peer is not found 4660 */ 4661 QDF_STATUS dp_wds_ext_set_peer_bit(ol_txrx_soc_handle soc, uint8_t *mac); 4662 4663 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 4664 4665 #ifdef DP_MEM_PRE_ALLOC 4666 4667 /** 4668 * dp_context_alloc_mem() - allocate memory for DP context 4669 * @soc: datapath soc handle 4670 * @ctxt_type: DP context type 4671 * @ctxt_size: DP context size 4672 * 4673 * Return: DP context address 4674 */ 4675 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4676 size_t ctxt_size); 4677 4678 /** 4679 * dp_context_free_mem() - Free memory of DP context 4680 * @soc: datapath soc handle 4681 * @ctxt_type: DP context type 4682 * @vaddr: Address of context memory 4683 * 4684 * Return: None 4685 */ 4686 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4687 void *vaddr); 4688 4689 /** 4690 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages 4691 * @soc: datapath soc handle 4692 * @desc_type: memory request source type 4693 * @pages: multi page information storage 4694 * @element_size: each element size 4695 * @element_num: total number of elements to be allocated 4696 * @memctxt: memory context 4697 * @cacheable: coherent memory or cacheable memory 4698 * 4699 * This function is a wrapper for memory allocation over multiple 4700 * pages. If a dp prealloc method is registered, prealloc is attempted 4701 * first; if that fails, it falls back to the regular allocation via 4702 * qdf_mem_multi_pages_alloc(). 4703 * 4704 * Return: None 4705 */ 4706 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 4707 enum qdf_dp_desc_type desc_type, 4708 struct qdf_mem_multi_page_t *pages, 4709 size_t element_size, 4710 uint32_t element_num, 4711 qdf_dma_context_t memctxt, 4712 bool cacheable); 4713 4714 /** 4715 * dp_desc_multi_pages_mem_free() - free multiple pages memory 4716 * @soc: datapath soc handle 4717 * @desc_type: memory request source type 4718 * @pages: multi page information storage 4719 * @memctxt: memory context 4720 * @cacheable: coherent memory or cacheable memory 4721 * 4722 * This function is a wrapper for multi-page memory free. If the memory 4723 * came from the prealloc pool, it is put back into the pool; otherwise it is freed via qdf_mem_multi_pages_free().
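 *
 * Illustrative pairing with the alloc wrapper (a sketch only; the descriptor
 * type, element size, element count and memory context shown are example
 * values, not requirements of this API):
 *
 *	struct qdf_mem_multi_page_t pages = { 0 };
 *
 *	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_DESC_TYPE, &pages,
 *				      sizeof(struct dp_tx_desc_s), 1024,
 *				      0, false);
 *	...
 *	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_DESC_TYPE, &pages,
 *				     0, false);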
4725 * 4726 * Return: None 4727 */ 4728 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 4729 enum qdf_dp_desc_type desc_type, 4730 struct qdf_mem_multi_page_t *pages, 4731 qdf_dma_context_t memctxt, 4732 bool cacheable); 4733 4734 #else 4735 static inline 4736 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4737 size_t ctxt_size) 4738 { 4739 return qdf_mem_malloc(ctxt_size); 4740 } 4741 4742 static inline 4743 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4744 void *vaddr) 4745 { 4746 qdf_mem_free(vaddr); 4747 } 4748 4749 static inline 4750 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 4751 enum qdf_dp_desc_type desc_type, 4752 struct qdf_mem_multi_page_t *pages, 4753 size_t element_size, 4754 uint32_t element_num, 4755 qdf_dma_context_t memctxt, 4756 bool cacheable) 4757 { 4758 qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size, 4759 element_num, memctxt, cacheable); 4760 } 4761 4762 static inline 4763 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 4764 enum qdf_dp_desc_type desc_type, 4765 struct qdf_mem_multi_page_t *pages, 4766 qdf_dma_context_t memctxt, 4767 bool cacheable) 4768 { 4769 qdf_mem_multi_pages_free(soc->osdev, pages, 4770 memctxt, cacheable); 4771 } 4772 #endif 4773 4774 /** 4775 * struct dp_frag_history_opaque_atomic - Opaque struct for adding a fragmented 4776 * history. 4777 * @index: atomic index 4778 * @num_entries_per_slot: Number of entries per slot 4779 * @allocated: is allocated or not 4780 * @entry: pointers to array of records 4781 */ 4782 struct dp_frag_history_opaque_atomic { 4783 qdf_atomic_t index; 4784 uint16_t num_entries_per_slot; 4785 uint16_t allocated; 4786 void *entry[0]; 4787 }; 4788 4789 static inline QDF_STATUS 4790 dp_soc_frag_history_attach(struct dp_soc *soc, void *history_hdl, 4791 uint32_t max_slots, uint32_t max_entries_per_slot, 4792 uint32_t entry_size, 4793 bool attempt_prealloc, enum dp_ctxt_type ctxt_type) 4794 { 4795 struct dp_frag_history_opaque_atomic *history = 4796 (struct dp_frag_history_opaque_atomic *)history_hdl; 4797 size_t alloc_size = max_entries_per_slot * entry_size; 4798 int i; 4799 4800 for (i = 0; i < max_slots; i++) { 4801 if (attempt_prealloc) 4802 history->entry[i] = dp_context_alloc_mem(soc, ctxt_type, 4803 alloc_size); 4804 else 4805 history->entry[i] = qdf_mem_malloc(alloc_size); 4806 4807 if (!history->entry[i]) 4808 goto exit; 4809 } 4810 4811 qdf_atomic_init(&history->index); 4812 history->allocated = 1; 4813 history->num_entries_per_slot = max_entries_per_slot; 4814 4815 return QDF_STATUS_SUCCESS; 4816 exit: 4817 for (i = i - 1; i >= 0; i--) { 4818 if (attempt_prealloc) 4819 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 4820 else 4821 qdf_mem_free(history->entry[i]); 4822 } 4823 4824 return QDF_STATUS_E_NOMEM; 4825 } 4826 4827 static inline 4828 void dp_soc_frag_history_detach(struct dp_soc *soc, 4829 void *history_hdl, uint32_t max_slots, 4830 bool attempt_prealloc, 4831 enum dp_ctxt_type ctxt_type) 4832 { 4833 struct dp_frag_history_opaque_atomic *history = 4834 (struct dp_frag_history_opaque_atomic *)history_hdl; 4835 int i; 4836 4837 for (i = 0; i < max_slots; i++) { 4838 if (attempt_prealloc) 4839 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 4840 else 4841 qdf_mem_free(history->entry[i]); 4842 } 4843 4844 history->allocated = 0; 4845 } 4846 4847 /** 4848 * dp_get_frag_hist_next_atomic_idx() - get the next entry index to record an 4849 * entry in a fragmented history with 4850 * index being atomic. 
4851 * @curr_idx: address of the current index where the last entry was written 4852 * @next_idx: pointer to update the next index 4853 * @slot: pointer to update the history slot to be selected 4854 * @slot_shift: Bitwise shift mask for slot (in index) 4855 * @max_entries_per_slot: Max number of entries in a slot of history 4856 * @max_entries: Total number of entries in the history (sum of all slots) 4857 * 4858 * This function assumes that the "max_entries_per_slot" and "max_entries" 4859 * are a power-of-2. 4860 * 4861 * Return: None 4862 */ 4863 static inline void 4864 dp_get_frag_hist_next_atomic_idx(qdf_atomic_t *curr_idx, uint32_t *next_idx, 4865 uint16_t *slot, uint32_t slot_shift, 4866 uint32_t max_entries_per_slot, 4867 uint32_t max_entries) 4868 { 4869 uint32_t idx; 4870 4871 idx = qdf_do_div_rem(qdf_atomic_inc_return(curr_idx), max_entries); 4872 4873 *slot = idx >> slot_shift; 4874 *next_idx = idx & (max_entries_per_slot - 1); 4875 } 4876 4877 #ifdef FEATURE_RUNTIME_PM 4878 /** 4879 * dp_runtime_get() - Get dp runtime refcount 4880 * @soc: Datapath soc handle 4881 * 4882 * Get the dp runtime refcount by incrementing an atomic variable, which can 4883 * block dp runtime resume until pending tx is flushed by runtime suspend. 4884 * 4885 * Return: Current refcount 4886 */ 4887 static inline int32_t dp_runtime_get(struct dp_soc *soc) 4888 { 4889 return qdf_atomic_inc_return(&soc->dp_runtime_refcount); 4890 } 4891 4892 /** 4893 * dp_runtime_put() - Release dp runtime refcount 4894 * @soc: Datapath soc handle 4895 * 4896 * Release the dp runtime refcount by decrementing an atomic variable, allowing 4897 * dp runtime resume to finish. 4898 * 4899 * Return: Current refcount 4900 */ 4901 static inline int32_t dp_runtime_put(struct dp_soc *soc) 4902 { 4903 return qdf_atomic_dec_return(&soc->dp_runtime_refcount); 4904 } 4905 4906 /** 4907 * dp_runtime_get_refcount() - Get dp runtime refcount 4908 * @soc: Datapath soc handle 4909 * 4910 * Get the dp runtime refcount by reading an atomic variable 4911 * 4912 * Return: Current refcount 4913 */ 4914 static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc) 4915 { 4916 return qdf_atomic_read(&soc->dp_runtime_refcount); 4917 } 4918 4919 /** 4920 * dp_runtime_init() - Init DP related runtime PM clients and runtime refcount 4921 * @soc: Datapath soc handle 4922 * 4923 * Return: None 4924 */ 4925 static inline void dp_runtime_init(struct dp_soc *soc) 4926 { 4927 hif_rtpm_register(HIF_RTPM_ID_DP, NULL); 4928 hif_rtpm_register(HIF_RTPM_ID_DP_RING_STATS, NULL); 4929 qdf_atomic_init(&soc->dp_runtime_refcount); 4930 } 4931 4932 /** 4933 * dp_runtime_deinit() - Deinit DP related runtime PM clients 4934 * 4935 * Return: None 4936 */ 4937 static inline void dp_runtime_deinit(void) 4938 { 4939 hif_rtpm_deregister(HIF_RTPM_ID_DP); 4940 hif_rtpm_deregister(HIF_RTPM_ID_DP_RING_STATS); 4941 } 4942 4943 /** 4944 * dp_runtime_pm_mark_last_busy() - Mark last busy when rx path in use 4945 * @soc: Datapath soc handle 4946 * 4947 * Return: None 4948 */ 4949 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 4950 { 4951 soc->rx_last_busy = qdf_get_log_timestamp_usecs(); 4952 4953 hif_rtpm_mark_last_busy(HIF_RTPM_ID_DP); 4954 } 4955 #else 4956 static inline int32_t dp_runtime_get(struct dp_soc *soc) 4957 { 4958 return 0; 4959 } 4960 4961 static inline int32_t dp_runtime_put(struct dp_soc *soc) 4962 { 4963 return 0; 4964 } 4965 4966 static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc) 4967 { 4968 return QDF_STATUS_SUCCESS; 4969 } 4970 4971 static
inline void dp_runtime_deinit(void) 4972 { 4973 } 4974 4975 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 4976 { 4977 } 4978 #endif 4979 4980 static inline enum QDF_GLOBAL_MODE dp_soc_get_con_mode(struct dp_soc *soc) 4981 { 4982 if (soc->cdp_soc.ol_ops->get_con_mode) 4983 return soc->cdp_soc.ol_ops->get_con_mode(); 4984 4985 return QDF_GLOBAL_MAX_MODE; 4986 } 4987 4988 /** 4989 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats 4990 * processing 4991 * @pdev: Datapath PDEV handle 4992 * 4993 */ 4994 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev); 4995 4996 /** 4997 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats 4998 * processing 4999 * @pdev: Datapath PDEV handle 5000 * 5001 * Return: QDF_STATUS_SUCCESS: Success 5002 * QDF_STATUS_E_NOMEM: Error 5003 */ 5004 5005 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev); 5006 5007 /** 5008 * dp_peer_flush_frags() - Flush all fragments for a particular 5009 * peer 5010 * @soc_hdl: data path soc handle 5011 * @vdev_id: vdev id 5012 * @peer_mac: peer mac address 5013 * 5014 * Return: None 5015 */ 5016 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 5017 uint8_t *peer_mac); 5018 5019 /** 5020 * dp_soc_reset_mon_intr_mask() - reset mon intr mask 5021 * @soc: pointer to dp_soc handle 5022 * 5023 * Return: 5024 */ 5025 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc); 5026 5027 /** 5028 * dp_txrx_get_soc_stats() - will return cdp_soc_stats 5029 * @soc_hdl: soc handle 5030 * @soc_stats: buffer to hold the values 5031 * 5032 * Return: QDF_STATUS_SUCCESS: Success 5033 * QDF_STATUS_E_FAILURE: Error 5034 */ 5035 QDF_STATUS dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl, 5036 struct cdp_soc_stats *soc_stats); 5037 5038 /** 5039 * dp_txrx_get_peer_delay_stats() - to get peer delay stats per TIDs 5040 * @soc_hdl: soc handle 5041 * @vdev_id: id of vdev handle 5042 * @peer_mac: mac of DP_PEER handle 5043 * @delay_stats: pointer to delay stats array 5044 * 5045 * Return: QDF_STATUS_SUCCESS: Success 5046 * QDF_STATUS_E_FAILURE: Error 5047 */ 5048 QDF_STATUS 5049 dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 5050 uint8_t *peer_mac, 5051 struct cdp_delay_tid_stats *delay_stats); 5052 5053 /** 5054 * dp_txrx_get_peer_jitter_stats() - to get peer jitter stats per TIDs 5055 * @soc_hdl: soc handle 5056 * @pdev_id: id of pdev handle 5057 * @vdev_id: id of vdev handle 5058 * @peer_mac: mac of DP_PEER handle 5059 * @tid_stats: pointer to jitter stats array 5060 * 5061 * Return: QDF_STATUS_SUCCESS: Success 5062 * QDF_STATUS_E_FAILURE: Error 5063 */ 5064 QDF_STATUS 5065 dp_txrx_get_peer_jitter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5066 uint8_t vdev_id, uint8_t *peer_mac, 5067 struct cdp_peer_tid_stats *tid_stats); 5068 5069 /** 5070 * dp_peer_get_tx_capture_stats() - to get peer Tx Capture stats 5071 * @soc_hdl: soc handle 5072 * @vdev_id: id of vdev handle 5073 * @peer_mac: mac of DP_PEER handle 5074 * @stats: pointer to peer tx capture stats 5075 * 5076 * Return: QDF_STATUS_SUCCESS: Success 5077 * QDF_STATUS_E_FAILURE: Error 5078 */ 5079 QDF_STATUS 5080 dp_peer_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, 5081 uint8_t vdev_id, uint8_t *peer_mac, 5082 struct cdp_peer_tx_capture_stats *stats); 5083 5084 /** 5085 * dp_pdev_get_tx_capture_stats() - to get pdev Tx Capture stats 5086 * @soc_hdl: soc handle 5087 * @pdev_id: id of pdev handle 5088 * @stats: pointer to pdev tx capture stats 5089 * 5090 * Return: QDF_STATUS_SUCCESS: Success 
5091 * QDF_STATUS_E_FAILURE: Error 5092 */ 5093 QDF_STATUS 5094 dp_pdev_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5095 struct cdp_pdev_tx_capture_stats *stats); 5096 5097 #ifdef HW_TX_DELAY_STATS_ENABLE 5098 /** 5099 * dp_is_vdev_tx_delay_stats_enabled(): Check if tx delay stats 5100 * is enabled for vdev 5101 * @vdev: dp vdev 5102 * 5103 * Return: true if tx delay stats is enabled for vdev else false 5104 */ 5105 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 5106 { 5107 return vdev->hw_tx_delay_stats_enabled; 5108 } 5109 5110 /** 5111 * dp_pdev_print_tx_delay_stats(): Print vdev tx delay stats 5112 * for pdev 5113 * @soc: dp soc 5114 * 5115 * Return: None 5116 */ 5117 void dp_pdev_print_tx_delay_stats(struct dp_soc *soc); 5118 5119 /** 5120 * dp_pdev_clear_tx_delay_stats() - clear tx delay stats 5121 * @soc: soc handle 5122 * 5123 * Return: None 5124 */ 5125 void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc); 5126 #else 5127 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 5128 { 5129 return 0; 5130 } 5131 5132 static inline void dp_pdev_print_tx_delay_stats(struct dp_soc *soc) 5133 { 5134 } 5135 5136 static inline void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc) 5137 { 5138 } 5139 #endif 5140 5141 static inline void 5142 dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config *lro_hash) 5143 { 5144 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv4, 5145 (sizeof(lro_hash->toeplitz_hash_ipv4[0]) * 5146 LRO_IPV4_SEED_ARR_SZ)); 5147 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv6, 5148 (sizeof(lro_hash->toeplitz_hash_ipv6[0]) * 5149 LRO_IPV6_SEED_ARR_SZ)); 5150 } 5151 5152 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 5153 /** 5154 * dp_get_pdev_telemetry_stats- API to get pdev telemetry stats 5155 * @soc_hdl: soc handle 5156 * @pdev_id: id of pdev handle 5157 * @stats: pointer to pdev telemetry stats 5158 * 5159 * Return: QDF_STATUS_SUCCESS: Success 5160 * QDF_STATUS_E_FAILURE: Error 5161 */ 5162 QDF_STATUS 5163 dp_get_pdev_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5164 struct cdp_pdev_telemetry_stats *stats); 5165 5166 /** 5167 * dp_get_peer_telemetry_stats() - API to get peer telemetry stats 5168 * @soc_hdl: soc handle 5169 * @addr: peer mac 5170 * @stats: pointer to peer telemetry stats 5171 * 5172 * Return: QDF_STATUS_SUCCESS: Success 5173 * QDF_STATUS_E_FAILURE: Error 5174 */ 5175 QDF_STATUS 5176 dp_get_peer_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t *addr, 5177 struct cdp_peer_telemetry_stats *stats); 5178 5179 /** 5180 * dp_get_peer_deter_stats() - API to get peer deterministic stats 5181 * @soc_hdl: soc handle 5182 * @vdev_id: id of vdev handle 5183 * @addr: peer mac 5184 * @stats: pointer to peer deterministic stats 5185 * 5186 * Return: QDF_STATUS_SUCCESS: Success 5187 * QDF_STATUS_E_FAILURE: Error 5188 */ 5189 QDF_STATUS 5190 dp_get_peer_deter_stats(struct cdp_soc_t *soc_hdl, 5191 uint8_t vdev_id, 5192 uint8_t *addr, 5193 struct cdp_peer_deter_stats *stats); 5194 5195 /** 5196 * dp_get_pdev_deter_stats() - API to get pdev deterministic stats 5197 * @soc_hdl: soc handle 5198 * @pdev_id: id of pdev handle 5199 * @stats: pointer to pdev deterministic stats 5200 * 5201 * Return: QDF_STATUS_SUCCESS: Success 5202 * QDF_STATUS_E_FAILURE: Error 5203 */ 5204 QDF_STATUS 5205 dp_get_pdev_deter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5206 struct cdp_pdev_deter_stats *stats); 5207 5208 /** 5209 * dp_update_pdev_chan_util_stats() - API to update channel utilization stats 5210 * 
@soc_hdl: soc handle 5211 * @pdev_id: id of pdev handle 5212 * @ch_util: Pointer to channel util stats 5213 * 5214 * Return: QDF_STATUS_SUCCESS: Success 5215 * QDF_STATUS_E_FAILURE: Error 5216 */ 5217 QDF_STATUS 5218 dp_update_pdev_chan_util_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5219 struct cdp_pdev_chan_util_stats *ch_util); 5220 #endif /* WLAN_CONFIG_TELEMETRY_AGENT */ 5221 5222 #ifdef CONNECTIVITY_PKTLOG 5223 /** 5224 * dp_tx_send_pktlog() - send tx packet log 5225 * @soc: soc handle 5226 * @pdev: pdev handle 5227 * @tx_desc: TX software descriptor 5228 * @nbuf: nbuf 5229 * @status: status of tx packet 5230 * 5231 * This function is used to send tx packets for logging 5232 * 5233 * Return: None 5234 * 5235 */ 5236 static inline 5237 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5238 struct dp_tx_desc_s *tx_desc, 5239 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5240 { 5241 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_tx_packetdump_cb; 5242 5243 if (qdf_unlikely(packetdump_cb) && 5244 dp_tx_frm_std == tx_desc->frm_type) { 5245 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 5246 tx_desc->vdev_id, nbuf, status, QDF_TX_DATA_PKT); 5247 } 5248 } 5249 5250 /** 5251 * dp_rx_send_pktlog() - send rx packet log 5252 * @soc: soc handle 5253 * @pdev: pdev handle 5254 * @nbuf: nbuf 5255 * @status: status of rx packet 5256 * 5257 * This function is used to send rx packets for logging 5258 * 5259 * Return: None 5260 * 5261 */ 5262 static inline 5263 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5264 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5265 { 5266 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb; 5267 5268 if (qdf_unlikely(packetdump_cb)) { 5269 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 5270 QDF_NBUF_CB_RX_VDEV_ID(nbuf), 5271 nbuf, status, QDF_RX_DATA_PKT); 5272 } 5273 } 5274 5275 /** 5276 * dp_rx_err_send_pktlog() - send rx error packet log 5277 * @soc: soc handle 5278 * @pdev: pdev handle 5279 * @mpdu_desc_info: MPDU descriptor info 5280 * @nbuf: nbuf 5281 * @status: status of rx packet 5282 * @set_pktlen: whether to set packet length 5283 * 5284 * This API should only be called when we have not removed 5285 * Rx TLV from head, and head is pointing to rx_tlv 5286 * 5287 * This function is used to send an rx packet from the error path 5288 * for logging, for which the rx packet TLV has not been removed.
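 *
 * For example (illustrative sizes only): with an rx_pkt_tlv_size of 128 bytes
 * and an l3_hdr_pad of 2 bytes, a non-BAR, non-fragment MSDU is logged after
 * pulling 130 bytes of TLV/padding, and the head pointer is restored once the
 * packetdump callback returns.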
5289 * 5290 * Return: None 5291 * 5292 */ 5293 static inline 5294 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5295 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 5296 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status, 5297 bool set_pktlen) 5298 { 5299 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb; 5300 qdf_size_t skip_size; 5301 uint16_t msdu_len, nbuf_len; 5302 uint8_t *rx_tlv_hdr; 5303 struct hal_rx_msdu_metadata msdu_metadata; 5304 uint16_t buf_size; 5305 5306 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx); 5307 5308 if (qdf_unlikely(packetdump_cb)) { 5309 rx_tlv_hdr = qdf_nbuf_data(nbuf); 5310 nbuf_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, 5311 rx_tlv_hdr); 5312 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, 5313 &msdu_metadata); 5314 5315 if (mpdu_desc_info->bar_frame || 5316 (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_FRAGMENT)) 5317 skip_size = soc->rx_pkt_tlv_size; 5318 else 5319 skip_size = soc->rx_pkt_tlv_size + 5320 msdu_metadata.l3_hdr_pad; 5321 5322 if (set_pktlen) { 5323 msdu_len = nbuf_len + skip_size; 5324 qdf_nbuf_set_pktlen(nbuf, qdf_min(msdu_len, buf_size)); 5325 } 5326 5327 qdf_nbuf_pull_head(nbuf, skip_size); 5328 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 5329 QDF_NBUF_CB_RX_VDEV_ID(nbuf), 5330 nbuf, status, QDF_RX_DATA_PKT); 5331 qdf_nbuf_push_head(nbuf, skip_size); 5332 } 5333 } 5334 5335 #else 5336 static inline 5337 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5338 struct dp_tx_desc_s *tx_desc, 5339 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5340 { 5341 } 5342 5343 static inline 5344 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5345 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5346 { 5347 } 5348 5349 static inline 5350 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5351 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 5352 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status, 5353 bool set_pktlen) 5354 { 5355 } 5356 #endif 5357 5358 /** 5359 * dp_pdev_update_fast_rx_flag() - Update Fast rx flag for a PDEV 5360 * @soc : Data path soc handle 5361 * @pdev : PDEV handle 5362 * 5363 * Return: None 5364 */ 5365 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev); 5366 5367 #ifdef FEATURE_DIRECT_LINK 5368 /** 5369 * dp_setup_direct_link_refill_ring(): Setup direct link refill ring for pdev 5370 * @soc_hdl: DP SOC handle 5371 * @pdev_id: pdev id 5372 * 5373 * Return: Handle to SRNG 5374 */ 5375 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5376 uint8_t pdev_id); 5377 5378 /** 5379 * dp_destroy_direct_link_refill_ring(): Destroy direct link refill ring for 5380 * pdev 5381 * @soc_hdl: DP SOC handle 5382 * @pdev_id: pdev id 5383 * 5384 * Return: None 5385 */ 5386 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5387 uint8_t pdev_id); 5388 #else 5389 static inline 5390 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5391 uint8_t pdev_id) 5392 { 5393 return NULL; 5394 } 5395 5396 static inline 5397 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5398 uint8_t pdev_id) 5399 { 5400 } 5401 #endif 5402 5403 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY 5404 static inline 5405 void dp_cfg_event_record(struct dp_soc *soc, 5406 enum dp_cfg_event_type event, 5407 union dp_cfg_event_desc *cfg_event_desc) 5408 { 5409 struct dp_cfg_event_history *cfg_event_history = 5410 &soc->cfg_event_history; 5411 struct dp_cfg_event *entry; 5412 uint32_t idx; 
5413 uint16_t slot; 5414 5415 dp_get_frag_hist_next_atomic_idx(&cfg_event_history->index, &idx, 5416 &slot, 5417 DP_CFG_EVT_HIST_SLOT_SHIFT, 5418 DP_CFG_EVT_HIST_PER_SLOT_MAX, 5419 DP_CFG_EVT_HISTORY_SIZE); 5420 5421 entry = &cfg_event_history->entry[slot][idx]; 5422 5423 entry->timestamp = qdf_get_log_timestamp(); 5424 entry->type = event; 5425 qdf_mem_copy(&entry->event_desc, cfg_event_desc, 5426 sizeof(entry->event_desc)); 5427 } 5428 5429 static inline void 5430 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5431 struct dp_vdev *vdev) 5432 { 5433 union dp_cfg_event_desc cfg_evt_desc = {0}; 5434 struct dp_vdev_attach_detach_desc *vdev_evt = 5435 &cfg_evt_desc.vdev_evt; 5436 5437 if (qdf_unlikely(event != DP_CFG_EVENT_VDEV_ATTACH && 5438 event != DP_CFG_EVENT_VDEV_UNREF_DEL && 5439 event != DP_CFG_EVENT_VDEV_DETACH)) { 5440 qdf_assert_always(0); 5441 return; 5442 } 5443 5444 vdev_evt->vdev = vdev; 5445 vdev_evt->vdev_id = vdev->vdev_id; 5446 vdev_evt->ref_count = qdf_atomic_read(&vdev->ref_cnt); 5447 vdev_evt->mac_addr = vdev->mac_addr; 5448 5449 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5450 } 5451 5452 static inline void 5453 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5454 struct dp_peer *peer, struct dp_vdev *vdev, 5455 uint8_t is_reuse) 5456 { 5457 union dp_cfg_event_desc cfg_evt_desc = {0}; 5458 struct dp_peer_cmn_ops_desc *peer_evt = &cfg_evt_desc.peer_cmn_evt; 5459 5460 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_CREATE && 5461 event != DP_CFG_EVENT_PEER_DELETE && 5462 event != DP_CFG_EVENT_PEER_UNREF_DEL)) { 5463 qdf_assert_always(0); 5464 return; 5465 } 5466 5467 peer_evt->peer = peer; 5468 peer_evt->vdev = vdev; 5469 peer_evt->vdev_id = vdev->vdev_id; 5470 peer_evt->is_reuse = is_reuse; 5471 peer_evt->peer_ref_count = qdf_atomic_read(&peer->ref_cnt); 5472 peer_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt); 5473 peer_evt->mac_addr = peer->mac_addr; 5474 peer_evt->vdev_mac_addr = vdev->mac_addr; 5475 5476 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5477 } 5478 5479 static inline void 5480 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc, 5481 enum dp_cfg_event_type event, 5482 struct dp_peer *mld_peer, 5483 struct dp_peer *link_peer, 5484 uint8_t idx, uint8_t result) 5485 { 5486 union dp_cfg_event_desc cfg_evt_desc = {0}; 5487 struct dp_mlo_add_del_link_desc *mlo_link_delink_evt = 5488 &cfg_evt_desc.mlo_link_delink_evt; 5489 5490 if (qdf_unlikely(event != DP_CFG_EVENT_MLO_ADD_LINK && 5491 event != DP_CFG_EVENT_MLO_DEL_LINK)) { 5492 qdf_assert_always(0); 5493 return; 5494 } 5495 5496 mlo_link_delink_evt->link_peer = link_peer; 5497 mlo_link_delink_evt->mld_peer = mld_peer; 5498 mlo_link_delink_evt->link_mac_addr = link_peer->mac_addr; 5499 mlo_link_delink_evt->mld_mac_addr = mld_peer->mac_addr; 5500 mlo_link_delink_evt->num_links = mld_peer->num_links; 5501 mlo_link_delink_evt->action_result = result; 5502 mlo_link_delink_evt->idx = idx; 5503 5504 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5505 } 5506 5507 static inline void 5508 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc, 5509 struct dp_peer *mld_peer, 5510 struct dp_vdev *prev_vdev, 5511 struct dp_vdev *new_vdev) 5512 { 5513 union dp_cfg_event_desc cfg_evt_desc = {0}; 5514 struct dp_mlo_setup_vdev_update_desc *vdev_update_evt = 5515 &cfg_evt_desc.mlo_setup_vdev_update; 5516 5517 vdev_update_evt->mld_peer = mld_peer; 5518 vdev_update_evt->prev_vdev = prev_vdev; 5519 vdev_update_evt->new_vdev = new_vdev; 5520 
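	/* Log the MLD peer's vdev migration into the cfg event history */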
5521 dp_cfg_event_record(soc, DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE, 5522 &cfg_evt_desc); 5523 } 5524 5525 static inline void 5526 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc, 5527 enum dp_cfg_event_type event, 5528 struct dp_peer *peer, 5529 uint8_t *mac_addr, 5530 uint8_t is_ml_peer, 5531 uint16_t peer_id, uint16_t ml_peer_id, 5532 uint16_t hw_peer_id, uint8_t vdev_id) 5533 { 5534 union dp_cfg_event_desc cfg_evt_desc = {0}; 5535 struct dp_rx_peer_map_unmap_desc *peer_map_unmap_evt = 5536 &cfg_evt_desc.peer_map_unmap_evt; 5537 5538 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_MAP && 5539 event != DP_CFG_EVENT_PEER_UNMAP && 5540 event != DP_CFG_EVENT_MLO_PEER_MAP && 5541 event != DP_CFG_EVENT_MLO_PEER_UNMAP)) { 5542 qdf_assert_always(0); 5543 return; 5544 } 5545 5546 peer_map_unmap_evt->peer_id = peer_id; 5547 peer_map_unmap_evt->ml_peer_id = ml_peer_id; 5548 peer_map_unmap_evt->hw_peer_id = hw_peer_id; 5549 peer_map_unmap_evt->vdev_id = vdev_id; 5550 /* Peer may be NULL at times, but its not an issue. */ 5551 peer_map_unmap_evt->peer = peer; 5552 peer_map_unmap_evt->is_ml_peer = is_ml_peer; 5553 qdf_mem_copy(&peer_map_unmap_evt->mac_addr.raw, mac_addr, 5554 QDF_MAC_ADDR_SIZE); 5555 5556 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5557 } 5558 5559 static inline void 5560 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc, 5561 enum dp_cfg_event_type event, 5562 struct dp_peer *peer, 5563 struct dp_vdev *vdev, 5564 uint8_t vdev_id, 5565 struct cdp_peer_setup_info *peer_setup_info) 5566 { 5567 union dp_cfg_event_desc cfg_evt_desc = {0}; 5568 struct dp_peer_setup_desc *peer_setup_evt = 5569 &cfg_evt_desc.peer_setup_evt; 5570 5571 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_SETUP && 5572 event != DP_CFG_EVENT_MLO_SETUP)) { 5573 qdf_assert_always(0); 5574 return; 5575 } 5576 5577 peer_setup_evt->peer = peer; 5578 peer_setup_evt->vdev = vdev; 5579 if (vdev) 5580 peer_setup_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt); 5581 peer_setup_evt->mac_addr = peer->mac_addr; 5582 peer_setup_evt->vdev_id = vdev_id; 5583 if (peer_setup_info) { 5584 peer_setup_evt->is_first_link = peer_setup_info->is_first_link; 5585 peer_setup_evt->is_primary_link = peer_setup_info->is_primary_link; 5586 qdf_mem_copy(peer_setup_evt->mld_mac_addr.raw, 5587 peer_setup_info->mld_peer_mac, 5588 QDF_MAC_ADDR_SIZE); 5589 } 5590 5591 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5592 } 5593 #else 5594 5595 static inline void 5596 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5597 struct dp_vdev *vdev) 5598 { 5599 } 5600 5601 static inline void 5602 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5603 struct dp_peer *peer, struct dp_vdev *vdev, 5604 uint8_t is_reuse) 5605 { 5606 } 5607 5608 static inline void 5609 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc, 5610 enum dp_cfg_event_type event, 5611 struct dp_peer *mld_peer, 5612 struct dp_peer *link_peer, 5613 uint8_t idx, uint8_t result) 5614 { 5615 } 5616 5617 static inline void 5618 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc, 5619 struct dp_peer *mld_peer, 5620 struct dp_vdev *prev_vdev, 5621 struct dp_vdev *new_vdev) 5622 { 5623 } 5624 5625 static inline void 5626 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc, 5627 enum dp_cfg_event_type event, 5628 struct dp_peer *peer, 5629 uint8_t *mac_addr, 5630 uint8_t is_ml_peer, 5631 uint16_t peer_id, uint16_t ml_peer_id, 5632 uint16_t hw_peer_id, uint8_t vdev_id) 5633 { 5634 } 5635 5636 static 

#else

static inline void
dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event,
			     struct dp_vdev *vdev)
{
}

static inline void
dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event,
			     struct dp_peer *peer, struct dp_vdev *vdev,
			     uint8_t is_reuse)
{
}

static inline void
dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc,
					enum dp_cfg_event_type event,
					struct dp_peer *mld_peer,
					struct dp_peer *link_peer,
					uint8_t idx, uint8_t result)
{
}

static inline void
dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc,
					      struct dp_peer *mld_peer,
					      struct dp_vdev *prev_vdev,
					      struct dp_vdev *new_vdev)
{
}

static inline void
dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc,
				       enum dp_cfg_event_type event,
				       struct dp_peer *peer,
				       uint8_t *mac_addr,
				       uint8_t is_ml_peer,
				       uint16_t peer_id, uint16_t ml_peer_id,
				       uint16_t hw_peer_id, uint8_t vdev_id)
{
}

static inline void
dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc,
				   enum dp_cfg_event_type event,
				   struct dp_peer *peer,
				   struct dp_vdev *vdev,
				   uint8_t vdev_id,
				   struct cdp_peer_setup_info *peer_setup_info)
{
}
#endif

#ifndef WLAN_SOFTUMAC_SUPPORT
/**
 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
 * @txrx_soc: DP SOC handle
 *
 * Return: none
 */
void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc);
#endif

/**
 * dp_get_peer_stats() - Get peer stats
 * @peer: Datapath peer
 * @peer_stats: buffer for peer stats
 *
 * Return: none
 */
void dp_get_peer_stats(struct dp_peer *peer,
		       struct cdp_peer_stats *peer_stats);

/**
 * dp_get_per_link_peer_stats() - Get per link peer stats
 * @peer: Datapath peer
 * @peer_stats: buffer for peer stats
 * @peer_type: Peer type
 * @num_link: Number of ML links
 *
 * Return: status success/failure
 */
QDF_STATUS dp_get_per_link_peer_stats(struct dp_peer *peer,
				      struct cdp_peer_stats *peer_stats,
				      enum cdp_peer_type peer_type,
				      uint8_t num_link);

/**
 * dp_get_peer_hw_link_id() - get peer hardware link id
 * @soc: soc handle
 * @pdev: data path pdev
 *
 * Return: link_id
 */
static inline int
dp_get_peer_hw_link_id(struct dp_soc *soc,
		       struct dp_pdev *pdev)
{
	if (wlan_cfg_is_peer_link_stats_enabled(soc->wlan_cfg_ctx))
		return ((soc->arch_ops.get_hw_link_id(pdev)) + 1);

	return 0;
}
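
/*
 * Illustrative note (a minimal sketch; the caller context is an assumption,
 * not code from this header):
 *
 *	uint8_t link_id = dp_get_peer_hw_link_id(soc, pdev);
 *
 * When per-link peer stats are enabled this returns the pdev's hw link id
 * plus one, so callers can treat a return value of 0 as "per-link stats
 * disabled" and fall back to the aggregate/default bucket.
 */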

#ifdef QCA_MULTIPASS_SUPPORT
/**
 * dp_tx_remove_vlan_tag() - Remove 4 bytes of vlan tag
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: void
 */
void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
#endif

/**
 * dp_print_per_link_stats() - Print per link peer stats.
 * @soc_hdl: soc handle.
 * @vdev_id: vdev_id.
 *
 * Return: None.
 */
void dp_print_per_link_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_get_ring_stats_from_hal() - get hal level ring pointer values
 * @soc: DP_SOC handle
 * @srng: DP_SRNG handle
 * @ring_type: srng src/dst ring
 * @_tailp: pointer to tail of ring
 * @_headp: pointer to head of ring
 * @_hw_headp: pointer to head of ring in HW
 * @_hw_tailp: pointer to tail of ring in HW
 *
 * Return: void
 */
static inline void
dp_get_ring_stats_from_hal(struct dp_soc *soc, struct dp_srng *srng,
			   enum hal_ring_type ring_type,
			   uint32_t *_tailp, uint32_t *_headp,
			   int32_t *_hw_headp, int32_t *_hw_tailp)
{
	uint32_t tailp;
	uint32_t headp;
	int32_t hw_headp = -1;
	int32_t hw_tailp = -1;
	struct hal_soc *hal_soc;

	if (soc && srng && srng->hal_srng) {
		hal_soc = (struct hal_soc *)soc->hal_soc;
		hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
		*_headp = headp;
		*_tailp = tailp;

		hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_headp,
				&hw_tailp, ring_type);
		*_hw_headp = hw_headp;
		*_hw_tailp = hw_tailp;
	}
}

/**
 * dp_update_vdev_be_basic_stats() - Update vdev basic stats
 * @txrx_peer: DP txrx_peer handle
 * @tgtobj: Pointer to buffer for be vdev stats
 *
 * Return: None
 */
void dp_update_vdev_be_basic_stats(struct dp_txrx_peer *txrx_peer,
				   struct dp_vdev_stats *tgtobj);

/**
 * dp_update_vdev_basic_stats() - Update vdev basic stats
 * @txrx_peer: DP txrx_peer handle
 * @tgtobj: Pointer to buffer for vdev stats
 *
 * Return: None
 */
void dp_update_vdev_basic_stats(struct dp_txrx_peer *txrx_peer,
				struct cdp_vdev_stats *tgtobj);

/**
 * dp_get_vdev_stats_for_unmap_peer_legacy() - Update vdev stats of an
 * unmapped peer
 * @vdev: vdev associated with the peer
 * @peer: unmapped peer
 *
 * Return: None
 */
void dp_get_vdev_stats_for_unmap_peer_legacy(struct dp_vdev *vdev,
					     struct dp_peer *peer);

#ifdef WLAN_FEATURE_TX_LATENCY_STATS
/**
 * dp_h2t_tx_latency_stats_cfg_msg_send() - send HTT message for tx latency
 * stats config to FW
 * @dp_soc: DP SOC handle
 * @vdev_id: vdev id
 * @enable: indicates enablement of the feature
 * @period: statistical period for transmit latency in terms of ms
 * @granularity: granularity for tx latency distribution
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_h2t_tx_latency_stats_cfg_msg_send(struct dp_soc *dp_soc, uint16_t vdev_id,
				     bool enable, uint32_t period,
				     uint32_t granularity);

/**
 * dp_tx_latency_stats_update_cca() - update transmit latency statistics for
 * CCA
 * @soc: dp soc handle
 * @peer_id: peer id
 * @granularity: granularity of distribution
 * @distribution: distribution of transmit latency statistics
 * @avg: average of CCA latency (in microseconds) within a cycle
 *
 * Return: None
 */
void
dp_tx_latency_stats_update_cca(struct dp_soc *soc, uint16_t peer_id,
			       uint32_t granularity, uint32_t *distribution,
			       uint32_t avg);

/**
 * dp_tx_latency_stats_report() - report transmit latency statistics for each
 * vdev of specified pdev
 * @soc: dp soc handle
 * @pdev: dp pdev handle
 *
 * Return: None
 */
void dp_tx_latency_stats_report(struct dp_soc *soc, struct dp_pdev *pdev);
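
/*
 * Illustrative usage (a minimal sketch; the period of 2000 ms and the
 * granularity of 100 are placeholder assumptions, not defaults defined in
 * this header):
 *
 *	QDF_STATUS status;
 *
 *	status = dp_h2t_tx_latency_stats_cfg_msg_send(dp_soc, vdev_id,
 *						      true, 2000, 100);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *
 * @period is the statistical period in milliseconds and @granularity is the
 * bucket width used for the reported latency distribution.
 */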
#endif
#ifdef WLAN_FEATURE_SSR_DRIVER_DUMP
/**
 * dp_ssr_dump_srng_register() - Register DP ring with SSR dump.
 * @region_name: ring name to register.
 * @srng: dp srng handle.
 * @num: Ring number
 *
 * Pass num = -1 if there is only a single ring.
 * Pass the ring number if there are multiple rings,
 * e.g. for REO pass the REO ring number (0..n).
 *
 * Return: None.
 */
void
dp_ssr_dump_srng_register(char *region_name, struct dp_srng *srng, int num);

/**
 * dp_ssr_dump_srng_unregister() - Unregister DP ring from SSR dump.
 * @region_name: ring name to unregister.
 * @num: Ring number
 *
 * Pass num = -1 if there is only a single ring.
 * Pass the ring number if there are multiple rings,
 * e.g. for REO pass the REO ring number (0..n).
 *
 * Return: None.
 */
void dp_ssr_dump_srng_unregister(char *region_name, int num);

/**
 * dp_ssr_dump_pdev_register() - Register DP Pdev with SSR dump.
 * @pdev: Pdev handle to register.
 * @pdev_id: Pdev ID.
 *
 * Return: None.
 */
void dp_ssr_dump_pdev_register(struct dp_pdev *pdev, uint8_t pdev_id);

/**
 * dp_ssr_dump_pdev_unregister() - Unregister DP Pdev from SSR dump.
 * @pdev_id: Pdev ID.
 *
 * Return: None.
 */
void dp_ssr_dump_pdev_unregister(uint8_t pdev_id);
#else
static inline
void dp_ssr_dump_srng_register(char *region_name, struct dp_srng *srng, int num)
{
}

static inline
void dp_ssr_dump_srng_unregister(char *region_name, int num)
{
}

static inline
void dp_ssr_dump_pdev_register(struct dp_pdev *pdev, uint8_t pdev_id)
{
}

static inline
void dp_ssr_dump_pdev_unregister(uint8_t pdev_id)
{
}
#endif
#endif /* #ifndef _DP_INTERNAL_H_ */