/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"
#include "dp_htt.h"
#include "dp_rx_tid.h"

#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

#define DP_PEER_WDS_COUNT_INVALID UINT_MAX

#define DP_BLOCKMEM_SIZE 4096
#define WBM2_SW_PPE_REL_RING_ID 6
#define WBM2_SW_PPE_REL_MAP_ID 11
#define DP_TX_PPEDS_POOL_ID 0xF

/* Alignment for consistent memory for DP rings */
#define DP_RING_BASE_ALIGN 32

#define DP_RSSI_INVAL 0x80
#define DP_RSSI_AVG_WEIGHT 2

/*
 * Formula to derive avg_rssi is taken from wifi2.o firmware
 */
#define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))
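
/*
 * Illustration (added for clarity; not part of the original header): with
 * DP_RSSI_AVG_WEIGHT = 2 the macro above computes an exponentially weighted
 * moving average, avg' = avg - avg/4 + last/4. A minimal sketch:
 */
#if 0	/* example only, never compiled */
static inline uint8_t dp_example_avg_rssi(void)
{
	uint8_t avg_rssi = 64;	/* running average */
	uint8_t last_rssi = 72;	/* newest sample */

	/* 64 - (64 >> 2) + (72 >> 2) = 64 - 16 + 18 = 66 */
	return DP_GET_AVG_RSSI(avg_rssi, last_rssi);
}
#endif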

/* Macro for NYSM value received in VHT TLV */
#define VHT_SGI_NYSM 3

#define INVALID_WBM_RING_NUM 0xF

#ifdef FEATURE_DIRECT_LINK
#define DIRECT_LINK_REFILL_RING_ENTRIES 64
#ifdef IPA_OFFLOAD
#ifdef IPA_WDI3_VLAN_SUPPORT
#define DIRECT_LINK_REFILL_RING_IDX 4
#else
#define DIRECT_LINK_REFILL_RING_IDX 3
#endif
#else
#define DIRECT_LINK_REFILL_RING_IDX 2
#endif
#endif

#define DP_MAX_VLAN_IDS 4096
#define DP_VLAN_UNTAGGED 0
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2

/**
 * struct htt_dbgfs_cfg - structure to maintain required htt data
 * @msg_word: htt msg sent to upper layer
 * @m: qdf debugfs file pointer
 */
struct htt_dbgfs_cfg {
	uint32_t *msg_word;
	qdf_debugfs_file_t m;
};

/* Cookie MSB bits assigned for different use cases.
 * Note: Users can't use the last 3 bits, as they are reserved for pdev_id.
 * If the number of pdevs grows beyond 3 in the future, more bits may need
 * to be reserved.
 */

/* Reserved for the default case */
#define DBG_STATS_COOKIE_DEFAULT 0x0

/* Reserved for DP stats: 3rd bit */
#define DBG_STATS_COOKIE_DP_STATS BIT(3)

/* Reserved for HTT stats debugfs support: 4th bit */
#define DBG_STATS_COOKIE_HTT_DBGFS BIT(4)

/* Reserved for HTT stats sysfs support: 5th bit */
#define DBG_SYSFS_STATS_COOKIE BIT(5)

/* Reserved for HTT stats OBSS PD support: 6th bit */
#define DBG_STATS_COOKIE_HTT_OBSS BIT(6)

/*
 * Bitmap of HTT PPDU TLV types for Default mode
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/* PPDU STATS CFG */
#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS \
	(HTT_PPDU_DEFAULT_TLV_BITMAP | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support the debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER \
	(HTT_PPDU_DEFAULT_TLV_BITMAP | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support the BPR feature */
#define DP_PPDU_STATS_CFG_BPR \
	((1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
				   DP_PPDU_STATS_CFG_ENH_STATS)

/* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
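
/*
 * Illustration (added; not part of the original header): the masks above are
 * plain bitmaps indexed by TLV type, so enablement checks are single bit
 * tests. A minimal sketch:
 */
#if 0	/* example only, never compiled */
static inline bool dp_example_tlv_enabled(uint32_t cfg_mask, uint32_t tlv_type)
{
	/* e.g. dp_example_tlv_enabled(DP_PPDU_STATS_CFG_ENH_STATS,
	 *			       HTT_PPDU_STATS_COMMON_TLV) -> true
	 */
	return !!(cfg_mask & (1 << tlv_type));
}
#endif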

/*
 * Bitmap of HTT PPDU delayed ba TLV types for Default mode
 */
#define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Delayed BA
 */
#define HTT_PPDU_STATUS_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))

static const enum cdp_packet_type hal_2_dp_pkt_type_map[HAL_DOT11_MAX] = {
	[HAL_DOT11A] = DOT11_A,
	[HAL_DOT11B] = DOT11_B,
	[HAL_DOT11N_MM] = DOT11_N,
	[HAL_DOT11AC] = DOT11_AC,
	[HAL_DOT11AX] = DOT11_AX,
	[HAL_DOT11BA] = DOT11_MAX,
#ifdef WLAN_FEATURE_11BE
	[HAL_DOT11BE] = DOT11_BE,
#else
	[HAL_DOT11BE] = DOT11_MAX,
#endif
	[HAL_DOT11AZ] = DOT11_MAX,
	[HAL_DOT11N_GF] = DOT11_MAX,
};

#ifdef GLOBAL_ASSERT_AVOIDANCE
#define dp_assert_always_internal_stat(_expr, _handle, _field) \
	(qdf_unlikely(!(_expr)) ? ((_handle)->stats._field++, true) : false)

#define dp_assert_always_internal_ds_stat(_expr, _handle, _field) \
	((_handle)->ppeds_stats._field++)

static inline bool dp_assert_always_internal(bool expr)
{
	return !expr;
}
#else
static inline bool __dp_assert_always_internal(bool expr)
{
	qdf_assert_always(expr);

	return false;
}

#define dp_assert_always_internal(_expr) __dp_assert_always_internal(_expr)

#define dp_assert_always_internal_stat(_expr, _handle, _field) \
	dp_assert_always_internal(_expr)

#define dp_assert_always_internal_ds_stat(_expr, _handle, _field) \
	dp_assert_always_internal(_expr)
#endif

#ifdef WLAN_FEATURE_11BE
/**
 * dp_get_mcs_array_index_by_pkt_type_mcs() - get the destination mcs index
 * in the mcs array
 * @pkt_type: host SW pkt type
 * @mcs: mcs value for TX/RX rate
 *
 * Return: succeeded - valid index in the mcs array
 *         failed - same value as MCS_MAX
 */
static inline uint8_t
dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs)
{
	uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX;

	switch (pkt_type) {
	case DOT11_A:
		dst_mcs_idx =
			mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_B:
		dst_mcs_idx =
			mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_N:
		dst_mcs_idx =
			mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AC:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AX:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_BE:
		dst_mcs_idx =
			mcs >= MAX_MCS_11BE ? (MAX_MCS - 1) : mcs;
		break;
	default:
		break;
	}

	return dst_mcs_idx;
}
#else
static inline uint8_t
dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs)
{
	uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX;

	switch (pkt_type) {
	case DOT11_A:
		dst_mcs_idx =
			mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_B:
		dst_mcs_idx =
			mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_N:
		dst_mcs_idx =
			mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AC:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AX:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs;
		break;
	default:
		break;
	}

	return dst_mcs_idx;
}
#endif
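
/*
 * Illustration (added; not part of the original header): combining the
 * HAL-to-CDP preamble map with the MCS index helper. An in-range mcs maps
 * through unchanged; anything past the per-preamble maximum saturates to
 * MAX_MCS - 1.
 */
#if 0	/* example only, never compiled */
static inline uint8_t dp_example_mcs_idx(uint32_t hal_preamble, uint32_t mcs)
{
	enum cdp_packet_type pkt_type = hal_2_dp_pkt_type_map[hal_preamble];

	return dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
}
#endif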

#ifdef WIFI_MONITOR_SUPPORT
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc);
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc);
#else
static inline
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_rx_err_match_dhost() - function to check whether dest-mac is correct
 * @eh: Ethernet header of the incoming packet
 * @vdev: dp_vdev object of the VAP on which this data packet is received
 *
 * Return: 1 if the destination mac is correct,
 *         0 if this frame is not correctly destined to this VAP/MLD
 */
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev);

#ifdef MONITOR_MODULARIZED_ENABLE
static inline bool dp_monitor_modularized_enable(void)
{
	return TRUE;
}

static inline QDF_STATUS
dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }

static inline QDF_STATUS
dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }
#else
static inline bool dp_monitor_modularized_enable(void)
{
	return FALSE;
}

static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc)
{
	return dp_mon_soc_attach(soc);
}

static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc)
{
	return dp_mon_soc_detach(soc);
}
#endif

#ifndef WIFI_MONITOR_SUPPORT
#define MON_BUF_MIN_ENTRIES 64

static inline QDF_STATUS dp_monitor_pdev_attach(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_pdev_detach(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_peer_attach(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_E_FAILURE;
}

static inline struct cdp_peer_rate_stats_ctx*
dp_monitor_peer_get_peerstats_ctx(struct dp_soc *soc, struct dp_peer *peer)
{
	return NULL;
}

static inline
void dp_monitor_peer_reset_stats(struct dp_soc *soc, struct dp_peer *peer)
{
}

static inline
void dp_monitor_peer_get_stats(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg, enum cdp_stat_update_type type)
{
}

static inline
void dp_monitor_invalid_peer_update_pdev_stats(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_peer_get_stats_param(struct dp_soc *soc,
					   struct dp_peer *peer,
					   enum cdp_peer_stats_type type,
					   cdp_peer_stats_param_t *buf)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_pdev_deinit(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
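
/*
 * Note (added for clarity): when WIFI_MONITOR_SUPPORT is disabled, the no-op
 * stubs in this block let common data-path code call the monitor hooks
 * unconditionally. A hypothetical caller:
 */
#if 0	/* example only, never compiled */
static inline void dp_example_pdev_bringup(struct dp_pdev *pdev)
{
	/* resolves to the success-returning stub when monitor is off */
	if (dp_monitor_pdev_init(pdev) != QDF_STATUS_SUCCESS)
		qdf_print("monitor pdev init failed");
}
#endif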

static inline
QDF_STATUS dp_monitor_soc_cfg_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_config_debug_sniffer(struct dp_pdev *pdev,
							 int val)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void dp_monitor_flush_rings(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_monitor_htt_srng_setup(struct dp_soc *soc,
						   struct dp_pdev *pdev,
						   int mac_id,
						   int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_service_mon_rings(struct dp_soc *soc,
						uint32_t quota)
{
}

static inline
uint32_t dp_monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			    uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline
uint32_t dp_monitor_drop_packets_for_mac(struct dp_pdev *pdev,
					 uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline void dp_monitor_peer_tx_init(struct dp_pdev *pdev,
					   struct dp_peer *peer)
{
}

static inline void dp_monitor_peer_tx_cleanup(struct dp_vdev *vdev,
					      struct dp_peer *peer)
{
}

static inline
void dp_monitor_peer_tid_peer_id_update(struct dp_soc *soc,
					struct dp_peer *peer,
					uint16_t peer_id)
{
}

static inline void dp_monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev)
{
}

static inline void dp_monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_tx_capture_debugfs_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev,
							   struct dp_peer *peer)
{
}

static inline
QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc,
					   struct dp_tx_desc_s *desc,
					   struct hal_tx_completion_status *ts,
					   uint16_t peer_id)
{
	return QDF_STATUS_E_FAILURE;
}

static inline
QDF_STATUS monitor_update_msdu_to_list(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf)
{
	return QDF_STATUS_E_FAILURE;
}

static inline bool dp_monitor_ppdu_stats_ind_handler(struct htt_soc *soc,
						     uint32_t *msg_word,
						     qdf_nbuf_t htt_t2h_msg)
{
	return true;
}

static inline QDF_STATUS dp_monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
}

static inline void dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS dp_monitor_config_enh_tx_capture(struct dp_pdev *pdev,
							  uint32_t val)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS dp_monitor_tx_peer_filter(struct dp_pdev *pdev,
						   struct dp_peer *peer,
						   uint8_t is_tx_pkt_cap_enable,
						   uint8_t *peer_mac)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS dp_monitor_config_enh_rx_capture(struct dp_pdev *pdev,
							  uint32_t val)
{
	return QDF_STATUS_E_INVAL;
}

static inline
QDF_STATUS dp_monitor_set_bpr_enable(struct dp_pdev *pdev, uint32_t val)
{
	return QDF_STATUS_E_FAILURE;
}

static inline
int dp_monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val)
{
	return 0;
}

static inline
void dp_monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
{
}

static inline
void dp_monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
{
}

static inline
bool dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
{
	return false;
}

static inline
bool dp_monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
{
	return false;
}

static inline
bool dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
{
	return false;
}

static inline
int dp_monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
				bool enable)
{
	return 0;
}

static inline void dp_monitor_pktlogmod_exit(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline
void dp_monitor_neighbour_peers_detach(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS dp_monitor_filter_neighbour_peer(struct dp_pdev *pdev,
							  uint8_t *rx_pkt_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
{
}

static inline
void dp_monitor_reap_timer_init(struct dp_soc *soc)
{
}

static inline
void dp_monitor_reap_timer_deinit(struct dp_soc *soc)
{
}

static inline
bool dp_monitor_reap_timer_start(struct dp_soc *soc,
				 enum cdp_mon_reap_source source)
{
	return false;
}

static inline
bool dp_monitor_reap_timer_stop(struct dp_soc *soc,
				enum cdp_mon_reap_source source)
{
	return false;
}

static inline void
dp_monitor_reap_timer_suspend(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_init(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_deinit(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_start(struct dp_soc *soc)
{
}

static inline
bool dp_monitor_vdev_timer_stop(struct dp_soc *soc)
{
	return false;
}

static inline struct qdf_mem_multi_page_t*
dp_monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id)
{
	return NULL;
}

static inline struct dp_srng*
dp_monitor_get_link_desc_ring(struct dp_soc *soc, uint32_t mac_id)
{
	return NULL;
}

static inline uint32_t
dp_monitor_get_num_link_desc_ring_entries(struct dp_soc *soc)
{
	return 0;
}

static inline uint32_t *
dp_monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id)
{
	return NULL;
}

static inline QDF_STATUS dp_monitor_drop_inv_peer_pkts(struct dp_vdev *vdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
{
	return false;
}

static inline void dp_monitor_vdev_register_osif(struct dp_vdev *vdev,
						 struct ol_txrx_ops *txrx_ops)
{
}

static inline bool dp_monitor_is_vdev_timer_running(struct dp_soc *soc)
{
	return false;
}

static inline
void dp_monitor_pdev_set_mon_vdev(struct dp_vdev *vdev)
{
}

static inline void dp_monitor_vdev_delete(struct dp_soc *soc,
					  struct dp_vdev *vdev)
{
}

static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
{
}

static inline void
dp_monitor_neighbour_peer_add_ast(struct dp_pdev *pdev,
				  struct dp_peer *ta_peer,
				  uint8_t *mac_addr,
				  qdf_nbuf_t nbuf,
				  uint32_t flags)
{
}

static inline void
dp_monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band chan_band)
{
}

static inline void
dp_monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq)
{
}

static inline void dp_monitor_set_chan_num(struct dp_pdev *pdev, int chan_num)
{
}

static inline bool dp_monitor_is_enable_mcopy_mode(struct dp_pdev *pdev)
{
	return false;
}

static inline
void dp_monitor_neighbour_peer_list_remove(struct dp_pdev *pdev,
					   struct dp_vdev *vdev,
					   struct dp_neighbour_peer *peer)
{
}

static inline bool dp_monitor_is_chan_band_known(struct dp_pdev *pdev)
{
	return false;
}

static inline enum reg_wifi_band
dp_monitor_get_chan_band(struct dp_pdev *pdev)
{
	return 0;
}

static inline int
dp_monitor_get_chan_num(struct dp_pdev *pdev)
{
	return 0;
}

static inline qdf_freq_t
dp_monitor_get_chan_freq(struct dp_pdev *pdev)
{
	return 0;
}

static inline void dp_monitor_get_mpdu_status(struct dp_pdev *pdev,
					      struct dp_soc *soc,
					      uint8_t *rx_tlv_hdr)
{
}

static inline void dp_monitor_print_tx_stats(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_mcopy_check_deliver(struct dp_pdev *pdev,
					  uint16_t peer_id, uint32_t ppdu_id,
					  uint8_t first_msdu)
{
	return QDF_STATUS_SUCCESS;
}

static inline bool dp_monitor_is_enable_tx_sniffer(struct dp_pdev *pdev)
{
	return false;
}

static inline struct dp_vdev*
dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev)
{
	return NULL;
}

static inline QDF_STATUS dp_monitor_check_com_info_ppdu_id(struct dp_pdev *pdev,
							   void *rx_desc)
{
	return QDF_STATUS_E_FAILURE;
}

static inline struct mon_rx_status*
dp_monitor_get_rx_status(struct dp_pdev *pdev)
{
	return NULL;
}

static inline
void dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev *pdev, bool val)
{
}

static inline
void dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev *pdev,
						      bool val)
{
}

static inline QDF_STATUS
dp_monitor_peer_tx_capture_get_stats(struct dp_soc *soc, struct dp_peer *peer,
				     struct cdp_peer_tx_capture_stats *stats)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS
dp_monitor_pdev_tx_capture_get_stats(struct dp_soc *soc, struct dp_pdev *pdev,
				     struct cdp_pdev_tx_capture_stats *stats)
{
	return QDF_STATUS_E_FAILURE;
}

#ifdef DP_POWER_SAVE
static inline
void dp_monitor_pktlog_reap_pending_frames(struct dp_pdev *pdev)
{
}

static inline
void dp_monitor_pktlog_start_reap_timer(struct dp_pdev *pdev)
{
}
#endif

static inline bool dp_monitor_is_configured(struct dp_pdev *pdev)
{
	return false;
}

static inline void
dp_mon_rx_hdr_length_set(struct dp_soc *soc, uint32_t *msg_word,
			 struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void dp_monitor_soc_init(struct dp_soc *soc)
{
}

static inline void dp_monitor_soc_deinit(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_monitor_config_undecoded_metadata_capture(struct dp_pdev *pdev,
							int val)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_monitor_config_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev,
						      int mask1, int mask2)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_monitor_get_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev,
						   int *mask, int *mask_cont)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_soc_htt_srng_setup(struct dp_soc *soc)
{
	return QDF_STATUS_E_FAILURE;
}

static inline bool dp_is_monitor_mode_using_poll(struct dp_soc *soc)
{
	return false;
}

static inline
uint32_t dp_tx_mon_buf_refill(struct dp_intr *int_ctx)
{
	return 0;
}

static inline uint32_t
dp_tx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
		  uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline uint32_t
dp_print_txmon_ring_stat_from_hal(struct dp_pdev *pdev)
{
	return 0;
}

static inline
uint32_t dp_rx_mon_buf_refill(struct dp_intr *int_ctx)
{
	return 0;
}

static inline bool dp_monitor_is_tx_cap_enabled(struct dp_peer *peer)
{
	return false;
}

static inline bool dp_monitor_is_rx_cap_enabled(struct dp_peer *peer)
{
	return false;
}

static inline void
dp_rx_mon_enable(struct dp_soc *soc, uint32_t *msg_word,
		 struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_packet_length_set(struct dp_soc *soc, uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_mpdu_logging(struct dp_soc *soc, uint32_t *msg_word,
			      struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_wmask_subscribe(struct dp_soc *soc,
			  uint32_t *msg_word, int pdev_id,
			  struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_mac_filter_set(struct dp_soc *soc, uint32_t *msg_word,
			 struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_pkt_tlv_offset(struct dp_soc *soc, uint32_t *msg_word,
				struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_fpmo(struct dp_soc *soc, uint32_t *msg_word,
		      struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

#ifdef WLAN_CONFIG_TELEMETRY_AGENT
static inline
void dp_monitor_peer_telemetry_stats(struct dp_peer *peer,
				     struct cdp_peer_telemetry_stats *stats)
{
}

static inline
void dp_monitor_peer_deter_stats(struct dp_peer *peer,
				 struct cdp_peer_telemetry_stats *stats)
{
}
#endif /* WLAN_CONFIG_TELEMETRY_AGENT */
#endif /* !WIFI_MONITOR_SUPPORT */

/**
 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to dp soc handle
 * @psoc: CDP psoc handle
 *
 * Return: struct dp_soc pointer
 */
static inline
struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc)
{
	return (struct dp_soc *)psoc;
}

#define DP_MAX_TIMER_EXEC_TIME_TICKS \
		(QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20)

/**
 * enum timer_yield_status - yield status code used in monitor mode timer.
 * @DP_TIMER_NO_YIELD: do not yield
 * @DP_TIMER_WORK_DONE: yield because work is done
 * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
 * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
 */
enum timer_yield_status {
	DP_TIMER_NO_YIELD,
	DP_TIMER_WORK_DONE,
	DP_TIMER_WORK_EXHAUST,
	DP_TIMER_TIME_EXHAUST,
};

#if DP_PRINT_ENABLE
#include <qdf_types.h> /* qdf_vprint */
#include <cdp_txrx_handle.h>

enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	DP_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	DP_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	DP_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	DP_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	DP_PRINT_LEVEL_INFO2,
};

#define dp_print(level, fmt, ...) do { \
	if (level <= g_txrx_print_level) \
		qdf_print(fmt, ## __VA_ARGS__); \
} while (0)

#define DP_PRINT(level, fmt, ...) do { \
	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */

#define DP_TRACE(LVL, fmt, args ...) \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
		  fmt, ## args)

#ifdef WLAN_SYSFS_DP_STATS
void DP_PRINT_STATS(const char *fmt, ...);
#else /* WLAN_SYSFS_DP_STATS */
#ifdef DP_PRINT_NO_CONSOLE
/* Stat prints should not go to console or kernel logs. */
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, \
		  fmt, ## args)
#else
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\
		  fmt, ## args)
#endif
#endif /* WLAN_SYSFS_DP_STATS */

#define DP_STATS_INIT(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_TXRX_PEER_STATS_INIT(_handle, size) \
	qdf_mem_zero(&((_handle)->stats[0]), size)

#define DP_STATS_CLR(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_TXRX_PEER_STATS_CLR(_handle, size) \
	qdf_mem_zero(&((_handle)->stats[0]), size)

#ifndef DISABLE_DP_STATS
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link) \
{ \
	if (likely(_handle)) \
		_handle->stats[_link]._field += _delta; \
}

#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field += _delta; \
}

#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats[_link]._field += _delta; \
}

#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field -= _delta; \
}

#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field -= _delta; \
}

#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field = _delta; \
}

#define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link) \
{ \
	if (likely(_handle)) \
		_handle->stats[_link]._field = _delta; \
}

#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_STATS_INC(_handle, _field.num, _count); \
	DP_STATS_INC(_handle, _field.bytes, _bytes) \
}

#define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_PEER_STATS_FLAT_INC(_handle, _field.num, _count); \
	DP_PEER_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \
}

#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}

#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field += _handle_b->stats._field; \
}

#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
{ \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
}

#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field = _handle_b->stats._field; \
}

#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link)
#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#endif
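
/*
 * Illustration (added; not part of the original header): typical use of the
 * counter macros above. The stats fields shown are illustrative only.
 */
#if 0	/* example only, never compiled */
static inline void dp_example_count_rx(struct dp_pdev *pdev, uint32_t bytes)
{
	/* both macros compile to nothing when DISABLE_DP_STATS is set */
	DP_STATS_INC(pdev, rx.to_stack.num, 1);
	DP_STATS_INC_PKT(pdev, rx.to_stack, 1, bytes);
}
#endif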

#define DP_PEER_PER_PKT_STATS_INC(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_INC(_handle, per_pkt_stats._field, _delta, _link); \
}

#define DP_PEER_PER_PKT_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	DP_PEER_LINK_STATS_INCC(_handle, per_pkt_stats._field, _delta, _cond, _link); \
}

#define DP_PEER_PER_PKT_STATS_INC_PKT(_handle, _field, _count, _bytes, _link) \
{ \
	DP_PEER_PER_PKT_STATS_INC(_handle, _field.num, _count, _link); \
	DP_PEER_PER_PKT_STATS_INC(_handle, _field.bytes, _bytes, _link) \
}

#define DP_PEER_PER_PKT_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond, _link) \
{ \
	DP_PEER_PER_PKT_STATS_INCC(_handle, _field.num, _count, _cond, _link); \
	DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond, _link) \
}

#define DP_PEER_PER_PKT_STATS_UPD(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_UPD(_handle, per_pkt_stats._field, _delta, _link); \
}

#ifndef QCA_ENHANCED_STATS_SUPPORT
#define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_INC(_handle, extd_stats._field, _delta, _link); \
}

#define DP_PEER_EXTD_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	DP_PEER_LINK_STATS_INCC(_handle, extd_stats._field, _delta, _cond, _link); \
}

#define DP_PEER_EXTD_STATS_UPD(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_UPD(_handle, extd_stats._field, _delta, _link); \
}
#endif

#if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
	defined(QCA_ENHANCED_STATS_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \
}

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \
}

#define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \
}
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \
}

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \
}

#define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \
}
#else
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
	DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes);

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
	DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count);

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
	DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link);

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
	DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link);

#define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
	DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link);
#endif

#ifdef ENABLE_DP_HIST_STATS
#define DP_HIST_INIT() \
	uint32_t num_of_packets[MAX_PDEV_CNT] = {0};

#define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
{ \
	++num_of_packets[_pdev_id]; \
}

#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do { \
		if (_p_cntrs == 1) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_1, 1); \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_2_20, 1); \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_21_40, 1); \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_41_60, 1); \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_61_80, 1); \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_81_100, 1); \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_101_200, 1); \
		} else if (_p_cntrs > 200) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_201_plus, 1); \
		} \
	} while (0)

#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do { \
		if (_p_cntrs == 1) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_1, 1); \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_2_20, 1); \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_21_40, 1); \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_41_60, 1); \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_61_80, 1); \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_81_100, 1); \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_101_200, 1); \
		} else if (_p_cntrs > 200) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_201_plus, 1); \
		} \
	} while (0)

#define DP_TX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
		     hist_stats++) { \
			DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					       num_of_packets[hist_stats]); \
		} \
	} while (0)

#define DP_RX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
		     hist_stats++) { \
			DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					       num_of_packets[hist_stats]); \
		} \
	} while (0)

#else
#define DP_HIST_INIT()
#define DP_HIST_PACKET_COUNT_INC(_pdev_id)
#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HIST_STATS_PER_PDEV()
#define DP_TX_HIST_STATS_PER_PDEV()
#endif /* ENABLE_DP_HIST_STATS */

#define FRAME_MASK_IPV4_ARP 1
#define FRAME_MASK_IPV4_DHCP 2
#define FRAME_MASK_IPV4_EAPOL 4
#define FRAME_MASK_IPV6_DHCP 8

static inline int dp_log2_ceil(unsigned int value)
{
	unsigned int tmp = value;
	int log2 = -1;

	if (qdf_unlikely(value == 0))
		return 0;
	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if (1 << log2 != value)
		log2++;
	return log2;
}
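
/*
 * Illustration (added; not part of the original header): dp_log2_ceil()
 * returns the smallest n such that (1 << n) >= value.
 */
#if 0	/* example only, never compiled */
static inline void dp_example_log2_ceil(void)
{
	/* dp_log2_ceil(1) == 0, dp_log2_ceil(4) == 2,
	 * dp_log2_ceil(5) == 3, dp_log2_ceil(4096) == 12
	 */
	int ring_order = dp_log2_ceil(5);

	(void)ring_order;	/* 3: smallest n with (1 << n) >= 5 */
}
#endif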

#ifdef QCA_SUPPORT_PEER_ISOLATION
#define dp_get_peer_isolation(_peer) ((_peer)->isolation)

static inline void dp_set_peer_isolation(struct dp_txrx_peer *txrx_peer,
					 bool val)
{
	txrx_peer->isolation = val;
}

#else
#define dp_get_peer_isolation(_peer) (0)

static inline void dp_set_peer_isolation(struct dp_txrx_peer *peer, bool val)
{
}
#endif /* QCA_SUPPORT_PEER_ISOLATION */

bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev);

#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer)
{
	txrx_peer->wds_ext.osif_peer = NULL;
	txrx_peer->wds_ext.init = 0;
}
#else
static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer)
{
}
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#ifdef QCA_HOST2FW_RXBUF_RING
static inline
struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
	return &pdev->rx_mac_buf_ring[lmac_id];
}
#else
static inline
struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
	return &pdev->soc->rx_refill_buf_ring[lmac_id];
}
#endif

/*
 * The lmac ID for a particular channel band is fixed.
 * 2.4 GHz band uses lmac_id = 1
 * 5 GHz/6 GHz band uses lmac_id = 0
 */
#define DP_INVALID_LMAC_ID (-1)
#define DP_MON_INVALID_LMAC_ID (-1)
#define DP_MAC0_LMAC_ID 0
#define DP_MAC1_LMAC_ID 1
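
/*
 * Illustration (added; not part of the original header): selecting the lmac
 * for a band per the fixed mapping documented above.
 */
#if 0	/* example only, never compiled */
static inline int dp_example_lmac_for_band(bool is_2ghz)
{
	return is_2ghz ? DP_MAC1_LMAC_ID : DP_MAC0_LMAC_ID;
}
#endif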

#ifdef FEATURE_TSO_STATS
/**
 * dp_init_tso_stats() - Clear tso stats
 * @pdev: pdev handle
 *
 * Return: None
 */
static inline
void dp_init_tso_stats(struct dp_pdev *pdev)
{
	if (pdev) {
		qdf_mem_zero(&((pdev)->stats.tso_stats),
			     sizeof((pdev)->stats.tso_stats));
		qdf_atomic_init(&pdev->tso_idx);
	}
}

/**
 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram
 * @pdev: pdev handle
 * @_p_cntrs: number of tso segments for a tso packet
 *
 * Return: None
 */
void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
					   uint8_t _p_cntrs);

/**
 * dp_tso_segment_update() - Collect tso segment information
 * @pdev: pdev handle
 * @stats_idx: tso packet number
 * @idx: tso segment number
 * @seg: tso segment
 *
 * Return: None
 */
void dp_tso_segment_update(struct dp_pdev *pdev,
			   uint32_t stats_idx,
			   uint8_t idx,
			   struct qdf_tso_seg_t seg);

/**
 * dp_tso_packet_update() - TSO Packet information
 * @pdev: pdev handle
 * @stats_idx: tso packet number
 * @msdu: nbuf handle
 * @num_segs: tso segments
 *
 * Return: None
 */
void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
			  qdf_nbuf_t msdu, uint16_t num_segs);

/**
 * dp_tso_segment_stats_update() - TSO Segment stats
 * @pdev: pdev handle
 * @stats_seg: tso segment list
 * @stats_idx: tso packet number
 *
 * Return: None
 */
void dp_tso_segment_stats_update(struct dp_pdev *pdev,
				 struct qdf_tso_seg_elem_t *stats_seg,
				 uint32_t stats_idx);

/**
 * dp_print_tso_stats() - dump tso statistics
 * @soc: soc handle
 * @level: verbosity level
 *
 * Return: None
 */
void dp_print_tso_stats(struct dp_soc *soc,
			enum qdf_stats_verbosity_level level);

/**
 * dp_txrx_clear_tso_stats() - clear tso stats
 * @soc: soc handle
 *
 * Return: None
 */
void dp_txrx_clear_tso_stats(struct dp_soc *soc);
#else
static inline
void dp_init_tso_stats(struct dp_pdev *pdev)
{
}

static inline
void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
					   uint8_t _p_cntrs)
{
}

static inline
void dp_tso_segment_update(struct dp_pdev *pdev,
			   uint32_t stats_idx,
			   uint8_t idx,
			   struct qdf_tso_seg_t seg)
{
}

static inline
void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
			  qdf_nbuf_t msdu, uint16_t num_segs)
{
}

static inline
void dp_tso_segment_stats_update(struct dp_pdev *pdev,
				 struct qdf_tso_seg_elem_t *stats_seg,
				 uint32_t stats_idx)
{
}

static inline
void dp_print_tso_stats(struct dp_soc *soc,
			enum qdf_stats_verbosity_level level)
{
}

static inline
void dp_txrx_clear_tso_stats(struct dp_soc *soc)
{
}
#endif /* FEATURE_TSO_STATS */

/**
 * dp_txrx_get_peer_per_pkt_stats_param() - Get peer per pkt stats param
 * @peer: DP peer handle
 * @type: Requested stats type
 * @buf: Buffer to hold the value
 *
 * Return: status success/failure
 */
QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer,
						enum cdp_peer_stats_type type,
						cdp_peer_stats_param_t *buf);

/**
 * dp_txrx_get_peer_extd_stats_param() - Get peer extd stats param
 * @peer: DP peer handle
 * @type: Requested stats type
 * @buf: Buffer to hold the value
 *
 * Return: status success/failure
 */
QDF_STATUS dp_txrx_get_peer_extd_stats_param(struct dp_peer *peer,
					     enum cdp_peer_stats_type type,
					     cdp_peer_stats_param_t *buf);

#define DP_HTT_T2H_HP_PIPE 5

/**
 * dp_update_pdev_stats(): Update the pdev stats
 * @tgtobj: pdev handle
 * @srcobj: vdev stats structure
 *
 * Update the pdev stats from the specified vdev stats
 *
 * Return: None
 */
void dp_update_pdev_stats(struct dp_pdev *tgtobj,
			  struct cdp_vdev_stats *srcobj);

/**
 * dp_update_vdev_ingress_stats(): Update the vdev ingress stats
 * @tgtobj: vdev handle
 *
 * Update the vdev ingress stats
 *
 * Return: None
 */
void dp_update_vdev_ingress_stats(struct dp_vdev *tgtobj);

/**
 * dp_update_vdev_rate_stats() - Update the vdev rate stats
 * @tgtobj: tgt buffer for vdev stats
 * @srcobj: srcobj vdev stats
 *
 * Return: None
 */
void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj,
			       struct cdp_vdev_stats *srcobj);

/**
 * dp_update_pdev_ingress_stats(): Update the pdev ingress stats
 * @tgtobj: pdev handle
 * @srcobj: vdev stats structure
 *
 * Update the pdev ingress stats from the specified vdev stats
 *
 * Return: None
 */
void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
				  struct dp_vdev *srcobj);

/**
 * dp_update_vdev_stats(): Update the vdev stats
 * @soc: soc handle
 * @srcobj: DP_PEER object
 * @arg: pointer to vdev stats structure
 *
 * Update the vdev stats from the specified peer stats
 *
 * Return: None
 */
void dp_update_vdev_stats(struct dp_soc *soc,
			  struct dp_peer *srcobj,
			  void *arg);
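
/*
 * Illustration (added; not part of the original header): dp_update_vdev_stats()
 * has the shape of a per-peer iterator callback; a hypothetical caller could
 * aggregate every peer of a vdev into one cdp_vdev_stats buffer like this:
 */
#if 0	/* example only, never compiled */
static inline void dp_example_aggregate_peer(struct dp_soc *soc,
					     struct dp_peer *peer,
					     struct cdp_vdev_stats *vdev_stats)
{
	/* invoked once per peer by some iteration helper */
	dp_update_vdev_stats(soc, peer, vdev_stats);
}
#endif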

/**
 * dp_update_vdev_stats_on_peer_unmap() - Update the vdev stats on peer unmap
 * @vdev: DP_VDEV handle
 * @peer: DP_PEER handle
 *
 * Return: None
 */
void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
					struct dp_peer *peer);

#ifdef IPA_OFFLOAD
#define DP_IPA_UPDATE_RX_STATS(__tgtobj, __srcobj) \
{ \
	DP_STATS_AGGR_PKT(__tgtobj, __srcobj, rx.rx_total); \
}

#define DP_IPA_UPDATE_PER_PKT_RX_STATS(__tgtobj, __srcobj) \
{ \
	(__tgtobj)->rx.rx_total.num += (__srcobj)->rx.rx_total.num; \
	(__tgtobj)->rx.rx_total.bytes += (__srcobj)->rx.rx_total.bytes; \
}
#else
#define DP_IPA_UPDATE_PER_PKT_RX_STATS(tgtobj, srcobj)

#define DP_IPA_UPDATE_RX_STATS(tgtobj, srcobj)
#endif

#define DP_UPDATE_STATS(_tgtobj, _srcobj)	\
	do {				\
		uint8_t i;		\
		uint8_t pream_type;	\
		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
			for (i = 0; i < MAX_MCS; i++) { \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					tx.pkt_type[pream_type].mcs_count[i]); \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					rx.pkt_type[pream_type].mcs_count[i]); \
			} \
		} \
		\
		for (i = 0; i < MAX_BW; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
		} \
		\
		for (i = 0; i < SS_COUNT; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \
		} \
		for (i = 0; i < WME_AC_MAX; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, \
				      tx.wme_ac_type_bytes[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, \
				      rx.wme_ac_type_bytes[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \
		} \
		\
		for (i = 0; i < MAX_GI; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
		} \
		\
		for (i = 0; i < MAX_RECEPTION_TYPES; i++) \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \
		\
		if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) { \
			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
		} \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_queue_disable); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_no_match); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_threshold); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_link_desc_na); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.mcast_vdev_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_rr); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_total); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_success); \
		\
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.fcserr); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.pn_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.oor_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.jump_2k_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.rxdma_wifi_parse_err); \
		if (_srcobj->stats.rx.snr != 0) \
			DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \
		DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
		\
		for (i = 0; i < CDP_MAX_RX_RINGS; i++) \
			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
		\
		for (i = 0; i < CDP_MAX_LMACS; i++) \
			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rx_lmac[i]); \
		\
		_srcobj->stats.rx.unicast.num = \
			_srcobj->stats.rx.to_stack.num - \
			_srcobj->stats.rx.multicast.num; \
		_srcobj->stats.rx.unicast.bytes = \
			_srcobj->stats.rx.to_stack.bytes - \
			_srcobj->stats.rx.multicast.bytes; \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \
		\
		_tgtobj->stats.tx.last_ack_rssi = \
			_srcobj->stats.tx.last_ack_rssi; \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.peer_unauth_rx_pkt_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.policy_check_drop); \
		DP_IPA_UPDATE_RX_STATS(_tgtobj, _srcobj); \
	} while (0)
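
/*
 * Illustration (added; not part of the original header): DP_UPDATE_STATS
 * expects both handles to expose a 'stats' member and relies on a 'soc'
 * variable being in scope at the expansion site (see the
 * wlan_cfg_get_vdev_stats_hw_offload_config() check above). A hypothetical
 * caller:
 */
#if 0	/* example only, never compiled */
static inline void dp_example_aggr(struct dp_soc *soc,
				   struct dp_pdev *pdev,
				   struct dp_peer *peer)
{
	/* folds one peer's accumulated counters into the pdev totals;
	 * 'soc' must be visible here for the macro to compile
	 */
	DP_UPDATE_STATS(pdev, peer);
}
#endif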

#ifdef VDEV_PEER_PROTOCOL_COUNT
#define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) \
{ \
	uint8_t j; \
	for (j = 0; j < CDP_TRACE_MAX; j++) { \
		_tgtobj->tx.protocol_trace_cnt[j].egress_cnt += \
			_srcobj->tx.protocol_trace_cnt[j].egress_cnt; \
		_tgtobj->tx.protocol_trace_cnt[j].ingress_cnt += \
			_srcobj->tx.protocol_trace_cnt[j].ingress_cnt; \
		_tgtobj->rx.protocol_trace_cnt[j].egress_cnt += \
			_srcobj->rx.protocol_trace_cnt[j].egress_cnt; \
		_tgtobj->rx.protocol_trace_cnt[j].ingress_cnt += \
			_srcobj->rx.protocol_trace_cnt[j].ingress_cnt; \
	} \
}
#else
#define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj)
#endif

#ifdef WLAN_FEATURE_11BE
#define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) \
	do { \
		uint8_t i, mu_type; \
		for (i = 0; i < MAX_MCS; i++) { \
			_tgtobj->tx.su_be_ppdu_cnt.mcs_count[i] += \
				_srcobj->tx.su_be_ppdu_cnt.mcs_count[i]; \
			_tgtobj->rx.su_be_ppdu_cnt.mcs_count[i] += \
				_srcobj->rx.su_be_ppdu_cnt.mcs_count[i]; \
		} \
		for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \
			for (i = 0; i < MAX_MCS; i++) { \
				_tgtobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \
					_srcobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \
				_tgtobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \
					_srcobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \
			} \
		} \
		for (i = 0; i < MAX_PUNCTURED_MODE; i++) { \
			_tgtobj->tx.punc_bw[i] += _srcobj->tx.punc_bw[i]; \
			_tgtobj->rx.punc_bw[i] += _srcobj->rx.punc_bw[i]; \
		} \
	} while (0)
#else
#define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj)
#endif
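
/*
 * Illustration (added; not part of the original header): DP_UPDATE_11BE_STATS
 * is a field-wise accumulator over per-MCS and per-MU-type PPDU counters; the
 * handles must expose matching tx/rx members. The argument type below is
 * hypothetical.
 */
#if 0	/* example only, never compiled; 'stats' type is illustrative */
static inline void dp_example_update_11be(struct cdp_peer_extd_stats *dst,
					  struct cdp_peer_extd_stats *src)
{
	/* no-op when WLAN_FEATURE_11BE is not defined */
	DP_UPDATE_11BE_STATS(dst, src);
}
#endif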

#define DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj) \
	do { \
		_tgtobj->tx.comp_pkt.num += _srcobj->tx.comp_pkt.num; \
		_tgtobj->tx.comp_pkt.bytes += _srcobj->tx.comp_pkt.bytes; \
		_tgtobj->tx.tx_failed += _srcobj->tx.tx_failed; \
		_tgtobj->rx.to_stack.num += _srcobj->rx.to_stack.num; \
		_tgtobj->rx.to_stack.bytes += _srcobj->rx.to_stack.bytes; \
	} while (0)

#define DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj) \
	do { \
		uint8_t i; \
		_tgtobj->tx.ucast.num += _srcobj->tx.ucast.num; \
		_tgtobj->tx.ucast.bytes += _srcobj->tx.ucast.bytes; \
		_tgtobj->tx.mcast.num += _srcobj->tx.mcast.num; \
		_tgtobj->tx.mcast.bytes += _srcobj->tx.mcast.bytes; \
		_tgtobj->tx.bcast.num += _srcobj->tx.bcast.num; \
		_tgtobj->tx.bcast.bytes += _srcobj->tx.bcast.bytes; \
		_tgtobj->tx.nawds_mcast.num += _srcobj->tx.nawds_mcast.num; \
		_tgtobj->tx.nawds_mcast.bytes += \
					_srcobj->tx.nawds_mcast.bytes; \
		_tgtobj->tx.tx_success.num += _srcobj->tx.tx_success.num; \
		_tgtobj->tx.tx_success.bytes += _srcobj->tx.tx_success.bytes; \
		_tgtobj->tx.nawds_mcast_drop += _srcobj->tx.nawds_mcast_drop; \
		_tgtobj->tx.ofdma += _srcobj->tx.ofdma; \
		_tgtobj->tx.non_amsdu_cnt += _srcobj->tx.non_amsdu_cnt; \
		_tgtobj->tx.amsdu_cnt += _srcobj->tx.amsdu_cnt; \
		_tgtobj->tx.dropped.fw_rem.num += \
					_srcobj->tx.dropped.fw_rem.num; \
		_tgtobj->tx.dropped.fw_rem.bytes += \
					_srcobj->tx.dropped.fw_rem.bytes; \
		_tgtobj->tx.dropped.fw_rem_notx += \
					_srcobj->tx.dropped.fw_rem_notx; \
		_tgtobj->tx.dropped.fw_rem_tx += \
					_srcobj->tx.dropped.fw_rem_tx; \
		_tgtobj->tx.dropped.age_out += _srcobj->tx.dropped.age_out; \
		_tgtobj->tx.dropped.fw_reason1 += \
					_srcobj->tx.dropped.fw_reason1; \
		_tgtobj->tx.dropped.fw_reason2 += \
					_srcobj->tx.dropped.fw_reason2; \
		_tgtobj->tx.dropped.fw_reason3 += \
					_srcobj->tx.dropped.fw_reason3; \
		_tgtobj->tx.dropped.fw_rem_queue_disable += \
					_srcobj->tx.dropped.fw_rem_queue_disable; \
		_tgtobj->tx.dropped.fw_rem_no_match += \
					_srcobj->tx.dropped.fw_rem_no_match; \
		_tgtobj->tx.dropped.drop_threshold += \
					_srcobj->tx.dropped.drop_threshold; \
		_tgtobj->tx.dropped.drop_link_desc_na += \
					_srcobj->tx.dropped.drop_link_desc_na; \
		_tgtobj->tx.dropped.invalid_drop += \
					_srcobj->tx.dropped.invalid_drop; \
		_tgtobj->tx.dropped.mcast_vdev_drop += \
					_srcobj->tx.dropped.mcast_vdev_drop; \
		_tgtobj->tx.dropped.invalid_rr += \
					_srcobj->tx.dropped.invalid_rr; \
		_tgtobj->tx.failed_retry_count += \
					_srcobj->tx.failed_retry_count; \
		_tgtobj->tx.retry_count += _srcobj->tx.retry_count; \
\ 1998 _srcobj->tx.multiple_retry_count; \ 1999 _tgtobj->tx.tx_success_twt.num += \ 2000 _srcobj->tx.tx_success_twt.num; \ 2001 _tgtobj->tx.tx_success_twt.bytes += \ 2002 _srcobj->tx.tx_success_twt.bytes; \ 2003 _tgtobj->tx.last_tx_ts = _srcobj->tx.last_tx_ts; \ 2004 _tgtobj->tx.release_src_not_tqm += \ 2005 _srcobj->tx.release_src_not_tqm; \ 2006 for (i = 0; i < QDF_PROTO_SUBTYPE_MAX; i++) { \ 2007 _tgtobj->tx.no_ack_count[i] += \ 2008 _srcobj->tx.no_ack_count[i];\ 2009 } \ 2010 \ 2011 _tgtobj->rx.multicast.num += _srcobj->rx.multicast.num; \ 2012 _tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \ 2013 _tgtobj->rx.rx_success.num += _srcobj->rx.rx_success.num;\ 2014 _tgtobj->rx.rx_success.bytes += _srcobj->rx.rx_success.bytes;\ 2015 _tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \ 2016 _tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \ 2017 _tgtobj->rx.unicast.num += _srcobj->rx.unicast.num; \ 2018 _tgtobj->rx.unicast.bytes += _srcobj->rx.unicast.bytes; \ 2019 _tgtobj->rx.raw.num += _srcobj->rx.raw.num; \ 2020 _tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \ 2021 _tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \ 2022 _tgtobj->rx.mcast_3addr_drop += _srcobj->rx.mcast_3addr_drop; \ 2023 _tgtobj->rx.mec_drop.num += _srcobj->rx.mec_drop.num; \ 2024 _tgtobj->rx.mec_drop.bytes += _srcobj->rx.mec_drop.bytes; \ 2025 _tgtobj->rx.ppeds_drop.num += _srcobj->rx.ppeds_drop.num; \ 2026 _tgtobj->rx.ppeds_drop.bytes += _srcobj->rx.ppeds_drop.bytes; \ 2027 _tgtobj->rx.intra_bss.pkts.num += \ 2028 _srcobj->rx.intra_bss.pkts.num; \ 2029 _tgtobj->rx.intra_bss.pkts.bytes += \ 2030 _srcobj->rx.intra_bss.pkts.bytes; \ 2031 _tgtobj->rx.intra_bss.fail.num += \ 2032 _srcobj->rx.intra_bss.fail.num; \ 2033 _tgtobj->rx.intra_bss.fail.bytes += \ 2034 _srcobj->rx.intra_bss.fail.bytes; \ 2035 _tgtobj->rx.intra_bss.mdns_no_fwd += \ 2036 _srcobj->rx.intra_bss.mdns_no_fwd; \ 2037 _tgtobj->rx.err.mic_err += _srcobj->rx.err.mic_err; \ 2038 _tgtobj->rx.err.decrypt_err += _srcobj->rx.err.decrypt_err; \ 2039 _tgtobj->rx.err.fcserr += _srcobj->rx.err.fcserr; \ 2040 _tgtobj->rx.err.pn_err += _srcobj->rx.err.pn_err; \ 2041 _tgtobj->rx.err.oor_err += _srcobj->rx.err.oor_err; \ 2042 _tgtobj->rx.err.jump_2k_err += _srcobj->rx.err.jump_2k_err; \ 2043 _tgtobj->rx.err.rxdma_wifi_parse_err += \ 2044 _srcobj->rx.err.rxdma_wifi_parse_err; \ 2045 _tgtobj->rx.non_amsdu_cnt += _srcobj->rx.non_amsdu_cnt; \ 2046 _tgtobj->rx.amsdu_cnt += _srcobj->rx.amsdu_cnt; \ 2047 _tgtobj->rx.rx_retries += _srcobj->rx.rx_retries; \ 2048 _tgtobj->rx.multipass_rx_pkt_drop += \ 2049 _srcobj->rx.multipass_rx_pkt_drop; \ 2050 _tgtobj->rx.peer_unauth_rx_pkt_drop += \ 2051 _srcobj->rx.peer_unauth_rx_pkt_drop; \ 2052 _tgtobj->rx.policy_check_drop += \ 2053 _srcobj->rx.policy_check_drop; \ 2054 _tgtobj->rx.to_stack_twt.num += _srcobj->rx.to_stack_twt.num; \ 2055 _tgtobj->rx.to_stack_twt.bytes += \ 2056 _srcobj->rx.to_stack_twt.bytes; \ 2057 _tgtobj->rx.last_rx_ts = _srcobj->rx.last_rx_ts; \ 2058 for (i = 0; i < CDP_MAX_RX_RINGS; i++) { \ 2059 _tgtobj->rx.rcvd_reo[i].num += \ 2060 _srcobj->rx.rcvd_reo[i].num; \ 2061 _tgtobj->rx.rcvd_reo[i].bytes += \ 2062 _srcobj->rx.rcvd_reo[i].bytes; \ 2063 _tgtobj->rx.rcvd.num += \ 2064 _srcobj->rx.rcvd_reo[i].num; \ 2065 _tgtobj->rx.rcvd.bytes += \ 2066 _srcobj->rx.rcvd_reo[i].bytes; \ 2067 } \ 2068 for (i = 0; i < CDP_MAX_LMACS; i++) { \ 2069 _tgtobj->rx.rx_lmac[i].num += \ 2070 _srcobj->rx.rx_lmac[i].num; \ 2071 _tgtobj->rx.rx_lmac[i].bytes += \ 2072 _srcobj->rx.rx_lmac[i].bytes; \ 2073 } \ 2074 
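/* Fold in feature-dependent Rx counters; these helpers typically \
 * compile to no-ops when IPA / protocol-count support is disabled. \
 */ \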
DP_IPA_UPDATE_PER_PKT_RX_STATS(_tgtobj, _srcobj); \ 2075 DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj); \ 2076 } while (0) 2077 2078 #define DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj) \ 2079 do { \ 2080 uint8_t i, pream_type, mu_type; \ 2081 _tgtobj->tx.stbc += _srcobj->tx.stbc; \ 2082 _tgtobj->tx.ldpc += _srcobj->tx.ldpc; \ 2083 _tgtobj->tx.retries += _srcobj->tx.retries; \ 2084 _tgtobj->tx.ampdu_cnt += _srcobj->tx.ampdu_cnt; \ 2085 _tgtobj->tx.non_ampdu_cnt += _srcobj->tx.non_ampdu_cnt; \ 2086 _tgtobj->tx.num_ppdu_cookie_valid += \ 2087 _srcobj->tx.num_ppdu_cookie_valid; \ 2088 _tgtobj->tx.tx_ppdus += _srcobj->tx.tx_ppdus; \ 2089 _tgtobj->tx.tx_mpdus_success += _srcobj->tx.tx_mpdus_success; \ 2090 _tgtobj->tx.tx_mpdus_tried += _srcobj->tx.tx_mpdus_tried; \ 2091 _tgtobj->tx.tx_rate = _srcobj->tx.tx_rate; \ 2092 _tgtobj->tx.last_tx_rate = _srcobj->tx.last_tx_rate; \ 2093 _tgtobj->tx.last_tx_rate_mcs = _srcobj->tx.last_tx_rate_mcs; \ 2094 _tgtobj->tx.mcast_last_tx_rate = \ 2095 _srcobj->tx.mcast_last_tx_rate; \ 2096 _tgtobj->tx.mcast_last_tx_rate_mcs = \ 2097 _srcobj->tx.mcast_last_tx_rate_mcs; \ 2098 _tgtobj->tx.rnd_avg_tx_rate = _srcobj->tx.rnd_avg_tx_rate; \ 2099 _tgtobj->tx.avg_tx_rate = _srcobj->tx.avg_tx_rate; \ 2100 _tgtobj->tx.tx_ratecode = _srcobj->tx.tx_ratecode; \ 2101 _tgtobj->tx.pream_punct_cnt += _srcobj->tx.pream_punct_cnt; \ 2102 _tgtobj->tx.ru_start = _srcobj->tx.ru_start; \ 2103 _tgtobj->tx.ru_tones = _srcobj->tx.ru_tones; \ 2104 _tgtobj->tx.last_ack_rssi = _srcobj->tx.last_ack_rssi; \ 2105 _tgtobj->tx.nss_info = _srcobj->tx.nss_info; \ 2106 _tgtobj->tx.mcs_info = _srcobj->tx.mcs_info; \ 2107 _tgtobj->tx.bw_info = _srcobj->tx.bw_info; \ 2108 _tgtobj->tx.gi_info = _srcobj->tx.gi_info; \ 2109 _tgtobj->tx.preamble_info = _srcobj->tx.preamble_info; \ 2110 _tgtobj->tx.retries_mpdu += _srcobj->tx.retries_mpdu; \ 2111 _tgtobj->tx.mpdu_success_with_retries += \ 2112 _srcobj->tx.mpdu_success_with_retries; \ 2113 _tgtobj->tx.rts_success = _srcobj->tx.rts_success; \ 2114 _tgtobj->tx.rts_failure = _srcobj->tx.rts_failure; \ 2115 _tgtobj->tx.bar_cnt = _srcobj->tx.bar_cnt; \ 2116 _tgtobj->tx.ndpa_cnt = _srcobj->tx.ndpa_cnt; \ 2117 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 2118 for (i = 0; i < MAX_MCS; i++) \ 2119 _tgtobj->tx.pkt_type[pream_type].mcs_count[i] += \ 2120 _srcobj->tx.pkt_type[pream_type].mcs_count[i]; \ 2121 } \ 2122 for (i = 0; i < WME_AC_MAX; i++) { \ 2123 _tgtobj->tx.wme_ac_type[i] += _srcobj->tx.wme_ac_type[i]; \ 2124 _tgtobj->tx.wme_ac_type_bytes[i] += \ 2125 _srcobj->tx.wme_ac_type_bytes[i]; \ 2126 _tgtobj->tx.excess_retries_per_ac[i] += \ 2127 _srcobj->tx.excess_retries_per_ac[i]; \ 2128 } \ 2129 for (i = 0; i < MAX_GI; i++) { \ 2130 _tgtobj->tx.sgi_count[i] += _srcobj->tx.sgi_count[i]; \ 2131 } \ 2132 for (i = 0; i < SS_COUNT; i++) { \ 2133 _tgtobj->tx.nss[i] += _srcobj->tx.nss[i]; \ 2134 } \ 2135 for (i = 0; i < MAX_BW; i++) { \ 2136 _tgtobj->tx.bw[i] += _srcobj->tx.bw[i]; \ 2137 } \ 2138 for (i = 0; i < MAX_RU_LOCATIONS; i++) { \ 2139 _tgtobj->tx.ru_loc[i].num_msdu += \ 2140 _srcobj->tx.ru_loc[i].num_msdu; \ 2141 _tgtobj->tx.ru_loc[i].num_mpdu += \ 2142 _srcobj->tx.ru_loc[i].num_mpdu; \ 2143 _tgtobj->tx.ru_loc[i].mpdu_tried += \ 2144 _srcobj->tx.ru_loc[i].mpdu_tried; \ 2145 } \ 2146 for (i = 0; i < MAX_TRANSMIT_TYPES; i++) { \ 2147 _tgtobj->tx.transmit_type[i].num_msdu += \ 2148 _srcobj->tx.transmit_type[i].num_msdu; \ 2149 _tgtobj->tx.transmit_type[i].num_mpdu += \ 2150 _srcobj->tx.transmit_type[i].num_mpdu; \ 2151 
_tgtobj->tx.transmit_type[i].mpdu_tried += \ 2152 _srcobj->tx.transmit_type[i].mpdu_tried; \ 2153 } \ 2154 for (i = 0; i < MAX_MU_GROUP_ID; i++) { \ 2155 _tgtobj->tx.mu_group_id[i] = _srcobj->tx.mu_group_id[i]; \ 2156 } \ 2157 _tgtobj->tx.tx_ucast_total.num += \ 2158 _srcobj->tx.tx_ucast_total.num;\ 2159 _tgtobj->tx.tx_ucast_total.bytes += \ 2160 _srcobj->tx.tx_ucast_total.bytes;\ 2161 _tgtobj->tx.tx_ucast_success.num += \ 2162 _srcobj->tx.tx_ucast_success.num; \ 2163 _tgtobj->tx.tx_ucast_success.bytes += \ 2164 _srcobj->tx.tx_ucast_success.bytes; \ 2165 \ 2166 for (i = 0; i < CDP_RSSI_CHAIN_LEN; i++) \ 2167 _tgtobj->tx.rssi_chain[i] = _srcobj->tx.rssi_chain[i]; \ 2168 _tgtobj->rx.mpdu_cnt_fcs_ok += _srcobj->rx.mpdu_cnt_fcs_ok; \ 2169 _tgtobj->rx.mpdu_cnt_fcs_err += _srcobj->rx.mpdu_cnt_fcs_err; \ 2170 _tgtobj->rx.non_ampdu_cnt += _srcobj->rx.non_ampdu_cnt; \ 2171 _tgtobj->rx.ampdu_cnt += _srcobj->rx.ampdu_cnt; \ 2172 _tgtobj->rx.rx_mpdus += _srcobj->rx.rx_mpdus; \ 2173 _tgtobj->rx.rx_ppdus += _srcobj->rx.rx_ppdus; \ 2174 _tgtobj->rx.rx_rate = _srcobj->rx.rx_rate; \ 2175 _tgtobj->rx.last_rx_rate = _srcobj->rx.last_rx_rate; \ 2176 _tgtobj->rx.rnd_avg_rx_rate = _srcobj->rx.rnd_avg_rx_rate; \ 2177 _tgtobj->rx.avg_rx_rate = _srcobj->rx.avg_rx_rate; \ 2178 _tgtobj->rx.rx_ratecode = _srcobj->rx.rx_ratecode; \ 2179 _tgtobj->rx.avg_snr = _srcobj->rx.avg_snr; \ 2180 _tgtobj->rx.rx_snr_measured_time = \ 2181 _srcobj->rx.rx_snr_measured_time; \ 2182 _tgtobj->rx.snr = _srcobj->rx.snr; \ 2183 _tgtobj->rx.last_snr = _srcobj->rx.last_snr; \ 2184 _tgtobj->rx.nss_info = _srcobj->rx.nss_info; \ 2185 _tgtobj->rx.mcs_info = _srcobj->rx.mcs_info; \ 2186 _tgtobj->rx.bw_info = _srcobj->rx.bw_info; \ 2187 _tgtobj->rx.gi_info = _srcobj->rx.gi_info; \ 2188 _tgtobj->rx.preamble_info = _srcobj->rx.preamble_info; \ 2189 _tgtobj->rx.mpdu_retry_cnt += _srcobj->rx.mpdu_retry_cnt; \ 2190 _tgtobj->rx.bar_cnt = _srcobj->rx.bar_cnt; \ 2191 _tgtobj->rx.ndpa_cnt = _srcobj->rx.ndpa_cnt; \ 2192 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 2193 for (i = 0; i < MAX_MCS; i++) { \ 2194 _tgtobj->rx.pkt_type[pream_type].mcs_count[i] += \ 2195 _srcobj->rx.pkt_type[pream_type].mcs_count[i]; \ 2196 } \ 2197 } \ 2198 for (i = 0; i < WME_AC_MAX; i++) { \ 2199 _tgtobj->rx.wme_ac_type[i] += _srcobj->rx.wme_ac_type[i]; \ 2200 _tgtobj->rx.wme_ac_type_bytes[i] += \ 2201 _srcobj->rx.wme_ac_type_bytes[i]; \ 2202 } \ 2203 for (i = 0; i < MAX_MCS; i++) { \ 2204 _tgtobj->rx.su_ax_ppdu_cnt.mcs_count[i] += \ 2205 _srcobj->rx.su_ax_ppdu_cnt.mcs_count[i]; \ 2206 _tgtobj->rx.rx_mpdu_cnt[i] += _srcobj->rx.rx_mpdu_cnt[i]; \ 2207 } \ 2208 for (mu_type = 0 ; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 2209 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok += \ 2210 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok; \ 2211 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err += \ 2212 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err; \ 2213 for (i = 0; i < SS_COUNT; i++) \ 2214 _tgtobj->rx.rx_mu[mu_type].ppdu_nss[i] += \ 2215 _srcobj->rx.rx_mu[mu_type].ppdu_nss[i]; \ 2216 for (i = 0; i < MAX_MCS; i++) \ 2217 _tgtobj->rx.rx_mu[mu_type].ppdu.mcs_count[i] += \ 2218 _srcobj->rx.rx_mu[mu_type].ppdu.mcs_count[i]; \ 2219 } \ 2220 for (i = 0; i < MAX_RECEPTION_TYPES; i++) { \ 2221 _tgtobj->rx.reception_type[i] += \ 2222 _srcobj->rx.reception_type[i]; \ 2223 _tgtobj->rx.ppdu_cnt[i] += _srcobj->rx.ppdu_cnt[i]; \ 2224 } \ 2225 for (i = 0; i < MAX_GI; i++) { \ 2226 _tgtobj->rx.sgi_count[i] += _srcobj->rx.sgi_count[i]; \ 2227 } \ 2228 for (i = 0; i < SS_COUNT; i++) { \ 2229 
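/* accumulate per-NSS (spatial stream) Rx counters */ \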
_tgtobj->rx.nss[i] += _srcobj->rx.nss[i]; \ 2230 _tgtobj->rx.ppdu_nss[i] += _srcobj->rx.ppdu_nss[i]; \ 2231 } \ 2232 for (i = 0; i < MAX_BW; i++) { \ 2233 _tgtobj->rx.bw[i] += _srcobj->rx.bw[i]; \ 2234 } \ 2235 DP_UPDATE_11BE_STATS(_tgtobj, _srcobj); \ 2236 } while (0) 2237 2238 #define DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj) \ 2239 do { \ 2240 DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj); \ 2241 DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj); \ 2242 DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj); \ 2243 } while (0) 2244 2245 #define DP_UPDATE_INGRESS_STATS(_tgtobj, _srcobj) \ 2246 do { \ 2247 uint8_t i = 0; \ 2248 _tgtobj->tx_i.rcvd.num += _srcobj->tx_i.rcvd.num; \ 2249 _tgtobj->tx_i.rcvd.bytes += _srcobj->tx_i.rcvd.bytes; \ 2250 _tgtobj->tx_i.rcvd_in_fast_xmit_flow += \ 2251 _srcobj->tx_i.rcvd_in_fast_xmit_flow; \ 2252 for (i = 0; i < CDP_MAX_TX_DATA_RINGS; i++) { \ 2253 _tgtobj->tx_i.rcvd_per_core[i] += \ 2254 _srcobj->tx_i.rcvd_per_core[i]; \ 2255 } \ 2256 _tgtobj->tx_i.processed.num += _srcobj->tx_i.processed.num; \ 2257 _tgtobj->tx_i.processed.bytes += \ 2258 _srcobj->tx_i.processed.bytes; \ 2259 _tgtobj->tx_i.reinject_pkts.num += \ 2260 _srcobj->tx_i.reinject_pkts.num; \ 2261 _tgtobj->tx_i.reinject_pkts.bytes += \ 2262 _srcobj->tx_i.reinject_pkts.bytes; \ 2263 _tgtobj->tx_i.inspect_pkts.num += \ 2264 _srcobj->tx_i.inspect_pkts.num; \ 2265 _tgtobj->tx_i.inspect_pkts.bytes += \ 2266 _srcobj->tx_i.inspect_pkts.bytes; \ 2267 _tgtobj->tx_i.nawds_mcast.num += \ 2268 _srcobj->tx_i.nawds_mcast.num; \ 2269 _tgtobj->tx_i.nawds_mcast.bytes += \ 2270 _srcobj->tx_i.nawds_mcast.bytes; \ 2271 _tgtobj->tx_i.bcast.num += _srcobj->tx_i.bcast.num; \ 2272 _tgtobj->tx_i.bcast.bytes += _srcobj->tx_i.bcast.bytes; \ 2273 _tgtobj->tx_i.raw.raw_pkt.num += \ 2274 _srcobj->tx_i.raw.raw_pkt.num; \ 2275 _tgtobj->tx_i.raw.raw_pkt.bytes += \ 2276 _srcobj->tx_i.raw.raw_pkt.bytes; \ 2277 _tgtobj->tx_i.raw.dma_map_error += \ 2278 _srcobj->tx_i.raw.dma_map_error; \ 2279 _tgtobj->tx_i.raw.invalid_raw_pkt_datatype += \ 2280 _srcobj->tx_i.raw.invalid_raw_pkt_datatype; \ 2281 _tgtobj->tx_i.raw.num_frags_overflow_err += \ 2282 _srcobj->tx_i.raw.num_frags_overflow_err; \ 2283 _tgtobj->tx_i.sg.sg_pkt.num += _srcobj->tx_i.sg.sg_pkt.num; \ 2284 _tgtobj->tx_i.sg.sg_pkt.bytes += \ 2285 _srcobj->tx_i.sg.sg_pkt.bytes; \ 2286 _tgtobj->tx_i.sg.non_sg_pkts.num += \ 2287 _srcobj->tx_i.sg.non_sg_pkts.num; \ 2288 _tgtobj->tx_i.sg.non_sg_pkts.bytes += \ 2289 _srcobj->tx_i.sg.non_sg_pkts.bytes; \ 2290 _tgtobj->tx_i.sg.dropped_host.num += \ 2291 _srcobj->tx_i.sg.dropped_host.num; \ 2292 _tgtobj->tx_i.sg.dropped_host.bytes += \ 2293 _srcobj->tx_i.sg.dropped_host.bytes; \ 2294 _tgtobj->tx_i.sg.dropped_target += \ 2295 _srcobj->tx_i.sg.dropped_target; \ 2296 _tgtobj->tx_i.sg.dma_map_error += \ 2297 _srcobj->tx_i.sg.dma_map_error; \ 2298 _tgtobj->tx_i.mcast_en.mcast_pkt.num += \ 2299 _srcobj->tx_i.mcast_en.mcast_pkt.num; \ 2300 _tgtobj->tx_i.mcast_en.mcast_pkt.bytes += \ 2301 _srcobj->tx_i.mcast_en.mcast_pkt.bytes; \ 2302 _tgtobj->tx_i.mcast_en.dropped_map_error += \ 2303 _srcobj->tx_i.mcast_en.dropped_map_error; \ 2304 _tgtobj->tx_i.mcast_en.dropped_self_mac += \ 2305 _srcobj->tx_i.mcast_en.dropped_self_mac; \ 2306 _tgtobj->tx_i.mcast_en.dropped_send_fail += \ 2307 _srcobj->tx_i.mcast_en.dropped_send_fail; \ 2308 _tgtobj->tx_i.mcast_en.ucast += _srcobj->tx_i.mcast_en.ucast; \ 2309 _tgtobj->tx_i.mcast_en.fail_seg_alloc += \ 2310 _srcobj->tx_i.mcast_en.fail_seg_alloc; \ 2311 _tgtobj->tx_i.mcast_en.clone_fail += \ 2312 
_srcobj->tx_i.mcast_en.clone_fail; \ 2313 _tgtobj->tx_i.igmp_mcast_en.igmp_rcvd += \ 2314 _srcobj->tx_i.igmp_mcast_en.igmp_rcvd; \ 2315 _tgtobj->tx_i.igmp_mcast_en.igmp_ucast_converted += \ 2316 _srcobj->tx_i.igmp_mcast_en.igmp_ucast_converted; \ 2317 _tgtobj->tx_i.dropped.desc_na.num += \ 2318 _srcobj->tx_i.dropped.desc_na.num; \ 2319 _tgtobj->tx_i.dropped.desc_na.bytes += \ 2320 _srcobj->tx_i.dropped.desc_na.bytes; \ 2321 _tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.num += \ 2322 _srcobj->tx_i.dropped.desc_na_exc_alloc_fail.num; \ 2323 _tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes += \ 2324 _srcobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes; \ 2325 _tgtobj->tx_i.dropped.desc_na_exc_outstand.num += \ 2326 _srcobj->tx_i.dropped.desc_na_exc_outstand.num; \ 2327 _tgtobj->tx_i.dropped.desc_na_exc_outstand.bytes += \ 2328 _srcobj->tx_i.dropped.desc_na_exc_outstand.bytes; \ 2329 _tgtobj->tx_i.dropped.exc_desc_na.num += \ 2330 _srcobj->tx_i.dropped.exc_desc_na.num; \ 2331 _tgtobj->tx_i.dropped.exc_desc_na.bytes += \ 2332 _srcobj->tx_i.dropped.exc_desc_na.bytes; \ 2333 _tgtobj->tx_i.dropped.ring_full += \ 2334 _srcobj->tx_i.dropped.ring_full; \ 2335 _tgtobj->tx_i.dropped.enqueue_fail += \ 2336 _srcobj->tx_i.dropped.enqueue_fail; \ 2337 _tgtobj->tx_i.dropped.dma_error += \ 2338 _srcobj->tx_i.dropped.dma_error; \ 2339 _tgtobj->tx_i.dropped.res_full += \ 2340 _srcobj->tx_i.dropped.res_full; \ 2341 _tgtobj->tx_i.dropped.headroom_insufficient += \ 2342 _srcobj->tx_i.dropped.headroom_insufficient; \ 2343 _tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check += \ 2344 _srcobj->tx_i.dropped.fail_per_pkt_vdev_id_check; \ 2345 _tgtobj->tx_i.dropped.drop_ingress += \ 2346 _srcobj->tx_i.dropped.drop_ingress; \ 2347 _tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path += \ 2348 _srcobj->tx_i.dropped.invalid_peer_id_in_exc_path; \ 2349 _tgtobj->tx_i.dropped.tx_mcast_drop += \ 2350 _srcobj->tx_i.dropped.tx_mcast_drop; \ 2351 _tgtobj->tx_i.dropped.fw2wbm_tx_drop += \ 2352 _srcobj->tx_i.dropped.fw2wbm_tx_drop; \ 2353 _tgtobj->tx_i.dropped.dropped_pkt.num = \ 2354 _tgtobj->tx_i.dropped.dma_error + \ 2355 _tgtobj->tx_i.dropped.ring_full + \ 2356 _tgtobj->tx_i.dropped.enqueue_fail + \ 2357 _tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check + \ 2358 _tgtobj->tx_i.dropped.desc_na.num + \ 2359 _tgtobj->tx_i.dropped.res_full + \ 2360 _tgtobj->tx_i.dropped.drop_ingress + \ 2361 _tgtobj->tx_i.dropped.headroom_insufficient + \ 2362 _tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path + \ 2363 _tgtobj->tx_i.dropped.tx_mcast_drop + \ 2364 _tgtobj->tx_i.dropped.fw2wbm_tx_drop; \ 2365 _tgtobj->tx_i.dropped.dropped_pkt.bytes += \ 2366 _srcobj->tx_i.dropped.dropped_pkt.bytes; \ 2367 _tgtobj->tx_i.mesh.exception_fw += \ 2368 _srcobj->tx_i.mesh.exception_fw; \ 2369 _tgtobj->tx_i.mesh.completion_fw += \ 2370 _srcobj->tx_i.mesh.completion_fw; \ 2371 _tgtobj->tx_i.cce_classified += \ 2372 _srcobj->tx_i.cce_classified; \ 2373 _tgtobj->tx_i.cce_classified_raw += \ 2374 _srcobj->tx_i.cce_classified_raw; \ 2375 _tgtobj->tx_i.sniffer_rcvd.num += \ 2376 _srcobj->tx_i.sniffer_rcvd.num; \ 2377 _tgtobj->tx_i.sniffer_rcvd.bytes += \ 2378 _srcobj->tx_i.sniffer_rcvd.bytes; \ 2379 _tgtobj->rx_i.reo_rcvd_pkt.num += \ 2380 _srcobj->rx_i.reo_rcvd_pkt.num; \ 2381 _tgtobj->rx_i.reo_rcvd_pkt.bytes += \ 2382 _srcobj->rx_i.reo_rcvd_pkt.bytes; \ 2383 _tgtobj->rx_i.null_q_desc_pkt.num += \ 2384 _srcobj->rx_i.null_q_desc_pkt.num; \ 2385 _tgtobj->rx_i.null_q_desc_pkt.bytes += \ 2386 _srcobj->rx_i.null_q_desc_pkt.bytes; \ 2387 _tgtobj->rx_i.routed_eapol_pkt.num += \ 
2388 _srcobj->rx_i.routed_eapol_pkt.num; \ 2389 _tgtobj->rx_i.routed_eapol_pkt.bytes += \ 2390 _srcobj->rx_i.routed_eapol_pkt.bytes; \ 2391 } while (0) 2392 2393 #define DP_UPDATE_VDEV_STATS(_tgtobj, _srcobj) \ 2394 do { \ 2395 DP_UPDATE_INGRESS_STATS(_tgtobj, _srcobj); \ 2396 DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj); \ 2397 } while (0) 2398 2399 /** 2400 * dp_peer_find_attach() - Allocates memory for peer objects 2401 * @soc: SoC handle 2402 * 2403 * Return: QDF_STATUS 2404 */ 2405 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc); 2406 2407 /** 2408 * dp_peer_find_detach() - Frees memory for peer objects 2409 * @soc: SoC handle 2410 * 2411 * Return: none 2412 */ 2413 void dp_peer_find_detach(struct dp_soc *soc); 2414 2415 /** 2416 * dp_peer_find_hash_add() - add peer to peer_hash_table 2417 * @soc: soc handle 2418 * @peer: peer handle 2419 * 2420 * Return: none 2421 */ 2422 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer); 2423 2424 /** 2425 * dp_peer_find_hash_remove() - remove peer from peer_hash_table 2426 * @soc: soc handle 2427 * @peer: peer handle 2428 * 2429 * Return: none 2430 */ 2431 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer); 2432 2433 /* unused?? */ 2434 void dp_peer_find_hash_erase(struct dp_soc *soc); 2435 2436 /** 2437 * dp_peer_vdev_list_add() - add peer into vdev's peer list 2438 * @soc: soc handle 2439 * @vdev: vdev handle 2440 * @peer: peer handle 2441 * 2442 * Return: none 2443 */ 2444 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev, 2445 struct dp_peer *peer); 2446 2447 /** 2448 * dp_peer_vdev_list_remove() - remove peer from vdev's peer list 2449 * @soc: SoC handle 2450 * @vdev: VDEV handle 2451 * @peer: peer handle 2452 * 2453 * Return: none 2454 */ 2455 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev, 2456 struct dp_peer *peer); 2457 2458 /** 2459 * dp_peer_find_id_to_obj_add() - Add peer into peer_id table 2460 * @soc: SoC handle 2461 * @peer: peer handle 2462 * @peer_id: peer_id 2463 * 2464 * Return: None 2465 */ 2466 void dp_peer_find_id_to_obj_add(struct dp_soc *soc, 2467 struct dp_peer *peer, 2468 uint16_t peer_id); 2469 2470 /** 2471 * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table 2472 * @soc: SoC handle 2473 * @peer: peer handle 2474 * @txrx_peer: txrx peer handle 2475 * 2476 * Return: None 2477 */ 2478 void dp_txrx_peer_attach_add(struct dp_soc *soc, 2479 struct dp_peer *peer, 2480 struct dp_txrx_peer *txrx_peer); 2481 2482 /** 2483 * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table 2484 * @soc: SoC handle 2485 * @peer_id: peer_id 2486 * 2487 * Return: None 2488 */ 2489 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc, 2490 uint16_t peer_id); 2491 2492 /** 2493 * dp_vdev_unref_delete() - check and process vdev delete 2494 * @soc: DP specific soc pointer 2495 * @vdev: DP specific vdev pointer 2496 * @mod_id: module id 2497 * 2498 */ 2499 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev, 2500 enum dp_mod_id mod_id); 2501 2502 /** 2503 * dp_peer_ppdu_delayed_ba_cleanup() - free ppdu allocated in peer 2504 * @peer: Datapath peer 2505 * 2506 * Return: void 2507 */ 2508 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer); 2509 2510 /** 2511 * dp_peer_rx_init() - Initialize receive TID state 2512 * @pdev: Datapath pdev 2513 * @peer: Datapath peer 2514 * 2515 */ 2516 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer); 2517 2518 /** 2519 * dp_peer_cleanup() - Cleanup peer 
information 2520 * @vdev: Datapath vdev 2521 * @peer: Datapath peer 2522 * 2523 */ 2524 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); 2525 2526 #ifdef DP_PEER_EXTENDED_API 2527 /** 2528 * dp_register_peer() - Register peer into physical device 2529 * @soc_hdl: data path soc handle 2530 * @pdev_id: device instance id 2531 * @sta_desc: peer description 2532 * 2533 * Register peer into physical device 2534 * 2535 * Return: QDF_STATUS_SUCCESS registration success 2536 * QDF_STATUS_E_FAULT peer not found 2537 */ 2538 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2539 struct ol_txrx_desc_type *sta_desc); 2540 2541 /** 2542 * dp_clear_peer() - remove peer from physical device 2543 * @soc_hdl: data path soc handle 2544 * @pdev_id: device instance id 2545 * @peer_addr: peer mac address 2546 * 2547 * remove peer from physical device 2548 * 2549 * Return: QDF_STATUS_SUCCESS removal success 2550 * QDF_STATUS_E_FAULT peer not found 2551 */ 2552 QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2553 struct qdf_mac_addr peer_addr); 2554 2555 /** 2556 * dp_find_peer_exist_on_vdev() - find if peer exists on the given vdev 2557 * @soc_hdl: datapath soc handle 2558 * @vdev_id: vdev instance id 2559 * @peer_addr: peer mac address 2560 * 2561 * Return: true or false 2562 */ 2563 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2564 uint8_t *peer_addr); 2565 2566 /** 2567 * dp_find_peer_exist_on_other_vdev() - find if peer exists 2568 * on any vdev other than the given one 2569 * @soc_hdl: datapath soc handle 2570 * @vdev_id: vdev instance id 2571 * @peer_addr: peer mac address 2572 * @max_bssid: max number of bssids 2573 * 2574 * Return: true or false 2575 */ 2576 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl, 2577 uint8_t vdev_id, uint8_t *peer_addr, 2578 uint16_t max_bssid); 2579 2580 /** 2581 * dp_peer_state_update() - update peer local state 2582 * @soc: datapath soc handle 2583 * @peer_mac: peer mac address 2584 * @state: new peer local state 2585 * 2586 * update peer local state 2587 * 2588 * Return: QDF_STATUS_SUCCESS update success 2589 */ 2590 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac, 2591 enum ol_txrx_peer_state state); 2592 2593 /** 2594 * dp_get_vdevid() - Get id of the virtual interface with which peer registered 2595 * @soc_hdl: datapath soc handle 2596 * @peer_mac: peer mac address 2597 * @vdev_id: virtual interface id with which the peer is registered 2598 * 2599 * Get the id of the virtual interface with which the peer is registered 2600 * 2601 * Return: QDF_STATUS_SUCCESS on success 2602 */ 2603 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2604 uint8_t *vdev_id); 2605 2606 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle, 2607 struct qdf_mac_addr peer_addr); 2608 2609 /** 2610 * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs 2611 * @peer: peer instance 2612 * 2613 * Get the virtual interface instance to which the peer belongs 2614 * 2615 * Return: virtual interface instance pointer 2616 * NULL if not found 2617 */ 2618 struct cdp_vdev *dp_get_vdev_for_peer(void *peer); 2619 2620 /** 2621 * dp_peer_get_peer_mac_addr() - Get peer mac address 2622 * @peer: peer instance 2623 * 2624 * Get peer mac address 2625 * 2626 * Return: peer mac address pointer 2627 * NULL if not found 2628 */ 2629 uint8_t *dp_peer_get_peer_mac_addr(void *peer); 2630 2631 /** 2632 * dp_get_peer_state() - Get local peer state 2633 * @soc: datapath soc handle 2634 * @vdev_id: vdev id 2635 * @peer_mac: peer mac addr 2636 * 2637 * Get local peer state 2638 * 2639 * Return: peer status 2640 */ 2641 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id, 2642 uint8_t *peer_mac); 2643 2644 /** 2645 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device 2646 * @pdev: data path device instance 2647 * 2648 * local peer id pool alloc for physical device 2649 * 2650 * Return: none 2651 */ 2652 void dp_local_peer_id_pool_init(struct dp_pdev *pdev); 2653 2654 /** 2655 * dp_local_peer_id_alloc() - allocate local peer id 2656 * @pdev: data path device instance 2657 * @peer: new peer instance 2658 * 2659 * allocate local peer id 2660 * 2661 * Return: none 2662 */ 2663 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer); 2664 2665 /** 2666 * dp_local_peer_id_free() - remove local peer id 2667 * @pdev: data path device instance 2668 * @peer: peer instance to be removed 2669 * 2670 * remove local peer id 2671 * 2672 * Return: none 2673 */ 2674 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer); 2675 2676 /** 2677 * dp_set_peer_as_tdls_peer() - set tdls peer flag for the peer 2678 * @soc_hdl: datapath soc handle 2679 * @vdev_id: vdev_id 2680 * @peer_mac: peer mac addr 2681 * @val: tdls peer flag 2682 * 2683 * Return: none 2684 */ 2685 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2686 uint8_t *peer_mac, bool val); 2687 #else 2688 static inline 2689 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2690 uint8_t *vdev_id) 2691 { 2692 return QDF_STATUS_E_NOSUPPORT; 2693 } 2694 2695 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev) 2696 { 2697 } 2698 2699 static inline 2700 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) 2701 { 2702 } 2703 2704 static inline 2705 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) 2706 { 2707 } 2708 2709 static inline 2710 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2711 uint8_t *peer_mac, bool val) 2712 { 2713 } 2714 #endif 2715 2716 /** 2717 * dp_find_peer_exist() - find if the peer already exists 2718 * @soc_hdl: datapath soc handle 2719 * @pdev_id: physical device instance id 2720 * @peer_addr: peer mac address 2721 * 2722 * Return: true or false 2723 */ 2724 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2725 uint8_t *peer_addr); 2726 2727 #ifdef DP_UMAC_HW_RESET_SUPPORT 2728 /** 2729 * dp_pause_reo_send_cmd() - Pause Reo send commands. 2730 * @soc: dp soc 2731 * 2732 * Return: none 2733 */ 2734 void dp_pause_reo_send_cmd(struct dp_soc *soc); 2735 2736 /** 2737 * dp_resume_reo_send_cmd() - Resume Reo send commands.
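 *
 * Counterpart of dp_pause_reo_send_cmd(); invoked once the UMAC reset /
 * target-recovery flow allows REO commands to be posted to HW again.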
2738 * @soc: dp soc 2739 * 2740 * Return: none 2741 */ 2742 void dp_resume_reo_send_cmd(struct dp_soc *soc); 2743 2744 /** 2745 * dp_cleanup_reo_cmd_module - Clean up the reo cmd module 2746 * @soc: DP SoC handle 2747 * 2748 * Return: none 2749 */ 2750 void dp_cleanup_reo_cmd_module(struct dp_soc *soc); 2751 2752 /** 2753 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist 2754 * @soc: DP SOC handle 2755 * 2756 * Return: none 2757 */ 2758 void dp_reo_desc_freelist_destroy(struct dp_soc *soc); 2759 2760 /** 2761 * dp_reset_rx_reo_tid_queue() - Reset the reo tid queues 2762 * @soc: dp soc 2763 * @hw_qdesc_vaddr: starting address of the tid queues 2764 * @size: size of the memory pointed to by hw_qdesc_vaddr 2765 * 2766 * Return: none 2767 */ 2768 void dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr, 2769 uint32_t size); 2770 2771 2772 static inline void dp_umac_reset_trigger_pre_reset_notify_cb(struct dp_soc *soc) 2773 { 2774 notify_pre_reset_fw_callback callback = soc->notify_fw_callback; 2775 2776 if (callback) 2777 callback(soc); 2778 } 2779 2780 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 2781 /** 2782 * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session 2783 * @soc: dp soc handle 2784 * 2785 * Return: void 2786 */ 2787 void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc); 2788 2789 /** 2790 * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session 2791 * @soc: dp soc handle 2792 * @umac_reset_ctx: Umac reset context 2793 * @rx_event: Rx event received 2794 * @is_target_recovery: Flag to indicate if it is triggered for target recovery 2795 * 2796 * Return: status 2797 */ 2798 QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc, 2799 struct dp_soc_umac_reset_ctx *umac_reset_ctx, 2800 enum umac_reset_rx_event rx_event, 2801 bool is_target_recovery); 2802 2803 /** 2804 * dp_umac_reset_handle_action_cb() - Function to call action callback 2805 * @soc: dp soc handle 2806 * @umac_reset_ctx: Umac reset context 2807 * @action: Action to call the callback for 2808 * 2809 * Return: QDF_STATUS status 2810 */ 2811 QDF_STATUS dp_umac_reset_handle_action_cb(struct dp_soc *soc, 2812 struct dp_soc_umac_reset_ctx *umac_reset_ctx, 2813 enum umac_reset_action action); 2814 2815 /** 2816 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command 2817 * @umac_reset_ctx: UMAC reset context 2818 * @tx_cmd: Tx command to be posted 2819 * 2820 * Return: QDF status of operation 2821 */ 2822 QDF_STATUS 2823 dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx, 2824 enum umac_reset_tx_cmd tx_cmd); 2825 2826 /** 2827 * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator 2828 * @soc: dp soc handle 2829 * 2830 * Return: true if the soc is initiator or false otherwise 2831 */ 2832 bool dp_umac_reset_initiator_check(struct dp_soc *soc); 2833 2834 /** 2835 * dp_umac_reset_target_recovery_check() - Check if this is for target recovery 2836 * @soc: dp soc handle 2837 * 2838 * Return: true if the session is for target recovery or false otherwise 2839 */ 2840 bool dp_umac_reset_target_recovery_check(struct dp_soc *soc); 2841 2842 /** 2843 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored 2844 * @soc: dp soc handle 2845 * 2846 * Return: true if the soc is ignored or false otherwise 2847 */ 2848 bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc); 2849 2850 /** 2851 * dp_mlo_umac_reset_stats_print() - API to print MLO umac reset 
stats 2852 * @soc: dp soc handle 2853 * 2854 * Return: QDF_STATUS 2855 */ 2856 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc); 2857 #else 2858 static inline 2859 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc) 2860 { 2861 return QDF_STATUS_SUCCESS; 2862 } 2863 #endif 2864 #else 2865 static inline void dp_umac_reset_trigger_pre_reset_notify_cb(struct dp_soc *soc) 2866 { 2867 } 2868 #endif 2869 2870 #if defined(DP_UMAC_HW_RESET_SUPPORT) && defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 2871 /** 2872 * dp_umac_reset_notify_asserted_soc() - API to notify the asserted SOC 2873 * @soc: dp soc 2874 * 2875 * Return: QDF_STATUS 2876 */ 2877 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc); 2878 2879 /** 2880 * dp_get_umac_reset_in_progress_state() - API to check umac reset in progress 2881 * state 2882 * @psoc: dp soc handle 2883 * 2884 * Return: umac reset state 2885 */ 2886 enum cdp_umac_reset_state 2887 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc); 2888 #else 2889 static inline 2890 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc) 2891 { 2892 return QDF_STATUS_SUCCESS; 2893 } 2894 2895 static inline enum cdp_umac_reset_state 2896 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc) 2897 { 2898 return CDP_UMAC_RESET_NOT_IN_PROGRESS; 2899 } 2900 #endif 2901 2902 #ifndef WLAN_SOFTUMAC_SUPPORT 2903 QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type, 2904 struct hal_reo_cmd_params *params, 2905 void (*callback_fn), void *data); 2906 2907 /** 2908 * dp_reo_cmdlist_destroy() - Free REO commands in the queue 2909 * @soc: DP SoC handle 2910 * 2911 * Return: none 2912 */ 2913 void dp_reo_cmdlist_destroy(struct dp_soc *soc); 2914 2915 /** 2916 * dp_reo_status_ring_handler() - Handler for REO Status ring 2917 * @int_ctx: pointer to DP interrupt context 2918 * @soc: DP Soc handle 2919 * 2920 * Return: Number of descriptors reaped 2921 */ 2922 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, 2923 struct dp_soc *soc); 2924 #endif 2925 2926 /** 2927 * dp_aggregate_vdev_stats() - Consolidate stats at VDEV level 2928 * @vdev: DP VDEV handle 2929 * @vdev_stats: aggregate statistics 2930 * 2931 * return: void 2932 */ 2933 void dp_aggregate_vdev_stats(struct dp_vdev *vdev, 2934 struct cdp_vdev_stats *vdev_stats); 2935 2936 /** 2937 * dp_txrx_get_vdev_stats() - Update buffer with cdp_vdev_stats 2938 * @soc_hdl: CDP SoC handle 2939 * @vdev_id: vdev Id 2940 * @buf: buffer for vdev stats 2941 * @is_aggregate: are aggregate stats being collected 2942 * 2943 * Return: QDF_STATUS 2944 */ 2945 QDF_STATUS 2946 dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2947 void *buf, bool is_aggregate); 2948 2949 /** 2950 * dp_rx_bar_stats_cb() - BAR received stats callback 2951 * @soc: SOC handle 2952 * @cb_ctxt: Call back context 2953 * @reo_status: Reo status 2954 * 2955 * Return: void 2956 */ 2957 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, 2958 union hal_reo_status *reo_status); 2959 2960 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id, 2961 qdf_nbuf_t nbuf, 2962 uint8_t newmac[][QDF_MAC_ADDR_SIZE], 2963 uint8_t new_mac_cnt, uint8_t tid, 2964 bool is_igmp, bool is_dms_pkt); 2965 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 2966 2967 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 2968 2969 /** 2970 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW 2971 * @pdev: 
DP PDEV handle 2972 * @stats_type_upload_mask: stats type requested by user 2973 * @config_param_0: extra configuration parameters 2974 * @config_param_1: extra configuration parameters 2975 * @config_param_2: extra configuration parameters 2976 * @config_param_3: extra configuration parameters 2977 * @cookie: cookie value, echoed back in the stats response 2978 * @cookie_msb: cookie MSB bits, used to identify the stats requestor 2979 * @mac_id: mac number 2980 * 2981 * Return: QDF STATUS 2982 */ 2983 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, 2984 uint32_t stats_type_upload_mask, uint32_t config_param_0, 2985 uint32_t config_param_1, uint32_t config_param_2, 2986 uint32_t config_param_3, int cookie, int cookie_msb, 2987 uint8_t mac_id); 2988 2989 /** 2990 * dp_htt_stats_print_tag() - function to select the tag type and 2991 * print the corresponding tag structure 2992 * @pdev: pdev pointer 2993 * @tag_type: tag type that is to be printed 2994 * @tag_buf: pointer to the tag structure 2995 * 2996 * Return: void 2997 */ 2998 void dp_htt_stats_print_tag(struct dp_pdev *pdev, 2999 uint8_t tag_type, uint32_t *tag_buf); 3000 3001 /** 3002 * dp_htt_stats_copy_tag() - function to select the tag type and 3003 * copy the corresponding tag structure 3004 * @pdev: DP_PDEV handle 3005 * @tag_type: tag type that is to be copied 3006 * @tag_buf: pointer to the tag structure 3007 * 3008 * Return: void 3009 */ 3010 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf); 3011 3012 /** 3013 * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration 3014 * HTT message to pass to FW 3015 * @pdev: DP PDEV handle 3016 * @tuple_mask: tuple configuration to report 3 tuple hash value in either 3017 * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV. 3018 * 3019 * tuple_mask[1:0]: 3020 * 00 - Do not report 3 tuple hash value 3021 * 10 - Report 3 tuple hash value in toeplitz_2_or_4 3022 * 01 - Report 3 tuple hash value in flow_id_toeplitz 3023 * 11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz 3024 * @mac_id: MAC ID 3025 * 3026 * Return: QDF STATUS 3027 */ 3028 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask, 3029 uint8_t mac_id); 3030 3031 #ifdef IPA_OFFLOAD 3032 /** 3033 * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo 3034 * @soc: soc handle 3035 * @cb_ctxt: combination of peer_id and tid 3036 * @reo_status: reo status 3037 * 3038 * Return: void 3039 */ 3040 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt, 3041 union hal_reo_status *reo_status); 3042 3043 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer, 3044 dp_rxtid_stats_cmd_cb dp_stats_cmd_cb); 3045 #ifdef IPA_OPT_WIFI_DP 3046 void dp_ipa_wdi_opt_dpath_notify_flt_rlsd(int flt0_rslt, 3047 int flt1_rslt); 3048 void dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(int flt0_rslt, int flt1_rslt); 3049 void dp_ipa_wdi_opt_dpath_notify_flt_rsvd(bool is_success); 3050 #endif 3051 #ifdef QCA_ENHANCED_STATS_SUPPORT 3052 /** 3053 * dp_peer_aggregate_tid_stats - aggregate rx tid stats 3054 * @peer: Data Path peer 3055 * 3056 * Return: void 3057 */ 3058 void dp_peer_aggregate_tid_stats(struct dp_peer *peer); 3059 #endif 3060 #else 3061 static inline void dp_peer_aggregate_tid_stats(struct dp_peer *peer) 3062 { 3063 } 3064 #endif 3065 3066 /** 3067 * dp_set_key_sec_type_wifi3() - set security mode of key 3068 * @soc: Datapath soc handle 3069 * @vdev_id: id of datapath vdev 3070 * @peer_mac: Datapath peer mac address 3071 * @sec_type: security type 3072 * @is_unicast: key type 3073 * 3074 */ 3075
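/*
 * Illustrative usage sketch (not part of the driver API surface; the
 * soc handle, vdev_id and peer_mac below are assumed to come from the
 * caller's context):
 *
 *	if (QDF_IS_STATUS_ERROR(dp_set_key_sec_type_wifi3(soc, vdev_id,
 *							  peer_mac,
 *							  cdp_sec_type_aes_ccmp,
 *							  true)))
 *		dp_err("failed to set key sec type for peer");
 */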
QDF_STATUS 3076 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, 3077 uint8_t *peer_mac, enum cdp_sec_type sec_type, 3078 bool is_unicast); 3079 3080 /** 3081 * dp_get_pdev_for_mac_id() - Return pdev for mac_id 3082 * @soc: handle to DP soc 3083 * @mac_id: MAC id 3084 * 3085 * Return: pdev corresponding to MAC 3086 */ 3087 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id); 3088 3089 QDF_STATUS 3090 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id, 3091 uint8_t *peer_mac, 3092 bool is_unicast, uint32_t *key); 3093 3094 /** 3095 * dp_check_pdev_exists() - Validate pdev before use 3096 * @soc: dp soc handle 3097 * @data: pdev handle 3098 * 3099 * Return: true if pdev exists, false otherwise 3100 */ 3101 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data); 3102 3103 /** 3104 * dp_update_delay_stats() - Update delay statistics in structure 3105 * and fill min, max and avg delay 3106 * @tstats: tid tx stats 3107 * @rstats: tid rx stats 3108 * @delay: delay value (in ms or us, per @delay_in_us) 3109 * @tid: tid value 3110 * @mode: type of tx delay mode 3111 * @ring_id: ring number 3112 * @delay_in_us: flag to indicate whether the delay is in ms or us 3113 * 3114 * Return: none 3115 */ 3116 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats, 3117 struct cdp_tid_rx_stats *rstats, uint32_t delay, 3118 uint8_t tid, uint8_t mode, uint8_t ring_id, 3119 bool delay_in_us); 3120 3121 /** 3122 * dp_print_ring_stats(): Print tail and head pointer 3123 * @pdev: DP_PDEV handle 3124 * 3125 * Return: void 3126 */ 3127 void dp_print_ring_stats(struct dp_pdev *pdev); 3128 3129 /** 3130 * dp_print_ring_stat_from_hal(): Print tail and head pointer through hal 3131 * @soc: soc handle 3132 * @srng: srng handle 3133 * @ring_type: ring type 3134 * 3135 * Return: void 3136 */ 3137 void 3138 dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng, 3139 enum hal_ring_type ring_type); 3140 3141 /** 3142 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters 3143 * @pdev: DP pdev handle 3144 * 3145 * Return: void 3146 */ 3147 void dp_print_pdev_cfg_params(struct dp_pdev *pdev); 3148 3149 /** 3150 * dp_print_soc_cfg_params() - Dump soc wlan config parameters 3151 * @soc: Soc handle 3152 * 3153 * Return: void 3154 */ 3155 void dp_print_soc_cfg_params(struct dp_soc *soc); 3156 3157 /** 3158 * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring 3159 * @ring_type: ring type 3160 * 3161 * Return: char const pointer 3162 */ 3163 const 3164 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type); 3165 3166 /** 3167 * dp_txrx_path_stats() - Function to display txrx path stats 3168 * @soc: soc handle 3169 * 3170 * Return: none 3171 */ 3172 void dp_txrx_path_stats(struct dp_soc *soc); 3173 3174 /** 3175 * dp_print_per_ring_stats(): Packet count per ring 3176 * @soc: soc handle 3177 * 3178 * Return: None 3179 */ 3180 void dp_print_per_ring_stats(struct dp_soc *soc); 3181 3182 /** 3183 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level 3184 * @pdev: DP PDEV handle 3185 * 3186 * Return: void 3187 */ 3188 void dp_aggregate_pdev_stats(struct dp_pdev *pdev); 3189 3190 /** 3191 * dp_print_rx_rates(): Print Rx rate stats 3192 * @vdev: DP_VDEV handle 3193 * 3194 * Return: void 3195 */ 3196 void dp_print_rx_rates(struct dp_vdev *vdev); 3197 3198 /** 3199 * dp_print_tx_rates(): Print tx rates 3200 * @vdev: DP_VDEV handle 3201 * 3202 * Return: void 3203 */ 3204 void dp_print_tx_rates(struct dp_vdev *vdev); 3205 3206 /** 3207 * dp_print_peer_stats(): Print peer stats
3208 * @peer: DP_PEER handle 3209 * @peer_stats: buffer holding peer stats 3210 * 3211 * return void 3212 */ 3213 void dp_print_peer_stats(struct dp_peer *peer, 3214 struct cdp_peer_stats *peer_stats); 3215 3216 /** 3217 * dp_print_pdev_tx_stats(): Print Pdev level TX stats 3218 * @pdev: DP_PDEV Handle 3219 * 3220 * Return:void 3221 */ 3222 void 3223 dp_print_pdev_tx_stats(struct dp_pdev *pdev); 3224 3225 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MCAST_MLO) 3226 /** 3227 * dp_print_vdev_mlo_mcast_tx_stats(): Print vdev level mlo mcast tx stats 3228 * @vdev: DP_VDEV Handle 3229 * 3230 * Return:void 3231 */ 3232 void 3233 dp_print_vdev_mlo_mcast_tx_stats(struct dp_vdev *vdev); 3234 #else 3235 /** 3236 * dp_print_vdev_mlo_mcast_tx_stats(): Print vdev level mlo mcast tx stats 3237 * @vdev: DP_VDEV Handle 3238 * 3239 * Return:void 3240 */ 3241 static inline 3242 void dp_print_vdev_mlo_mcast_tx_stats(struct dp_vdev *vdev) 3243 { 3244 } 3245 #endif 3246 3247 /** 3248 * dp_print_pdev_rx_stats(): Print Pdev level RX stats 3249 * @pdev: DP_PDEV Handle 3250 * 3251 * Return: void 3252 */ 3253 void 3254 dp_print_pdev_rx_stats(struct dp_pdev *pdev); 3255 3256 /** 3257 * dp_print_soc_tx_stats(): Print SOC level stats 3258 * @soc: DP_SOC Handle 3259 * 3260 * Return: void 3261 */ 3262 void dp_print_soc_tx_stats(struct dp_soc *soc); 3263 3264 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX 3265 /** 3266 * dp_print_global_desc_count(): Print global desc in use 3267 * 3268 * Return: void 3269 */ 3270 void dp_print_global_desc_count(void); 3271 #else 3272 /** 3273 * dp_print_global_desc_count(): Print global desc in use 3274 * 3275 * Return: void 3276 */ 3277 static inline 3278 void dp_print_global_desc_count(void) 3279 { 3280 } 3281 #endif 3282 3283 /** 3284 * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc 3285 * @soc: dp_soc handle 3286 * 3287 * Return: None 3288 */ 3289 void dp_print_soc_interrupt_stats(struct dp_soc *soc); 3290 3291 /** 3292 * dp_print_tx_ppeds_stats() - Print Tx in use stats for the soc in DS 3293 * @soc: dp_soc handle 3294 * 3295 * Return: None 3296 */ 3297 3298 void dp_print_tx_ppeds_stats(struct dp_soc *soc); 3299 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING 3300 /** 3301 * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats 3302 * for all SRNGs 3303 * @soc: DP soc handle 3304 * @srng_mask: SRNGs mask for dumping usage watermark stats 3305 * 3306 * Return: None 3307 */ 3308 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask); 3309 #else 3310 static inline 3311 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask) 3312 { 3313 } 3314 #endif 3315 3316 /** 3317 * dp_print_soc_rx_stats() - Print SOC level Rx stats 3318 * @soc: DP_SOC Handle 3319 * 3320 * Return: void 3321 */ 3322 void dp_print_soc_rx_stats(struct dp_soc *soc); 3323 3324 /** 3325 * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac 3326 * 3327 * @mac_id: MAC id 3328 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 3329 * 3330 * Single pdev using both MACs will operate on both MAC rings, 3331 * which is the case for MCL. 3332 * For WIN each PDEV will operate one ring, so index is zero. 
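 *
 * Example (illustrative): on MCL (single pdev driving both MACs),
 * dp_get_mac_id_for_pdev(1, 0) returns ring index 1; passing non-zero
 * values for both arguments is treated as a bug.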
3333 * 3334 */ 3335 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id) 3336 { 3337 if (mac_id && pdev_id) { 3338 qdf_print("Both mac_id and pdev_id cannot be non zero"); 3339 QDF_BUG(0); 3340 return 0; 3341 } 3342 return (mac_id + pdev_id); 3343 } 3344 3345 /** 3346 * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id 3347 * @soc: soc pointer 3348 * @mac_id: MAC id 3349 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 3350 * 3351 * For MCL, Single pdev using both MACs will operate on both MAC rings. 3352 * 3353 * For WIN, each PDEV will operate one ring. 3354 * 3355 */ 3356 static inline int 3357 dp_get_lmac_id_for_pdev_id 3358 (struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id) 3359 { 3360 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3361 if (mac_id && pdev_id) { 3362 qdf_print("Both mac_id and pdev_id cannot be non zero"); 3363 QDF_BUG(0); 3364 return 0; 3365 } 3366 return (mac_id + pdev_id); 3367 } 3368 3369 return soc->pdev_list[pdev_id]->lmac_id; 3370 } 3371 3372 /** 3373 * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id 3374 * @soc: soc pointer 3375 * @lmac_id: LMAC id 3376 * 3377 * For MCL, Single pdev exists 3378 * 3379 * For WIN, each PDEV will operate one ring. 3380 * 3381 */ 3382 static inline struct dp_pdev * 3383 dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id) 3384 { 3385 uint8_t i = 0; 3386 3387 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3388 i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id); 3389 return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL); 3390 } 3391 3392 /* Typically for MCL as there only 1 PDEV*/ 3393 return soc->pdev_list[0]; 3394 } 3395 3396 /** 3397 * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev 3398 * corresponding to host pdev id 3399 * @soc: soc pointer 3400 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 3401 * 3402 * Return: target pdev_id for host pdev id. For WIN, this is derived through 3403 * a two step process: 3404 * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change 3405 * during mode switch) 3406 * 2. Get target pdev_id (set up during WMI ready) from lmac_id 3407 * 3408 * For MCL, return the offset-1 translated mac_id 3409 */ 3410 static inline int 3411 dp_calculate_target_pdev_id_from_host_pdev_id 3412 (struct dp_soc *soc, uint32_t mac_for_pdev) 3413 { 3414 struct dp_pdev *pdev; 3415 3416 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3417 return DP_SW2HW_MACID(mac_for_pdev); 3418 3419 pdev = soc->pdev_list[mac_for_pdev]; 3420 3421 /*non-MCL case, get original target_pdev mapping*/ 3422 return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id); 3423 } 3424 3425 /** 3426 * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding 3427 * to host pdev id 3428 * @soc: soc pointer 3429 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 3430 * 3431 * Return: target pdev_id for host pdev id. 3432 * For WIN, return the value stored in pdev object. 3433 * For MCL, return the offset-1 translated mac_id. 
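 *
 * Example (illustrative, assuming DP_SW2HW_MACID() applies a +1 offset):
 * on MCL, host pdev_id 0 translates to target pdev_id 1.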
3434 */ 3435 static inline int 3436 dp_get_target_pdev_id_for_host_pdev_id 3437 (struct dp_soc *soc, uint32_t mac_for_pdev) 3438 { 3439 struct dp_pdev *pdev; 3440 3441 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3442 return DP_SW2HW_MACID(mac_for_pdev); 3443 3444 pdev = soc->pdev_list[mac_for_pdev]; 3445 3446 return pdev->target_pdev_id; 3447 } 3448 3449 /** 3450 * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding 3451 * to target pdev id 3452 * @soc: soc pointer 3453 * @pdev_id: pdev_id corresponding to target pdev 3454 * 3455 * Return: host pdev_id for target pdev id. For WIN, this is derived through 3456 * a two step process: 3457 * 1. Get lmac_id corresponding to target pdev_id 3458 * 2. Get host pdev_id (set up during WMI ready) from lmac_id 3459 * 3460 * For MCL, return the 0-offset pdev_id 3461 */ 3462 static inline int 3463 dp_get_host_pdev_id_for_target_pdev_id 3464 (struct dp_soc *soc, uint32_t pdev_id) 3465 { 3466 struct dp_pdev *pdev; 3467 int lmac_id; 3468 3469 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3470 return DP_HW2SW_MACID(pdev_id); 3471 3472 /* non-MCL case, get original target_lmac mapping from target pdev */ 3473 lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, 3474 DP_HW2SW_MACID(pdev_id)); 3475 3476 /* Get host pdev from lmac */ 3477 pdev = dp_get_pdev_for_lmac_id(soc, lmac_id); 3478 3479 return pdev ? pdev->pdev_id : INVALID_PDEV_ID; 3480 } 3481 3482 /** 3483 * dp_get_mac_id_for_mac() - Return mac id corresponding to WIN and MCL mac_ids 3484 * 3485 * @soc: handle to DP soc 3486 * @mac_id: MAC id 3487 * 3488 * Single pdev using both MACs will operate on both MAC rings, 3489 * which is the case for MCL. 3490 * For WIN each PDEV will operate one ring, so index is zero. 3491 * 3492 */ 3493 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id) 3494 { 3495 /* 3496 * Single pdev using both MACs will operate on both MAC rings, 3497 * which is the case for MCL. 3498 */ 3499 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3500 return mac_id; 3501 3502 /* For WIN each PDEV will operate one ring, so index is zero. */ 3503 return 0; 3504 } 3505 3506 /** 3507 * dp_is_subtype_data() - check if the frame subtype is data 3508 * 3509 * @frame_ctrl: Frame control field 3510 * 3511 * check the frame control field and verify if the packet 3512 * is a data packet. 3513 * 3514 * Return: true or false 3515 */ 3516 static inline bool dp_is_subtype_data(uint16_t frame_ctrl) 3517 { 3518 if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) == 3519 QDF_IEEE80211_FC0_TYPE_DATA) && 3520 (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 3521 QDF_IEEE80211_FC0_SUBTYPE_DATA) || 3522 ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 3523 QDF_IEEE80211_FC0_SUBTYPE_QOS))) { 3524 return true; 3525 } 3526 3527 return false; 3528 } 3529 3530 #ifdef WDI_EVENT_ENABLE 3531 /** 3532 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW 3533 * @pdev: DP PDEV handle 3534 * @stats_type_upload_mask: stats type requested by user 3535 * @mac_id: Mac id number 3536 * 3537 * Return: QDF STATUS 3538 */ 3539 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 3540 uint32_t stats_type_upload_mask, 3541 uint8_t mac_id); 3542 3543 /** 3544 * dp_wdi_event_unsub() - WDI event unsubscribe 3545 * @soc: soc handle 3546 * @pdev_id: id of pdev 3547 * @event_cb_sub_handle: subscribed event handle 3548 * @event: Event to be unsubscribed 3549 * 3550 * Return: 0 for success.
nonzero for failure. 3551 */ 3552 int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 3553 wdi_event_subscribe *event_cb_sub_handle, 3554 uint32_t event); 3555 3556 /** 3557 * dp_wdi_event_sub() - Subscribe WDI event 3558 * @soc: soc handle 3559 * @pdev_id: id of pdev 3560 * @event_cb_sub_handle: subscribe event handle 3561 * @event: Event to be subscribed 3562 * 3563 * Return: 0 for success. nonzero for failure. 3564 */ 3565 int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 3566 wdi_event_subscribe *event_cb_sub_handle, 3567 uint32_t event); 3568 3569 /** 3570 * dp_wdi_event_handler() - Event handler for WDI event 3571 * @event: wdi event number 3572 * @soc: soc handle 3573 * @data: pointer to data 3574 * @peer_id: peer id number 3575 * @status: HTT rx status 3576 * @pdev_id: id of pdev 3577 * 3578 * Called to deliver a WDI event to all registered subscribers 3579 * 3580 * Return: None 3581 */ 3582 void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc, 3583 void *data, u_int16_t peer_id, 3584 int status, u_int8_t pdev_id); 3585 3586 /** 3587 * dp_wdi_event_attach() - Attach wdi event 3588 * @txrx_pdev: DP pdev handle 3589 * 3590 * Return: 0 for success. nonzero for failure. 3591 */ 3592 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev); 3593 3594 /** 3595 * dp_wdi_event_detach() - Detach WDI event 3596 * @txrx_pdev: DP pdev handle 3597 * 3598 * Return: 0 for success. nonzero for failure. 3599 */ 3600 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev); 3601 3602 static inline void 3603 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, 3604 void *cb_context, 3605 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 3606 uint8_t pipe_id) 3607 { 3608 struct hif_msg_callbacks hif_pipe_callbacks = { 0 }; 3609 3610 /* TODO: Temporary change to bypass HTC connection for this new 3611 * HIF pipe, which will be used for packet log and other high- 3612 * priority HTT messages.
Proper HTC connection to be added 3613 * later once required FW changes are available 3614 */ 3615 hif_pipe_callbacks.rxCompletionHandler = callback; 3616 hif_pipe_callbacks.Context = cb_context; 3617 hif_update_pipe_callback(dp_soc->hif_handle, 3618 DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks); 3619 } 3620 #else 3621 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 3622 wdi_event_subscribe *event_cb_sub_handle, 3623 uint32_t event) 3624 { 3625 return 0; 3626 } 3627 3628 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 3629 wdi_event_subscribe *event_cb_sub_handle, 3630 uint32_t event) 3631 { 3632 return 0; 3633 } 3634 3635 static inline 3636 void dp_wdi_event_handler(enum WDI_EVENT event, 3637 struct dp_soc *soc, 3638 void *data, u_int16_t peer_id, 3639 int status, u_int8_t pdev_id) 3640 { 3641 } 3642 3643 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev) 3644 { 3645 return 0; 3646 } 3647 3648 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev) 3649 { 3650 return 0; 3651 } 3652 3653 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 3654 uint32_t stats_type_upload_mask, uint8_t mac_id) 3655 { 3656 return 0; 3657 } 3658 3659 static inline void 3660 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, 3661 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 3662 uint8_t pipe_id) 3663 { 3664 } 3665 #endif 3666 3667 #ifdef VDEV_PEER_PROTOCOL_COUNT 3668 /** 3669 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 3670 * @vdev: VDEV DP object 3671 * @nbuf: data packet 3672 * @txrx_peer: DP TXRX Peer object 3673 * @is_egress: whether egress or ingress 3674 * @is_rx: whether rx or tx 3675 * 3676 * This function updates the per-peer protocol counters 3677 * Return: void 3678 */ 3679 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, 3680 qdf_nbuf_t nbuf, 3681 struct dp_txrx_peer *txrx_peer, 3682 bool is_egress, 3683 bool is_rx); 3684 3685 /** 3686 * dp_peer_stats_update_protocol_cnt() - update per-peer protocol counters 3687 * @soc: SOC DP object 3688 * @vdev_id: vdev_id 3689 * @nbuf: data packet 3690 * @is_egress: whether egress or ingress 3691 * @is_rx: whether rx or tx 3692 * 3693 * This function updates the per-peer protocol counters 3694 * 3695 * Return: void 3696 */ 3697 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc, 3698 int8_t vdev_id, 3699 qdf_nbuf_t nbuf, 3700 bool is_egress, 3701 bool is_rx); 3702 3703 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 3704 qdf_nbuf_t nbuf); 3705 3706 #else 3707 #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, txrx_peer, \ 3708 is_egress, is_rx) 3709 3710 static inline 3711 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 3712 qdf_nbuf_t nbuf) 3713 { 3714 } 3715 3716 #endif 3717 3718 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3719 /** 3720 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info 3721 * @soc_hdl: Handle to struct cdp_soc 3722 * 3723 * Return: none 3724 */ 3725 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl); 3726 3727 /** 3728 * dp_tx_dump_flow_pool_info_compact() - dump flow pool info 3729 * @soc: DP soc context 3730 * 3731 * Return: none 3732 */ 3733 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc); 3734 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, 3735 bool force); 3736 #else 3737 static inline void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc) 
3738 { 3739 } 3740 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 3741 3742 #ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS 3743 static inline int 3744 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3745 { 3746 return hal_srng_access_start_unlocked(soc, hal_ring_hdl); 3747 } 3748 3749 static inline void 3750 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3751 { 3752 hal_srng_access_end_unlocked(soc, hal_ring_hdl); 3753 } 3754 3755 #else 3756 static inline int 3757 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3758 { 3759 return hal_srng_access_start(soc, hal_ring_hdl); 3760 } 3761 3762 static inline void 3763 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3764 { 3765 hal_srng_access_end(soc, hal_ring_hdl); 3766 } 3767 #endif 3768 3769 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 3770 /** 3771 * dp_srng_access_start() - Wrapper function to log access start of a hal ring 3772 * @int_ctx: pointer to DP interrupt context. This should not be NULL 3773 * @dp_soc: DP Soc handle 3774 * @hal_ring_hdl: opaque pointer to the HAL ring which will be 3775 * serviced 3776 * 3777 * Return: 0 on success; error on failure 3778 */ 3779 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 3780 hal_ring_handle_t hal_ring_hdl); 3781 3782 /** 3783 * dp_srng_access_end() - Wrapper function to log access end of a hal ring 3784 * @int_ctx: pointer to DP interrupt context. This should not be NULL 3785 * @dp_soc: DP Soc handle 3786 * @hal_ring_hdl: opaque pointer to the HAL ring which will be 3787 * serviced 3788 * 3789 * Return: void 3790 */ 3791 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 3792 hal_ring_handle_t hal_ring_hdl); 3793 3794 #else 3795 static inline int dp_srng_access_start(struct dp_intr *int_ctx, 3796 struct dp_soc *dp_soc, 3797 hal_ring_handle_t hal_ring_hdl) 3798 { 3799 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3800 3801 return dp_hal_srng_access_start(hal_soc, hal_ring_hdl); 3802 } 3803 3804 static inline void dp_srng_access_end(struct dp_intr *int_ctx, 3805 struct dp_soc *dp_soc, 3806 hal_ring_handle_t hal_ring_hdl) 3807 { 3808 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3809 3810 dp_hal_srng_access_end(hal_soc, hal_ring_hdl); 3811 } 3812 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 3813 3814 #ifdef QCA_CACHED_RING_DESC 3815 /** 3816 * dp_srng_dst_get_next() - Wrapper function to get next ring desc 3817 * @dp_soc: DP Soc handle 3818 * @hal_ring_hdl: opaque pointer to the HAL Destination Ring 3819 * 3820 * Return: HAL ring descriptor 3821 */ 3822 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 3823 hal_ring_handle_t hal_ring_hdl) 3824 { 3825 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3826 3827 return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl); 3828 } 3829 3830 /** 3831 * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached 3832 * descriptors 3833 * @dp_soc: DP Soc handle 3834 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 3835 * @num_entries: Entry count 3836 * 3837 * Return: None 3838 */ 3839 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 3840 hal_ring_handle_t hal_ring_hdl, 3841 uint32_t num_entries) 3842 { 3843 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3844 3845 hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries); 3846 } 3847 #else 3848 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 3849 hal_ring_handle_t hal_ring_hdl) 3850 { 3851 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3852 3853 return hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 3854 } 3855 3856 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 3857 hal_ring_handle_t hal_ring_hdl, 3858 uint32_t num_entries) 3859 { 3860 } 3861 #endif /* QCA_CACHED_RING_DESC */ 3862
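/*
 * Example: a minimal destination-ring reap sketch built from the
 * wrappers above (illustrative only; the real reap paths live in the
 * rx/tx completion code). REAP_BUDGET and the surrounding locals are
 * assumptions. With QCA_CACHED_RING_DESC enabled, the cached descriptor
 * copies are invalidated before reading so HW-written entries are seen.
 *
 *   uint32_t quota = REAP_BUDGET;
 *   void *desc;
 *
 *   if (dp_hal_srng_access_start(soc->hal_soc, hal_ring_hdl))
 *       return;    // ring access failed
 *
 *   dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, quota);
 *
 *   while (quota-- &&
 *          (desc = dp_srng_dst_get_next(soc, hal_ring_hdl))) {
 *       // process one HAL descriptor
 *   }
 *
 *   dp_hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
 */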
3863 #if defined(QCA_CACHED_RING_DESC) && \ 3864 (defined(QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH) || \ 3865 defined(QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH)) 3866 /** 3867 * dp_srng_dst_prefetch() - Wrapper function to prefetch descs from dest ring 3868 * @hal_soc: HAL SOC handle 3869 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 3870 * @num_entries: Entry count 3871 * 3872 * Return: pointer to the prefetched descriptor, or NULL 3873 */ 3874 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 3875 hal_ring_handle_t hal_ring_hdl, 3876 uint32_t num_entries) 3877 { 3878 return hal_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_entries); 3879 } 3880 3881 /** 3882 * dp_srng_dst_prefetch_32_byte_desc() - Wrapper function to prefetch 3883 * 32 byte descriptor starting at 3884 * 64 byte offset 3885 * @hal_soc: HAL SOC handle 3886 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 3887 * @num_entries: Entry count 3888 * 3889 * Return: pointer to the prefetched descriptor, or NULL 3890 */ 3891 static inline 3892 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 3893 hal_ring_handle_t hal_ring_hdl, 3894 uint32_t num_entries) 3895 { 3896 return hal_srng_dst_prefetch_32_byte_desc(hal_soc, hal_ring_hdl, 3897 num_entries); 3898 } 3899 #else 3900 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 3901 hal_ring_handle_t hal_ring_hdl, 3902 uint32_t num_entries) 3903 { 3904 return NULL; 3905 } 3906 3907 static inline 3908 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 3909 hal_ring_handle_t hal_ring_hdl, 3910 uint32_t num_entries) 3911 { 3912 return NULL; 3913 } 3914 #endif 3915 3916 #ifdef QCA_ENH_V3_STATS_SUPPORT 3917 /** 3918 * dp_pdev_print_delay_stats(): Print pdev level delay stats 3919 * @pdev: DP_PDEV handle 3920 * 3921 * Return: void 3922 */ 3923 void dp_pdev_print_delay_stats(struct dp_pdev *pdev); 3924 3925 /** 3926 * dp_pdev_print_tid_stats(): Print pdev level tid stats 3927 * @pdev: DP_PDEV handle 3928 * 3929 * Return: void 3930 */ 3931 void dp_pdev_print_tid_stats(struct dp_pdev *pdev); 3932 3933 /** 3934 * dp_pdev_print_rx_error_stats(): Print pdev level rx error stats 3935 * @pdev: DP_PDEV handle 3936 * 3937 * Return: void 3938 */ 3939 void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev); 3940 #endif /* QCA_ENH_V3_STATS_SUPPORT */ 3941 3942 /** 3943 * dp_pdev_get_tid_stats(): Get accumulated pdev level tid_stats 3944 * @soc_hdl: soc handle 3945 * @pdev_id: id of dp_pdev handle 3946 * @tid_stats: Pointer for cdp_tid_stats_intf 3947 * 3948 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_INVAL 3949 */ 3950 QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3951 struct cdp_tid_stats_intf *tid_stats); 3952 3953 /** 3954 * dp_soc_set_txrx_ring_map() - set the txrx ring map for the soc 3955 * @soc: DP handler for soc 3956 * 3957 * Return: Void 3958 */ 3959 void dp_soc_set_txrx_ring_map(struct dp_soc *soc); 3960 3961 /** 3962 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev 3963 * @vdev: DP vdev handle 3964 * 3965 * Return: struct cdp_vdev pointer 3966 */ 3967 static inline 3968 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev) 3969 { 3970 return (struct cdp_vdev *)vdev; 3971 } 3972 3973 /** 3974 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev 3975 *
@pdev: DP pdev handle 3976 * 3977 * Return: struct cdp_pdev pointer 3978 */ 3979 static inline 3980 struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev) 3981 { 3982 return (struct cdp_pdev *)pdev; 3983 } 3984 3985 /** 3986 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc 3987 * @psoc: DP psoc handle 3988 * 3989 * Return: struct cdp_soc pointer 3990 */ 3991 static inline 3992 struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc) 3993 { 3994 return (struct cdp_soc *)psoc; 3995 } 3996 3997 /** 3998 * dp_soc_to_cdp_soc_t() - typecast dp psoc to ol txrx soc handle 3999 * @psoc: DP psoc handle 4000 * 4001 * Return: struct cdp_soc_t pointer 4002 */ 4003 static inline 4004 struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc) 4005 { 4006 return (struct cdp_soc_t *)psoc; 4007 } 4008 4009 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) 4010 /** 4011 * dp_rx_flow_get_fse_stats() - Retrieve a flow's statistics 4012 * @pdev: pdev handle 4013 * @rx_flow_info: flow information in the Rx FST 4014 * @stats: stats to update 4015 * 4016 * Return: Success when flow statistics are updated, error on failure 4017 */ 4018 QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev, 4019 struct cdp_rx_flow_info *rx_flow_info, 4020 struct cdp_flow_stats *stats); 4021 4022 /** 4023 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table 4024 * @pdev: pdev handle 4025 * @rx_flow_info: DP flow parameters 4026 * 4027 * Return: Success when flow is deleted, error on failure 4028 */ 4029 QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev, 4030 struct cdp_rx_flow_info *rx_flow_info); 4031 4032 /** 4033 * dp_rx_flow_add_entry() - Add a flow entry to flow search table 4034 * @pdev: DP pdev instance 4035 * @rx_flow_info: DP flow parameters 4036 * 4037 * Return: Success when flow is added, no-memory or already exists on error 4038 */ 4039 QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev, 4040 struct cdp_rx_flow_info *rx_flow_info); 4041 4042 /** 4043 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters 4044 * @soc: SoC handle 4045 * @pdev: Pdev handle 4046 * 4047 * Return: QDF_STATUS 4048 */ 4049 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev); 4050 4051 /** 4052 * dp_rx_fst_detach() - De-initialize Rx FST 4053 * @soc: SoC handle 4054 * @pdev: Pdev handle 4055 * 4056 * Return: None 4057 */ 4058 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev); 4059 4060 /** 4061 * dp_mon_rx_update_rx_flow_tag_stats() - Update a mon flow's statistics 4062 * @pdev: pdev handle 4063 * @flow_id: flow index (truncated hash) in the Rx FST 4064 * 4065 * Return: Success when flow statistics are updated, error on failure 4066 */ 4067 QDF_STATUS 4068 dp_mon_rx_update_rx_flow_tag_stats(struct dp_pdev *pdev, uint32_t flow_id); 4069 #endif 4070 4071 #ifdef WLAN_SUPPORT_RX_FLOW_TAG 4072 /** 4073 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach 4074 * @soc: SoC handle 4075 * @pdev: Pdev handle 4076 * 4077 * Return: Success when fst parameters are programmed in FW, error otherwise 4078 */ 4079 QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc, 4080 struct dp_pdev *pdev); 4081 #endif 4082 4083 /** 4084 * dp_rx_fst_attach_wrapper() - wrapper API for dp_rx_fst_attach 4085 * @soc: SoC handle 4086 * @pdev: Pdev handle 4087 * 4088 * Return: QDF_STATUS 4089 */ 4090 extern QDF_STATUS 4091 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev); 4092
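/*
 * Example: adding, querying, and removing an Rx flow in the FST (a
 * sketch; populating the cdp_rx_flow_info tuple is elided since its
 * exact fields live in the cdp headers).
 *
 *   struct cdp_rx_flow_info flow_info = { 0 };
 *   struct cdp_flow_stats stats = { 0 };
 *
 *   // fill flow_info with the flow tuple/parameters first
 *   if (dp_rx_flow_add_entry(pdev, &flow_info) != QDF_STATUS_SUCCESS)
 *       return;    // no memory, or the flow already exists
 *
 *   if (dp_rx_flow_get_fse_stats(pdev, &flow_info, &stats) ==
 *       QDF_STATUS_SUCCESS) {
 *       // stats now holds the flow's counters
 *   }
 *
 *   dp_rx_flow_delete_entry(pdev, &flow_info);
 */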
4093 /** 4094 * dp_rx_fst_detach_wrapper() - wrapper API for dp_rx_fst_detach 4095 * @soc: SoC handle 4096 * @pdev: Pdev handle 4097 * 4098 * Return: None 4099 */ 4100 extern void 4101 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev); 4102 4103 /** 4104 * dp_vdev_get_ref() - API to take a reference for VDEV object 4105 * 4106 * @soc: core DP soc context 4107 * @vdev: DP vdev 4108 * @mod_id: module id 4109 * 4110 * Return: QDF_STATUS_SUCCESS if reference held successfully 4111 * else QDF_STATUS_E_INVAL 4112 */ 4113 static inline 4114 QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev, 4115 enum dp_mod_id mod_id) 4116 { 4117 if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt)) 4118 return QDF_STATUS_E_INVAL; 4119 4120 qdf_atomic_inc(&vdev->mod_refs[mod_id]); 4121 4122 return QDF_STATUS_SUCCESS; 4123 } 4124 4125 /** 4126 * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id 4127 * @soc: core DP soc context 4128 * @vdev_id: vdev id from which the vdev object can be retrieved 4129 * @mod_id: module id which is requesting the reference 4130 * 4131 * Return: struct dp_vdev*: Pointer to DP vdev object 4132 */ 4133 static inline struct dp_vdev * 4134 dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id, 4135 enum dp_mod_id mod_id) 4136 { 4137 struct dp_vdev *vdev = NULL; 4138 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) 4139 return NULL; 4140 4141 qdf_spin_lock_bh(&soc->vdev_map_lock); 4142 vdev = soc->vdev_id_map[vdev_id]; 4143 4144 if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) { 4145 qdf_spin_unlock_bh(&soc->vdev_map_lock); 4146 return NULL; 4147 } 4148 qdf_spin_unlock_bh(&soc->vdev_map_lock); 4149 4150 return vdev; 4151 } 4152 4153 /** 4154 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id 4155 * @soc: core DP soc context 4156 * @pdev_id: pdev id from which the pdev object can be retrieved 4157 * 4158 * Return: struct dp_pdev*: Pointer to DP pdev object 4159 */ 4160 static inline struct dp_pdev * 4161 dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc, 4162 uint8_t pdev_id) 4163 { 4164 if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT)) 4165 return NULL; 4166 4167 return soc->pdev_list[pdev_id]; 4168 }
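/*
 * Example: the reference pattern expected by dp_vdev_get_ref_by_id()
 * (a sketch; dp_vdev_unref_delete() is the matching release helper
 * declared elsewhere in the DP headers and is assumed here).
 *
 *   struct dp_vdev *vdev;
 *
 *   vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
 *   if (!vdev)
 *       return;    // invalid id, or the vdev is being deleted
 *
 *   // the vdev cannot be freed while this module reference is held
 *
 *   dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 */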
4169 4170 /** 4171 * dp_get_peer_mac_list(): function to get peer mac list of vdev 4172 * @soc: Datapath soc handle 4173 * @vdev_id: vdev id 4174 * @newmac: Table of the clients' MAC addresses 4175 * @mac_cnt: No. of MACs required 4176 * @limit: Limit the number of clients 4177 * 4178 * Return: no of clients 4179 */ 4180 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id, 4181 u_int8_t newmac[][QDF_MAC_ADDR_SIZE], 4182 u_int16_t mac_cnt, bool limit); 4183 4184 /** 4185 * dp_update_num_mac_rings_for_dbs() - Update No of MAC rings based on 4186 * DBS check 4187 * @soc: DP SoC context 4188 * @max_mac_rings: Pointer to variable for No of MAC rings 4189 * 4190 * Return: None 4191 */ 4192 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc, 4193 int *max_mac_rings); 4194 4195 4196 #if defined(WLAN_SUPPORT_RX_FISA) 4197 /** 4198 * dp_rx_fst_update_cmem_params() - Update CMEM FST params 4199 * @soc: DP SoC context 4200 * @num_entries: Number of flow search entries 4201 * @cmem_ba_lo: CMEM base address low 4202 * @cmem_ba_hi: CMEM base address high 4203 * 4204 * Return: None 4205 */ 4206 void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries, 4207 uint32_t cmem_ba_lo, uint32_t cmem_ba_hi); 4208 4209 /** 4210 * dp_fisa_config() - FISA config handler 4211 * @cdp_soc: CDP SoC handle 4212 * @pdev_id: PDEV ID 4213 * @config_id: FISA config ID 4214 * @cfg: FISA config msg data 4215 */ 4216 QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id, 4217 enum cdp_fisa_config_id config_id, 4218 union cdp_fisa_config *cfg); 4219 #else 4220 static inline void 4221 dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries, 4222 uint32_t cmem_ba_lo, uint32_t cmem_ba_hi) 4223 { 4224 } 4225 #endif /* WLAN_SUPPORT_RX_FISA */ 4226 4227 #ifdef MAX_ALLOC_PAGE_SIZE 4228 /** 4229 * dp_set_max_page_size() - Set the max page size for hw link desc. 4230 * @pages: link desc page handle 4231 * @max_alloc_size: max_alloc_size 4232 * 4233 * For MCL the page size is set to OS defined value and for WIN 4234 * the page size is set to the max_alloc_size cfg ini 4235 * param. 4236 * This is to ensure that WIN gets contiguous memory allocations 4237 * as per requirement. 4238 * 4239 * Return: None 4240 */ 4241 static inline 4242 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages, 4243 uint32_t max_alloc_size) 4244 { 4245 pages->page_size = qdf_page_size; 4246 } 4247 4248 #else 4249 static inline 4250 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages, 4251 uint32_t max_alloc_size) 4252 { 4253 pages->page_size = max_alloc_size; 4254 } 4255 #endif /* MAX_ALLOC_PAGE_SIZE */ 4256 4257 /** 4258 * dp_history_get_next_index() - get the next entry to record an entry 4259 * in the history. 4260 * @curr_idx: Current index where the last entry is written. 4261 * @max_entries: Max number of entries in the history 4262 * 4263 * This function assumes that the max number of entries is a power of 2. 4264 * 4265 * Return: The index where the next entry is to be written.
4266 */ 4267 static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx, 4268 uint32_t max_entries) 4269 { 4270 uint32_t idx = qdf_atomic_inc_return(curr_idx); 4271 4272 return idx & (max_entries - 1); 4273 } 4274 4275 /** 4276 * dp_rx_skip_tlvs() - Skip TLVs len + L3 padding, save in nbuf->cb 4277 * @soc: Datapath soc handle 4278 * @nbuf: nbuf cb to be updated 4279 * @l3_padding: L3 padding 4280 * 4281 * Return: None 4282 */ 4283 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding); 4284 4285 #ifndef FEATURE_WDS 4286 static inline void 4287 dp_hmwds_ast_add_notify(struct dp_peer *peer, 4288 uint8_t *mac_addr, 4289 enum cdp_txrx_ast_entry_type type, 4290 QDF_STATUS err, 4291 bool is_peer_map) 4292 { 4293 } 4294 #endif 4295 4296 #ifdef HTT_STATS_DEBUGFS_SUPPORT 4297 /** 4298 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 4299 * debugfs for HTT stats 4300 * @pdev: dp pdev handle 4301 * 4302 * Return: QDF_STATUS 4303 */ 4304 QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev); 4305 4306 /** 4307 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 4308 * HTT stats 4309 * @pdev: dp pdev handle 4310 * 4311 * Return: none 4312 */ 4313 void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev); 4314 #else 4315 4316 /** 4317 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 4318 * debugfs for HTT stats 4319 * @pdev: dp pdev handle 4320 * 4321 * Return: QDF_STATUS 4322 */ 4323 static inline QDF_STATUS 4324 dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev) 4325 { 4326 return QDF_STATUS_SUCCESS; 4327 } 4328 4329 /** 4330 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 4331 * HTT stats 4332 * @pdev: dp pdev handle 4333 * 4334 * Return: none 4335 */ 4336 static inline void 4337 dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev) 4338 { 4339 } 4340 #endif /* HTT_STATS_DEBUGFS_SUPPORT */ 4341 4342 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR 4343 /** 4344 * dp_soc_swlm_attach() - attach the software latency manager resources 4345 * @soc: Datapath global soc handle 4346 * 4347 * Return: QDF_STATUS 4348 */ 4349 static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc) 4350 { 4351 return QDF_STATUS_SUCCESS; 4352 } 4353 4354 /** 4355 * dp_soc_swlm_detach() - detach the software latency manager resources 4356 * @soc: Datapath global soc handle 4357 * 4358 * Return: QDF_STATUS 4359 */ 4360 static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc) 4361 { 4362 return QDF_STATUS_SUCCESS; 4363 } 4364 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */ 4365 4366 #ifndef WLAN_DP_PROFILE_SUPPORT 4367 static inline void wlan_dp_soc_cfg_sync_profile(struct cdp_soc_t *cdp_soc) {} 4368 4369 static inline void wlan_dp_pdev_cfg_sync_profile(struct cdp_soc_t *cdp_soc, 4370 uint8_t pdev_id) {} 4371 #endif 4372 4373 /** 4374 * dp_get_peer_id(): function to get peer id by mac 4375 * @soc: Datapath soc handle 4376 * @vdev_id: vdev id 4377 * @mac: Peer mac address 4378 * 4379 * Return: valid peer id on success 4380 * HTT_INVALID_PEER on failure 4381 */ 4382 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac); 4383 4384 #ifdef QCA_SUPPORT_WDS_EXTENDED 4385 /** 4386 * dp_wds_ext_set_peer_rx(): function to set peer rx handler 4387 * @soc: Datapath soc handle 4388 * @vdev_id: vdev id 4389 * @mac: Peer mac address 4390 * @rx: rx function pointer 4391 * @osif_peer: OSIF peer handle 4392 * 4393 * Return: QDF_STATUS_SUCCESS on success 4394 * 
QDF_STATUS_E_INVAL if peer is not found 4395 * QDF_STATUS_E_ALREADY if rx is already set/unset 4396 */ 4397 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc, 4398 uint8_t vdev_id, 4399 uint8_t *mac, 4400 ol_txrx_rx_fp rx, 4401 ol_osif_peer_handle osif_peer); 4402 4403 /** 4404 * dp_wds_ext_get_peer_osif_handle(): function to get peer osif handle 4405 * @soc: Datapath soc handle 4406 * @vdev_id: vdev id 4407 * @mac: Peer mac address 4408 * @osif_peer: OSIF peer handle 4409 * 4410 * Return: QDF_STATUS_SUCCESS on success 4411 * QDF_STATUS_E_INVAL if peer is not found 4412 */ 4413 QDF_STATUS dp_wds_ext_get_peer_osif_handle( 4414 ol_txrx_soc_handle soc, 4415 uint8_t vdev_id, 4416 uint8_t *mac, 4417 ol_osif_peer_handle *osif_peer); 4418 4419 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 4420 4421 #ifdef DP_MEM_PRE_ALLOC 4422 4423 /** 4424 * dp_context_alloc_mem() - allocate memory for DP context 4425 * @soc: datapath soc handle 4426 * @ctxt_type: DP context type 4427 * @ctxt_size: DP context size 4428 * 4429 * Return: DP context address 4430 */ 4431 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4432 size_t ctxt_size); 4433 4434 /** 4435 * dp_context_free_mem() - Free memory of DP context 4436 * @soc: datapath soc handle 4437 * @ctxt_type: DP context type 4438 * @vaddr: Address of context memory 4439 * 4440 * Return: None 4441 */ 4442 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4443 void *vaddr); 4444 4445 /** 4446 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages 4447 * @soc: datapath soc handle 4448 * @desc_type: memory request source type 4449 * @pages: multi page information storage 4450 * @element_size: each element size 4451 * @element_num: total number of elements to be allocated 4452 * @memctxt: memory context 4453 * @cacheable: coherent memory or cacheable memory 4454 * 4455 * This function is a wrapper for memory allocation over multiple 4456 * pages. If a DP prealloc method is registered, prealloc is attempted 4457 * first; if that fails, it falls back to regular allocation via 4458 * qdf_mem_multi_pages_alloc(). 4459 * 4460 * Return: None 4461 */ 4462 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 4463 enum qdf_dp_desc_type desc_type, 4464 struct qdf_mem_multi_page_t *pages, 4465 size_t element_size, 4466 uint32_t element_num, 4467 qdf_dma_context_t memctxt, 4468 bool cacheable); 4469 4470 /** 4471 * dp_desc_multi_pages_mem_free() - free multiple pages memory 4472 * @soc: datapath soc handle 4473 * @desc_type: memory request source type 4474 * @pages: multi page information storage 4475 * @memctxt: memory context 4476 * @cacheable: coherent memory or cacheable memory 4477 * 4478 * This function is a wrapper for multi-page memory free. If the 4479 * memory came from the prealloc pool, it is returned to the pool; 4480 * otherwise it is freed via qdf_mem_multi_pages_free().
4481 * 4482 * Return: None 4483 */ 4484 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 4485 enum qdf_dp_desc_type desc_type, 4486 struct qdf_mem_multi_page_t *pages, 4487 qdf_dma_context_t memctxt, 4488 bool cacheable); 4489 4490 #else 4491 static inline 4492 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4493 size_t ctxt_size) 4494 { 4495 return qdf_mem_malloc(ctxt_size); 4496 } 4497 4498 static inline 4499 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4500 void *vaddr) 4501 { 4502 qdf_mem_free(vaddr); 4503 } 4504 4505 static inline 4506 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 4507 enum qdf_dp_desc_type desc_type, 4508 struct qdf_mem_multi_page_t *pages, 4509 size_t element_size, 4510 uint32_t element_num, 4511 qdf_dma_context_t memctxt, 4512 bool cacheable) 4513 { 4514 qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size, 4515 element_num, memctxt, cacheable); 4516 } 4517 4518 static inline 4519 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 4520 enum qdf_dp_desc_type desc_type, 4521 struct qdf_mem_multi_page_t *pages, 4522 qdf_dma_context_t memctxt, 4523 bool cacheable) 4524 { 4525 qdf_mem_multi_pages_free(soc->osdev, pages, 4526 memctxt, cacheable); 4527 } 4528 #endif 4529 4530 /** 4531 * struct dp_frag_history_opaque_atomic - Opaque struct for adding a fragmented 4532 * history. 4533 * @index: atomic index 4534 * @num_entries_per_slot: Number of entries per slot 4535 * @allocated: is allocated or not 4536 * @entry: pointers to array of records 4537 */ 4538 struct dp_frag_history_opaque_atomic { 4539 qdf_atomic_t index; 4540 uint16_t num_entries_per_slot; 4541 uint16_t allocated; 4542 void *entry[0]; 4543 }; 4544 4545 static inline QDF_STATUS 4546 dp_soc_frag_history_attach(struct dp_soc *soc, void *history_hdl, 4547 uint32_t max_slots, uint32_t max_entries_per_slot, 4548 uint32_t entry_size, 4549 bool attempt_prealloc, enum dp_ctxt_type ctxt_type) 4550 { 4551 struct dp_frag_history_opaque_atomic *history = 4552 (struct dp_frag_history_opaque_atomic *)history_hdl; 4553 size_t alloc_size = max_entries_per_slot * entry_size; 4554 int i; 4555 4556 for (i = 0; i < max_slots; i++) { 4557 if (attempt_prealloc) 4558 history->entry[i] = dp_context_alloc_mem(soc, ctxt_type, 4559 alloc_size); 4560 else 4561 history->entry[i] = qdf_mem_malloc(alloc_size); 4562 4563 if (!history->entry[i]) 4564 goto exit; 4565 } 4566 4567 qdf_atomic_init(&history->index); 4568 history->allocated = 1; 4569 history->num_entries_per_slot = max_entries_per_slot; 4570 4571 return QDF_STATUS_SUCCESS; 4572 exit: 4573 for (i = i - 1; i >= 0; i--) { 4574 if (attempt_prealloc) 4575 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 4576 else 4577 qdf_mem_free(history->entry[i]); 4578 } 4579 4580 return QDF_STATUS_E_NOMEM; 4581 } 4582 4583 static inline 4584 void dp_soc_frag_history_detach(struct dp_soc *soc, 4585 void *history_hdl, uint32_t max_slots, 4586 bool attempt_prealloc, 4587 enum dp_ctxt_type ctxt_type) 4588 { 4589 struct dp_frag_history_opaque_atomic *history = 4590 (struct dp_frag_history_opaque_atomic *)history_hdl; 4591 int i; 4592 4593 for (i = 0; i < max_slots; i++) { 4594 if (attempt_prealloc) 4595 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 4596 else 4597 qdf_mem_free(history->entry[i]); 4598 } 4599 4600 history->allocated = 0; 4601 } 4602 4603 /** 4604 * dp_get_frag_hist_next_atomic_idx() - get the next entry index to record an 4605 * entry in a fragmented history with 4606 * index being atomic. 
@curr_idx: address of the current index where the last entry was written 4608 * @next_idx: pointer to update the next index 4609 * @slot: pointer to update the history slot to be selected 4610 * @slot_shift: bitwise shift applied to the index to get the slot 4611 * @max_entries_per_slot: Max number of entries in a slot of history 4612 * @max_entries: Total number of entries in the history (sum of all slots) 4613 * 4614 * This function assumes that "max_entries_per_slot" and "max_entries" 4615 * are powers of 2. 4616 * 4617 * Return: None 4618 */ 4619 static inline void 4620 dp_get_frag_hist_next_atomic_idx(qdf_atomic_t *curr_idx, uint32_t *next_idx, 4621 uint16_t *slot, uint32_t slot_shift, 4622 uint32_t max_entries_per_slot, 4623 uint32_t max_entries) 4624 { 4625 uint32_t idx; 4626 4627 idx = qdf_do_div_rem(qdf_atomic_inc_return(curr_idx), max_entries); 4628 4629 *slot = idx >> slot_shift; 4630 *next_idx = idx & (max_entries_per_slot - 1); 4631 }
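/*
 * Worked example (illustrative numbers): with 4 slots of 1024 entries
 * each (slot_shift = 10, max_entries_per_slot = 1024, max_entries = 4096),
 * an incremented counter value of 3077 gives idx = 3077 % 4096 = 3077,
 * slot = 3077 >> 10 = 3 and next_idx = 3077 & 1023 = 5, i.e. entry 5 of
 * slot 3. The modulo wraps the atomic counter over the whole history,
 * while the shift/mask pair splits it into a fragment (slot) and an
 * offset within that fragment.
 */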
4632 4633 #ifdef FEATURE_RUNTIME_PM 4634 /** 4635 * dp_runtime_get() - Get dp runtime refcount 4636 * @soc: Datapath soc handle 4637 * 4638 * Get dp runtime refcount by incrementing an atomic variable; a nonzero 4639 * refcount makes runtime suspend wait for pending tx to be flushed. 4640 * 4641 * Return: Current refcount 4642 */ 4643 static inline int32_t dp_runtime_get(struct dp_soc *soc) 4644 { 4645 return qdf_atomic_inc_return(&soc->dp_runtime_refcount); 4646 } 4647 4648 /** 4649 * dp_runtime_put() - Return dp runtime refcount 4650 * @soc: Datapath soc handle 4651 * 4652 * Release dp runtime refcount by decrementing an atomic variable, allowing 4653 * runtime suspend to proceed. 4654 * 4655 * Return: Current refcount 4656 */ 4657 static inline int32_t dp_runtime_put(struct dp_soc *soc) 4658 { 4659 return qdf_atomic_dec_return(&soc->dp_runtime_refcount); 4660 } 4661 4662 /** 4663 * dp_runtime_get_refcount() - Get dp runtime refcount 4664 * @soc: Datapath soc handle 4665 * 4666 * Get dp runtime refcount by returning an atomic variable 4667 * 4668 * Return: Current refcount 4669 */ 4670 static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc) 4671 { 4672 return qdf_atomic_read(&soc->dp_runtime_refcount); 4673 } 4674 4675 /** 4676 * dp_runtime_init() - Init DP related runtime PM clients and runtime refcount 4677 * @soc: Datapath soc handle 4678 * 4679 * Return: None 4680 */ 4681 static inline void dp_runtime_init(struct dp_soc *soc) 4682 { 4683 hif_rtpm_register(HIF_RTPM_ID_DP, NULL); 4684 hif_rtpm_register(HIF_RTPM_ID_DP_RING_STATS, NULL); 4685 qdf_atomic_init(&soc->dp_runtime_refcount); 4686 } 4687 4688 /** 4689 * dp_runtime_deinit() - Deinit DP related runtime PM clients 4690 * 4691 * Return: None 4692 */ 4693 static inline void dp_runtime_deinit(void) 4694 { 4695 hif_rtpm_deregister(HIF_RTPM_ID_DP); 4696 hif_rtpm_deregister(HIF_RTPM_ID_DP_RING_STATS); 4697 } 4698 4699 /** 4700 * dp_runtime_pm_mark_last_busy() - Mark last busy when rx path in use 4701 * @soc: Datapath soc handle 4702 * 4703 * Return: None 4704 */ 4705 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 4706 { 4707 soc->rx_last_busy = qdf_get_log_timestamp_usecs(); 4708 4709 hif_rtpm_mark_last_busy(HIF_RTPM_ID_DP); 4710 } 4711 #else 4712 static inline int32_t dp_runtime_get(struct dp_soc *soc) 4713 { 4714 return 0; 4715 } 4716 4717 static inline int32_t dp_runtime_put(struct dp_soc *soc) 4718 { 4719 return 0; 4720 } 4721 4722 static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc) 4723 { 4724 return QDF_STATUS_SUCCESS; 4725 } 4726 4727 static inline void dp_runtime_deinit(void) 4728 { 4729 } 4730 4731 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 4732 { 4733 } 4734 #endif
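/*
 * Example: how a data path might bracket in-flight work with the
 * runtime-PM refcount (a sketch; the surrounding function and the work
 * it hands off are illustrative). A nonzero refcount keeps runtime
 * suspend from proceeding while the work is outstanding.
 *
 *   dp_runtime_get(soc);
 *   // ... hand frames to the hardware ...
 *   dp_runtime_put(soc);
 *
 * On the rx side, dp_runtime_pm_mark_last_busy() can be used instead to
 * push out the autosuspend deadline while traffic is active.
 */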
4735 4736 static inline enum QDF_GLOBAL_MODE dp_soc_get_con_mode(struct dp_soc *soc) 4737 { 4738 if (soc->cdp_soc.ol_ops->get_con_mode) 4739 return soc->cdp_soc.ol_ops->get_con_mode(); 4740 4741 return QDF_GLOBAL_MAX_MODE; 4742 } 4743 4744 /** 4745 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats 4746 * processing 4747 * @pdev: Datapath PDEV handle 4748 * Return: None 4749 */ 4750 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev); 4751 4752 /** 4753 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats 4754 * processing 4755 * @pdev: Datapath PDEV handle 4756 * 4757 * Return: QDF_STATUS_SUCCESS: Success 4758 * QDF_STATUS_E_NOMEM: Error 4759 */ 4760 4761 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev); 4762 4763 /** 4764 * dp_peer_flush_frags() - Flush all fragments for a particular 4765 * peer 4766 * @soc_hdl: data path soc handle 4767 * @vdev_id: vdev id 4768 * @peer_mac: peer mac address 4769 * 4770 * Return: None 4771 */ 4772 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 4773 uint8_t *peer_mac); 4774 4775 /** 4776 * dp_soc_reset_mon_intr_mask() - reset mon intr mask 4777 * @soc: pointer to dp_soc handle 4778 * 4779 * Return: None 4780 */ 4781 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc); 4782 4783 /** 4784 * dp_txrx_get_soc_stats() - will return cdp_soc_stats 4785 * @soc_hdl: soc handle 4786 * @soc_stats: buffer to hold the values 4787 * 4788 * Return: QDF_STATUS_SUCCESS: Success 4789 * QDF_STATUS_E_FAILURE: Error 4790 */ 4791 QDF_STATUS dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl, 4792 struct cdp_soc_stats *soc_stats); 4793 4794 /** 4795 * dp_txrx_get_peer_delay_stats() - to get peer delay stats per TIDs 4796 * @soc_hdl: soc handle 4797 * @vdev_id: id of vdev handle 4798 * @peer_mac: mac of DP_PEER handle 4799 * @delay_stats: pointer to delay stats array 4800 * 4801 * Return: QDF_STATUS_SUCCESS: Success 4802 * QDF_STATUS_E_FAILURE: Error 4803 */ 4804 QDF_STATUS 4805 dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 4806 uint8_t *peer_mac, 4807 struct cdp_delay_tid_stats *delay_stats); 4808 4809 /** 4810 * dp_txrx_get_peer_jitter_stats() - to get peer jitter stats per TIDs 4811 * @soc_hdl: soc handle 4812 * @pdev_id: id of pdev handle 4813 * @vdev_id: id of vdev handle 4814 * @peer_mac: mac of DP_PEER handle 4815 * @tid_stats: pointer to jitter stats array 4816 * 4817 * Return: QDF_STATUS_SUCCESS: Success 4818 * QDF_STATUS_E_FAILURE: Error 4819 */ 4820 QDF_STATUS 4821 dp_txrx_get_peer_jitter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4822 uint8_t vdev_id, uint8_t *peer_mac, 4823 struct cdp_peer_tid_stats *tid_stats); 4824 4825 /** 4826 * dp_peer_get_tx_capture_stats() - to get peer Tx Capture stats 4827 * @soc_hdl: soc handle 4828 * @vdev_id: id of vdev handle 4829 * @peer_mac: mac of DP_PEER handle 4830 * @stats: pointer to peer tx capture stats 4831 * 4832 * Return: QDF_STATUS_SUCCESS: Success 4833 * QDF_STATUS_E_FAILURE: Error 4834 */ 4835 QDF_STATUS 4836 dp_peer_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, 4837 uint8_t vdev_id, uint8_t *peer_mac, 4838 struct cdp_peer_tx_capture_stats *stats); 4839 4840 /** 4841 * dp_pdev_get_tx_capture_stats() - to get pdev Tx Capture stats 4842 * @soc_hdl: soc handle 4843 * @pdev_id: id of pdev handle 4844 * @stats: pointer to pdev tx capture stats 4845 * 4846 * Return: QDF_STATUS_SUCCESS: Success 4847 * QDF_STATUS_E_FAILURE: Error 4848 */ 4849 QDF_STATUS 4850 dp_pdev_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4851 struct cdp_pdev_tx_capture_stats *stats); 4852 4853 #ifdef HW_TX_DELAY_STATS_ENABLE 4854 /** 4855 * dp_is_vdev_tx_delay_stats_enabled(): Check if tx delay stats 4856 * are enabled for vdev 4857 * @vdev: dp vdev 4858 * 4859 * Return: true if tx delay stats are enabled for vdev else false 4860 */ 4861 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 4862 { 4863 return vdev->hw_tx_delay_stats_enabled; 4864 } 4865 4866 /** 4867 * dp_pdev_print_tx_delay_stats(): Print vdev tx delay stats 4868 * for pdev 4869 * @soc: dp soc 4870 * 4871 * Return: None 4872 */ 4873 void dp_pdev_print_tx_delay_stats(struct dp_soc *soc); 4874 4875 /** 4876 * dp_pdev_clear_tx_delay_stats() - clear tx delay stats 4877 * @soc: soc handle 4878 * 4879 * Return: None 4880 */ 4881 void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc); 4882 #else 4883 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 4884 { 4885 return 0; 4886 } 4887 4888 static inline void dp_pdev_print_tx_delay_stats(struct dp_soc *soc) 4889 { 4890 } 4891 4892 static inline void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc) 4893 { 4894 } 4895 #endif 4896 4897 static inline void 4898 dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config *lro_hash) 4899 { 4900 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv4, 4901 (sizeof(lro_hash->toeplitz_hash_ipv4[0]) * 4902 LRO_IPV4_SEED_ARR_SZ)); 4903 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv6, 4904 (sizeof(lro_hash->toeplitz_hash_ipv6[0]) * 4905 LRO_IPV6_SEED_ARR_SZ)); 4906 } 4907 4908 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 4909 /** 4910 * dp_get_pdev_telemetry_stats() - API to get pdev telemetry stats 4911 * @soc_hdl: soc handle 4912 * @pdev_id: id of pdev handle 4913 * @stats: pointer to pdev telemetry stats 4914 * 4915 * Return: QDF_STATUS_SUCCESS: Success 4916 * QDF_STATUS_E_FAILURE: Error 4917 */ 4918 QDF_STATUS 4919 dp_get_pdev_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4920 struct cdp_pdev_telemetry_stats *stats); 4921 4922 /** 4923 * dp_get_peer_telemetry_stats() - API to get peer telemetry stats 4924 * @soc_hdl: soc handle 4925 * @addr: peer mac 4926 * @stats: pointer to peer telemetry stats 4927 * 4928 * Return: QDF_STATUS_SUCCESS: Success 4929 * QDF_STATUS_E_FAILURE: Error 4930 */ 4931 QDF_STATUS 4932 dp_get_peer_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t *addr, 4933 struct cdp_peer_telemetry_stats *stats); 4934 4935 /** 4936 * dp_get_peer_deter_stats() - API to get peer deterministic stats 4937 * @soc_hdl: soc handle 4938 * @vdev_id: id of vdev handle 4939 * @addr: peer mac 4940 * @stats: pointer to peer deterministic stats 4941 * 4942 * Return: QDF_STATUS_SUCCESS: Success 4943 * QDF_STATUS_E_FAILURE: Error 4944 */ 4945 QDF_STATUS 4946 dp_get_peer_deter_stats(struct cdp_soc_t *soc_hdl, 4947 uint8_t vdev_id, 4948 uint8_t *addr, 4949 struct cdp_peer_deter_stats *stats); 4950 4951 /** 4952 * dp_get_pdev_deter_stats() - API to get pdev deterministic stats 4953 * @soc_hdl: soc handle 4954 * @pdev_id: id of pdev handle 4955 * @stats: pointer to pdev deterministic stats 4956 * 4957 * Return: QDF_STATUS_SUCCESS: Success 4958 * QDF_STATUS_E_FAILURE: Error 4959 */ 4960 QDF_STATUS 4961 dp_get_pdev_deter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4962 struct cdp_pdev_deter_stats *stats); 4963 4964 /** 4965 * dp_update_pdev_chan_util_stats() - API to update channel utilization stats 4966 *
@soc_hdl: soc handle 4967 * @pdev_id: id of pdev handle 4968 * @ch_util: Pointer to channel util stats 4969 * 4970 * Return: QDF_STATUS_SUCCESS: Success 4971 * QDF_STATUS_E_FAILURE: Error 4972 */ 4973 QDF_STATUS 4974 dp_update_pdev_chan_util_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4975 struct cdp_pdev_chan_util_stats *ch_util); 4976 #endif /* WLAN_CONFIG_TELEMETRY_AGENT */ 4977 4978 #ifdef CONNECTIVITY_PKTLOG 4979 /** 4980 * dp_tx_send_pktlog() - send tx packet log 4981 * @soc: soc handle 4982 * @pdev: pdev handle 4983 * @tx_desc: TX software descriptor 4984 * @nbuf: nbuf 4985 * @status: status of tx packet 4986 * 4987 * This function is used to send tx packet for logging 4988 * 4989 * Return: None 4990 * 4991 */ 4992 static inline 4993 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 4994 struct dp_tx_desc_s *tx_desc, 4995 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 4996 { 4997 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_tx_packetdump_cb; 4998 4999 if (qdf_unlikely(packetdump_cb) && 5000 dp_tx_frm_std == tx_desc->frm_type) { 5001 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 5002 tx_desc->vdev_id, nbuf, status, QDF_TX_DATA_PKT); 5003 } 5004 } 5005 5006 /** 5007 * dp_rx_send_pktlog() - send rx packet log 5008 * @soc: soc handle 5009 * @pdev: pdev handle 5010 * @nbuf: nbuf 5011 * @status: status of rx packet 5012 * 5013 * This function is used to send rx packet for logging 5014 * 5015 * Return: None 5016 * 5017 */ 5018 static inline 5019 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5020 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5021 { 5022 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb; 5023 5024 if (qdf_unlikely(packetdump_cb)) { 5025 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 5026 QDF_NBUF_CB_RX_VDEV_ID(nbuf), 5027 nbuf, status, QDF_RX_DATA_PKT); 5028 } 5029 } 5030 5031 /** 5032 * dp_rx_err_send_pktlog() - send rx error packet log 5033 * @soc: soc handle 5034 * @pdev: pdev handle 5035 * @mpdu_desc_info: MPDU descriptor info 5036 * @nbuf: nbuf 5037 * @status: status of rx packet 5038 * @set_pktlen: whether to set packet length 5039 * 5040 * This API should only be called when we have not removed 5041 * Rx TLV from head, and head is pointing to rx_tlv 5042 * 5043 * This function is used to send rx packet from error path 5044 * for logging for which rx packet tlv is not removed.
5045 * 5046 * Return: None 5047 * 5048 */ 5049 static inline 5050 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5051 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 5052 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status, 5053 bool set_pktlen) 5054 { 5055 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb; 5056 qdf_size_t skip_size; 5057 uint16_t msdu_len, nbuf_len; 5058 uint8_t *rx_tlv_hdr; 5059 struct hal_rx_msdu_metadata msdu_metadata; 5060 uint16_t buf_size; 5061 5062 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx); 5063 5064 if (qdf_unlikely(packetdump_cb)) { 5065 rx_tlv_hdr = qdf_nbuf_data(nbuf); 5066 nbuf_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, 5067 rx_tlv_hdr); 5068 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, 5069 &msdu_metadata); 5070 5071 if (mpdu_desc_info->bar_frame || 5072 (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_FRAGMENT)) 5073 skip_size = soc->rx_pkt_tlv_size; 5074 else 5075 skip_size = soc->rx_pkt_tlv_size + 5076 msdu_metadata.l3_hdr_pad; 5077 5078 if (set_pktlen) { 5079 msdu_len = nbuf_len + skip_size; 5080 qdf_nbuf_set_pktlen(nbuf, qdf_min(msdu_len, buf_size)); 5081 } 5082 5083 qdf_nbuf_pull_head(nbuf, skip_size); 5084 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 5085 QDF_NBUF_CB_RX_VDEV_ID(nbuf), 5086 nbuf, status, QDF_RX_DATA_PKT); 5087 qdf_nbuf_push_head(nbuf, skip_size); 5088 } 5089 } 5090 5091 #else 5092 static inline 5093 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5094 struct dp_tx_desc_s *tx_desc, 5095 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5096 { 5097 } 5098 5099 static inline 5100 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5101 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5102 { 5103 } 5104 5105 static inline 5106 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5107 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 5108 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status, 5109 bool set_pktlen) 5110 { 5111 } 5112 #endif 5113 5114 /** 5115 * dp_pdev_update_fast_rx_flag() - Update Fast rx flag for a PDEV 5116 * @soc : Data path soc handle 5117 * @pdev : PDEV handle 5118 * 5119 * Return: None 5120 */ 5121 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev); 5122 5123 #ifdef FEATURE_DIRECT_LINK 5124 /** 5125 * dp_setup_direct_link_refill_ring(): Setup direct link refill ring for pdev 5126 * @soc_hdl: DP SOC handle 5127 * @pdev_id: pdev id 5128 * 5129 * Return: Handle to SRNG 5130 */ 5131 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5132 uint8_t pdev_id); 5133 5134 /** 5135 * dp_destroy_direct_link_refill_ring(): Destroy direct link refill ring for 5136 * pdev 5137 * @soc_hdl: DP SOC handle 5138 * @pdev_id: pdev id 5139 * 5140 * Return: None 5141 */ 5142 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5143 uint8_t pdev_id); 5144 #else 5145 static inline 5146 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5147 uint8_t pdev_id) 5148 { 5149 return NULL; 5150 } 5151 5152 static inline 5153 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5154 uint8_t pdev_id) 5155 { 5156 } 5157 #endif 5158 5159 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY 5160 static inline 5161 void dp_cfg_event_record(struct dp_soc *soc, 5162 enum dp_cfg_event_type event, 5163 union dp_cfg_event_desc *cfg_event_desc) 5164 { 5165 struct dp_cfg_event_history *cfg_event_history = 5166 &soc->cfg_event_history; 5167 struct dp_cfg_event *entry; 5168 uint32_t idx; 
5169 uint16_t slot; 5170 5171 dp_get_frag_hist_next_atomic_idx(&cfg_event_history->index, &idx, 5172 &slot, 5173 DP_CFG_EVT_HIST_SLOT_SHIFT, 5174 DP_CFG_EVT_HIST_PER_SLOT_MAX, 5175 DP_CFG_EVT_HISTORY_SIZE); 5176 5177 entry = &cfg_event_history->entry[slot][idx]; 5178 5179 entry->timestamp = qdf_get_log_timestamp(); 5180 entry->type = event; 5181 qdf_mem_copy(&entry->event_desc, cfg_event_desc, 5182 sizeof(entry->event_desc)); 5183 } 5184 5185 static inline void 5186 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5187 struct dp_vdev *vdev) 5188 { 5189 union dp_cfg_event_desc cfg_evt_desc = {0}; 5190 struct dp_vdev_attach_detach_desc *vdev_evt = 5191 &cfg_evt_desc.vdev_evt; 5192 5193 if (qdf_unlikely(event != DP_CFG_EVENT_VDEV_ATTACH && 5194 event != DP_CFG_EVENT_VDEV_UNREF_DEL && 5195 event != DP_CFG_EVENT_VDEV_DETACH)) { 5196 qdf_assert_always(0); 5197 return; 5198 } 5199 5200 vdev_evt->vdev = vdev; 5201 vdev_evt->vdev_id = vdev->vdev_id; 5202 vdev_evt->ref_count = qdf_atomic_read(&vdev->ref_cnt); 5203 vdev_evt->mac_addr = vdev->mac_addr; 5204 5205 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5206 } 5207 5208 static inline void 5209 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5210 struct dp_peer *peer, struct dp_vdev *vdev, 5211 uint8_t is_reuse) 5212 { 5213 union dp_cfg_event_desc cfg_evt_desc = {0}; 5214 struct dp_peer_cmn_ops_desc *peer_evt = &cfg_evt_desc.peer_cmn_evt; 5215 5216 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_CREATE && 5217 event != DP_CFG_EVENT_PEER_DELETE && 5218 event != DP_CFG_EVENT_PEER_UNREF_DEL)) { 5219 qdf_assert_always(0); 5220 return; 5221 } 5222 5223 peer_evt->peer = peer; 5224 peer_evt->vdev = vdev; 5225 peer_evt->vdev_id = vdev->vdev_id; 5226 peer_evt->is_reuse = is_reuse; 5227 peer_evt->peer_ref_count = qdf_atomic_read(&peer->ref_cnt); 5228 peer_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt); 5229 peer_evt->mac_addr = peer->mac_addr; 5230 peer_evt->vdev_mac_addr = vdev->mac_addr; 5231 5232 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5233 } 5234 5235 static inline void 5236 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc, 5237 enum dp_cfg_event_type event, 5238 struct dp_peer *mld_peer, 5239 struct dp_peer *link_peer, 5240 uint8_t idx, uint8_t result) 5241 { 5242 union dp_cfg_event_desc cfg_evt_desc = {0}; 5243 struct dp_mlo_add_del_link_desc *mlo_link_delink_evt = 5244 &cfg_evt_desc.mlo_link_delink_evt; 5245 5246 if (qdf_unlikely(event != DP_CFG_EVENT_MLO_ADD_LINK && 5247 event != DP_CFG_EVENT_MLO_DEL_LINK)) { 5248 qdf_assert_always(0); 5249 return; 5250 } 5251 5252 mlo_link_delink_evt->link_peer = link_peer; 5253 mlo_link_delink_evt->mld_peer = mld_peer; 5254 mlo_link_delink_evt->link_mac_addr = link_peer->mac_addr; 5255 mlo_link_delink_evt->mld_mac_addr = mld_peer->mac_addr; 5256 mlo_link_delink_evt->num_links = mld_peer->num_links; 5257 mlo_link_delink_evt->action_result = result; 5258 mlo_link_delink_evt->idx = idx; 5259 5260 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5261 } 5262 5263 static inline void 5264 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc, 5265 struct dp_peer *mld_peer, 5266 struct dp_vdev *prev_vdev, 5267 struct dp_vdev *new_vdev) 5268 { 5269 union dp_cfg_event_desc cfg_evt_desc = {0}; 5270 struct dp_mlo_setup_vdev_update_desc *vdev_update_evt = 5271 &cfg_evt_desc.mlo_setup_vdev_update; 5272 5273 vdev_update_evt->mld_peer = mld_peer; 5274 vdev_update_evt->prev_vdev = prev_vdev; 5275 vdev_update_evt->new_vdev = new_vdev; 5276 
5277 dp_cfg_event_record(soc, DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE, 5278 &cfg_evt_desc); 5279 } 5280 5281 static inline void 5282 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc, 5283 enum dp_cfg_event_type event, 5284 struct dp_peer *peer, 5285 uint8_t *mac_addr, 5286 uint8_t is_ml_peer, 5287 uint16_t peer_id, uint16_t ml_peer_id, 5288 uint16_t hw_peer_id, uint8_t vdev_id) 5289 { 5290 union dp_cfg_event_desc cfg_evt_desc = {0}; 5291 struct dp_rx_peer_map_unmap_desc *peer_map_unmap_evt = 5292 &cfg_evt_desc.peer_map_unmap_evt; 5293 5294 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_MAP && 5295 event != DP_CFG_EVENT_PEER_UNMAP && 5296 event != DP_CFG_EVENT_MLO_PEER_MAP && 5297 event != DP_CFG_EVENT_MLO_PEER_UNMAP)) { 5298 qdf_assert_always(0); 5299 return; 5300 } 5301 5302 peer_map_unmap_evt->peer_id = peer_id; 5303 peer_map_unmap_evt->ml_peer_id = ml_peer_id; 5304 peer_map_unmap_evt->hw_peer_id = hw_peer_id; 5305 peer_map_unmap_evt->vdev_id = vdev_id; 5306 /* Peer may be NULL at times, but it's not an issue. */ 5307 peer_map_unmap_evt->peer = peer; 5308 peer_map_unmap_evt->is_ml_peer = is_ml_peer; 5309 qdf_mem_copy(&peer_map_unmap_evt->mac_addr.raw, mac_addr, 5310 QDF_MAC_ADDR_SIZE); 5311 5312 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5313 } 5314 5315 static inline void 5316 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc, 5317 enum dp_cfg_event_type event, 5318 struct dp_peer *peer, 5319 struct dp_vdev *vdev, 5320 uint8_t vdev_id, 5321 struct cdp_peer_setup_info *peer_setup_info) 5322 { 5323 union dp_cfg_event_desc cfg_evt_desc = {0}; 5324 struct dp_peer_setup_desc *peer_setup_evt = 5325 &cfg_evt_desc.peer_setup_evt; 5326 5327 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_SETUP && 5328 event != DP_CFG_EVENT_MLO_SETUP)) { 5329 qdf_assert_always(0); 5330 return; 5331 } 5332 5333 peer_setup_evt->peer = peer; 5334 peer_setup_evt->vdev = vdev; 5335 if (vdev) 5336 peer_setup_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt); 5337 peer_setup_evt->mac_addr = peer->mac_addr; 5338 peer_setup_evt->vdev_id = vdev_id; 5339 if (peer_setup_info) { 5340 peer_setup_evt->is_first_link = peer_setup_info->is_first_link; 5341 peer_setup_evt->is_primary_link = peer_setup_info->is_primary_link; 5342 qdf_mem_copy(peer_setup_evt->mld_mac_addr.raw, 5343 peer_setup_info->mld_peer_mac, 5344 QDF_MAC_ADDR_SIZE); 5345 } 5346 5347 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5348 } 5349 #else 5350 5351 static inline void 5352 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5353 struct dp_vdev *vdev) 5354 { 5355 } 5356 5357 static inline void 5358 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5359 struct dp_peer *peer, struct dp_vdev *vdev, 5360 uint8_t is_reuse) 5361 { 5362 } 5363 5364 static inline void 5365 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc, 5366 enum dp_cfg_event_type event, 5367 struct dp_peer *mld_peer, 5368 struct dp_peer *link_peer, 5369 uint8_t idx, uint8_t result) 5370 { 5371 } 5372 5373 static inline void 5374 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc, 5375 struct dp_peer *mld_peer, 5376 struct dp_vdev *prev_vdev, 5377 struct dp_vdev *new_vdev) 5378 { 5379 } 5380 5381 static inline void 5382 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc, 5383 enum dp_cfg_event_type event, 5384 struct dp_peer *peer, 5385 uint8_t *mac_addr, 5386 uint8_t is_ml_peer, 5387 uint16_t peer_id, uint16_t ml_peer_id, 5388 uint16_t hw_peer_id, uint8_t vdev_id) 5389 { 5390 } 5391 5392 static
inline void 5393 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc, 5394 enum dp_cfg_event_type event, 5395 struct dp_peer *peer, 5396 struct dp_vdev *vdev, 5397 uint8_t vdev_id, 5398 struct cdp_peer_setup_info *peer_setup_info) 5399 { 5400 } 5401 #endif 5402 5403 #ifndef WLAN_SOFTUMAC_SUPPORT 5404 /** 5405 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts 5406 * @txrx_soc: DP SOC handle 5407 * 5408 * Return: none 5409 */ 5410 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc); 5411 #endif 5412 5413 /** 5414 * dp_get_peer_stats() - Get peer stats 5415 * @peer: Datapath peer 5416 * @peer_stats: buffer for peer stats 5417 * 5418 * Return: none 5419 */ 5420 void dp_get_peer_stats(struct dp_peer *peer, 5421 struct cdp_peer_stats *peer_stats); 5422 5423 /** 5424 * dp_get_per_link_peer_stats() - Get per link peer stats 5425 * @peer: Datapath peer 5426 * @peer_stats: buffer for peer stats 5427 * @peer_type: Peer type 5428 * @num_link: Number of ML links 5429 * 5430 * Return: status success/failure 5431 */ 5432 QDF_STATUS dp_get_per_link_peer_stats(struct dp_peer *peer, 5433 struct cdp_peer_stats *peer_stats, 5434 enum cdp_peer_type peer_type, 5435 uint8_t num_link); 5436 /** 5437 * dp_get_peer_hw_link_id() - get peer hardware link id 5438 * @soc: soc handle 5439 * @pdev: data path pdev 5440 * 5441 * Return: link_id 5442 */ 5443 static inline int 5444 dp_get_peer_hw_link_id(struct dp_soc *soc, 5445 struct dp_pdev *pdev) 5446 { 5447 if (wlan_cfg_is_peer_link_stats_enabled(soc->wlan_cfg_ctx)) 5448 return ((soc->arch_ops.get_hw_link_id(pdev)) + 1); 5449 5450 return 0; 5451 } 5452 5453 #ifdef QCA_MULTIPASS_SUPPORT 5454 /** 5455 * dp_tx_remove_vlan_tag() - Remove 4 bytes of vlan tag 5456 * @vdev: DP vdev handle 5457 * @nbuf: network buffer 5458 * 5459 * Return: void 5460 */ 5461 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf); 5462 #endif 5463
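/*
 * Note on dp_get_peer_hw_link_id() above: valid hardware link ids appear
 * to be reported as (hw_link_id + 1) so that a return value of 0 can be
 * reserved to mean "per-link peer stats disabled". For example
 * (illustrative values), if get_hw_link_id() yields 0 for the first link
 * and link stats are enabled, callers see 1; with the feature disabled
 * they always see 0.
 */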
5464 /** 5465 * dp_print_per_link_stats() - Print per link peer stats. 5466 * @soc_hdl: soc handle. 5467 * @vdev_id: vdev_id. 5468 * 5469 * Return: None. 5470 */ 5471 void dp_print_per_link_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id); 5472 5473 /** 5474 * dp_get_ring_stats_from_hal(): get hal level ring pointer values 5475 * @soc: DP_SOC handle 5476 * @srng: DP_SRNG handle 5477 * @ring_type: srng src/dst ring 5478 * @_tailp: pointer to tail of ring 5479 * @_headp: pointer to head of ring 5480 * @_hw_headp: pointer to head of ring in HW 5481 * @_hw_tailp: pointer to tail of ring in HW 5482 * 5483 * Return: void 5484 */ 5485 static inline void 5486 dp_get_ring_stats_from_hal(struct dp_soc *soc, struct dp_srng *srng, 5487 enum hal_ring_type ring_type, 5488 uint32_t *_tailp, uint32_t *_headp, 5489 int32_t *_hw_headp, int32_t *_hw_tailp) 5490 { 5491 uint32_t tailp; 5492 uint32_t headp; 5493 int32_t hw_headp = -1; 5494 int32_t hw_tailp = -1; 5496 5497 if (soc && srng && srng->hal_srng) { 5499 hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp); 5500 *_headp = headp; 5501 *_tailp = tailp; 5502 5503 hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_headp, 5504 &hw_tailp, ring_type); 5505 *_hw_headp = hw_headp; 5506 *_hw_tailp = hw_tailp; 5507 } 5508 } 5509 5510 #ifdef WLAN_FEATURE_TX_LATENCY_STATS 5511 /** 5512 * dp_h2t_tx_latency_stats_cfg_msg_send(): send HTT message for tx latency 5513 * stats config to FW 5514 * @dp_soc: DP SOC handle 5515 * @vdev_id: vdev id 5516 * @enable: indicates enablement of the feature 5517 * @period: statistical period for transmit latency in terms of ms 5518 * @granularity: granularity for tx latency distribution 5519 * 5520 * Return: QDF_STATUS 5521 */ 5522 QDF_STATUS 5523 dp_h2t_tx_latency_stats_cfg_msg_send(struct dp_soc *dp_soc, uint16_t vdev_id, 5524 bool enable, uint32_t period, 5525 uint32_t granularity); 5526 5527 /** 5528 * dp_tx_latency_stats_update_cca() - update transmit latency statistics for 5529 * CCA 5530 * @soc: dp soc handle 5531 * @peer_id: peer id 5532 * @granularity: granularity of distribution 5533 * @distribution: distribution of transmit latency statistics 5534 * @avg: average of CCA latency (in microseconds) within a cycle 5535 * 5536 * Return: None 5537 */ 5538 void 5539 dp_tx_latency_stats_update_cca(struct dp_soc *soc, uint16_t peer_id, 5540 uint32_t granularity, uint32_t *distribution, 5541 uint32_t avg); 5542 5543 /** 5544 * dp_tx_latency_stats_report() - report transmit latency statistics for each 5545 * vdev of specified pdev 5546 * @soc: dp soc handle 5547 * @pdev: dp pdev Handle 5548 * 5549 * Return: None 5550 */ 5551 void dp_tx_latency_stats_report(struct dp_soc *soc, struct dp_pdev *pdev); 5552 #endif 5553 #endif /* #ifndef _DP_INTERNAL_H_ */ 5554