/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"
#include "dp_htt.h"
#include "dp_rx_tid.h"

#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

#define DP_PEER_WDS_COUNT_INVALID UINT_MAX

#define DP_BLOCKMEM_SIZE 4096
#define WBM2_SW_PPE_REL_RING_ID 6
#define WBM2_SW_PPE_REL_MAP_ID 11
#define DP_TX_PPEDS_POOL_ID 0xF

/* Alignment for consistent memory for DP rings */
#define DP_RING_BASE_ALIGN 32

#define DP_RSSI_INVAL 0x80
#define DP_RSSI_AVG_WEIGHT 2
/*
 * Formula to derive avg_rssi is taken from wifi2.o firmware
 */
#define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))
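/*
 * Worked example (illustrative): with DP_RSSI_AVG_WEIGHT = 2 the macro
 * computes an exponential moving average with weight 1/4,
 *
 *   avg' = avg - (avg >> 2) + (last >> 2)
 *
 * e.g. avg_rssi = 80, last_rssi = 64:
 *   avg' = 80 - 20 + 16 = 76
 *
 *   uint8_t avg = 80;
 *   avg = DP_GET_AVG_RSSI(avg, 64);   // avg == 76
 */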
/* Macro for NYSM value received in VHT TLV */
#define VHT_SGI_NYSM 3

#define INVALID_WBM_RING_NUM 0xF

#ifdef FEATURE_DIRECT_LINK
#define DIRECT_LINK_REFILL_RING_ENTRIES 64
#ifdef IPA_OFFLOAD
#ifdef IPA_WDI3_VLAN_SUPPORT
#define DIRECT_LINK_REFILL_RING_IDX 4
#else
#define DIRECT_LINK_REFILL_RING_IDX 3
#endif
#else
#define DIRECT_LINK_REFILL_RING_IDX 2
#endif
#endif

#define DP_MAX_VLAN_IDS 4096
#define DP_VLAN_UNTAGGED 0
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2

/**
 * struct htt_dbgfs_cfg - structure to maintain required htt data
 * @msg_word: htt msg sent to upper layer
 * @m: qdf debugfs file pointer
 */
struct htt_dbgfs_cfg {
	uint32_t *msg_word;
	qdf_debugfs_file_t m;
};

/* Cookie MSB bits assigned for different use cases.
 * Note: the last 3 bits are reserved for the pdev_id and must not be
 * used; revisit this split if the number of pdevs ever exceeds 3.
 */

/* Reserved for the default case */
#define DBG_STATS_COOKIE_DEFAULT 0x0

/* Reserved for DP stats: 3rd bit */
#define DBG_STATS_COOKIE_DP_STATS BIT(3)

/* Reserved for HTT stats debugfs support: 4th bit */
#define DBG_STATS_COOKIE_HTT_DBGFS BIT(4)

/* Reserved for sysfs stats support: 5th bit */
#define DBG_SYSFS_STATS_COOKIE BIT(5)

/* Reserved for HTT stats OBSS PD support: 6th bit */
#define DBG_STATS_COOKIE_HTT_OBSS BIT(6)

/*
 * Bitmap of HTT PPDU TLV types for Default mode
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/* PPDU STATS CFG */
#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS \
	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER \
	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR \
	((1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
				   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)

/*
 * Bitmap of HTT PPDU delayed ba TLV types for Default mode
 */
#define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Delayed BA
 */
#define HTT_PPDU_STATUS_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))
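/*
 * Example (illustrative): each bitmap above is an OR of (1 << TLV_TYPE)
 * masks, so membership of a given TLV type can be tested with a shift:
 *
 *   bool has_rate_tlv =
 *       !!(HTT_PPDU_DEFAULT_TLV_BITMAP &
 *          (1 << HTT_PPDU_STATS_USR_RATE_TLV));   // true
 */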
/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))

static const enum cdp_packet_type hal_2_dp_pkt_type_map[HAL_DOT11_MAX] = {
	[HAL_DOT11A] = DOT11_A,
	[HAL_DOT11B] = DOT11_B,
	[HAL_DOT11N_MM] = DOT11_N,
	[HAL_DOT11AC] = DOT11_AC,
	[HAL_DOT11AX] = DOT11_AX,
	[HAL_DOT11BA] = DOT11_MAX,
#ifdef WLAN_FEATURE_11BE
	[HAL_DOT11BE] = DOT11_BE,
#else
	[HAL_DOT11BE] = DOT11_MAX,
#endif
	[HAL_DOT11AZ] = DOT11_MAX,
	[HAL_DOT11N_GF] = DOT11_MAX,
};

#ifdef GLOBAL_ASSERT_AVOIDANCE
/* Increment a failure counter instead of asserting; evaluates to true
 * when the check failed.
 */
#define dp_assert_always_internal_stat(_expr, _handle, _field) \
	(qdf_unlikely(!(_expr)) ? ((_handle)->stats._field++, true) : false)

#define dp_assert_always_internal_ds_stat(_expr, _handle, _field) \
	((_handle)->ppeds_stats._field++)

static inline bool dp_assert_always_internal(bool expr)
{
	return !expr;
}
#else
static inline bool __dp_assert_always_internal(bool expr)
{
	qdf_assert_always(expr);

	return false;
}

#define dp_assert_always_internal(_expr) __dp_assert_always_internal(_expr)

#define dp_assert_always_internal_stat(_expr, _handle, _field) \
	dp_assert_always_internal(_expr)

#define dp_assert_always_internal_ds_stat(_expr, _handle, _field) \
	dp_assert_always_internal(_expr)
#endif

#ifdef WLAN_FEATURE_11BE
/**
 * dp_get_mcs_array_index_by_pkt_type_mcs() - get the destination mcs index
 *		in the mcs array
 * @pkt_type: host SW pkt type
 * @mcs: mcs value for TX/RX rate
 *
 * Return: succeeded - valid index in mcs array
 *	   fail - MCS_INVALID_ARRAY_INDEX
 */
static inline uint8_t
dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs)
{
	uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX;

	switch (pkt_type) {
	case DOT11_A:
		dst_mcs_idx =
			mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_B:
		dst_mcs_idx =
			mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_N:
		dst_mcs_idx =
			mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AC:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AX:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_BE:
		dst_mcs_idx =
			mcs >= MAX_MCS_11BE ? (MAX_MCS - 1) : mcs;
		break;
	default:
		break;
	}

	return dst_mcs_idx;
}
#else
static inline uint8_t
dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs)
{
	uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX;

	switch (pkt_type) {
	case DOT11_A:
		dst_mcs_idx =
			mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_B:
		dst_mcs_idx =
			mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_N:
		dst_mcs_idx =
			mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AC:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AX:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs;
		break;
	default:
		break;
	}

	return dst_mcs_idx;
}
#endif
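/*
 * Example (illustrative): an out-of-range MCS for the given preamble is
 * clamped to the last bucket (MAX_MCS - 1), which stats code uses as the
 * invalid-MCS counter; unknown pkt types yield MCS_INVALID_ARRAY_INDEX:
 *
 *   uint8_t idx = dp_get_mcs_array_index_by_pkt_type_mcs(DOT11_AC, 9);
 *   // idx == 9 while 9 < MAX_MCS_11AC, else idx == MAX_MCS - 1
 */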
#ifdef WIFI_MONITOR_SUPPORT
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc);
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc);
#else
static inline
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_rx_err_match_dhost() - function to check whether dest-mac is correct
 * @eh: Ethernet header of incoming packet
 * @vdev: dp_vdev object of the VAP on which this data packet is received
 *
 * Return: 1 if the destination mac is correct,
 *	   0 if this frame is not correctly destined to this VAP/MLD
 */
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev);

#ifdef MONITOR_MODULARIZED_ENABLE
static inline bool dp_monitor_modularized_enable(void)
{
	return TRUE;
}

static inline QDF_STATUS
dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }

static inline QDF_STATUS
dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; }
#else
static inline bool dp_monitor_modularized_enable(void)
{
	return FALSE;
}

static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc)
{
	return dp_mon_soc_attach(soc);
}

static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc)
{
	return dp_mon_soc_detach(soc);
}
#endif

#ifndef WIFI_MONITOR_SUPPORT
#define MON_BUF_MIN_ENTRIES 64

/* Stub implementations used when the monitor module is compiled out */
static inline QDF_STATUS dp_monitor_pdev_attach(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_pdev_detach(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_peer_attach(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_E_FAILURE;
}

static inline struct cdp_peer_rate_stats_ctx*
dp_monitor_peer_get_peerstats_ctx(struct dp_soc *soc, struct dp_peer *peer)
{
	return NULL;
}

static inline
void dp_monitor_peer_reset_stats(struct dp_soc *soc, struct dp_peer *peer)
{
}

static inline
void dp_monitor_peer_get_stats(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg, enum cdp_stat_update_type type)
{
}

static inline
void dp_monitor_invalid_peer_update_pdev_stats(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_peer_get_stats_param(struct dp_soc *soc,
					   struct dp_peer *peer,
					   enum cdp_peer_stats_type type,
					   cdp_peer_stats_param_t *buf)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_pdev_deinit(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
static inline QDF_STATUS dp_monitor_soc_cfg_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_config_debug_sniffer(struct dp_pdev *pdev,
							 int val)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void dp_monitor_flush_rings(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_monitor_htt_srng_setup(struct dp_soc *soc,
						   struct dp_pdev *pdev,
						   int mac_id,
						   int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_service_mon_rings(struct dp_soc *soc,
						uint32_t quota)
{
}

static inline
uint32_t dp_monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			    uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline
uint32_t dp_monitor_drop_packets_for_mac(struct dp_pdev *pdev,
					 uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline void dp_monitor_peer_tx_init(struct dp_pdev *pdev,
					   struct dp_peer *peer)
{
}

static inline void dp_monitor_peer_tx_cleanup(struct dp_vdev *vdev,
					      struct dp_peer *peer)
{
}

static inline
void dp_monitor_peer_tid_peer_id_update(struct dp_soc *soc,
					struct dp_peer *peer,
					uint16_t peer_id)
{
}

static inline void dp_monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev)
{
}

static inline void dp_monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_tx_capture_debugfs_init(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev,
							   struct dp_peer *peer)
{
}

static inline
QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc,
					   struct dp_tx_desc_s *desc,
					   struct hal_tx_completion_status *ts,
					   uint16_t peer_id)
{
	return QDF_STATUS_E_FAILURE;
}

static inline
QDF_STATUS monitor_update_msdu_to_list(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf)
{
	return QDF_STATUS_E_FAILURE;
}

static inline bool dp_monitor_ppdu_stats_ind_handler(struct htt_soc *soc,
						     uint32_t *msg_word,
						     qdf_nbuf_t htt_t2h_msg)
{
	return true;
}

static inline QDF_STATUS dp_monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
}

static inline void dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS dp_monitor_config_enh_tx_capture(struct dp_pdev *pdev,
							  uint32_t val)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS dp_monitor_tx_peer_filter(struct dp_pdev *pdev,
						   struct dp_peer *peer,
						   uint8_t is_tx_pkt_cap_enable,
						   uint8_t *peer_mac)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS dp_monitor_config_enh_rx_capture(struct dp_pdev *pdev,
							  uint32_t val)
{
	return QDF_STATUS_E_INVAL;
}

static inline
QDF_STATUS dp_monitor_set_bpr_enable(struct dp_pdev *pdev, uint32_t val)
{
	return QDF_STATUS_E_FAILURE;
}

static inline
int dp_monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val)
{
	return 0;
}
static inline
void dp_monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
{
}

static inline
void dp_monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
{
}

static inline
bool dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
{
	return false;
}

static inline
bool dp_monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
{
	return false;
}

static inline
bool dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
{
	return false;
}

static inline
int dp_monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
				bool enable)
{
	return 0;
}

static inline void dp_monitor_pktlogmod_exit(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline
void dp_monitor_neighbour_peers_detach(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS dp_monitor_filter_neighbour_peer(struct dp_pdev *pdev,
							  uint8_t *rx_pkt_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
{
}

static inline
void dp_monitor_reap_timer_init(struct dp_soc *soc)
{
}

static inline
void dp_monitor_reap_timer_deinit(struct dp_soc *soc)
{
}

static inline
bool dp_monitor_reap_timer_start(struct dp_soc *soc,
				 enum cdp_mon_reap_source source)
{
	return false;
}

static inline
bool dp_monitor_reap_timer_stop(struct dp_soc *soc,
				enum cdp_mon_reap_source source)
{
	return false;
}

static inline void
dp_monitor_reap_timer_suspend(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_init(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_deinit(struct dp_soc *soc)
{
}

static inline
void dp_monitor_vdev_timer_start(struct dp_soc *soc)
{
}

static inline
bool dp_monitor_vdev_timer_stop(struct dp_soc *soc)
{
	return false;
}

static inline struct qdf_mem_multi_page_t*
dp_monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id)
{
	return NULL;
}

static inline struct dp_srng*
dp_monitor_get_link_desc_ring(struct dp_soc *soc, uint32_t mac_id)
{
	return NULL;
}

static inline uint32_t
dp_monitor_get_num_link_desc_ring_entries(struct dp_soc *soc)
{
	return 0;
}

static inline uint32_t *
dp_monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id)
{
	return NULL;
}

static inline QDF_STATUS dp_monitor_drop_inv_peer_pkts(struct dp_vdev *vdev)
{
	return QDF_STATUS_E_FAILURE;
}

static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
{
	return false;
}

static inline void dp_monitor_vdev_register_osif(struct dp_vdev *vdev,
						 struct ol_txrx_ops *txrx_ops)
{
}

static inline bool dp_monitor_is_vdev_timer_running(struct dp_soc *soc)
{
	return false;
}

static inline
void dp_monitor_pdev_set_mon_vdev(struct dp_vdev *vdev)
{
}

static inline void dp_monitor_vdev_delete(struct dp_soc *soc,
					  struct dp_vdev *vdev)
{
}

static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
{
}
static inline void
dp_monitor_neighbour_peer_add_ast(struct dp_pdev *pdev,
				  struct dp_peer *ta_peer,
				  uint8_t *mac_addr,
				  qdf_nbuf_t nbuf,
				  uint32_t flags)
{
}

static inline void
dp_monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band chan_band)
{
}

static inline void
dp_monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq)
{
}

static inline void dp_monitor_set_chan_num(struct dp_pdev *pdev, int chan_num)
{
}

static inline bool dp_monitor_is_enable_mcopy_mode(struct dp_pdev *pdev)
{
	return false;
}

static inline
void dp_monitor_neighbour_peer_list_remove(struct dp_pdev *pdev,
					   struct dp_vdev *vdev,
					   struct dp_neighbour_peer *peer)
{
}

static inline bool dp_monitor_is_chan_band_known(struct dp_pdev *pdev)
{
	return false;
}

static inline enum reg_wifi_band
dp_monitor_get_chan_band(struct dp_pdev *pdev)
{
	return 0;
}

static inline int
dp_monitor_get_chan_num(struct dp_pdev *pdev)
{
	return 0;
}

static inline qdf_freq_t
dp_monitor_get_chan_freq(struct dp_pdev *pdev)
{
	return 0;
}

static inline void dp_monitor_get_mpdu_status(struct dp_pdev *pdev,
					      struct dp_soc *soc,
					      uint8_t *rx_tlv_hdr)
{
}

static inline void dp_monitor_print_tx_stats(struct dp_pdev *pdev)
{
}

static inline
QDF_STATUS dp_monitor_mcopy_check_deliver(struct dp_pdev *pdev,
					  uint16_t peer_id, uint32_t ppdu_id,
					  uint8_t first_msdu)
{
	return QDF_STATUS_SUCCESS;
}

static inline bool dp_monitor_is_enable_tx_sniffer(struct dp_pdev *pdev)
{
	return false;
}

static inline struct dp_vdev*
dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev)
{
	return NULL;
}

static inline QDF_STATUS dp_monitor_check_com_info_ppdu_id(struct dp_pdev *pdev,
							   void *rx_desc)
{
	return QDF_STATUS_E_FAILURE;
}

static inline struct mon_rx_status*
dp_monitor_get_rx_status(struct dp_pdev *pdev)
{
	return NULL;
}

static inline
void dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev *pdev, bool val)
{
}

static inline
void dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev *pdev,
						      bool val)
{
}

static inline QDF_STATUS
dp_monitor_peer_tx_capture_get_stats(struct dp_soc *soc, struct dp_peer *peer,
				     struct cdp_peer_tx_capture_stats *stats)
{
	return QDF_STATUS_E_FAILURE;
}

static inline QDF_STATUS
dp_monitor_pdev_tx_capture_get_stats(struct dp_soc *soc, struct dp_pdev *pdev,
				     struct cdp_pdev_tx_capture_stats *stats)
{
	return QDF_STATUS_E_FAILURE;
}

#ifdef DP_POWER_SAVE
static inline
void dp_monitor_pktlog_reap_pending_frames(struct dp_pdev *pdev)
{
}

static inline
void dp_monitor_pktlog_start_reap_timer(struct dp_pdev *pdev)
{
}
#endif

static inline bool dp_monitor_is_configured(struct dp_pdev *pdev)
{
	return false;
}

static inline void
dp_mon_rx_hdr_length_set(struct dp_soc *soc, uint32_t *msg_word,
			 struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void dp_monitor_soc_init(struct dp_soc *soc)
{
}

static inline void dp_monitor_soc_deinit(struct dp_soc *soc)
{
}
static inline
QDF_STATUS dp_monitor_config_undecoded_metadata_capture(struct dp_pdev *pdev,
							int val)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_monitor_config_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev,
						      int mask1, int mask2)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_monitor_get_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev,
						   int *mask, int *mask_cont)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_monitor_soc_htt_srng_setup(struct dp_soc *soc)
{
	return QDF_STATUS_E_FAILURE;
}

static inline bool dp_is_monitor_mode_using_poll(struct dp_soc *soc)
{
	return false;
}

static inline
uint32_t dp_tx_mon_buf_refill(struct dp_intr *int_ctx)
{
	return 0;
}

static inline uint32_t
dp_tx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
		  uint32_t mac_id, uint32_t quota)
{
	return 0;
}

static inline uint32_t
dp_print_txmon_ring_stat_from_hal(struct dp_pdev *pdev)
{
	return 0;
}

static inline
uint32_t dp_rx_mon_buf_refill(struct dp_intr *int_ctx)
{
	return 0;
}

static inline bool dp_monitor_is_tx_cap_enabled(struct dp_peer *peer)
{
	return false;
}

static inline bool dp_monitor_is_rx_cap_enabled(struct dp_peer *peer)
{
	return false;
}

static inline void
dp_rx_mon_enable(struct dp_soc *soc, uint32_t *msg_word,
		 struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_packet_length_set(struct dp_soc *soc, uint32_t *msg_word,
			    struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_mpdu_logging(struct dp_soc *soc, uint32_t *msg_word,
			      struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_wmask_subscribe(struct dp_soc *soc,
			  uint32_t *msg_word, int pdev_id,
			  struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_mac_filter_set(struct dp_soc *soc, uint32_t *msg_word,
			 struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_pkt_tlv_offset(struct dp_soc *soc, uint32_t *msg_word,
				struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

static inline void
dp_mon_rx_enable_fpmo(struct dp_soc *soc, uint32_t *msg_word,
		      struct htt_rx_ring_tlv_filter *tlv_filter)
{
}

#ifdef WLAN_CONFIG_TELEMETRY_AGENT
static inline
void dp_monitor_peer_telemetry_stats(struct dp_peer *peer,
				     struct cdp_peer_telemetry_stats *stats)
{
}

static inline
void dp_monitor_peer_deter_stats(struct dp_peer *peer,
				 struct cdp_peer_telemetry_stats *stats)
{
}
#endif /* WLAN_CONFIG_TELEMETRY_AGENT */
#endif /* !WIFI_MONITOR_SUPPORT */

/**
 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to dp soc handle
 * @psoc: CDP psoc handle
 *
 * Return: struct dp_soc pointer
 */
static inline
struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc)
{
	return (struct dp_soc *)psoc;
}

#define DP_MAX_TIMER_EXEC_TIME_TICKS \
		(QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20)
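/*
 * Example (illustrative sketch): a monitor reap loop can bound its run
 * time by comparing elapsed log-timestamp ticks against the budget above
 * and yielding with DP_TIMER_TIME_EXHAUST (see the enum below).
 * work_pending() and reap_one_entry() are placeholders for the caller's
 * ring-processing logic:
 *
 *   uint64_t start = qdf_get_log_timestamp();
 *
 *   while (work_pending()) {
 *       reap_one_entry();
 *       if (qdf_get_log_timestamp() - start >
 *           DP_MAX_TIMER_EXEC_TIME_TICKS)
 *           break;   // yield: DP_TIMER_TIME_EXHAUST
 *   }
 */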
/**
 * enum timer_yield_status - yield status code used in monitor mode timer.
 * @DP_TIMER_NO_YIELD: do not yield
 * @DP_TIMER_WORK_DONE: yield because work is done
 * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
 * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
 */
enum timer_yield_status {
	DP_TIMER_NO_YIELD,
	DP_TIMER_WORK_DONE,
	DP_TIMER_WORK_EXHAUST,
	DP_TIMER_TIME_EXHAUST,
};

#if DP_PRINT_ENABLE
#include <qdf_types.h> /* qdf_vprint */
#include <cdp_txrx_handle.h>

enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	DP_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	DP_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	DP_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	DP_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	DP_PRINT_LEVEL_INFO2,
};

#define dp_print(level, fmt, ...) do { \
	if (level <= g_txrx_print_level) \
		qdf_print(fmt, ## __VA_ARGS__); \
} while (0)
#define DP_PRINT(level, fmt, ...) do { \
	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */

#define DP_TRACE(LVL, fmt, args ...) \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
		  fmt, ## args)

#ifdef WLAN_SYSFS_DP_STATS
void DP_PRINT_STATS(const char *fmt, ...);
#else /* WLAN_SYSFS_DP_STATS */
#ifdef DP_PRINT_NO_CONSOLE
/* Stat prints should not go to console or kernel logs. */
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, \
		  fmt, ## args)
#else
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\
		  fmt, ## args)
#endif
#endif /* WLAN_SYSFS_DP_STATS */

#define DP_STATS_INIT(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_TXRX_PEER_STATS_INIT(_handle, size) \
	qdf_mem_zero(&((_handle)->stats[0]), size)

#define DP_STATS_CLR(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_TXRX_PEER_STATS_CLR(_handle, size) \
	qdf_mem_zero(&((_handle)->stats[0]), size)

#ifndef DISABLE_DP_STATS
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link) \
{ \
	if (likely(_handle)) \
		_handle->stats[_link]._field += _delta; \
}

#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field += _delta; \
}

#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats[_link]._field += _delta; \
}

#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field -= _delta; \
}

#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field -= _delta; \
}
#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field = _delta; \
}

#define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link) \
{ \
	if (likely(_handle)) \
		_handle->stats[_link]._field = _delta; \
}

#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_STATS_INC(_handle, _field.num, _count); \
	DP_STATS_INC(_handle, _field.bytes, _bytes) \
}

#define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_PEER_STATS_FLAT_INC(_handle, _field.num, _count); \
	DP_PEER_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \
}

#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}

#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field += _handle_b->stats._field; \
}

#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
{ \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
}

#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field = _handle_b->stats._field; \
}

#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link)
#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#endif
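/*
 * Example (illustrative): the increment macros are NULL-safe (they check
 * the handle) and compile out entirely when DISABLE_DP_STATS is defined:
 *
 *   DP_STATS_INC(pdev, tx.dropped_count, 1);
 *   DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
 *
 * The field names above are representative placeholders; the real fields
 * live in the stats structs in dp_types.h.
 */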
#define DP_PEER_PER_PKT_STATS_INC(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_INC(_handle, per_pkt_stats._field, _delta, _link); \
}

#define DP_PEER_PER_PKT_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	DP_PEER_LINK_STATS_INCC(_handle, per_pkt_stats._field, _delta, _cond, _link); \
}

#define DP_PEER_PER_PKT_STATS_INC_PKT(_handle, _field, _count, _bytes, _link) \
{ \
	DP_PEER_PER_PKT_STATS_INC(_handle, _field.num, _count, _link); \
	DP_PEER_PER_PKT_STATS_INC(_handle, _field.bytes, _bytes, _link) \
}

#define DP_PEER_PER_PKT_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond, _link) \
{ \
	DP_PEER_PER_PKT_STATS_INCC(_handle, _field.num, _count, _cond, _link); \
	DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond, _link) \
}

#define DP_PEER_PER_PKT_STATS_UPD(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_UPD(_handle, per_pkt_stats._field, _delta, _link); \
}

#ifndef QCA_ENHANCED_STATS_SUPPORT
#define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_INC(_handle, extd_stats._field, _delta, _link); \
}

#define DP_PEER_EXTD_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	DP_PEER_LINK_STATS_INCC(_handle, extd_stats._field, _delta, _cond, _link); \
}

#define DP_PEER_EXTD_STATS_UPD(_handle, _field, _delta, _link) \
{ \
	DP_PEER_LINK_STATS_UPD(_handle, extd_stats._field, _delta, _link); \
}
#endif

#if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
	defined(QCA_ENHANCED_STATS_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \
}

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \
}

#define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (_cond || !(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \
}
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \
}

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \
}

#define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \
}
#else
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
	DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes);

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
	DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count);

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
	DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link);

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
	DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link);

#define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \
	DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link);
#endif

#ifdef ENABLE_DP_HIST_STATS
#define DP_HIST_INIT() \
	uint32_t num_of_packets[MAX_PDEV_CNT] = {0};

#define DP_HIST_PACKET_COUNT_INC(_pdev_id) \
{ \
	++num_of_packets[_pdev_id]; \
}
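/*
 * Example (illustrative): typical use in a completion loop; per-pdev
 * packet counts collected with DP_HIST_PACKET_COUNT_INC() are folded
 * into the histogram buckets at the end of the pass:
 *
 *   DP_HIST_INIT();
 *   while ((desc = next_completed_desc()) != NULL)
 *       DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
 *   DP_TX_HIST_STATS_PER_PDEV();
 *
 * next_completed_desc() is a placeholder, and the PER_PDEV macros below
 * expect a dp_soc pointer named "soc" in scope.
 */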
#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do { \
		if (_p_cntrs == 1) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_1, 1); \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_2_20, 1); \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_21_40, 1); \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_41_60, 1); \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_61_80, 1); \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_81_100, 1); \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_101_200, 1); \
		} else if (_p_cntrs > 200) { \
			DP_STATS_INC(_pdev, \
				tx_comp_histogram.pkts_201_plus, 1); \
		} \
	} while (0)

#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \
	do { \
		if (_p_cntrs == 1) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_1, 1); \
		} else if (_p_cntrs > 1 && _p_cntrs <= 20) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_2_20, 1); \
		} else if (_p_cntrs > 20 && _p_cntrs <= 40) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_21_40, 1); \
		} else if (_p_cntrs > 40 && _p_cntrs <= 60) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_41_60, 1); \
		} else if (_p_cntrs > 60 && _p_cntrs <= 80) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_61_80, 1); \
		} else if (_p_cntrs > 80 && _p_cntrs <= 100) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_81_100, 1); \
		} else if (_p_cntrs > 100 && _p_cntrs <= 200) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_101_200, 1); \
		} else if (_p_cntrs > 200) { \
			DP_STATS_INC(_pdev, \
				rx_ind_histogram.pkts_201_plus, 1); \
		} \
	} while (0)

#define DP_TX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
		     hist_stats++) { \
			DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					       num_of_packets[hist_stats]); \
		} \
	} while (0)

#define DP_RX_HIST_STATS_PER_PDEV() \
	do { \
		uint8_t hist_stats = 0; \
		for (hist_stats = 0; hist_stats < soc->pdev_count; \
		     hist_stats++) { \
			DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \
					       num_of_packets[hist_stats]); \
		} \
	} while (0)

#else
#define DP_HIST_INIT()
#define DP_HIST_PACKET_COUNT_INC(_pdev_id)
#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs)
#define DP_RX_HIST_STATS_PER_PDEV()
#define DP_TX_HIST_STATS_PER_PDEV()
#endif /* ENABLE_DP_HIST_STATS */

#define FRAME_MASK_IPV4_ARP 1
#define FRAME_MASK_IPV4_DHCP 2
#define FRAME_MASK_IPV4_EAPOL 4
#define FRAME_MASK_IPV6_DHCP 8

/* Note: value must be non-zero */
static inline int dp_log2_ceil(unsigned int value)
{
	unsigned int tmp = value;
	int log2 = -1;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if (1 << log2 != value)
		log2++;
	return log2;
}
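/*
 * Example (illustrative): dp_log2_ceil() returns the smallest n with
 * (1 << n) >= value, handy for sizing power-of-two rings:
 *
 *   dp_log2_ceil(1)    == 0
 *   dp_log2_ceil(4)    == 2
 *   dp_log2_ceil(5)    == 3
 *   dp_log2_ceil(4096) == 12
 */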
#ifdef QCA_SUPPORT_PEER_ISOLATION
#define dp_get_peer_isolation(_peer) ((_peer)->isolation)

static inline void dp_set_peer_isolation(struct dp_txrx_peer *txrx_peer,
					 bool val)
{
	txrx_peer->isolation = val;
}

#else
#define dp_get_peer_isolation(_peer) (0)

static inline void dp_set_peer_isolation(struct dp_txrx_peer *peer, bool val)
{
}
#endif /* QCA_SUPPORT_PEER_ISOLATION */

bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev);

#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer)
{
	txrx_peer->wds_ext.osif_peer = NULL;
	txrx_peer->wds_ext.init = 0;
}
#else
static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer)
{
}
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#ifdef QCA_HOST2FW_RXBUF_RING
static inline
struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
	return &pdev->rx_mac_buf_ring[lmac_id];
}
#else
static inline
struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
	return &pdev->soc->rx_refill_buf_ring[lmac_id];
}
#endif

/*
 * The lmac ID for a particular channel band is fixed.
 * 2.4GHz band uses lmac_id = 1
 * 5GHz/6GHz band uses lmac_id = 0
 */
#define DP_INVALID_LMAC_ID (-1)
#define DP_MON_INVALID_LMAC_ID (-1)
#define DP_MAC0_LMAC_ID 0
#define DP_MAC1_LMAC_ID 1

#ifdef FEATURE_TSO_STATS
/**
 * dp_init_tso_stats() - Clear tso stats
 * @pdev: pdev handle
 *
 * Return: None
 */
static inline
void dp_init_tso_stats(struct dp_pdev *pdev)
{
	if (pdev) {
		qdf_mem_zero(&((pdev)->stats.tso_stats),
			     sizeof((pdev)->stats.tso_stats));
		qdf_atomic_init(&pdev->tso_idx);
	}
}

/**
 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram
 * @pdev: pdev handle
 * @_p_cntrs: number of tso segments for a tso packet
 *
 * Return: None
 */
void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
					   uint8_t _p_cntrs);

/**
 * dp_tso_segment_update() - Collect tso segment information
 * @pdev: pdev handle
 * @stats_idx: tso packet number
 * @idx: tso segment number
 * @seg: tso segment
 *
 * Return: None
 */
void dp_tso_segment_update(struct dp_pdev *pdev,
			   uint32_t stats_idx,
			   uint8_t idx,
			   struct qdf_tso_seg_t seg);

/**
 * dp_tso_packet_update() - TSO Packet information
 * @pdev: pdev handle
 * @stats_idx: tso packet number
 * @msdu: nbuf handle
 * @num_segs: tso segments
 *
 * Return: None
 */
void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
			  qdf_nbuf_t msdu, uint16_t num_segs);

/**
 * dp_tso_segment_stats_update() - TSO Segment stats
 * @pdev: pdev handle
 * @stats_seg: tso segment list
 * @stats_idx: tso packet number
 *
 * Return: None
 */
void dp_tso_segment_stats_update(struct dp_pdev *pdev,
				 struct qdf_tso_seg_elem_t *stats_seg,
				 uint32_t stats_idx);

/**
 * dp_print_tso_stats() - dump tso statistics
 * @soc: soc handle
 * @level: verbosity level
 *
 * Return: None
 */
void dp_print_tso_stats(struct dp_soc *soc,
			enum qdf_stats_verbosity_level level);

/**
 * dp_txrx_clear_tso_stats() - clear tso stats
 * @soc: soc handle
 *
 * Return: None
 */
void dp_txrx_clear_tso_stats(struct dp_soc *soc);
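/*
 * Illustrative call flow with FEATURE_TSO_STATS enabled (the tx path
 * wires these up; surrounding variable names are placeholders):
 *
 *   dp_init_tso_stats(pdev);
 *   ...
 *   dp_tso_packet_update(pdev, stats_idx, msdu, num_segs);
 *   dp_tso_segment_stats_update(pdev, seg_list, stats_idx);
 *   ...
 *   dp_print_tso_stats(soc, QDF_STATS_VERBOSITY_LEVEL_HIGH);
 *   dp_txrx_clear_tso_stats(soc);
 */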
#else
static inline
void dp_init_tso_stats(struct dp_pdev *pdev)
{
}

static inline
void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev,
					   uint8_t _p_cntrs)
{
}

static inline
void dp_tso_segment_update(struct dp_pdev *pdev,
			   uint32_t stats_idx,
			   uint8_t idx,
			   struct qdf_tso_seg_t seg)
{
}

static inline
void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx,
			  qdf_nbuf_t msdu, uint16_t num_segs)
{
}

static inline
void dp_tso_segment_stats_update(struct dp_pdev *pdev,
				 struct qdf_tso_seg_elem_t *stats_seg,
				 uint32_t stats_idx)
{
}

static inline
void dp_print_tso_stats(struct dp_soc *soc,
			enum qdf_stats_verbosity_level level)
{
}

static inline
void dp_txrx_clear_tso_stats(struct dp_soc *soc)
{
}
#endif /* FEATURE_TSO_STATS */

/**
 * dp_txrx_get_peer_per_pkt_stats_param() - Get peer per pkt stats param
 * @peer: DP peer handle
 * @type: Requested stats type
 * @buf: Buffer to hold the value
 *
 * Return: status success/failure
 */
QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer,
						enum cdp_peer_stats_type type,
						cdp_peer_stats_param_t *buf);

/**
 * dp_txrx_get_peer_extd_stats_param() - Get peer extd stats param
 * @peer: DP peer handle
 * @type: Requested stats type
 * @buf: Buffer to hold the value
 *
 * Return: status success/failure
 */
QDF_STATUS dp_txrx_get_peer_extd_stats_param(struct dp_peer *peer,
					     enum cdp_peer_stats_type type,
					     cdp_peer_stats_param_t *buf);

#define DP_HTT_T2H_HP_PIPE 5
/**
 * dp_update_pdev_stats(): Update the pdev stats
 * @tgtobj: pdev handle
 * @srcobj: vdev stats structure
 *
 * Update the pdev stats from the specified vdev stats
 *
 * Return: None
 */
void dp_update_pdev_stats(struct dp_pdev *tgtobj,
			  struct cdp_vdev_stats *srcobj);

/**
 * dp_update_vdev_ingress_stats(): Update the vdev ingress stats
 * @tgtobj: vdev handle
 *
 * Update the vdev ingress stats
 *
 * Return: None
 */
void dp_update_vdev_ingress_stats(struct dp_vdev *tgtobj);

/**
 * dp_update_vdev_rate_stats() - Update the vdev rate stats
 * @tgtobj: tgt buffer for vdev stats
 * @srcobj: srcobj vdev stats
 *
 * Return: None
 */
void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj,
			       struct cdp_vdev_stats *srcobj);

/**
 * dp_update_pdev_ingress_stats(): Update the pdev ingress stats
 * @tgtobj: pdev handle
 * @srcobj: vdev stats structure
 *
 * Update the pdev ingress stats from the specified vdev stats
 *
 * Return: None
 */
void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
				  struct dp_vdev *srcobj);

/**
 * dp_update_vdev_stats(): Update the vdev stats
 * @soc: soc handle
 * @srcobj: DP_PEER object
 * @arg: pointer to vdev stats structure
 *
 * Update the vdev stats from the specified peer stats
 *
 * Return: None
 */
void dp_update_vdev_stats(struct dp_soc *soc,
			  struct dp_peer *srcobj,
			  void *arg);

/**
 * dp_update_vdev_stats_on_peer_unmap() - Update the vdev stats on peer unmap
 * @vdev: DP_VDEV handle
 * @peer: DP_PEER handle
 *
 * Return: None
 */
void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
					struct dp_peer *peer);
#ifdef IPA_OFFLOAD
#define DP_IPA_UPDATE_RX_STATS(__tgtobj, __srcobj) \
{ \
	DP_STATS_AGGR_PKT(__tgtobj, __srcobj, rx.rx_total); \
}

#define DP_IPA_UPDATE_PER_PKT_RX_STATS(__tgtobj, __srcobj) \
{ \
	(__tgtobj)->rx.rx_total.num += (__srcobj)->rx.rx_total.num; \
	(__tgtobj)->rx.rx_total.bytes += (__srcobj)->rx.rx_total.bytes; \
}
#else
#define DP_IPA_UPDATE_PER_PKT_RX_STATS(tgtobj, srcobj)

#define DP_IPA_UPDATE_RX_STATS(tgtobj, srcobj)
#endif

/* Aggregate _srcobj's stats into _tgtobj; note this macro expects a
 * dp_soc pointer named "soc" to be in scope at the expansion site.
 */
#define DP_UPDATE_STATS(_tgtobj, _srcobj) \
	do { \
		uint8_t i; \
		uint8_t pream_type; \
		for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \
			for (i = 0; i < MAX_MCS; i++) { \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					tx.pkt_type[pream_type].mcs_count[i]); \
				DP_STATS_AGGR(_tgtobj, _srcobj, \
					rx.pkt_type[pream_type].mcs_count[i]); \
			} \
		} \
		\
		for (i = 0; i < MAX_BW; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \
		} \
		\
		for (i = 0; i < SS_COUNT; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \
		} \
		for (i = 0; i < WME_AC_MAX; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, \
				      tx.wme_ac_type_bytes[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, \
				      rx.wme_ac_type_bytes[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, \
				      tx.excess_retries_per_ac[i]); \
		} \
		\
		for (i = 0; i < MAX_GI; i++) { \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \
		} \
		\
		for (i = 0; i < MAX_RECEPTION_TYPES; i++) \
			DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \
		\
		if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) { \
			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \
			DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \
		} \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \
		DP_STATS_AGGR(_tgtobj, _srcobj, \
			      tx.dropped.fw_rem_queue_disable); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_no_match); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_threshold); \
		DP_STATS_AGGR(_tgtobj, _srcobj, \
			      tx.dropped.drop_link_desc_na); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.mcast_vdev_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_rr); \
		DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_total); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_success); \
		\
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.fcserr); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.pn_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.oor_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.jump_2k_err); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.rxdma_wifi_parse_err); \
		if (_srcobj->stats.rx.snr != 0) \
			DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \
		DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \
		\
		for (i = 0; i < CDP_MAX_RX_RINGS; i++) \
			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \
		\
		for (i = 0; i < CDP_MAX_LMACS; i++) \
			DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rx_lmac[i]); \
		\
		_srcobj->stats.rx.unicast.num = \
			_srcobj->stats.rx.to_stack.num - \
			_srcobj->stats.rx.multicast.num; \
		_srcobj->stats.rx.unicast.bytes = \
			_srcobj->stats.rx.to_stack.bytes - \
			_srcobj->stats.rx.multicast.bytes; \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \
		DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \
		\
		_tgtobj->stats.tx.last_ack_rssi = \
			_srcobj->stats.tx.last_ack_rssi; \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.peer_unauth_rx_pkt_drop); \
		DP_STATS_AGGR(_tgtobj, _srcobj, rx.policy_check_drop); \
		DP_IPA_UPDATE_RX_STATS(_tgtobj, _srcobj); \
	} while (0)

#ifdef VDEV_PEER_PROTOCOL_COUNT
#define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) \
{ \
	uint8_t j; \
	for (j = 0; j < CDP_TRACE_MAX; j++) { \
		_tgtobj->tx.protocol_trace_cnt[j].egress_cnt += \
			_srcobj->tx.protocol_trace_cnt[j].egress_cnt; \
		_tgtobj->tx.protocol_trace_cnt[j].ingress_cnt += \
			_srcobj->tx.protocol_trace_cnt[j].ingress_cnt; \
		_tgtobj->rx.protocol_trace_cnt[j].egress_cnt += \
			_srcobj->rx.protocol_trace_cnt[j].egress_cnt; \
		_tgtobj->rx.protocol_trace_cnt[j].ingress_cnt += \
			_srcobj->rx.protocol_trace_cnt[j].ingress_cnt; \
	} \
}
#else
#define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj)
#endif
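/*
 * Example (illustrative sketch): DP_UPDATE_STATS() folds one object's
 * stats into an aggregate, e.g. peer -> vdev from an iterator callback;
 * both handles must expose a "stats" member, and a dp_soc pointer named
 * "soc" must be in scope. vdev_aggregate_cb() is a hypothetical helper:
 *
 *   static void vdev_aggregate_cb(struct dp_vdev *vdev,
 *                                 struct dp_peer *peer)
 *   {
 *       struct dp_soc *soc = vdev->pdev->soc;
 *
 *       DP_UPDATE_STATS(vdev, peer);
 *   }
 */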
#ifdef WLAN_FEATURE_11BE
#define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) \
	do { \
		uint8_t i, mu_type; \
		for (i = 0; i < MAX_MCS; i++) { \
			_tgtobj->tx.su_be_ppdu_cnt.mcs_count[i] += \
				_srcobj->tx.su_be_ppdu_cnt.mcs_count[i]; \
			_tgtobj->rx.su_be_ppdu_cnt.mcs_count[i] += \
				_srcobj->rx.su_be_ppdu_cnt.mcs_count[i]; \
		} \
		for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \
			for (i = 0; i < MAX_MCS; i++) { \
				_tgtobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \
					_srcobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \
				_tgtobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \
					_srcobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \
			} \
		} \
		for (i = 0; i < MAX_PUNCTURED_MODE; i++) { \
			_tgtobj->tx.punc_bw[i] += _srcobj->tx.punc_bw[i]; \
			_tgtobj->rx.punc_bw[i] += _srcobj->rx.punc_bw[i]; \
		} \
	} while (0)
#else
#define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj)
#endif

#define DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj) \
	do { \
		_tgtobj->tx.comp_pkt.num += _srcobj->tx.comp_pkt.num; \
		_tgtobj->tx.comp_pkt.bytes += _srcobj->tx.comp_pkt.bytes; \
		_tgtobj->tx.tx_failed += _srcobj->tx.tx_failed; \
		_tgtobj->rx.to_stack.num += _srcobj->rx.to_stack.num; \
		_tgtobj->rx.to_stack.bytes += _srcobj->rx.to_stack.bytes; \
	} while (0)

#define DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj) \
	do { \
		uint8_t i; \
		_tgtobj->tx.ucast.num += _srcobj->tx.ucast.num; \
		_tgtobj->tx.ucast.bytes += _srcobj->tx.ucast.bytes; \
		_tgtobj->tx.mcast.num += _srcobj->tx.mcast.num; \
		_tgtobj->tx.mcast.bytes += _srcobj->tx.mcast.bytes; \
		_tgtobj->tx.bcast.num += _srcobj->tx.bcast.num; \
		_tgtobj->tx.bcast.bytes += _srcobj->tx.bcast.bytes; \
		_tgtobj->tx.nawds_mcast.num += _srcobj->tx.nawds_mcast.num; \
		_tgtobj->tx.nawds_mcast.bytes += \
			_srcobj->tx.nawds_mcast.bytes; \
		_tgtobj->tx.tx_success.num += _srcobj->tx.tx_success.num; \
		_tgtobj->tx.tx_success.bytes += _srcobj->tx.tx_success.bytes; \
		_tgtobj->tx.nawds_mcast_drop += _srcobj->tx.nawds_mcast_drop; \
		_tgtobj->tx.ofdma += _srcobj->tx.ofdma; \
		_tgtobj->tx.non_amsdu_cnt += _srcobj->tx.non_amsdu_cnt; \
		_tgtobj->tx.amsdu_cnt += _srcobj->tx.amsdu_cnt; \
		_tgtobj->tx.dropped.fw_rem.num += \
			_srcobj->tx.dropped.fw_rem.num; \
		_tgtobj->tx.dropped.fw_rem.bytes += \
			_srcobj->tx.dropped.fw_rem.bytes; \
		_tgtobj->tx.dropped.fw_rem_notx += \
			_srcobj->tx.dropped.fw_rem_notx; \
		_tgtobj->tx.dropped.fw_rem_tx += \
			_srcobj->tx.dropped.fw_rem_tx; \
		_tgtobj->tx.dropped.age_out += _srcobj->tx.dropped.age_out; \
		_tgtobj->tx.dropped.fw_reason1 += \
			_srcobj->tx.dropped.fw_reason1; \
		_tgtobj->tx.dropped.fw_reason2 += \
			_srcobj->tx.dropped.fw_reason2; \
		_tgtobj->tx.dropped.fw_reason3 += \
			_srcobj->tx.dropped.fw_reason3; \
		_tgtobj->tx.dropped.fw_rem_queue_disable += \
			_srcobj->tx.dropped.fw_rem_queue_disable; \
		_tgtobj->tx.dropped.fw_rem_no_match += \
			_srcobj->tx.dropped.fw_rem_no_match; \
		_tgtobj->tx.dropped.drop_threshold += \
			_srcobj->tx.dropped.drop_threshold; \
		_tgtobj->tx.dropped.drop_link_desc_na += \
			_srcobj->tx.dropped.drop_link_desc_na; \
		_tgtobj->tx.dropped.invalid_drop += \
			_srcobj->tx.dropped.invalid_drop; \
		_tgtobj->tx.dropped.mcast_vdev_drop += \
			_srcobj->tx.dropped.mcast_vdev_drop; \
		_tgtobj->tx.dropped.invalid_rr += \
			_srcobj->tx.dropped.invalid_rr; \
		_tgtobj->tx.failed_retry_count += \
			_srcobj->tx.failed_retry_count; \
		_tgtobj->tx.retry_count += _srcobj->tx.retry_count; \
		_tgtobj->tx.multiple_retry_count += \
			_srcobj->tx.multiple_retry_count; \
_tgtobj->tx.tx_success_twt.num += \ 1998 _srcobj->tx.tx_success_twt.num; \ 1999 _tgtobj->tx.tx_success_twt.bytes += \ 2000 _srcobj->tx.tx_success_twt.bytes; \ 2001 _tgtobj->tx.last_tx_ts = _srcobj->tx.last_tx_ts; \ 2002 _tgtobj->tx.release_src_not_tqm += \ 2003 _srcobj->tx.release_src_not_tqm; \ 2004 for (i = 0; i < QDF_PROTO_SUBTYPE_MAX; i++) { \ 2005 _tgtobj->tx.no_ack_count[i] += \ 2006 _srcobj->tx.no_ack_count[i];\ 2007 } \ 2008 \ 2009 _tgtobj->rx.multicast.num += _srcobj->rx.multicast.num; \ 2010 _tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \ 2011 _tgtobj->rx.rx_success.num += _srcobj->rx.rx_success.num;\ 2012 _tgtobj->rx.rx_success.bytes += _srcobj->rx.rx_success.bytes;\ 2013 _tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \ 2014 _tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \ 2015 _tgtobj->rx.unicast.num += _srcobj->rx.unicast.num; \ 2016 _tgtobj->rx.unicast.bytes += _srcobj->rx.unicast.bytes; \ 2017 _tgtobj->rx.raw.num += _srcobj->rx.raw.num; \ 2018 _tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \ 2019 _tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \ 2020 _tgtobj->rx.mcast_3addr_drop += _srcobj->rx.mcast_3addr_drop; \ 2021 _tgtobj->rx.mec_drop.num += _srcobj->rx.mec_drop.num; \ 2022 _tgtobj->rx.mec_drop.bytes += _srcobj->rx.mec_drop.bytes; \ 2023 _tgtobj->rx.ppeds_drop.num += _srcobj->rx.ppeds_drop.num; \ 2024 _tgtobj->rx.ppeds_drop.bytes += _srcobj->rx.ppeds_drop.bytes; \ 2025 _tgtobj->rx.intra_bss.pkts.num += \ 2026 _srcobj->rx.intra_bss.pkts.num; \ 2027 _tgtobj->rx.intra_bss.pkts.bytes += \ 2028 _srcobj->rx.intra_bss.pkts.bytes; \ 2029 _tgtobj->rx.intra_bss.fail.num += \ 2030 _srcobj->rx.intra_bss.fail.num; \ 2031 _tgtobj->rx.intra_bss.fail.bytes += \ 2032 _srcobj->rx.intra_bss.fail.bytes; \ 2033 _tgtobj->rx.intra_bss.mdns_no_fwd += \ 2034 _srcobj->rx.intra_bss.mdns_no_fwd; \ 2035 _tgtobj->rx.err.mic_err += _srcobj->rx.err.mic_err; \ 2036 _tgtobj->rx.err.decrypt_err += _srcobj->rx.err.decrypt_err; \ 2037 _tgtobj->rx.err.fcserr += _srcobj->rx.err.fcserr; \ 2038 _tgtobj->rx.err.pn_err += _srcobj->rx.err.pn_err; \ 2039 _tgtobj->rx.err.oor_err += _srcobj->rx.err.oor_err; \ 2040 _tgtobj->rx.err.jump_2k_err += _srcobj->rx.err.jump_2k_err; \ 2041 _tgtobj->rx.err.rxdma_wifi_parse_err += \ 2042 _srcobj->rx.err.rxdma_wifi_parse_err; \ 2043 _tgtobj->rx.non_amsdu_cnt += _srcobj->rx.non_amsdu_cnt; \ 2044 _tgtobj->rx.amsdu_cnt += _srcobj->rx.amsdu_cnt; \ 2045 _tgtobj->rx.rx_retries += _srcobj->rx.rx_retries; \ 2046 _tgtobj->rx.multipass_rx_pkt_drop += \ 2047 _srcobj->rx.multipass_rx_pkt_drop; \ 2048 _tgtobj->rx.peer_unauth_rx_pkt_drop += \ 2049 _srcobj->rx.peer_unauth_rx_pkt_drop; \ 2050 _tgtobj->rx.policy_check_drop += \ 2051 _srcobj->rx.policy_check_drop; \ 2052 _tgtobj->rx.to_stack_twt.num += _srcobj->rx.to_stack_twt.num; \ 2053 _tgtobj->rx.to_stack_twt.bytes += \ 2054 _srcobj->rx.to_stack_twt.bytes; \ 2055 _tgtobj->rx.last_rx_ts = _srcobj->rx.last_rx_ts; \ 2056 for (i = 0; i < CDP_MAX_RX_RINGS; i++) { \ 2057 _tgtobj->rx.rcvd_reo[i].num += \ 2058 _srcobj->rx.rcvd_reo[i].num; \ 2059 _tgtobj->rx.rcvd_reo[i].bytes += \ 2060 _srcobj->rx.rcvd_reo[i].bytes; \ 2061 _tgtobj->rx.rcvd.num += \ 2062 _srcobj->rx.rcvd_reo[i].num; \ 2063 _tgtobj->rx.rcvd.bytes += \ 2064 _srcobj->rx.rcvd_reo[i].bytes; \ 2065 } \ 2066 for (i = 0; i < CDP_MAX_LMACS; i++) { \ 2067 _tgtobj->rx.rx_lmac[i].num += \ 2068 _srcobj->rx.rx_lmac[i].num; \ 2069 _tgtobj->rx.rx_lmac[i].bytes += \ 2070 _srcobj->rx.rx_lmac[i].bytes; \ 2071 } \ 2072 DP_IPA_UPDATE_PER_PKT_RX_STATS(_tgtobj, _srcobj); \ 
2073 DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj); \ 2074 } while (0) 2075 2076 #define DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj) \ 2077 do { \ 2078 uint8_t i, pream_type, mu_type; \ 2079 _tgtobj->tx.stbc += _srcobj->tx.stbc; \ 2080 _tgtobj->tx.ldpc += _srcobj->tx.ldpc; \ 2081 _tgtobj->tx.retries += _srcobj->tx.retries; \ 2082 _tgtobj->tx.ampdu_cnt += _srcobj->tx.ampdu_cnt; \ 2083 _tgtobj->tx.non_ampdu_cnt += _srcobj->tx.non_ampdu_cnt; \ 2084 _tgtobj->tx.num_ppdu_cookie_valid += \ 2085 _srcobj->tx.num_ppdu_cookie_valid; \ 2086 _tgtobj->tx.tx_ppdus += _srcobj->tx.tx_ppdus; \ 2087 _tgtobj->tx.tx_mpdus_success += _srcobj->tx.tx_mpdus_success; \ 2088 _tgtobj->tx.tx_mpdus_tried += _srcobj->tx.tx_mpdus_tried; \ 2089 _tgtobj->tx.tx_rate = _srcobj->tx.tx_rate; \ 2090 _tgtobj->tx.last_tx_rate = _srcobj->tx.last_tx_rate; \ 2091 _tgtobj->tx.last_tx_rate_mcs = _srcobj->tx.last_tx_rate_mcs; \ 2092 _tgtobj->tx.mcast_last_tx_rate = \ 2093 _srcobj->tx.mcast_last_tx_rate; \ 2094 _tgtobj->tx.mcast_last_tx_rate_mcs = \ 2095 _srcobj->tx.mcast_last_tx_rate_mcs; \ 2096 _tgtobj->tx.rnd_avg_tx_rate = _srcobj->tx.rnd_avg_tx_rate; \ 2097 _tgtobj->tx.avg_tx_rate = _srcobj->tx.avg_tx_rate; \ 2098 _tgtobj->tx.tx_ratecode = _srcobj->tx.tx_ratecode; \ 2099 _tgtobj->tx.pream_punct_cnt += _srcobj->tx.pream_punct_cnt; \ 2100 _tgtobj->tx.ru_start = _srcobj->tx.ru_start; \ 2101 _tgtobj->tx.ru_tones = _srcobj->tx.ru_tones; \ 2102 _tgtobj->tx.last_ack_rssi = _srcobj->tx.last_ack_rssi; \ 2103 _tgtobj->tx.nss_info = _srcobj->tx.nss_info; \ 2104 _tgtobj->tx.mcs_info = _srcobj->tx.mcs_info; \ 2105 _tgtobj->tx.bw_info = _srcobj->tx.bw_info; \ 2106 _tgtobj->tx.gi_info = _srcobj->tx.gi_info; \ 2107 _tgtobj->tx.preamble_info = _srcobj->tx.preamble_info; \ 2108 _tgtobj->tx.retries_mpdu += _srcobj->tx.retries_mpdu; \ 2109 _tgtobj->tx.mpdu_success_with_retries += \ 2110 _srcobj->tx.mpdu_success_with_retries; \ 2111 _tgtobj->tx.rts_success = _srcobj->tx.rts_success; \ 2112 _tgtobj->tx.rts_failure = _srcobj->tx.rts_failure; \ 2113 _tgtobj->tx.bar_cnt = _srcobj->tx.bar_cnt; \ 2114 _tgtobj->tx.ndpa_cnt = _srcobj->tx.ndpa_cnt; \ 2115 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 2116 for (i = 0; i < MAX_MCS; i++) \ 2117 _tgtobj->tx.pkt_type[pream_type].mcs_count[i] += \ 2118 _srcobj->tx.pkt_type[pream_type].mcs_count[i]; \ 2119 } \ 2120 for (i = 0; i < WME_AC_MAX; i++) { \ 2121 _tgtobj->tx.wme_ac_type[i] += _srcobj->tx.wme_ac_type[i]; \ 2122 _tgtobj->tx.wme_ac_type_bytes[i] += \ 2123 _srcobj->tx.wme_ac_type_bytes[i]; \ 2124 _tgtobj->tx.excess_retries_per_ac[i] += \ 2125 _srcobj->tx.excess_retries_per_ac[i]; \ 2126 } \ 2127 for (i = 0; i < MAX_GI; i++) { \ 2128 _tgtobj->tx.sgi_count[i] += _srcobj->tx.sgi_count[i]; \ 2129 } \ 2130 for (i = 0; i < SS_COUNT; i++) { \ 2131 _tgtobj->tx.nss[i] += _srcobj->tx.nss[i]; \ 2132 } \ 2133 for (i = 0; i < MAX_BW; i++) { \ 2134 _tgtobj->tx.bw[i] += _srcobj->tx.bw[i]; \ 2135 } \ 2136 for (i = 0; i < MAX_RU_LOCATIONS; i++) { \ 2137 _tgtobj->tx.ru_loc[i].num_msdu += \ 2138 _srcobj->tx.ru_loc[i].num_msdu; \ 2139 _tgtobj->tx.ru_loc[i].num_mpdu += \ 2140 _srcobj->tx.ru_loc[i].num_mpdu; \ 2141 _tgtobj->tx.ru_loc[i].mpdu_tried += \ 2142 _srcobj->tx.ru_loc[i].mpdu_tried; \ 2143 } \ 2144 for (i = 0; i < MAX_TRANSMIT_TYPES; i++) { \ 2145 _tgtobj->tx.transmit_type[i].num_msdu += \ 2146 _srcobj->tx.transmit_type[i].num_msdu; \ 2147 _tgtobj->tx.transmit_type[i].num_mpdu += \ 2148 _srcobj->tx.transmit_type[i].num_mpdu; \ 2149 _tgtobj->tx.transmit_type[i].mpdu_tried += \ 2150 
_srcobj->tx.transmit_type[i].mpdu_tried; \ 2151 } \ 2152 for (i = 0; i < MAX_MU_GROUP_ID; i++) { \ 2153 _tgtobj->tx.mu_group_id[i] = _srcobj->tx.mu_group_id[i]; \ 2154 } \ 2155 _tgtobj->tx.tx_ucast_total.num += \ 2156 _srcobj->tx.tx_ucast_total.num;\ 2157 _tgtobj->tx.tx_ucast_total.bytes += \ 2158 _srcobj->tx.tx_ucast_total.bytes;\ 2159 _tgtobj->tx.tx_ucast_success.num += \ 2160 _srcobj->tx.tx_ucast_success.num; \ 2161 _tgtobj->tx.tx_ucast_success.bytes += \ 2162 _srcobj->tx.tx_ucast_success.bytes; \ 2163 \ 2164 for (i = 0; i < CDP_RSSI_CHAIN_LEN; i++) \ 2165 _tgtobj->tx.rssi_chain[i] = _srcobj->tx.rssi_chain[i]; \ 2166 _tgtobj->rx.mpdu_cnt_fcs_ok += _srcobj->rx.mpdu_cnt_fcs_ok; \ 2167 _tgtobj->rx.mpdu_cnt_fcs_err += _srcobj->rx.mpdu_cnt_fcs_err; \ 2168 _tgtobj->rx.non_ampdu_cnt += _srcobj->rx.non_ampdu_cnt; \ 2169 _tgtobj->rx.ampdu_cnt += _srcobj->rx.ampdu_cnt; \ 2170 _tgtobj->rx.rx_mpdus += _srcobj->rx.rx_mpdus; \ 2171 _tgtobj->rx.rx_ppdus += _srcobj->rx.rx_ppdus; \ 2172 _tgtobj->rx.rx_rate = _srcobj->rx.rx_rate; \ 2173 _tgtobj->rx.last_rx_rate = _srcobj->rx.last_rx_rate; \ 2174 _tgtobj->rx.rnd_avg_rx_rate = _srcobj->rx.rnd_avg_rx_rate; \ 2175 _tgtobj->rx.avg_rx_rate = _srcobj->rx.avg_rx_rate; \ 2176 _tgtobj->rx.rx_ratecode = _srcobj->rx.rx_ratecode; \ 2177 _tgtobj->rx.avg_snr = _srcobj->rx.avg_snr; \ 2178 _tgtobj->rx.rx_snr_measured_time = \ 2179 _srcobj->rx.rx_snr_measured_time; \ 2180 _tgtobj->rx.snr = _srcobj->rx.snr; \ 2181 _tgtobj->rx.last_snr = _srcobj->rx.last_snr; \ 2182 _tgtobj->rx.nss_info = _srcobj->rx.nss_info; \ 2183 _tgtobj->rx.mcs_info = _srcobj->rx.mcs_info; \ 2184 _tgtobj->rx.bw_info = _srcobj->rx.bw_info; \ 2185 _tgtobj->rx.gi_info = _srcobj->rx.gi_info; \ 2186 _tgtobj->rx.preamble_info = _srcobj->rx.preamble_info; \ 2187 _tgtobj->rx.mpdu_retry_cnt += _srcobj->rx.mpdu_retry_cnt; \ 2188 _tgtobj->rx.bar_cnt = _srcobj->rx.bar_cnt; \ 2189 _tgtobj->rx.ndpa_cnt = _srcobj->rx.ndpa_cnt; \ 2190 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 2191 for (i = 0; i < MAX_MCS; i++) { \ 2192 _tgtobj->rx.pkt_type[pream_type].mcs_count[i] += \ 2193 _srcobj->rx.pkt_type[pream_type].mcs_count[i]; \ 2194 } \ 2195 } \ 2196 for (i = 0; i < WME_AC_MAX; i++) { \ 2197 _tgtobj->rx.wme_ac_type[i] += _srcobj->rx.wme_ac_type[i]; \ 2198 _tgtobj->rx.wme_ac_type_bytes[i] += \ 2199 _srcobj->rx.wme_ac_type_bytes[i]; \ 2200 } \ 2201 for (i = 0; i < MAX_MCS; i++) { \ 2202 _tgtobj->rx.su_ax_ppdu_cnt.mcs_count[i] += \ 2203 _srcobj->rx.su_ax_ppdu_cnt.mcs_count[i]; \ 2204 _tgtobj->rx.rx_mpdu_cnt[i] += _srcobj->rx.rx_mpdu_cnt[i]; \ 2205 } \ 2206 for (mu_type = 0 ; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 2207 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok += \ 2208 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok; \ 2209 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err += \ 2210 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err; \ 2211 for (i = 0; i < SS_COUNT; i++) \ 2212 _tgtobj->rx.rx_mu[mu_type].ppdu_nss[i] += \ 2213 _srcobj->rx.rx_mu[mu_type].ppdu_nss[i]; \ 2214 for (i = 0; i < MAX_MCS; i++) \ 2215 _tgtobj->rx.rx_mu[mu_type].ppdu.mcs_count[i] += \ 2216 _srcobj->rx.rx_mu[mu_type].ppdu.mcs_count[i]; \ 2217 } \ 2218 for (i = 0; i < MAX_RECEPTION_TYPES; i++) { \ 2219 _tgtobj->rx.reception_type[i] += \ 2220 _srcobj->rx.reception_type[i]; \ 2221 _tgtobj->rx.ppdu_cnt[i] += _srcobj->rx.ppdu_cnt[i]; \ 2222 } \ 2223 for (i = 0; i < MAX_GI; i++) { \ 2224 _tgtobj->rx.sgi_count[i] += _srcobj->rx.sgi_count[i]; \ 2225 } \ 2226 for (i = 0; i < SS_COUNT; i++) { \ 2227 _tgtobj->rx.nss[i] += _srcobj->rx.nss[i]; \ 2228 
_tgtobj->rx.ppdu_nss[i] += _srcobj->rx.ppdu_nss[i]; \ 2229 } \ 2230 for (i = 0; i < MAX_BW; i++) { \ 2231 _tgtobj->rx.bw[i] += _srcobj->rx.bw[i]; \ 2232 } \ 2233 DP_UPDATE_11BE_STATS(_tgtobj, _srcobj); \ 2234 } while (0) 2235 2236 #define DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj) \ 2237 do { \ 2238 DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj); \ 2239 DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj); \ 2240 DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj); \ 2241 } while (0) 2242 2243 #define DP_UPDATE_INGRESS_STATS(_tgtobj, _srcobj) \ 2244 do { \ 2245 uint8_t i = 0; \ 2246 _tgtobj->tx_i.rcvd.num += _srcobj->tx_i.rcvd.num; \ 2247 _tgtobj->tx_i.rcvd.bytes += _srcobj->tx_i.rcvd.bytes; \ 2248 _tgtobj->tx_i.rcvd_in_fast_xmit_flow += \ 2249 _srcobj->tx_i.rcvd_in_fast_xmit_flow; \ 2250 for (i = 0; i < CDP_MAX_TX_DATA_RINGS; i++) { \ 2251 _tgtobj->tx_i.rcvd_per_core[i] += \ 2252 _srcobj->tx_i.rcvd_per_core[i]; \ 2253 } \ 2254 _tgtobj->tx_i.processed.num += _srcobj->tx_i.processed.num; \ 2255 _tgtobj->tx_i.processed.bytes += \ 2256 _srcobj->tx_i.processed.bytes; \ 2257 _tgtobj->tx_i.reinject_pkts.num += \ 2258 _srcobj->tx_i.reinject_pkts.num; \ 2259 _tgtobj->tx_i.reinject_pkts.bytes += \ 2260 _srcobj->tx_i.reinject_pkts.bytes; \ 2261 _tgtobj->tx_i.inspect_pkts.num += \ 2262 _srcobj->tx_i.inspect_pkts.num; \ 2263 _tgtobj->tx_i.inspect_pkts.bytes += \ 2264 _srcobj->tx_i.inspect_pkts.bytes; \ 2265 _tgtobj->tx_i.nawds_mcast.num += \ 2266 _srcobj->tx_i.nawds_mcast.num; \ 2267 _tgtobj->tx_i.nawds_mcast.bytes += \ 2268 _srcobj->tx_i.nawds_mcast.bytes; \ 2269 _tgtobj->tx_i.bcast.num += _srcobj->tx_i.bcast.num; \ 2270 _tgtobj->tx_i.bcast.bytes += _srcobj->tx_i.bcast.bytes; \ 2271 _tgtobj->tx_i.raw.raw_pkt.num += \ 2272 _srcobj->tx_i.raw.raw_pkt.num; \ 2273 _tgtobj->tx_i.raw.raw_pkt.bytes += \ 2274 _srcobj->tx_i.raw.raw_pkt.bytes; \ 2275 _tgtobj->tx_i.raw.dma_map_error += \ 2276 _srcobj->tx_i.raw.dma_map_error; \ 2277 _tgtobj->tx_i.raw.invalid_raw_pkt_datatype += \ 2278 _srcobj->tx_i.raw.invalid_raw_pkt_datatype; \ 2279 _tgtobj->tx_i.raw.num_frags_overflow_err += \ 2280 _srcobj->tx_i.raw.num_frags_overflow_err; \ 2281 _tgtobj->tx_i.sg.sg_pkt.num += _srcobj->tx_i.sg.sg_pkt.num; \ 2282 _tgtobj->tx_i.sg.sg_pkt.bytes += \ 2283 _srcobj->tx_i.sg.sg_pkt.bytes; \ 2284 _tgtobj->tx_i.sg.non_sg_pkts.num += \ 2285 _srcobj->tx_i.sg.non_sg_pkts.num; \ 2286 _tgtobj->tx_i.sg.non_sg_pkts.bytes += \ 2287 _srcobj->tx_i.sg.non_sg_pkts.bytes; \ 2288 _tgtobj->tx_i.sg.dropped_host.num += \ 2289 _srcobj->tx_i.sg.dropped_host.num; \ 2290 _tgtobj->tx_i.sg.dropped_host.bytes += \ 2291 _srcobj->tx_i.sg.dropped_host.bytes; \ 2292 _tgtobj->tx_i.sg.dropped_target += \ 2293 _srcobj->tx_i.sg.dropped_target; \ 2294 _tgtobj->tx_i.sg.dma_map_error += \ 2295 _srcobj->tx_i.sg.dma_map_error; \ 2296 _tgtobj->tx_i.mcast_en.mcast_pkt.num += \ 2297 _srcobj->tx_i.mcast_en.mcast_pkt.num; \ 2298 _tgtobj->tx_i.mcast_en.mcast_pkt.bytes += \ 2299 _srcobj->tx_i.mcast_en.mcast_pkt.bytes; \ 2300 _tgtobj->tx_i.mcast_en.dropped_map_error += \ 2301 _srcobj->tx_i.mcast_en.dropped_map_error; \ 2302 _tgtobj->tx_i.mcast_en.dropped_self_mac += \ 2303 _srcobj->tx_i.mcast_en.dropped_self_mac; \ 2304 _tgtobj->tx_i.mcast_en.dropped_send_fail += \ 2305 _srcobj->tx_i.mcast_en.dropped_send_fail; \ 2306 _tgtobj->tx_i.mcast_en.ucast += _srcobj->tx_i.mcast_en.ucast; \ 2307 _tgtobj->tx_i.mcast_en.fail_seg_alloc += \ 2308 _srcobj->tx_i.mcast_en.fail_seg_alloc; \ 2309 _tgtobj->tx_i.mcast_en.clone_fail += \ 2310 _srcobj->tx_i.mcast_en.clone_fail; \ 2311 
_tgtobj->tx_i.igmp_mcast_en.igmp_rcvd += \ 2312 _srcobj->tx_i.igmp_mcast_en.igmp_rcvd; \ 2313 _tgtobj->tx_i.igmp_mcast_en.igmp_ucast_converted += \ 2314 _srcobj->tx_i.igmp_mcast_en.igmp_ucast_converted; \ 2315 _tgtobj->tx_i.dropped.desc_na.num += \ 2316 _srcobj->tx_i.dropped.desc_na.num; \ 2317 _tgtobj->tx_i.dropped.desc_na.bytes += \ 2318 _srcobj->tx_i.dropped.desc_na.bytes; \ 2319 _tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.num += \ 2320 _srcobj->tx_i.dropped.desc_na_exc_alloc_fail.num; \ 2321 _tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes += \ 2322 _srcobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes; \ 2323 _tgtobj->tx_i.dropped.desc_na_exc_outstand.num += \ 2324 _srcobj->tx_i.dropped.desc_na_exc_outstand.num; \ 2325 _tgtobj->tx_i.dropped.desc_na_exc_outstand.bytes += \ 2326 _srcobj->tx_i.dropped.desc_na_exc_outstand.bytes; \ 2327 _tgtobj->tx_i.dropped.exc_desc_na.num += \ 2328 _srcobj->tx_i.dropped.exc_desc_na.num; \ 2329 _tgtobj->tx_i.dropped.exc_desc_na.bytes += \ 2330 _srcobj->tx_i.dropped.exc_desc_na.bytes; \ 2331 _tgtobj->tx_i.dropped.ring_full += \ 2332 _srcobj->tx_i.dropped.ring_full; \ 2333 _tgtobj->tx_i.dropped.enqueue_fail += \ 2334 _srcobj->tx_i.dropped.enqueue_fail; \ 2335 _tgtobj->tx_i.dropped.dma_error += \ 2336 _srcobj->tx_i.dropped.dma_error; \ 2337 _tgtobj->tx_i.dropped.res_full += \ 2338 _srcobj->tx_i.dropped.res_full; \ 2339 _tgtobj->tx_i.dropped.headroom_insufficient += \ 2340 _srcobj->tx_i.dropped.headroom_insufficient; \ 2341 _tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check += \ 2342 _srcobj->tx_i.dropped.fail_per_pkt_vdev_id_check; \ 2343 _tgtobj->tx_i.dropped.drop_ingress += \ 2344 _srcobj->tx_i.dropped.drop_ingress; \ 2345 _tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path += \ 2346 _srcobj->tx_i.dropped.invalid_peer_id_in_exc_path; \ 2347 _tgtobj->tx_i.dropped.tx_mcast_drop += \ 2348 _srcobj->tx_i.dropped.tx_mcast_drop; \ 2349 _tgtobj->tx_i.dropped.fw2wbm_tx_drop += \ 2350 _srcobj->tx_i.dropped.fw2wbm_tx_drop; \ 2351 _tgtobj->tx_i.dropped.dropped_pkt.num = \ 2352 _tgtobj->tx_i.dropped.dma_error + \ 2353 _tgtobj->tx_i.dropped.ring_full + \ 2354 _tgtobj->tx_i.dropped.enqueue_fail + \ 2355 _tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check + \ 2356 _tgtobj->tx_i.dropped.desc_na.num + \ 2357 _tgtobj->tx_i.dropped.res_full + \ 2358 _tgtobj->tx_i.dropped.drop_ingress + \ 2359 _tgtobj->tx_i.dropped.headroom_insufficient + \ 2360 _tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path + \ 2361 _tgtobj->tx_i.dropped.tx_mcast_drop + \ 2362 _tgtobj->tx_i.dropped.fw2wbm_tx_drop; \ 2363 _tgtobj->tx_i.dropped.dropped_pkt.bytes += \ 2364 _srcobj->tx_i.dropped.dropped_pkt.bytes; \ 2365 _tgtobj->tx_i.mesh.exception_fw += \ 2366 _srcobj->tx_i.mesh.exception_fw; \ 2367 _tgtobj->tx_i.mesh.completion_fw += \ 2368 _srcobj->tx_i.mesh.completion_fw; \ 2369 _tgtobj->tx_i.cce_classified += \ 2370 _srcobj->tx_i.cce_classified; \ 2371 _tgtobj->tx_i.cce_classified_raw += \ 2372 _srcobj->tx_i.cce_classified_raw; \ 2373 _tgtobj->tx_i.sniffer_rcvd.num += \ 2374 _srcobj->tx_i.sniffer_rcvd.num; \ 2375 _tgtobj->tx_i.sniffer_rcvd.bytes += \ 2376 _srcobj->tx_i.sniffer_rcvd.bytes; \ 2377 _tgtobj->rx_i.reo_rcvd_pkt.num += \ 2378 _srcobj->rx_i.reo_rcvd_pkt.num; \ 2379 _tgtobj->rx_i.reo_rcvd_pkt.bytes += \ 2380 _srcobj->rx_i.reo_rcvd_pkt.bytes; \ 2381 _tgtobj->rx_i.null_q_desc_pkt.num += \ 2382 _srcobj->rx_i.null_q_desc_pkt.num; \ 2383 _tgtobj->rx_i.null_q_desc_pkt.bytes += \ 2384 _srcobj->rx_i.null_q_desc_pkt.bytes; \ 2385 _tgtobj->rx_i.routed_eapol_pkt.num += \ 2386 _srcobj->rx_i.routed_eapol_pkt.num; \ 
2387 		_tgtobj->rx_i.routed_eapol_pkt.bytes += \
2388 			_srcobj->rx_i.routed_eapol_pkt.bytes; \
2389 	} while (0)
2390
2391 #define DP_UPDATE_VDEV_STATS(_tgtobj, _srcobj) \
2392 	do { \
2393 		DP_UPDATE_INGRESS_STATS(_tgtobj, _srcobj); \
2394 		DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj); \
2395 	} while (0)
2396
2397 /**
2398  * dp_peer_find_attach() - Allocates memory for peer objects
2399  * @soc: SoC handle
2400  *
2401  * Return: QDF_STATUS
2402  */
2403 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc);
2404
2405 /**
2406  * dp_peer_find_detach() - Frees memory for peer objects
2407  * @soc: SoC handle
2408  *
2409  * Return: none
2410  */
2411 void dp_peer_find_detach(struct dp_soc *soc);
2412
2413 /**
2414  * dp_peer_find_hash_add() - add peer to peer_hash_table
2415  * @soc: soc handle
2416  * @peer: peer handle
2417  *
2418  * Return: none
2419  */
2420 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
2421
2422 /**
2423  * dp_peer_find_hash_remove() - remove peer from peer_hash_table
2424  * @soc: soc handle
2425  * @peer: peer handle
2426  *
2427  * Return: none
2428  */
2429 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
2430
2431 /* Erases every entry in the peer hash table; currently unreferenced. */
2432 void dp_peer_find_hash_erase(struct dp_soc *soc);
2433
2434 /**
2435  * dp_peer_vdev_list_add() - add peer into vdev's peer list
2436  * @soc: soc handle
2437  * @vdev: vdev handle
2438  * @peer: peer handle
2439  *
2440  * Return: none
2441  */
2442 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
2443 			   struct dp_peer *peer);
2444
2445 /**
2446  * dp_peer_vdev_list_remove() - remove peer from vdev's peer list
2447  * @soc: SoC handle
2448  * @vdev: VDEV handle
2449  * @peer: peer handle
2450  *
2451  * Return: none
2452  */
2453 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
2454 			      struct dp_peer *peer);
2455
2456 /**
2457  * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
2458  * @soc: SoC handle
2459  * @peer: peer handle
2460  * @peer_id: peer_id
2461  *
2462  * Return: None
2463  */
2464 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
2465 				struct dp_peer *peer,
2466 				uint16_t peer_id);
2467
2468 /**
2469  * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table
2470  * @soc: SoC handle
2471  * @peer: peer handle
2472  * @txrx_peer: txrx peer handle
2473  *
2474  * Return: None
2475  */
2476 void dp_txrx_peer_attach_add(struct dp_soc *soc,
2477 			     struct dp_peer *peer,
2478 			     struct dp_txrx_peer *txrx_peer);
2479
2480 /**
2481  * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
2482  * @soc: SoC handle
2483  * @peer_id: peer_id
2484  *
2485  * Return: None
2486  */
2487 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
2488 				   uint16_t peer_id);
2489
2490 /**
2491  * dp_vdev_unref_delete() - check and process vdev delete
2492  * @soc: DP specific soc pointer
2493  * @vdev: DP specific vdev pointer
2494  * @mod_id: module id
2495  * (An illustrative usage sketch follows dp_peer_rx_init() below.)
2496  */
2497 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
2498 			  enum dp_mod_id mod_id);
2499
2500 /**
2501  * dp_peer_ppdu_delayed_ba_cleanup() - free ppdu allocated in peer
2502  * @peer: Datapath peer
2503  *
2504  * Return: void
2505  */
2506 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer);
2507
2508 /**
2509  * dp_peer_rx_init() - Initialize receive TID state
2510  * @pdev: Datapath pdev
2511  * @peer: Datapath peer
2512  *
2513  */
2514 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
2515
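/*
 * Example (illustrative sketch, not driver code): dp_vdev_unref_delete()
 * releases a reference taken earlier, e.g. via the dp_vdev_get_ref()
 * helper assumed here; the vdev delete is processed once the last module
 * reference drops:
 *
 *	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP) == QDF_STATUS_SUCCESS) {
 *		... use vdev safely ...
 *		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 *	}
 */
2516 /**
2517  * dp_peer_cleanup() - Cleanup peer information
2518  * @vdev: Datapath vdev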
2519  * @peer: Datapath peer
2520  *
2521  */
2522 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
2523
2524 #ifdef DP_PEER_EXTENDED_API
2525 /**
2526  * dp_register_peer() - Register peer into physical device
2527  * @soc_hdl: data path soc handle
2528  * @pdev_id: device instance id
2529  * @sta_desc: peer description
2530  *
2531  * Register peer into physical device
2532  *
2533  * Return: QDF_STATUS_SUCCESS on successful registration
2534  *         QDF_STATUS_E_FAULT peer not found
2535  */
2536 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2537 			    struct ol_txrx_desc_type *sta_desc);
2538
2539 /**
2540  * dp_clear_peer() - remove peer from physical device
2541  * @soc_hdl: data path soc handle
2542  * @pdev_id: device instance id
2543  * @peer_addr: peer mac address
2544  *
2545  * remove peer from physical device
2546  *
2547  * Return: QDF_STATUS_SUCCESS on successful removal
2548  *         QDF_STATUS_E_FAULT peer not found
2549  */
2550 QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2551 			 struct qdf_mac_addr peer_addr);
2552
2553 /**
2554  * dp_find_peer_exist_on_vdev() - find if peer exists on the given vdev
2555  * @soc_hdl: datapath soc handle
2556  * @vdev_id: vdev instance id
2557  * @peer_addr: peer mac address
2558  *
2559  * Return: true or false
2560  */
2561 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2562 				uint8_t *peer_addr);
2563
2564 /**
2565  * dp_find_peer_exist_on_other_vdev() - find if peer exists
2566  * on other than the given vdev
2567  * @soc_hdl: datapath soc handle
2568  * @vdev_id: vdev instance id
2569  * @peer_addr: peer mac address
2570  * @max_bssid: max number of bssids
2571  *
2572  * Return: true or false
2573  */
2574 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
2575 				      uint8_t vdev_id, uint8_t *peer_addr,
2576 				      uint16_t max_bssid);
2577
2578 /**
2579  * dp_peer_state_update() - update peer local state
2580  * @soc: datapath soc handle
2581  * @peer_mac: peer mac address
2582  * @state: new peer local state
2583  *
2584  * update peer local state
2585  *
2586  * Return: QDF_STATUS_SUCCESS on successful state update
2587  */
2588 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac,
2589 				enum ol_txrx_peer_state state);
2590
2591 /**
2592  * dp_get_vdevid() - Get the id of the vdev on which the peer is registered
2593  * @soc_hdl: datapath soc handle
2594  * @peer_mac: peer mac address
2595  * @vdev_id: virtual interface id on which the peer is registered
2596  *
2597  * Get the id of the virtual interface on which the peer is registered
2598  *
2599  * Return: QDF_STATUS_SUCCESS if the peer is found
2600  */
2601 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
2602 			 uint8_t *vdev_id);
2603
2604 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
2605 					  struct qdf_mac_addr peer_addr);
2606
2607 /**
2608  * dp_get_vdev_for_peer() - Get the virtual interface instance to which the peer belongs
2609  * @peer: peer instance
2610  *
2611  * Get the virtual interface instance to which the peer belongs
2612  *
2613  * Return: virtual interface instance pointer
2614  *         NULL if it cannot be found
2615  */
2616 struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
2617
2618 /**
2619  * dp_peer_get_peer_mac_addr() - Get peer mac address
2620  * @peer: peer instance
2621  *
2622  * Get peer mac address
2623  *
2624  * Return: peer mac address pointer
2625  *         NULL if it cannot be found
2626  */
2627 uint8_t *dp_peer_get_peer_mac_addr(void *peer);
2628
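/*
 * Example (illustrative sketch, not driver code): a typical station
 * bring-up/teardown flow through the extended peer API above; the
 * OL_TXRX_PEER_STATE_AUTH value comes from the generic ol_txrx peer
 * state enum, and error handling is elided:
 *
 *	struct ol_txrx_desc_type sta_desc = { 0 };	// filled by caller
 *
 *	dp_register_peer(soc_hdl, pdev_id, &sta_desc);
 *	dp_peer_state_update(soc_hdl, peer_mac, OL_TXRX_PEER_STATE_AUTH);
 *	...
 *	dp_clear_peer(soc_hdl, pdev_id, peer_addr);
 */
2629 /**
2630  * dp_get_peer_state() - Get local peer state
2631  * @soc: datapath soc handle
2632  * @vdev_id: vdev id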
2633 * @peer_mac: peer mac addr 2634 * 2635 * Get local peer state 2636 * 2637 * Return: peer status 2638 */ 2639 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id, 2640 uint8_t *peer_mac); 2641 2642 /** 2643 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device 2644 * @pdev: data path device instance 2645 * 2646 * local peer id pool alloc for physical device 2647 * 2648 * Return: none 2649 */ 2650 void dp_local_peer_id_pool_init(struct dp_pdev *pdev); 2651 2652 /** 2653 * dp_local_peer_id_alloc() - allocate local peer id 2654 * @pdev: data path device instance 2655 * @peer: new peer instance 2656 * 2657 * allocate local peer id 2658 * 2659 * Return: none 2660 */ 2661 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer); 2662 2663 /** 2664 * dp_local_peer_id_free() - remove local peer id 2665 * @pdev: data path device instance 2666 * @peer: peer instance should be removed 2667 * 2668 * remove local peer id 2669 * 2670 * Return: none 2671 */ 2672 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer); 2673 2674 /** 2675 * dp_set_peer_as_tdls_peer() - set tdls peer flag to peer 2676 * @soc_hdl: datapath soc handle 2677 * @vdev_id: vdev_id 2678 * @peer_mac: peer mac addr 2679 * @val: tdls peer flag 2680 * 2681 * Return: none 2682 */ 2683 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2684 uint8_t *peer_mac, bool val); 2685 #else 2686 static inline 2687 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2688 uint8_t *vdev_id) 2689 { 2690 return QDF_STATUS_E_NOSUPPORT; 2691 } 2692 2693 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev) 2694 { 2695 } 2696 2697 static inline 2698 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) 2699 { 2700 } 2701 2702 static inline 2703 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) 2704 { 2705 } 2706 2707 static inline 2708 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2709 uint8_t *peer_mac, bool val) 2710 { 2711 } 2712 #endif 2713 2714 /** 2715 * dp_find_peer_exist - find peer if already exists 2716 * @soc_hdl: datapath soc handle 2717 * @pdev_id: physical device instance id 2718 * @peer_addr: peer mac address 2719 * 2720 * Return: true or false 2721 */ 2722 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2723 uint8_t *peer_addr); 2724 2725 #ifdef DP_UMAC_HW_RESET_SUPPORT 2726 /** 2727 * dp_pause_reo_send_cmd() - Pause Reo send commands. 2728 * @soc: dp soc 2729 * 2730 * Return: none 2731 */ 2732 void dp_pause_reo_send_cmd(struct dp_soc *soc); 2733 2734 /** 2735 * dp_resume_reo_send_cmd() - Resume Reo send commands. 
2736 * @soc: dp soc 2737 * 2738 * Return: none 2739 */ 2740 void dp_resume_reo_send_cmd(struct dp_soc *soc); 2741 2742 /** 2743 * dp_cleanup_reo_cmd_module - Clean up the reo cmd module 2744 * @soc: DP SoC handle 2745 * 2746 * Return: none 2747 */ 2748 void dp_cleanup_reo_cmd_module(struct dp_soc *soc); 2749 2750 /** 2751 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist 2752 * @soc: DP SOC handle 2753 * 2754 * Return: none 2755 */ 2756 void dp_reo_desc_freelist_destroy(struct dp_soc *soc); 2757 2758 /** 2759 * dp_reset_rx_reo_tid_queue() - Reset the reo tid queues 2760 * @soc: dp soc 2761 * @hw_qdesc_vaddr: starting address of the tid queues 2762 * @size: size of the memory pointed to by hw_qdesc_vaddr 2763 * 2764 * Return: none 2765 */ 2766 void dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr, 2767 uint32_t size); 2768 2769 2770 static inline void dp_umac_reset_trigger_pre_reset_notify_cb(struct dp_soc *soc) 2771 { 2772 notify_pre_reset_fw_callback callback = soc->notify_fw_callback; 2773 2774 if (callback) 2775 callback(soc); 2776 } 2777 2778 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 2779 /** 2780 * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session 2781 * @soc: dp soc handle 2782 * 2783 * Return: void 2784 */ 2785 void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc); 2786 2787 /** 2788 * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session 2789 * @soc: dp soc handle 2790 * @umac_reset_ctx: Umac reset context 2791 * @rx_event: Rx event received 2792 * @is_target_recovery: Flag to indicate if it is triggered for target recovery 2793 * 2794 * Return: status 2795 */ 2796 QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc, 2797 struct dp_soc_umac_reset_ctx *umac_reset_ctx, 2798 enum umac_reset_rx_event rx_event, 2799 bool is_target_recovery); 2800 2801 /** 2802 * dp_umac_reset_handle_action_cb() - Function to call action callback 2803 * @soc: dp soc handle 2804 * @umac_reset_ctx: Umac reset context 2805 * @action: Action to call the callback for 2806 * 2807 * Return: QDF_STATUS status 2808 */ 2809 QDF_STATUS dp_umac_reset_handle_action_cb(struct dp_soc *soc, 2810 struct dp_soc_umac_reset_ctx *umac_reset_ctx, 2811 enum umac_reset_action action); 2812 2813 /** 2814 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command 2815 * @umac_reset_ctx: UMAC reset context 2816 * @tx_cmd: Tx command to be posted 2817 * 2818 * Return: QDF status of operation 2819 */ 2820 QDF_STATUS 2821 dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx, 2822 enum umac_reset_tx_cmd tx_cmd); 2823 2824 /** 2825 * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator 2826 * @soc: dp soc handle 2827 * 2828 * Return: true if the soc is initiator or false otherwise 2829 */ 2830 bool dp_umac_reset_initiator_check(struct dp_soc *soc); 2831 2832 /** 2833 * dp_umac_reset_target_recovery_check() - Check if this is for target recovery 2834 * @soc: dp soc handle 2835 * 2836 * Return: true if the session is for target recovery or false otherwise 2837 */ 2838 bool dp_umac_reset_target_recovery_check(struct dp_soc *soc); 2839 2840 /** 2841 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored 2842 * @soc: dp soc handle 2843 * 2844 * Return: true if the soc is ignored or false otherwise 2845 */ 2846 bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc); 2847 2848 /** 2849 * dp_mlo_umac_reset_stats_print() - API to print MLO umac reset 
stats 2850 * @soc: dp soc handle 2851 * 2852 * Return: QDF_STATUS 2853 */ 2854 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc); 2855 #else 2856 static inline 2857 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc) 2858 { 2859 return QDF_STATUS_SUCCESS; 2860 } 2861 #endif 2862 #else 2863 static inline void dp_umac_reset_trigger_pre_reset_notify_cb(struct dp_soc *soc) 2864 { 2865 } 2866 #endif 2867 2868 #if defined(DP_UMAC_HW_RESET_SUPPORT) && defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 2869 /** 2870 * dp_umac_reset_notify_asserted_soc() - API to notify the asserted SOC 2871 * @soc: dp soc 2872 * 2873 * Return: QDF_STATUS 2874 */ 2875 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc); 2876 2877 /** 2878 * dp_get_umac_reset_in_progress_state() - API to check umac reset in progress 2879 * state 2880 * @psoc: dp soc handle 2881 * 2882 * Return: umac reset state 2883 */ 2884 enum cdp_umac_reset_state 2885 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc); 2886 #else 2887 static inline 2888 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc) 2889 { 2890 return QDF_STATUS_SUCCESS; 2891 } 2892 2893 static inline enum cdp_umac_reset_state 2894 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc) 2895 { 2896 return CDP_UMAC_RESET_NOT_IN_PROGRESS; 2897 } 2898 #endif 2899 2900 #ifndef WLAN_SOFTUMAC_SUPPORT 2901 QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type, 2902 struct hal_reo_cmd_params *params, 2903 void (*callback_fn), void *data); 2904 2905 /** 2906 * dp_reo_cmdlist_destroy() - Free REO commands in the queue 2907 * @soc: DP SoC handle 2908 * 2909 * Return: none 2910 */ 2911 void dp_reo_cmdlist_destroy(struct dp_soc *soc); 2912 2913 /** 2914 * dp_reo_status_ring_handler() - Handler for REO Status ring 2915 * @int_ctx: pointer to DP interrupt context 2916 * @soc: DP Soc handle 2917 * 2918 * Return: Number of descriptors reaped 2919 */ 2920 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, 2921 struct dp_soc *soc); 2922 #endif 2923 2924 /** 2925 * dp_aggregate_vdev_stats() - Consolidate stats at VDEV level 2926 * @vdev: DP VDEV handle 2927 * @vdev_stats: aggregate statistics 2928 * 2929 * return: void 2930 */ 2931 void dp_aggregate_vdev_stats(struct dp_vdev *vdev, 2932 struct cdp_vdev_stats *vdev_stats); 2933 2934 /** 2935 * dp_txrx_get_vdev_stats() - Update buffer with cdp_vdev_stats 2936 * @soc_hdl: CDP SoC handle 2937 * @vdev_id: vdev Id 2938 * @buf: buffer for vdev stats 2939 * @is_aggregate: are aggregate stats being collected 2940 * 2941 * Return: QDF_STATUS 2942 */ 2943 QDF_STATUS 2944 dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2945 void *buf, bool is_aggregate); 2946 2947 /** 2948 * dp_rx_bar_stats_cb() - BAR received stats callback 2949 * @soc: SOC handle 2950 * @cb_ctxt: Call back context 2951 * @reo_status: Reo status 2952 * 2953 * Return: void 2954 */ 2955 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, 2956 union hal_reo_status *reo_status); 2957 2958 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id, 2959 qdf_nbuf_t nbuf, 2960 uint8_t newmac[][QDF_MAC_ADDR_SIZE], 2961 uint8_t new_mac_cnt, uint8_t tid, 2962 bool is_igmp, bool is_dms_pkt); 2963 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 2964 2965 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 2966 2967 /** 2968 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW 2969 * @pdev: 
DP PDEV handle
2970  * @stats_type_upload_mask: stats type requested by user
2971  * @config_param_0: extra configuration parameters
2972  * @config_param_1: extra configuration parameters
2973  * @config_param_2: extra configuration parameters
2974  * @config_param_3: extra configuration parameters
2975  * @cookie: cookie value identifying the stats request
2976  * @cookie_msb: MSB bits of the cookie, carrying the DBG_STATS_COOKIE_* use-case flags
2977  * @mac_id: mac number
2978  *
2979  * Return: QDF_STATUS
2980  */
2981 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
2982 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
2983 		uint32_t config_param_1, uint32_t config_param_2,
2984 		uint32_t config_param_3, int cookie, int cookie_msb,
2985 		uint8_t mac_id);
2986
2987 /**
2988  * dp_htt_stats_print_tag() - function to select the tag type and
2989  * print the corresponding tag structure
2990  * @pdev: pdev pointer
2991  * @tag_type: tag type that is to be printed
2992  * @tag_buf: pointer to the tag structure
2993  *
2994  * Return: void
2995  */
2996 void dp_htt_stats_print_tag(struct dp_pdev *pdev,
2997 			    uint8_t tag_type, uint32_t *tag_buf);
2998
2999 /**
3000  * dp_htt_stats_copy_tag() - function to select the tag type and
3001  * copy the corresponding tag structure
3002  * @pdev: DP_PDEV handle
3003  * @tag_type: tag type that is to be copied
3004  * @tag_buf: pointer to the tag structure
3005  *
3006  * Return: void
3007  */
3008 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf);
3009
3010 /**
3011  * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
3012  * HTT message to pass to FW
3013  * @pdev: DP PDEV handle
3014  * @tuple_mask: tuple configuration to report 3 tuple hash value in either
3015  * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
3016  *
3017  * tuple_mask[1:0]:
3018  * 00 - Do not report 3 tuple hash value
3019  * 10 - Report 3 tuple hash value in toeplitz_2_or_4
3020  * 01 - Report 3 tuple hash value in flow_id_toeplitz
3021  * 11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
3022  * @mac_id: MAC ID (an illustrative call is sketched below, after the IPA block)
3023  *
3024  * Return: QDF_STATUS
3025  */
3026 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask,
3027 				     uint8_t mac_id);
3028
3029 #ifdef IPA_OFFLOAD
3030 /**
3031  * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo
3032  * @soc: soc handle
3033  * @cb_ctxt: combination of peer_id and tid
3034  * @reo_status: reo status
3035  *
3036  * Return: void
3037  */
3038 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
3039 				       union hal_reo_status *reo_status);
3040
3041 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
3042 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb);
3043 #ifdef IPA_OPT_WIFI_DP
3044 void dp_ipa_wdi_opt_dpath_notify_flt_rlsd(int flt0_rslt,
3045 					  int flt1_rslt);
3046 void dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(int flt0_rslt, int flt1_rslt);
3047 void dp_ipa_wdi_opt_dpath_notify_flt_rsvd(bool is_success);
3048 #endif
3049 #ifdef QCA_ENHANCED_STATS_SUPPORT
3050 /**
3051  * dp_peer_aggregate_tid_stats() - aggregate rx tid stats
3052  * @peer: Data Path peer
3053  *
3054  * Return: void
3055  */
3056 void dp_peer_aggregate_tid_stats(struct dp_peer *peer);
3057 #endif
3058 #else
3059 static inline void dp_peer_aggregate_tid_stats(struct dp_peer *peer)
3060 {
3061 }
3062 #endif
3063
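/*
 * Example (illustrative): request the 3-tuple hash in both report
 * locations (tuple_mask[1:0] = 0b11) on MAC 0, per the bit encoding
 * documented for dp_h2t_3tuple_config_send() above:
 *
 *	dp_h2t_3tuple_config_send(pdev, 0x3, 0);
 */
3064 /**
3065  * dp_set_key_sec_type_wifi3() - set security mode of key
3066  * @soc: Datapath soc handle
3067  * @vdev_id: id of datapath vdev
3068  * @peer_mac: Datapath peer mac address
3069  * @sec_type: security type
3070  * @is_unicast: key type
3071  *
3072  */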
3073 QDF_STATUS
3074 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3075 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3076 			  bool is_unicast);
3077
3078 /**
3079  * dp_get_pdev_for_mac_id() - Return pdev for mac_id
3080  * @soc: handle to DP soc
3081  * @mac_id: MAC id
3082  *
3083  * Return: pdev corresponding to the MAC id
3084  */
3085 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id);
3086
3087 QDF_STATUS
3088 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id,
3089 		   uint8_t *peer_mac,
3090 		   bool is_unicast, uint32_t *key);
3091
3092 /**
3093  * dp_check_pdev_exists() - Validate pdev before use
3094  * @soc: dp soc handle
3095  * @data: pdev handle
3096  *
3097  * Return: true if the pdev is known to the soc, false otherwise
3098  */
3099 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data);
3100
3101 /**
3102  * dp_update_delay_stats() - Update delay statistics in structure
3103  * and fill min, max and avg delay
3104  * @tstats: tid tx stats
3105  * @rstats: tid rx stats
3106  * @delay: delay value (in ms, or in us when @delay_in_us is set)
3107  * @tid: tid value
3108  * @mode: type of tx delay mode
3109  * @ring_id: ring number
3110  * @delay_in_us: flag to indicate whether the delay is in ms or us (see the example sketched further below)
3111  *
3112  * Return: none
3113  */
3114 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
3115 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
3116 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
3117 			   bool delay_in_us);
3118
3119 /**
3120  * dp_print_ring_stats(): Print tail and head pointer
3121  * @pdev: DP_PDEV handle
3122  *
3123  * Return: void
3124  */
3125 void dp_print_ring_stats(struct dp_pdev *pdev);
3126
3127 /**
3128  * dp_print_ring_stat_from_hal(): Print tail and head pointer through hal
3129  * @soc: soc handle
3130  * @srng: srng handle
3131  * @ring_type: ring type
3132  *
3133  * Return: void
3134  */
3135 void
3136 dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
3137 			    enum hal_ring_type ring_type);
3138
3139 /**
3140  * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
3141  * @pdev: DP pdev handle
3142  *
3143  * Return: void
3144  */
3145 void dp_print_pdev_cfg_params(struct dp_pdev *pdev);
3146
3147 /**
3148  * dp_print_soc_cfg_params() - Dump soc wlan config parameters
3149  * @soc: Soc handle
3150  *
3151  * Return: void
3152  */
3153 void dp_print_soc_cfg_params(struct dp_soc *soc);
3154
3155 /**
3156  * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
3157  * @ring_type: Ring type
3158  *
3159  * Return: char const pointer
3160  */
3161 const
3162 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type);
3163
3164 /**
3165  * dp_txrx_path_stats() - Function to dump the tx/rx path statistics
3166  * @soc: soc handle
3167  *
3168  * Return: none
3169  */
3170 void dp_txrx_path_stats(struct dp_soc *soc);
3171
3172 /**
3173  * dp_print_per_ring_stats(): Packet count per ring
3174  * @soc: soc handle
3175  *
3176  * Return: None
3177  */
3178 void dp_print_per_ring_stats(struct dp_soc *soc);
3179
3180 /**
3181  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
3182  * @pdev: DP PDEV handle
3183  *
3184  * Return: void
3185  */
3186 void dp_aggregate_pdev_stats(struct dp_pdev *pdev);
3187
3188 /**
3189  * dp_print_rx_rates(): Print Rx rate stats
3190  * @vdev: DP_VDEV handle
3191  *
3192  * Return: void
3193  */
3194 void dp_print_rx_rates(struct dp_vdev *vdev);
3195
3196 /**
3197  * dp_print_tx_rates(): Print tx rates
3198  * @vdev: DP_VDEV handle
3199  *
3200  * Return: void
3201  */
3202 void dp_print_tx_rates(struct dp_vdev *vdev);
3203
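/*
 * Example (illustrative): recording a 500 us transmit-completion delay
 * for TID 0 on ring 1 via dp_update_delay_stats(); rstats is NULL for a
 * tx-side update, and CDP_DELAY_STATS_FW_HW_TRANSMIT is assumed from the
 * cdp delay-stats mode enum:
 *
 *	dp_update_delay_stats(tstats, NULL, 500, 0,
 *			      CDP_DELAY_STATS_FW_HW_TRANSMIT, 1, true);
 */
3204 /**
3205  * dp_print_peer_stats(): print peer stats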
3206 * @peer: DP_PEER handle 3207 * @peer_stats: buffer holding peer stats 3208 * 3209 * return void 3210 */ 3211 void dp_print_peer_stats(struct dp_peer *peer, 3212 struct cdp_peer_stats *peer_stats); 3213 3214 /** 3215 * dp_print_pdev_tx_stats(): Print Pdev level TX stats 3216 * @pdev: DP_PDEV Handle 3217 * 3218 * Return:void 3219 */ 3220 void 3221 dp_print_pdev_tx_stats(struct dp_pdev *pdev); 3222 3223 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MCAST_MLO) 3224 /** 3225 * dp_print_vdev_mlo_mcast_tx_stats(): Print vdev level mlo mcast tx stats 3226 * @vdev: DP_VDEV Handle 3227 * 3228 * Return:void 3229 */ 3230 void 3231 dp_print_vdev_mlo_mcast_tx_stats(struct dp_vdev *vdev); 3232 #else 3233 /** 3234 * dp_print_vdev_mlo_mcast_tx_stats(): Print vdev level mlo mcast tx stats 3235 * @vdev: DP_VDEV Handle 3236 * 3237 * Return:void 3238 */ 3239 static inline 3240 void dp_print_vdev_mlo_mcast_tx_stats(struct dp_vdev *vdev) 3241 { 3242 } 3243 #endif 3244 3245 /** 3246 * dp_print_pdev_rx_stats(): Print Pdev level RX stats 3247 * @pdev: DP_PDEV Handle 3248 * 3249 * Return: void 3250 */ 3251 void 3252 dp_print_pdev_rx_stats(struct dp_pdev *pdev); 3253 3254 /** 3255 * dp_print_soc_tx_stats(): Print SOC level stats 3256 * @soc: DP_SOC Handle 3257 * 3258 * Return: void 3259 */ 3260 void dp_print_soc_tx_stats(struct dp_soc *soc); 3261 3262 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX 3263 /** 3264 * dp_print_global_desc_count(): Print global desc in use 3265 * 3266 * Return: void 3267 */ 3268 void dp_print_global_desc_count(void); 3269 #else 3270 /** 3271 * dp_print_global_desc_count(): Print global desc in use 3272 * 3273 * Return: void 3274 */ 3275 static inline 3276 void dp_print_global_desc_count(void) 3277 { 3278 } 3279 #endif 3280 3281 /** 3282 * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc 3283 * @soc: dp_soc handle 3284 * 3285 * Return: None 3286 */ 3287 void dp_print_soc_interrupt_stats(struct dp_soc *soc); 3288 3289 /** 3290 * dp_print_tx_ppeds_stats() - Print Tx in use stats for the soc in DS 3291 * @soc: dp_soc handle 3292 * 3293 * Return: None 3294 */ 3295 3296 void dp_print_tx_ppeds_stats(struct dp_soc *soc); 3297 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING 3298 /** 3299 * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats 3300 * for all SRNGs 3301 * @soc: DP soc handle 3302 * @srng_mask: SRNGs mask for dumping usage watermark stats 3303 * 3304 * Return: None 3305 */ 3306 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask); 3307 #else 3308 static inline 3309 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask) 3310 { 3311 } 3312 #endif 3313 3314 /** 3315 * dp_print_soc_rx_stats() - Print SOC level Rx stats 3316 * @soc: DP_SOC Handle 3317 * 3318 * Return: void 3319 */ 3320 void dp_print_soc_rx_stats(struct dp_soc *soc); 3321 3322 /** 3323 * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac 3324 * 3325 * @mac_id: MAC id 3326 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 3327 * 3328 * Single pdev using both MACs will operate on both MAC rings, 3329 * which is the case for MCL. 3330 * For WIN each PDEV will operate one ring, so index is zero. 
3331  *
3332  */
3333 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id)
3334 {
3335 	if (mac_id && pdev_id) {
3336 		qdf_print("Both mac_id and pdev_id cannot be non zero");
3337 		QDF_BUG(0);
3338 		return 0;
3339 	}
3340 	return (mac_id + pdev_id);
3341 }
3342
3343 /**
3344  * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id
3345  * @soc: soc pointer
3346  * @mac_id: MAC id
3347  * @pdev_id: pdev_id corresponding to pdev, 0 for MCL
3348  *
3349  * For MCL, a single pdev using both MACs will operate on both MAC rings.
3350  *
3351  * For WIN, each PDEV will operate one ring.
3352  *
3353  */
3354 static inline int
3355 dp_get_lmac_id_for_pdev_id
3356 	(struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id)
3357 {
3358 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3359 		if (mac_id && pdev_id) {
3360 			qdf_print("Both mac_id and pdev_id cannot be non zero");
3361 			QDF_BUG(0);
3362 			return 0;
3363 		}
3364 		return (mac_id + pdev_id);
3365 	}
3366
3367 	return soc->pdev_list[pdev_id]->lmac_id;
3368 }
3369
3370 /**
3371  * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id
3372  * @soc: soc pointer
3373  * @lmac_id: LMAC id
3374  *
3375  * For MCL, a single pdev exists
3376  *
3377  * For WIN, each PDEV will operate one ring.
3378  *
3379  */
3380 static inline struct dp_pdev *
3381 dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id)
3382 {
3383 	uint8_t i = 0;
3384
3385 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3386 		i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id);
3387 		return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL);
3388 	}
3389
3390 	/* Typically for MCL, as there is only one PDEV */
3391 	return soc->pdev_list[0];
3392 }
3393
3394 /**
3395  * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev
3396  * corresponding to host pdev id
3397  * @soc: soc pointer
3398  * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
3399  *
3400  * Return: target pdev_id for host pdev id. For WIN, this is derived through
3401  * a two step process:
3402  * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change
3403  *    during mode switch)
3404  * 2. Get target pdev_id (set up during WMI ready) from lmac_id
3405  *
3406  * For MCL, return the offset-1 translated mac_id
3407  */
3408 static inline int
3409 dp_calculate_target_pdev_id_from_host_pdev_id
3410 	(struct dp_soc *soc, uint32_t mac_for_pdev)
3411 {
3412 	struct dp_pdev *pdev;
3413
3414 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
3415 		return DP_SW2HW_MACID(mac_for_pdev);
3416
3417 	pdev = soc->pdev_list[mac_for_pdev];
3418
3419 	/* non-MCL case, get original target_pdev mapping */
3420 	return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id);
3421 }
3422
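/*
 * Worked example (illustrative): when no per-pdev LMAC rings exist (the
 * MCL case), the host pdev_id is simply offset-translated, so
 * dp_calculate_target_pdev_id_from_host_pdev_id(soc, 0) returns
 * DP_SW2HW_MACID(0), i.e. 1. On WIN the result instead comes from the
 * lmac_id-based mapping recorded at WMI ready time.
 */
3423 /**
3424  * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding
3425  * to host pdev id
3426  * @soc: soc pointer
3427  * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL
3428  *
3429  * Return: target pdev_id for host pdev id.
3430  * For WIN, return the value stored in pdev object.
3431  * For MCL, return the offset-1 translated mac_id.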
3432  */
3433 static inline int
3434 dp_get_target_pdev_id_for_host_pdev_id
3435 	(struct dp_soc *soc, uint32_t mac_for_pdev)
3436 {
3437 	struct dp_pdev *pdev;
3438
3439 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
3440 		return DP_SW2HW_MACID(mac_for_pdev);
3441
3442 	pdev = soc->pdev_list[mac_for_pdev];
3443
3444 	return pdev->target_pdev_id;
3445 }
3446
3447 /**
3448  * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding
3449  * to target pdev id
3450  * @soc: soc pointer
3451  * @pdev_id: pdev_id corresponding to target pdev
3452  *
3453  * Return: host pdev_id for target pdev id. For WIN, this is derived through
3454  * a two step process:
3455  * 1. Get lmac_id corresponding to target pdev_id
3456  * 2. Get host pdev_id (set up during WMI ready) from lmac_id
3457  *
3458  * For MCL, return the 0-offset pdev_id
3459  */
3460 static inline int
3461 dp_get_host_pdev_id_for_target_pdev_id
3462 	(struct dp_soc *soc, uint32_t pdev_id)
3463 {
3464 	struct dp_pdev *pdev;
3465 	int lmac_id;
3466
3467 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
3468 		return DP_HW2SW_MACID(pdev_id);
3469
3470 	/* non-MCL case, get original target_lmac mapping from target pdev */
3471 	lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx,
3472 					  DP_HW2SW_MACID(pdev_id));
3473
3474 	/* Get host pdev from lmac */
3475 	pdev = dp_get_pdev_for_lmac_id(soc, lmac_id);
3476
3477 	return pdev ? pdev->pdev_id : INVALID_PDEV_ID;
3478 }
3479
3480 /**
3481  * dp_get_mac_id_for_mac() - Return mac id corresponding to WIN and MCL mac_ids
3482  *
3483  * @soc: handle to DP soc
3484  * @mac_id: MAC id
3485  *
3486  * Single pdev using both MACs will operate on both MAC rings,
3487  * which is the case for MCL.
3488  * For WIN each PDEV will operate one ring, so index is zero.
3489  *
3490  */
3491 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
3492 {
3493 	/*
3494 	 * Single pdev using both MACs will operate on both MAC rings,
3495 	 * which is the case for MCL.
3496 	 */
3497 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
3498 		return mac_id;
3499
3500 	/* For WIN each PDEV will operate one ring, so index is zero. */
3501 	return 0;
3502 }
3503
3504 /**
3505  * dp_is_subtype_data() - check if the frame subtype is data
3506  *
3507  * @frame_ctrl: Frame control field
3508  *
3509  * check the frame control field and verify if the packet
3510  * is a data packet.
3511  *
3512  * Return: true or false
3513  */
3514 static inline bool dp_is_subtype_data(uint16_t frame_ctrl)
3515 {
3516 	if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) ==
3517 	    QDF_IEEE80211_FC0_TYPE_DATA) &&
3518 	    (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
3519 	    QDF_IEEE80211_FC0_SUBTYPE_DATA) ||
3520 	    ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
3521 	    QDF_IEEE80211_FC0_SUBTYPE_QOS))) {
3522 		return true;
3523 	}
3524
3525 	return false;
3526 }
3527
3528 #ifdef WDI_EVENT_ENABLE
3529 /**
3530  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3531  * @pdev: DP PDEV handle
3532  * @stats_type_upload_mask: stats type requested by user
3533  * @mac_id: Mac id number
3534  *
3535  * Return: QDF_STATUS
3536  */
3537 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
3538 				     uint32_t stats_type_upload_mask,
3539 				     uint8_t mac_id);
3540
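/*
 * Example (illustrative sketch): subscribing to and later dropping a WDI
 * event with the pair declared below. The wdi_event_subscribe field
 * names (callback/context), the WDI_EVENT_RX_DESC id and the handler
 * names are assumptions here, not definitive driver usage:
 *
 *	static wdi_event_subscribe rx_desc_sub;
 *
 *	rx_desc_sub.callback = my_rx_desc_cb;	// hypothetical handler
 *	rx_desc_sub.context = my_ctx;		// hypothetical context
 *	dp_wdi_event_sub(soc, pdev_id, &rx_desc_sub, WDI_EVENT_RX_DESC);
 *	...
 *	dp_wdi_event_unsub(soc, pdev_id, &rx_desc_sub, WDI_EVENT_RX_DESC);
 */
3541 /**
3542  * dp_wdi_event_unsub() - WDI event unsubscribe
3543  * @soc: soc handle
3544  * @pdev_id: id of pdev
3545  * @event_cb_sub_handle: subscribed event handle
3546  * @event: Event to be unsubscribed
3547  *
3548  * Return: 0 for success, nonzero for failure.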
3549  */
3550 int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
3551 		       wdi_event_subscribe *event_cb_sub_handle,
3552 		       uint32_t event);
3553
3554 /**
3555  * dp_wdi_event_sub() - Subscribe WDI event
3556  * @soc: soc handle
3557  * @pdev_id: id of pdev
3558  * @event_cb_sub_handle: subscription event handle
3559  * @event: Event to be subscribed
3560  *
3561  * Return: 0 for success, nonzero for failure.
3562  */
3563 int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
3564 		     wdi_event_subscribe *event_cb_sub_handle,
3565 		     uint32_t event);
3566
3567 /**
3568  * dp_wdi_event_handler() - Event handler for WDI event
3569  * @event: wdi event number
3570  * @soc: soc handle
3571  * @data: pointer to data
3572  * @peer_id: peer id number
3573  * @status: HTT rx status
3574  * @pdev_id: id of pdev
3575  *
3576  * Called when the event is raised, to deliver it to all subscribers
3577  *
3578  * Return: None
3579  */
3580 void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc,
3581 			  void *data, u_int16_t peer_id,
3582 			  int status, u_int8_t pdev_id);
3583
3584 /**
3585  * dp_wdi_event_attach() - Attach wdi event
3586  * @txrx_pdev: DP pdev handle
3587  *
3588  * Return: 0 for success, nonzero for failure.
3589  */
3590 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);
3591
3592 /**
3593  * dp_wdi_event_detach() - Detach WDI event
3594  * @txrx_pdev: DP pdev handle
3595  *
3596  * Return: 0 for success, nonzero for failure.
3597  */
3598 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev);
3599
3600 static inline void
3601 dp_hif_update_pipe_callback(struct dp_soc *dp_soc,
3602 			    void *cb_context,
3603 			    QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t),
3604 			    uint8_t pipe_id)
3605 {
3606 	struct hif_msg_callbacks hif_pipe_callbacks = { 0 };
3607
3608 	/* TODO: Temporary change to bypass HTC connection for this new
3609 	 * HIF pipe, which will be used for packet log and other high-
3610 	 * priority HTT messages.
Proper HTC connection to be added 3611 * later once required FW changes are available 3612 */ 3613 hif_pipe_callbacks.rxCompletionHandler = callback; 3614 hif_pipe_callbacks.Context = cb_context; 3615 hif_update_pipe_callback(dp_soc->hif_handle, 3616 DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks); 3617 } 3618 #else 3619 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 3620 wdi_event_subscribe *event_cb_sub_handle, 3621 uint32_t event) 3622 { 3623 return 0; 3624 } 3625 3626 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 3627 wdi_event_subscribe *event_cb_sub_handle, 3628 uint32_t event) 3629 { 3630 return 0; 3631 } 3632 3633 static inline 3634 void dp_wdi_event_handler(enum WDI_EVENT event, 3635 struct dp_soc *soc, 3636 void *data, u_int16_t peer_id, 3637 int status, u_int8_t pdev_id) 3638 { 3639 } 3640 3641 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev) 3642 { 3643 return 0; 3644 } 3645 3646 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev) 3647 { 3648 return 0; 3649 } 3650 3651 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 3652 uint32_t stats_type_upload_mask, uint8_t mac_id) 3653 { 3654 return 0; 3655 } 3656 3657 static inline void 3658 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, 3659 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 3660 uint8_t pipe_id) 3661 { 3662 } 3663 #endif 3664 3665 #ifdef VDEV_PEER_PROTOCOL_COUNT 3666 /** 3667 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 3668 * @vdev: VDEV DP object 3669 * @nbuf: data packet 3670 * @txrx_peer: DP TXRX Peer object 3671 * @is_egress: whether egress or ingress 3672 * @is_rx: whether rx or tx 3673 * 3674 * This function updates the per-peer protocol counters 3675 * Return: void 3676 */ 3677 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, 3678 qdf_nbuf_t nbuf, 3679 struct dp_txrx_peer *txrx_peer, 3680 bool is_egress, 3681 bool is_rx); 3682 3683 /** 3684 * dp_peer_stats_update_protocol_cnt() - update per-peer protocol counters 3685 * @soc: SOC DP object 3686 * @vdev_id: vdev_id 3687 * @nbuf: data packet 3688 * @is_egress: whether egress or ingress 3689 * @is_rx: whether rx or tx 3690 * 3691 * This function updates the per-peer protocol counters 3692 * 3693 * Return: void 3694 */ 3695 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc, 3696 int8_t vdev_id, 3697 qdf_nbuf_t nbuf, 3698 bool is_egress, 3699 bool is_rx); 3700 3701 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 3702 qdf_nbuf_t nbuf); 3703 3704 #else 3705 #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, txrx_peer, \ 3706 is_egress, is_rx) 3707 3708 static inline 3709 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 3710 qdf_nbuf_t nbuf) 3711 { 3712 } 3713 3714 #endif 3715 3716 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3717 /** 3718 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info 3719 * @soc_hdl: Handle to struct cdp_soc 3720 * 3721 * Return: none 3722 */ 3723 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl); 3724 3725 /** 3726 * dp_tx_dump_flow_pool_info_compact() - dump flow pool info 3727 * @soc: DP soc context 3728 * 3729 * Return: none 3730 */ 3731 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc); 3732 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, 3733 bool force); 3734 #else 3735 static inline void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc) 
{
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
static inline int
dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc, hal_ring_hdl);
}

static inline void
dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc, hal_ring_hdl);
}

#else
static inline int
dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc, hal_ring_hdl);
}

static inline void
dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc, hal_ring_hdl);
}
#endif

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * dp_srng_access_start() - Wrapper function to log access start of a hal ring
 * @int_ctx: pointer to DP interrupt context. This should not be NULL
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL ring which will be serviced
 *
 * Return: 0 on success; error on failure
 */
int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			 hal_ring_handle_t hal_ring_hdl);

/**
 * dp_srng_access_end() - Wrapper function to log access end of a hal ring
 * @int_ctx: pointer to DP interrupt context. This should not be NULL
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL ring which will be serviced
 *
 * Return: void
 */
void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			hal_ring_handle_t hal_ring_hdl);

#else
static inline int dp_srng_access_start(struct dp_intr *int_ctx,
					struct dp_soc *dp_soc,
					hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
}

static inline void dp_srng_access_end(struct dp_intr *int_ctx,
				       struct dp_soc *dp_soc,
				       hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

#ifdef QCA_CACHED_RING_DESC
/**
 * dp_srng_dst_get_next() - Wrapper function to get next ring desc
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL Destination Ring
 *
 * Return: HAL ring descriptor
 */
static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
					 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl);
}

/**
 * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached
 *				    descriptors
 * @dp_soc: DP Soc handle
 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring
 * @num_entries: Entry count
 *
 * Return: None
 */
static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc,
						hal_ring_handle_t hal_ring_hdl,
						uint32_t num_entries)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;

	hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries);
}
#else
static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc,
					 hal_ring_handle_t
hal_ring_hdl) 3848 { 3849 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3850 3851 return hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 3852 } 3853 3854 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 3855 hal_ring_handle_t hal_ring_hdl, 3856 uint32_t num_entries) 3857 { 3858 } 3859 #endif /* QCA_CACHED_RING_DESC */ 3860 3861 #if defined(QCA_CACHED_RING_DESC) && \ 3862 (defined(QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH) || \ 3863 defined(QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH)) 3864 /** 3865 * dp_srng_dst_prefetch() - Wrapper function to prefetch descs from dest ring 3866 * @hal_soc: HAL SOC handle 3867 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 3868 * @num_entries: Entry count 3869 * 3870 * Return: None 3871 */ 3872 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 3873 hal_ring_handle_t hal_ring_hdl, 3874 uint32_t num_entries) 3875 { 3876 return hal_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_entries); 3877 } 3878 3879 /** 3880 * dp_srng_dst_prefetch_32_byte_desc() - Wrapper function to prefetch 3881 * 32 byte descriptor starting at 3882 * 64 byte offset 3883 * @hal_soc: HAL SOC handle 3884 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 3885 * @num_entries: Entry count 3886 * 3887 * Return: None 3888 */ 3889 static inline 3890 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 3891 hal_ring_handle_t hal_ring_hdl, 3892 uint32_t num_entries) 3893 { 3894 return hal_srng_dst_prefetch_32_byte_desc(hal_soc, hal_ring_hdl, 3895 num_entries); 3896 } 3897 #else 3898 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 3899 hal_ring_handle_t hal_ring_hdl, 3900 uint32_t num_entries) 3901 { 3902 return NULL; 3903 } 3904 3905 static inline 3906 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 3907 hal_ring_handle_t hal_ring_hdl, 3908 uint32_t num_entries) 3909 { 3910 return NULL; 3911 } 3912 #endif 3913 3914 #ifdef QCA_ENH_V3_STATS_SUPPORT 3915 /** 3916 * dp_pdev_print_delay_stats(): Print pdev level delay stats 3917 * @pdev: DP_PDEV handle 3918 * 3919 * Return:void 3920 */ 3921 void dp_pdev_print_delay_stats(struct dp_pdev *pdev); 3922 3923 /** 3924 * dp_pdev_print_tid_stats(): Print pdev level tid stats 3925 * @pdev: DP_PDEV handle 3926 * 3927 * Return:void 3928 */ 3929 void dp_pdev_print_tid_stats(struct dp_pdev *pdev); 3930 3931 /** 3932 * dp_pdev_print_rx_error_stats(): Print pdev level rx error stats 3933 * @pdev: DP_PDEV handle 3934 * 3935 * Return:void 3936 */ 3937 void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev); 3938 #endif /* QCA_ENH_V3_STATS_SUPPORT */ 3939 3940 /** 3941 * dp_pdev_get_tid_stats(): Get accumulated pdev level tid_stats 3942 * @soc_hdl: soc handle 3943 * @pdev_id: id of dp_pdev handle 3944 * @tid_stats: Pointer for cdp_tid_stats_intf 3945 * 3946 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_INVAL 3947 */ 3948 QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3949 struct cdp_tid_stats_intf *tid_stats); 3950 3951 /** 3952 * dp_soc_set_txrx_ring_map() 3953 * @soc: DP handler for soc 3954 * 3955 * Return: Void 3956 */ 3957 void dp_soc_set_txrx_ring_map(struct dp_soc *soc); 3958 3959 /** 3960 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev 3961 * @vdev: DP vdev handle 3962 * 3963 * Return: struct cdp_vdev pointer 3964 */ 3965 static inline 3966 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev) 3967 { 3968 return (struct cdp_vdev *)vdev; 3969 } 3970 3971 /** 3972 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev 3973 * 
@pdev: DP pdev handle
 *
 * Return: struct cdp_pdev pointer
 */
static inline
struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev)
{
	return (struct cdp_pdev *)pdev;
}

/**
 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc
 * @psoc: DP psoc handle
 *
 * Return: struct cdp_soc pointer
 */
static inline
struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc)
{
	return (struct cdp_soc *)psoc;
}

/**
 * dp_soc_to_cdp_soc_t() - typecast dp psoc to ol txrx soc handle
 * @psoc: DP psoc handle
 *
 * Return: struct cdp_soc_t pointer
 */
static inline
struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc)
{
	return (struct cdp_soc_t *)psoc;
}

#if defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_flow_get_fse_stats() - Retrieve a flow's statistics
 * @pdev: pdev handle
 * @rx_flow_info: flow information in the Rx FST
 * @stats: stats to update
 *
 * Return: Success when flow statistics are updated, error on failure
 */
QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev,
				    struct cdp_rx_flow_info *rx_flow_info,
				    struct cdp_flow_stats *stats);

/**
 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table
 * @pdev: pdev handle
 * @rx_flow_info: DP flow parameters
 *
 * Return: Success when flow is deleted, error on failure
 */
QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev,
				   struct cdp_rx_flow_info *rx_flow_info);

/**
 * dp_rx_flow_add_entry() - Add a flow entry to flow search table
 * @pdev: DP pdev instance
 * @rx_flow_info: DP flow parameters
 *
 * Return: Success when flow is added, no-memory or already exists on error
 */
QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev,
				struct cdp_rx_flow_info *rx_flow_info);

/**
 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: QDF_STATUS_SUCCESS when the FST is attached, error otherwise
 */
QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_rx_fst_detach() - De-initialize Rx FST
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: None
 */
void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_mon_rx_update_rx_flow_tag_stats() - Update a mon flow's statistics
 * @pdev: pdev handle
 * @flow_id: flow index (truncated hash) in the Rx FST
 *
 * Return: Success when flow statistics are updated, error on failure
 */
QDF_STATUS
dp_mon_rx_update_rx_flow_tag_stats(struct dp_pdev *pdev, uint32_t flow_id);
#endif

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: Success when fst parameters are programmed in FW, error otherwise
 */
QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc,
					struct dp_pdev *pdev);
#endif

/**
 * dp_rx_fst_attach_wrapper() - wrapper API for dp_rx_fst_attach
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: QDF_STATUS of the underlying dp_rx_fst_attach() call
 */
extern QDF_STATUS
dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_rx_fst_detach_wrapper()
- wrapper API for dp_rx_fst_detach
 * @soc: SoC handle
 * @pdev: Pdev handle
 *
 * Return: None
 */
extern void
dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_vdev_get_ref() - API to take a reference for VDEV object
 * @soc: core DP soc context
 * @vdev: DP vdev
 * @mod_id: module id
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *	   else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt))
		return QDF_STATUS_E_INVAL;

	qdf_atomic_inc(&vdev->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id
 * @soc: core DP soc context
 * @vdev_id: vdev id from which the vdev object can be retrieved
 * @mod_id: module id which is requesting the reference
 *
 * Return: struct dp_vdev*: Pointer to DP vdev object
 */
static inline struct dp_vdev *
dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id,
		      enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return NULL;

	qdf_spin_lock_bh(&soc->vdev_map_lock);
	vdev = soc->vdev_id_map[vdev_id];

	if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&soc->vdev_map_lock);
		return NULL;
	}
	qdf_spin_unlock_bh(&soc->vdev_map_lock);

	return vdev;
}

/**
 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id
 * @soc: core DP soc context
 * @pdev_id: pdev id from which the pdev object can be retrieved
 *
 * Return: struct dp_pdev*: Pointer to DP pdev object
 */
static inline struct dp_pdev *
dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc,
				   uint8_t pdev_id)
{
	if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT))
		return NULL;

	return soc->pdev_list[pdev_id];
}

/**
 * dp_get_peer_mac_list(): function to get peer mac list of vdev
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @newmac: Table of the clients mac
 * @mac_cnt: No. of MACs required
 * @limit: Limit the number of clients
 *
 * Return: number of clients
 */
uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
			      u_int16_t mac_cnt, bool limit);

/**
 * dp_update_num_mac_rings_for_dbs() - Update No of MAC rings based on
 *				       DBS check
 * @soc: DP SoC context
 * @max_mac_rings: Pointer to variable for No of MAC rings
 *
 * Return: None
 */
void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
				     int *max_mac_rings);


#if defined(WLAN_SUPPORT_RX_FISA)
/**
 * dp_rx_fst_update_cmem_params() - Update CMEM FST params
 * @soc: DP SoC context
 * @num_entries: Number of flow search entries
 * @cmem_ba_lo: CMEM base address low
 * @cmem_ba_hi: CMEM base address high
 *
 * Return: None
 */
void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
				  uint32_t cmem_ba_lo, uint32_t cmem_ba_hi);

/**
 * dp_fisa_config() - FISA config handler
 * @cdp_soc: CDP SoC handle
 * @pdev_id: PDEV ID
 * @config_id: FISA config ID
 * @cfg: FISA config msg data
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id,
			  enum cdp_fisa_config_id config_id,
			  union cdp_fisa_config *cfg);
#else
static inline void
dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
			     uint32_t cmem_ba_lo, uint32_t cmem_ba_hi)
{
}
#endif /* WLAN_SUPPORT_RX_FISA */

#ifdef MAX_ALLOC_PAGE_SIZE
/**
 * dp_set_max_page_size() - Set the max page size for hw link desc.
 * @pages: link desc page handle
 * @max_alloc_size: max_alloc_size
 *
 * For MCL the page size is set to OS defined value and for WIN
 * the page size is set to the max_alloc_size cfg ini
 * param.
 * This is to ensure that WIN gets contiguous memory allocations
 * as per requirement.
 *
 * Return: None
 */
static inline
void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
			  uint32_t max_alloc_size)
{
	pages->page_size = qdf_page_size;
}

#else
static inline
void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
			  uint32_t max_alloc_size)
{
	pages->page_size = max_alloc_size;
}
#endif /* MAX_ALLOC_PAGE_SIZE */

/**
 * dp_history_get_next_index() - get the next entry to record an entry
 *				 in the history.
 * @curr_idx: Current index where the last entry is written.
 * @max_entries: Max number of entries in the history
 *
 * This function assumes that the max number of entries is a power of 2.
 *
 * Return: The index where the next entry is to be written.
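 *
 * For example (illustrative): with max_entries = 64, an atomic index
 * currently holding 66 is bumped to 67 by qdf_atomic_inc_return(), and
 * the entry is recorded at 67 & 63 = 3, wrapping transparently.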
4264 */ 4265 static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx, 4266 uint32_t max_entries) 4267 { 4268 uint32_t idx = qdf_atomic_inc_return(curr_idx); 4269 4270 return idx & (max_entries - 1); 4271 } 4272 4273 /** 4274 * dp_rx_skip_tlvs() - Skip TLVs len + L3 padding, save in nbuf->cb 4275 * @soc: Datapath soc handle 4276 * @nbuf: nbuf cb to be updated 4277 * @l3_padding: L3 padding 4278 * 4279 * Return: None 4280 */ 4281 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding); 4282 4283 #ifndef FEATURE_WDS 4284 static inline void 4285 dp_hmwds_ast_add_notify(struct dp_peer *peer, 4286 uint8_t *mac_addr, 4287 enum cdp_txrx_ast_entry_type type, 4288 QDF_STATUS err, 4289 bool is_peer_map) 4290 { 4291 } 4292 #endif 4293 4294 #ifdef HTT_STATS_DEBUGFS_SUPPORT 4295 /** 4296 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 4297 * debugfs for HTT stats 4298 * @pdev: dp pdev handle 4299 * 4300 * Return: QDF_STATUS 4301 */ 4302 QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev); 4303 4304 /** 4305 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 4306 * HTT stats 4307 * @pdev: dp pdev handle 4308 * 4309 * Return: none 4310 */ 4311 void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev); 4312 #else 4313 4314 /** 4315 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 4316 * debugfs for HTT stats 4317 * @pdev: dp pdev handle 4318 * 4319 * Return: QDF_STATUS 4320 */ 4321 static inline QDF_STATUS 4322 dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev) 4323 { 4324 return QDF_STATUS_SUCCESS; 4325 } 4326 4327 /** 4328 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 4329 * HTT stats 4330 * @pdev: dp pdev handle 4331 * 4332 * Return: none 4333 */ 4334 static inline void 4335 dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev) 4336 { 4337 } 4338 #endif /* HTT_STATS_DEBUGFS_SUPPORT */ 4339 4340 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR 4341 /** 4342 * dp_soc_swlm_attach() - attach the software latency manager resources 4343 * @soc: Datapath global soc handle 4344 * 4345 * Return: QDF_STATUS 4346 */ 4347 static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc) 4348 { 4349 return QDF_STATUS_SUCCESS; 4350 } 4351 4352 /** 4353 * dp_soc_swlm_detach() - detach the software latency manager resources 4354 * @soc: Datapath global soc handle 4355 * 4356 * Return: QDF_STATUS 4357 */ 4358 static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc) 4359 { 4360 return QDF_STATUS_SUCCESS; 4361 } 4362 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */ 4363 4364 #ifndef WLAN_DP_PROFILE_SUPPORT 4365 static inline void wlan_dp_soc_cfg_sync_profile(struct cdp_soc_t *cdp_soc) {} 4366 4367 static inline void wlan_dp_pdev_cfg_sync_profile(struct cdp_soc_t *cdp_soc, 4368 uint8_t pdev_id) {} 4369 #endif 4370 4371 /** 4372 * dp_get_peer_id(): function to get peer id by mac 4373 * @soc: Datapath soc handle 4374 * @vdev_id: vdev id 4375 * @mac: Peer mac address 4376 * 4377 * Return: valid peer id on success 4378 * HTT_INVALID_PEER on failure 4379 */ 4380 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac); 4381 4382 #ifdef QCA_SUPPORT_WDS_EXTENDED 4383 /** 4384 * dp_wds_ext_set_peer_rx(): function to set peer rx handler 4385 * @soc: Datapath soc handle 4386 * @vdev_id: vdev id 4387 * @mac: Peer mac address 4388 * @rx: rx function pointer 4389 * @osif_peer: OSIF peer handle 4390 * 4391 * Return: QDF_STATUS_SUCCESS on success 4392 * 
QDF_STATUS_E_INVAL if peer is not found
 *	   QDF_STATUS_E_ALREADY if rx is already set/unset
 */
QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
				  uint8_t vdev_id,
				  uint8_t *mac,
				  ol_txrx_rx_fp rx,
				  ol_osif_peer_handle osif_peer);

/**
 * dp_wds_ext_get_peer_osif_handle(): function to get peer osif handle
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @mac: Peer mac address
 * @osif_peer: OSIF peer handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_INVAL if peer is not found
 */
QDF_STATUS dp_wds_ext_get_peer_osif_handle(
				ol_txrx_soc_handle soc,
				uint8_t vdev_id,
				uint8_t *mac,
				ol_osif_peer_handle *osif_peer);

#endif /* QCA_SUPPORT_WDS_EXTENDED */

#ifdef DP_MEM_PRE_ALLOC

/**
 * dp_context_alloc_mem() - allocate memory for DP context
 * @soc: datapath soc handle
 * @ctxt_type: DP context type
 * @ctxt_size: DP context size
 *
 * Return: DP context address
 */
void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			   size_t ctxt_size);

/**
 * dp_context_free_mem() - Free memory of DP context
 * @soc: datapath soc handle
 * @ctxt_type: DP context type
 * @vaddr: Address of context memory
 *
 * Return: None
 */
void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			 void *vaddr);

/**
 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages
 * @soc: datapath soc handle
 * @desc_type: memory request source type
 * @pages: multi page information storage
 * @element_size: each element size
 * @element_num: total number of elements should be allocated
 * @memctxt: memory context
 * @cacheable: coherent memory or cacheable memory
 *
 * This function is a wrapper for memory allocation over multiple
 * pages. If a dp prealloc method is registered, prealloc is attempted
 * first; if prealloc fails, it falls back to a regular allocation via
 * qdf_mem_multi_pages_alloc().
 *
 * Return: None
 */
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum qdf_dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint32_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable);

/**
 * dp_desc_multi_pages_mem_free() - free multiple pages memory
 * @soc: datapath soc handle
 * @desc_type: memory request source type
 * @pages: multi page information storage
 * @memctxt: memory context
 * @cacheable: coherent memory or cacheable memory
 *
 * This function is a wrapper for freeing memory spanning multiple
 * pages: if the memory came from the prealloc pool, it is returned to
 * the pool; otherwise it is freed via qdf_mem_multi_pages_free().
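 *
 * A minimal pairing sketch (illustrative only; the desc type, element
 * sizing, element count and memory context below are placeholder
 * assumptions, not a prescribed usage):
 *
 *	struct qdf_mem_multi_page_t pages = { 0 };
 *
 *	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_DESC_TYPE, &pages,
 *				      sizeof(struct dp_tx_desc_s),
 *				      num_elem, 0, false);
 *	...
 *	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_DESC_TYPE, &pages,
 *				     0, false);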
4479 * 4480 * Return: None 4481 */ 4482 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 4483 enum qdf_dp_desc_type desc_type, 4484 struct qdf_mem_multi_page_t *pages, 4485 qdf_dma_context_t memctxt, 4486 bool cacheable); 4487 4488 #else 4489 static inline 4490 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4491 size_t ctxt_size) 4492 { 4493 return qdf_mem_malloc(ctxt_size); 4494 } 4495 4496 static inline 4497 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4498 void *vaddr) 4499 { 4500 qdf_mem_free(vaddr); 4501 } 4502 4503 static inline 4504 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 4505 enum qdf_dp_desc_type desc_type, 4506 struct qdf_mem_multi_page_t *pages, 4507 size_t element_size, 4508 uint32_t element_num, 4509 qdf_dma_context_t memctxt, 4510 bool cacheable) 4511 { 4512 qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size, 4513 element_num, memctxt, cacheable); 4514 } 4515 4516 static inline 4517 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 4518 enum qdf_dp_desc_type desc_type, 4519 struct qdf_mem_multi_page_t *pages, 4520 qdf_dma_context_t memctxt, 4521 bool cacheable) 4522 { 4523 qdf_mem_multi_pages_free(soc->osdev, pages, 4524 memctxt, cacheable); 4525 } 4526 #endif 4527 4528 /** 4529 * struct dp_frag_history_opaque_atomic - Opaque struct for adding a fragmented 4530 * history. 4531 * @index: atomic index 4532 * @num_entries_per_slot: Number of entries per slot 4533 * @allocated: is allocated or not 4534 * @entry: pointers to array of records 4535 */ 4536 struct dp_frag_history_opaque_atomic { 4537 qdf_atomic_t index; 4538 uint16_t num_entries_per_slot; 4539 uint16_t allocated; 4540 void *entry[0]; 4541 }; 4542 4543 static inline QDF_STATUS 4544 dp_soc_frag_history_attach(struct dp_soc *soc, void *history_hdl, 4545 uint32_t max_slots, uint32_t max_entries_per_slot, 4546 uint32_t entry_size, 4547 bool attempt_prealloc, enum dp_ctxt_type ctxt_type) 4548 { 4549 struct dp_frag_history_opaque_atomic *history = 4550 (struct dp_frag_history_opaque_atomic *)history_hdl; 4551 size_t alloc_size = max_entries_per_slot * entry_size; 4552 int i; 4553 4554 for (i = 0; i < max_slots; i++) { 4555 if (attempt_prealloc) 4556 history->entry[i] = dp_context_alloc_mem(soc, ctxt_type, 4557 alloc_size); 4558 else 4559 history->entry[i] = qdf_mem_malloc(alloc_size); 4560 4561 if (!history->entry[i]) 4562 goto exit; 4563 } 4564 4565 qdf_atomic_init(&history->index); 4566 history->allocated = 1; 4567 history->num_entries_per_slot = max_entries_per_slot; 4568 4569 return QDF_STATUS_SUCCESS; 4570 exit: 4571 for (i = i - 1; i >= 0; i--) { 4572 if (attempt_prealloc) 4573 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 4574 else 4575 qdf_mem_free(history->entry[i]); 4576 } 4577 4578 return QDF_STATUS_E_NOMEM; 4579 } 4580 4581 static inline 4582 void dp_soc_frag_history_detach(struct dp_soc *soc, 4583 void *history_hdl, uint32_t max_slots, 4584 bool attempt_prealloc, 4585 enum dp_ctxt_type ctxt_type) 4586 { 4587 struct dp_frag_history_opaque_atomic *history = 4588 (struct dp_frag_history_opaque_atomic *)history_hdl; 4589 int i; 4590 4591 for (i = 0; i < max_slots; i++) { 4592 if (attempt_prealloc) 4593 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 4594 else 4595 qdf_mem_free(history->entry[i]); 4596 } 4597 4598 history->allocated = 0; 4599 } 4600 4601 /** 4602 * dp_get_frag_hist_next_atomic_idx() - get the next entry index to record an 4603 * entry in a fragmented history with 4604 * index being atomic. 
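 *
 * For example (illustrative): with max_entries = 4096,
 * max_entries_per_slot = 1024 and slot_shift = 10, a wrapped index of
 * 2050 selects slot 2050 >> 10 = 2 and entry 2050 & 1023 = 2.
 *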
 * @curr_idx: address of the current index where the last entry was written
 * @next_idx: pointer to update the next index
 * @slot: pointer to update the history slot to be selected
 * @slot_shift: bitwise shift mask for slot (in index)
 * @max_entries_per_slot: Max number of entries in a slot of history
 * @max_entries: Total number of entries in the history (sum of all slots)
 *
 * This function assumes that the "max_entries_per_slot" and "max_entries"
 * are a power-of-2.
 *
 * Return: None
 */
static inline void
dp_get_frag_hist_next_atomic_idx(qdf_atomic_t *curr_idx, uint32_t *next_idx,
				 uint16_t *slot, uint32_t slot_shift,
				 uint32_t max_entries_per_slot,
				 uint32_t max_entries)
{
	uint32_t idx;

	idx = qdf_do_div_rem(qdf_atomic_inc_return(curr_idx), max_entries);

	*slot = idx >> slot_shift;
	*next_idx = idx & (max_entries_per_slot - 1);
}

#ifdef FEATURE_RUNTIME_PM
/**
 * dp_runtime_get() - Get dp runtime refcount
 * @soc: Datapath soc handle
 *
 * Take a dp runtime reference by incrementing an atomic variable;
 * while the refcount is nonzero, dp runtime suspend waits for the
 * pending tx to be flushed.
 *
 * Return: Current refcount
 */
static inline int32_t dp_runtime_get(struct dp_soc *soc)
{
	return qdf_atomic_inc_return(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_put() - Return dp runtime refcount
 * @soc: Datapath soc handle
 *
 * Release a dp runtime reference by decrementing the atomic variable,
 * allowing dp runtime suspend to proceed.
 *
 * Return: Current refcount
 */
static inline int32_t dp_runtime_put(struct dp_soc *soc)
{
	return qdf_atomic_dec_return(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_get_refcount() - Get dp runtime refcount
 * @soc: Datapath soc handle
 *
 * Get dp runtime refcount by returning an atomic variable
 *
 * Return: Current refcount
 */
static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc)
{
	return qdf_atomic_read(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_init() - Init DP related runtime PM clients and runtime refcount
 * @soc: Datapath soc handle
 *
 * Return: None
 */
static inline void dp_runtime_init(struct dp_soc *soc)
{
	hif_rtpm_register(HIF_RTPM_ID_DP, NULL);
	hif_rtpm_register(HIF_RTPM_ID_DP_RING_STATS, NULL);
	qdf_atomic_init(&soc->dp_runtime_refcount);
}

/**
 * dp_runtime_deinit() - Deinit DP related runtime PM clients
 *
 * Return: None
 */
static inline void dp_runtime_deinit(void)
{
	hif_rtpm_deregister(HIF_RTPM_ID_DP);
	hif_rtpm_deregister(HIF_RTPM_ID_DP_RING_STATS);
}

/**
 * dp_runtime_pm_mark_last_busy() - Mark last busy when rx path in use
 * @soc: Datapath soc handle
 *
 * Return: None
 */
static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc)
{
	soc->rx_last_busy = qdf_get_log_timestamp_usecs();

	hif_rtpm_mark_last_busy(HIF_RTPM_ID_DP);
}
#else
static inline int32_t dp_runtime_get(struct dp_soc *soc)
{
	return 0;
}

static inline int32_t dp_runtime_put(struct dp_soc *soc)
{
	return 0;
}

static inline void dp_runtime_init(struct dp_soc *soc)
{
}

static
inline void dp_runtime_deinit(void) 4726 { 4727 } 4728 4729 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 4730 { 4731 } 4732 #endif 4733 4734 static inline enum QDF_GLOBAL_MODE dp_soc_get_con_mode(struct dp_soc *soc) 4735 { 4736 if (soc->cdp_soc.ol_ops->get_con_mode) 4737 return soc->cdp_soc.ol_ops->get_con_mode(); 4738 4739 return QDF_GLOBAL_MAX_MODE; 4740 } 4741 4742 /** 4743 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats 4744 * processing 4745 * @pdev: Datapath PDEV handle 4746 * 4747 */ 4748 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev); 4749 4750 /** 4751 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats 4752 * processing 4753 * @pdev: Datapath PDEV handle 4754 * 4755 * Return: QDF_STATUS_SUCCESS: Success 4756 * QDF_STATUS_E_NOMEM: Error 4757 */ 4758 4759 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev); 4760 4761 /** 4762 * dp_peer_flush_frags() - Flush all fragments for a particular 4763 * peer 4764 * @soc_hdl: data path soc handle 4765 * @vdev_id: vdev id 4766 * @peer_mac: peer mac address 4767 * 4768 * Return: None 4769 */ 4770 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 4771 uint8_t *peer_mac); 4772 4773 /** 4774 * dp_soc_reset_mon_intr_mask() - reset mon intr mask 4775 * @soc: pointer to dp_soc handle 4776 * 4777 * Return: 4778 */ 4779 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc); 4780 4781 /** 4782 * dp_txrx_get_soc_stats() - will return cdp_soc_stats 4783 * @soc_hdl: soc handle 4784 * @soc_stats: buffer to hold the values 4785 * 4786 * Return: QDF_STATUS_SUCCESS: Success 4787 * QDF_STATUS_E_FAILURE: Error 4788 */ 4789 QDF_STATUS dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl, 4790 struct cdp_soc_stats *soc_stats); 4791 4792 /** 4793 * dp_txrx_get_peer_delay_stats() - to get peer delay stats per TIDs 4794 * @soc_hdl: soc handle 4795 * @vdev_id: id of vdev handle 4796 * @peer_mac: mac of DP_PEER handle 4797 * @delay_stats: pointer to delay stats array 4798 * 4799 * Return: QDF_STATUS_SUCCESS: Success 4800 * QDF_STATUS_E_FAILURE: Error 4801 */ 4802 QDF_STATUS 4803 dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 4804 uint8_t *peer_mac, 4805 struct cdp_delay_tid_stats *delay_stats); 4806 4807 /** 4808 * dp_txrx_get_peer_jitter_stats() - to get peer jitter stats per TIDs 4809 * @soc_hdl: soc handle 4810 * @pdev_id: id of pdev handle 4811 * @vdev_id: id of vdev handle 4812 * @peer_mac: mac of DP_PEER handle 4813 * @tid_stats: pointer to jitter stats array 4814 * 4815 * Return: QDF_STATUS_SUCCESS: Success 4816 * QDF_STATUS_E_FAILURE: Error 4817 */ 4818 QDF_STATUS 4819 dp_txrx_get_peer_jitter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4820 uint8_t vdev_id, uint8_t *peer_mac, 4821 struct cdp_peer_tid_stats *tid_stats); 4822 4823 /** 4824 * dp_peer_get_tx_capture_stats() - to get peer Tx Capture stats 4825 * @soc_hdl: soc handle 4826 * @vdev_id: id of vdev handle 4827 * @peer_mac: mac of DP_PEER handle 4828 * @stats: pointer to peer tx capture stats 4829 * 4830 * Return: QDF_STATUS_SUCCESS: Success 4831 * QDF_STATUS_E_FAILURE: Error 4832 */ 4833 QDF_STATUS 4834 dp_peer_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, 4835 uint8_t vdev_id, uint8_t *peer_mac, 4836 struct cdp_peer_tx_capture_stats *stats); 4837 4838 /** 4839 * dp_pdev_get_tx_capture_stats() - to get pdev Tx Capture stats 4840 * @soc_hdl: soc handle 4841 * @pdev_id: id of pdev handle 4842 * @stats: pointer to pdev tx capture stats 4843 * 4844 * Return: QDF_STATUS_SUCCESS: Success 
4845 * QDF_STATUS_E_FAILURE: Error 4846 */ 4847 QDF_STATUS 4848 dp_pdev_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4849 struct cdp_pdev_tx_capture_stats *stats); 4850 4851 #ifdef HW_TX_DELAY_STATS_ENABLE 4852 /** 4853 * dp_is_vdev_tx_delay_stats_enabled(): Check if tx delay stats 4854 * is enabled for vdev 4855 * @vdev: dp vdev 4856 * 4857 * Return: true if tx delay stats is enabled for vdev else false 4858 */ 4859 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 4860 { 4861 return vdev->hw_tx_delay_stats_enabled; 4862 } 4863 4864 /** 4865 * dp_pdev_print_tx_delay_stats(): Print vdev tx delay stats 4866 * for pdev 4867 * @soc: dp soc 4868 * 4869 * Return: None 4870 */ 4871 void dp_pdev_print_tx_delay_stats(struct dp_soc *soc); 4872 4873 /** 4874 * dp_pdev_clear_tx_delay_stats() - clear tx delay stats 4875 * @soc: soc handle 4876 * 4877 * Return: None 4878 */ 4879 void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc); 4880 #else 4881 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 4882 { 4883 return 0; 4884 } 4885 4886 static inline void dp_pdev_print_tx_delay_stats(struct dp_soc *soc) 4887 { 4888 } 4889 4890 static inline void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc) 4891 { 4892 } 4893 #endif 4894 4895 static inline void 4896 dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config *lro_hash) 4897 { 4898 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv4, 4899 (sizeof(lro_hash->toeplitz_hash_ipv4[0]) * 4900 LRO_IPV4_SEED_ARR_SZ)); 4901 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv6, 4902 (sizeof(lro_hash->toeplitz_hash_ipv6[0]) * 4903 LRO_IPV6_SEED_ARR_SZ)); 4904 } 4905 4906 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 4907 /** 4908 * dp_get_pdev_telemetry_stats- API to get pdev telemetry stats 4909 * @soc_hdl: soc handle 4910 * @pdev_id: id of pdev handle 4911 * @stats: pointer to pdev telemetry stats 4912 * 4913 * Return: QDF_STATUS_SUCCESS: Success 4914 * QDF_STATUS_E_FAILURE: Error 4915 */ 4916 QDF_STATUS 4917 dp_get_pdev_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4918 struct cdp_pdev_telemetry_stats *stats); 4919 4920 /** 4921 * dp_get_peer_telemetry_stats() - API to get peer telemetry stats 4922 * @soc_hdl: soc handle 4923 * @addr: peer mac 4924 * @stats: pointer to peer telemetry stats 4925 * 4926 * Return: QDF_STATUS_SUCCESS: Success 4927 * QDF_STATUS_E_FAILURE: Error 4928 */ 4929 QDF_STATUS 4930 dp_get_peer_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t *addr, 4931 struct cdp_peer_telemetry_stats *stats); 4932 4933 /** 4934 * dp_get_peer_deter_stats() - API to get peer deterministic stats 4935 * @soc_hdl: soc handle 4936 * @vdev_id: id of vdev handle 4937 * @addr: peer mac 4938 * @stats: pointer to peer deterministic stats 4939 * 4940 * Return: QDF_STATUS_SUCCESS: Success 4941 * QDF_STATUS_E_FAILURE: Error 4942 */ 4943 QDF_STATUS 4944 dp_get_peer_deter_stats(struct cdp_soc_t *soc_hdl, 4945 uint8_t vdev_id, 4946 uint8_t *addr, 4947 struct cdp_peer_deter_stats *stats); 4948 4949 /** 4950 * dp_get_pdev_deter_stats() - API to get pdev deterministic stats 4951 * @soc_hdl: soc handle 4952 * @pdev_id: id of pdev handle 4953 * @stats: pointer to pdev deterministic stats 4954 * 4955 * Return: QDF_STATUS_SUCCESS: Success 4956 * QDF_STATUS_E_FAILURE: Error 4957 */ 4958 QDF_STATUS 4959 dp_get_pdev_deter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4960 struct cdp_pdev_deter_stats *stats); 4961 4962 /** 4963 * dp_update_pdev_chan_util_stats() - API to update channel utilization stats 4964 * 
@soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @ch_util: Pointer to channel util stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_update_pdev_chan_util_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			       struct cdp_pdev_chan_util_stats *ch_util);
#endif /* WLAN_CONFIG_TELEMETRY_AGENT */

#ifdef CONNECTIVITY_PKTLOG
/**
 * dp_tx_send_pktlog() - send tx packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @tx_desc: TX software descriptor
 * @nbuf: nbuf
 * @status: status of tx packet
 *
 * This function is used to send tx packet for logging
 *
 * Return: None
 *
 */
static inline
void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_tx_packetdump_cb;

	if (qdf_unlikely(packetdump_cb) &&
	    dp_tx_frm_std == tx_desc->frm_type) {
		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
			      tx_desc->vdev_id, nbuf, status, QDF_TX_DATA_PKT);
	}
}

/**
 * dp_rx_send_pktlog() - send rx packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @nbuf: nbuf
 * @status: status of rx packet
 *
 * This function is used to send rx packet for logging
 *
 * Return: None
 *
 */
static inline
void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb;

	if (qdf_unlikely(packetdump_cb)) {
		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
			      QDF_NBUF_CB_RX_VDEV_ID(nbuf),
			      nbuf, status, QDF_RX_DATA_PKT);
	}
}

/**
 * dp_rx_err_send_pktlog() - send rx error packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @mpdu_desc_info: MPDU descriptor info
 * @nbuf: nbuf
 * @status: status of rx packet
 * @set_pktlen: whether to set packet length
 *
 * This API should only be called when we have not removed
 * Rx TLV from head, and head is pointing to rx_tlv
 *
 * This function is used to send rx packet from error path
 * for logging for which rx packet tlv is not removed.
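 *
 * For example (illustrative): for BAR or fragment MPDUs only the Rx
 * packet TLV header (soc->rx_pkt_tlv_size) is skipped before invoking
 * the packet-dump callback; for other MSDUs the L3 header padding is
 * skipped as well, and the original head is restored afterwards.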
5043 * 5044 * Return: None 5045 * 5046 */ 5047 static inline 5048 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5049 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 5050 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status, 5051 bool set_pktlen) 5052 { 5053 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb; 5054 qdf_size_t skip_size; 5055 uint16_t msdu_len, nbuf_len; 5056 uint8_t *rx_tlv_hdr; 5057 struct hal_rx_msdu_metadata msdu_metadata; 5058 uint16_t buf_size; 5059 5060 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx); 5061 5062 if (qdf_unlikely(packetdump_cb)) { 5063 rx_tlv_hdr = qdf_nbuf_data(nbuf); 5064 nbuf_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, 5065 rx_tlv_hdr); 5066 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, 5067 &msdu_metadata); 5068 5069 if (mpdu_desc_info->bar_frame || 5070 (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_FRAGMENT)) 5071 skip_size = soc->rx_pkt_tlv_size; 5072 else 5073 skip_size = soc->rx_pkt_tlv_size + 5074 msdu_metadata.l3_hdr_pad; 5075 5076 if (set_pktlen) { 5077 msdu_len = nbuf_len + skip_size; 5078 qdf_nbuf_set_pktlen(nbuf, qdf_min(msdu_len, buf_size)); 5079 } 5080 5081 qdf_nbuf_pull_head(nbuf, skip_size); 5082 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 5083 QDF_NBUF_CB_RX_VDEV_ID(nbuf), 5084 nbuf, status, QDF_RX_DATA_PKT); 5085 qdf_nbuf_push_head(nbuf, skip_size); 5086 } 5087 } 5088 5089 #else 5090 static inline 5091 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5092 struct dp_tx_desc_s *tx_desc, 5093 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5094 { 5095 } 5096 5097 static inline 5098 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5099 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5100 { 5101 } 5102 5103 static inline 5104 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5105 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 5106 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status, 5107 bool set_pktlen) 5108 { 5109 } 5110 #endif 5111 5112 /** 5113 * dp_pdev_update_fast_rx_flag() - Update Fast rx flag for a PDEV 5114 * @soc : Data path soc handle 5115 * @pdev : PDEV handle 5116 * 5117 * Return: None 5118 */ 5119 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev); 5120 5121 #ifdef FEATURE_DIRECT_LINK 5122 /** 5123 * dp_setup_direct_link_refill_ring(): Setup direct link refill ring for pdev 5124 * @soc_hdl: DP SOC handle 5125 * @pdev_id: pdev id 5126 * 5127 * Return: Handle to SRNG 5128 */ 5129 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5130 uint8_t pdev_id); 5131 5132 /** 5133 * dp_destroy_direct_link_refill_ring(): Destroy direct link refill ring for 5134 * pdev 5135 * @soc_hdl: DP SOC handle 5136 * @pdev_id: pdev id 5137 * 5138 * Return: None 5139 */ 5140 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5141 uint8_t pdev_id); 5142 #else 5143 static inline 5144 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5145 uint8_t pdev_id) 5146 { 5147 return NULL; 5148 } 5149 5150 static inline 5151 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5152 uint8_t pdev_id) 5153 { 5154 } 5155 #endif 5156 5157 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY 5158 static inline 5159 void dp_cfg_event_record(struct dp_soc *soc, 5160 enum dp_cfg_event_type event, 5161 union dp_cfg_event_desc *cfg_event_desc) 5162 { 5163 struct dp_cfg_event_history *cfg_event_history = 5164 &soc->cfg_event_history; 5165 struct dp_cfg_event *entry; 5166 uint32_t idx; 
5167 uint16_t slot; 5168 5169 dp_get_frag_hist_next_atomic_idx(&cfg_event_history->index, &idx, 5170 &slot, 5171 DP_CFG_EVT_HIST_SLOT_SHIFT, 5172 DP_CFG_EVT_HIST_PER_SLOT_MAX, 5173 DP_CFG_EVT_HISTORY_SIZE); 5174 5175 entry = &cfg_event_history->entry[slot][idx]; 5176 5177 entry->timestamp = qdf_get_log_timestamp(); 5178 entry->type = event; 5179 qdf_mem_copy(&entry->event_desc, cfg_event_desc, 5180 sizeof(entry->event_desc)); 5181 } 5182 5183 static inline void 5184 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5185 struct dp_vdev *vdev) 5186 { 5187 union dp_cfg_event_desc cfg_evt_desc = {0}; 5188 struct dp_vdev_attach_detach_desc *vdev_evt = 5189 &cfg_evt_desc.vdev_evt; 5190 5191 if (qdf_unlikely(event != DP_CFG_EVENT_VDEV_ATTACH && 5192 event != DP_CFG_EVENT_VDEV_UNREF_DEL && 5193 event != DP_CFG_EVENT_VDEV_DETACH)) { 5194 qdf_assert_always(0); 5195 return; 5196 } 5197 5198 vdev_evt->vdev = vdev; 5199 vdev_evt->vdev_id = vdev->vdev_id; 5200 vdev_evt->ref_count = qdf_atomic_read(&vdev->ref_cnt); 5201 vdev_evt->mac_addr = vdev->mac_addr; 5202 5203 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5204 } 5205 5206 static inline void 5207 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5208 struct dp_peer *peer, struct dp_vdev *vdev, 5209 uint8_t is_reuse) 5210 { 5211 union dp_cfg_event_desc cfg_evt_desc = {0}; 5212 struct dp_peer_cmn_ops_desc *peer_evt = &cfg_evt_desc.peer_cmn_evt; 5213 5214 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_CREATE && 5215 event != DP_CFG_EVENT_PEER_DELETE && 5216 event != DP_CFG_EVENT_PEER_UNREF_DEL)) { 5217 qdf_assert_always(0); 5218 return; 5219 } 5220 5221 peer_evt->peer = peer; 5222 peer_evt->vdev = vdev; 5223 peer_evt->vdev_id = vdev->vdev_id; 5224 peer_evt->is_reuse = is_reuse; 5225 peer_evt->peer_ref_count = qdf_atomic_read(&peer->ref_cnt); 5226 peer_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt); 5227 peer_evt->mac_addr = peer->mac_addr; 5228 peer_evt->vdev_mac_addr = vdev->mac_addr; 5229 5230 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5231 } 5232 5233 static inline void 5234 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc, 5235 enum dp_cfg_event_type event, 5236 struct dp_peer *mld_peer, 5237 struct dp_peer *link_peer, 5238 uint8_t idx, uint8_t result) 5239 { 5240 union dp_cfg_event_desc cfg_evt_desc = {0}; 5241 struct dp_mlo_add_del_link_desc *mlo_link_delink_evt = 5242 &cfg_evt_desc.mlo_link_delink_evt; 5243 5244 if (qdf_unlikely(event != DP_CFG_EVENT_MLO_ADD_LINK && 5245 event != DP_CFG_EVENT_MLO_DEL_LINK)) { 5246 qdf_assert_always(0); 5247 return; 5248 } 5249 5250 mlo_link_delink_evt->link_peer = link_peer; 5251 mlo_link_delink_evt->mld_peer = mld_peer; 5252 mlo_link_delink_evt->link_mac_addr = link_peer->mac_addr; 5253 mlo_link_delink_evt->mld_mac_addr = mld_peer->mac_addr; 5254 mlo_link_delink_evt->num_links = mld_peer->num_links; 5255 mlo_link_delink_evt->action_result = result; 5256 mlo_link_delink_evt->idx = idx; 5257 5258 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5259 } 5260 5261 static inline void 5262 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc, 5263 struct dp_peer *mld_peer, 5264 struct dp_vdev *prev_vdev, 5265 struct dp_vdev *new_vdev) 5266 { 5267 union dp_cfg_event_desc cfg_evt_desc = {0}; 5268 struct dp_mlo_setup_vdev_update_desc *vdev_update_evt = 5269 &cfg_evt_desc.mlo_setup_vdev_update; 5270 5271 vdev_update_evt->mld_peer = mld_peer; 5272 vdev_update_evt->prev_vdev = prev_vdev; 5273 vdev_update_evt->new_vdev = new_vdev; 5274 
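	/* Unlike the other record helpers above, no event-type check is
	 * needed here: this helper only ever records
	 * DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE.
	 */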
5275 dp_cfg_event_record(soc, DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE, 5276 &cfg_evt_desc); 5277 } 5278 5279 static inline void 5280 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc, 5281 enum dp_cfg_event_type event, 5282 struct dp_peer *peer, 5283 uint8_t *mac_addr, 5284 uint8_t is_ml_peer, 5285 uint16_t peer_id, uint16_t ml_peer_id, 5286 uint16_t hw_peer_id, uint8_t vdev_id) 5287 { 5288 union dp_cfg_event_desc cfg_evt_desc = {0}; 5289 struct dp_rx_peer_map_unmap_desc *peer_map_unmap_evt = 5290 &cfg_evt_desc.peer_map_unmap_evt; 5291 5292 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_MAP && 5293 event != DP_CFG_EVENT_PEER_UNMAP && 5294 event != DP_CFG_EVENT_MLO_PEER_MAP && 5295 event != DP_CFG_EVENT_MLO_PEER_UNMAP)) { 5296 qdf_assert_always(0); 5297 return; 5298 } 5299 5300 peer_map_unmap_evt->peer_id = peer_id; 5301 peer_map_unmap_evt->ml_peer_id = ml_peer_id; 5302 peer_map_unmap_evt->hw_peer_id = hw_peer_id; 5303 peer_map_unmap_evt->vdev_id = vdev_id; 5304 /* Peer may be NULL at times, but its not an issue. */ 5305 peer_map_unmap_evt->peer = peer; 5306 peer_map_unmap_evt->is_ml_peer = is_ml_peer; 5307 qdf_mem_copy(&peer_map_unmap_evt->mac_addr.raw, mac_addr, 5308 QDF_MAC_ADDR_SIZE); 5309 5310 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5311 } 5312 5313 static inline void 5314 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc, 5315 enum dp_cfg_event_type event, 5316 struct dp_peer *peer, 5317 struct dp_vdev *vdev, 5318 uint8_t vdev_id, 5319 struct cdp_peer_setup_info *peer_setup_info) 5320 { 5321 union dp_cfg_event_desc cfg_evt_desc = {0}; 5322 struct dp_peer_setup_desc *peer_setup_evt = 5323 &cfg_evt_desc.peer_setup_evt; 5324 5325 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_SETUP && 5326 event != DP_CFG_EVENT_MLO_SETUP)) { 5327 qdf_assert_always(0); 5328 return; 5329 } 5330 5331 peer_setup_evt->peer = peer; 5332 peer_setup_evt->vdev = vdev; 5333 if (vdev) 5334 peer_setup_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt); 5335 peer_setup_evt->mac_addr = peer->mac_addr; 5336 peer_setup_evt->vdev_id = vdev_id; 5337 if (peer_setup_info) { 5338 peer_setup_evt->is_first_link = peer_setup_info->is_first_link; 5339 peer_setup_evt->is_primary_link = peer_setup_info->is_primary_link; 5340 qdf_mem_copy(peer_setup_evt->mld_mac_addr.raw, 5341 peer_setup_info->mld_peer_mac, 5342 QDF_MAC_ADDR_SIZE); 5343 } 5344 5345 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5346 } 5347 #else 5348 5349 static inline void 5350 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5351 struct dp_vdev *vdev) 5352 { 5353 } 5354 5355 static inline void 5356 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5357 struct dp_peer *peer, struct dp_vdev *vdev, 5358 uint8_t is_reuse) 5359 { 5360 } 5361 5362 static inline void 5363 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc, 5364 enum dp_cfg_event_type event, 5365 struct dp_peer *mld_peer, 5366 struct dp_peer *link_peer, 5367 uint8_t idx, uint8_t result) 5368 { 5369 } 5370 5371 static inline void 5372 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc, 5373 struct dp_peer *mld_peer, 5374 struct dp_vdev *prev_vdev, 5375 struct dp_vdev *new_vdev) 5376 { 5377 } 5378 5379 static inline void 5380 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc, 5381 enum dp_cfg_event_type event, 5382 struct dp_peer *peer, 5383 uint8_t *mac_addr, 5384 uint8_t is_ml_peer, 5385 uint16_t peer_id, uint16_t ml_peer_id, 5386 uint16_t hw_peer_id, uint8_t vdev_id) 5387 { 5388 } 5389 5390 static 
inline void 5391 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc, 5392 enum dp_cfg_event_type event, 5393 struct dp_peer *peer, 5394 struct dp_vdev *vdev, 5395 uint8_t vdev_id, 5396 struct cdp_peer_setup_info *peer_setup_info) 5397 { 5398 } 5399 #endif 5400 5401 #ifndef WLAN_SOFTUMAC_SUPPORT 5402 /** 5403 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts 5404 * @txrx_soc: DP SOC handle 5405 * 5406 * Return: none 5407 */ 5408 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc); 5409 #endif 5410 5411 /** 5412 * dp_get_peer_stats()- Get peer stats 5413 * @peer: Datapath peer 5414 * @peer_stats: buffer for peer stats 5415 * 5416 * Return: none 5417 */ 5418 void dp_get_peer_stats(struct dp_peer *peer, 5419 struct cdp_peer_stats *peer_stats); 5420 5421 /** 5422 * dp_get_per_link_peer_stats()- Get per link peer stats 5423 * @peer: Datapath peer 5424 * @peer_stats: buffer for peer stats 5425 * @peer_type: Peer type 5426 * @num_link: Number of ML links 5427 * 5428 * Return: status success/failure 5429 */ 5430 QDF_STATUS dp_get_per_link_peer_stats(struct dp_peer *peer, 5431 struct cdp_peer_stats *peer_stats, 5432 enum cdp_peer_type peer_type, 5433 uint8_t num_link); 5434 /** 5435 * dp_get_peer_hw_link_id() - get peer hardware link id 5436 * @soc: soc handle 5437 * @pdev: data path pdev 5438 * 5439 * Return: link_id 5440 */ 5441 static inline int 5442 dp_get_peer_hw_link_id(struct dp_soc *soc, 5443 struct dp_pdev *pdev) 5444 { 5445 if (wlan_cfg_is_peer_link_stats_enabled(soc->wlan_cfg_ctx)) 5446 return ((soc->arch_ops.get_hw_link_id(pdev)) + 1); 5447 5448 return 0; 5449 } 5450 5451 #ifdef QCA_MULTIPASS_SUPPORT 5452 /** 5453 * dp_tx_remove_vlan_tag() - Remove 4 bytes of vlan tag 5454 * @vdev: DP vdev handle 5455 * @nbuf: network buffer 5456 * 5457 * Return: void 5458 */ 5459 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf); 5460 #endif 5461 5462 /** 5463 * dp_print_per_link_stats() - Print per link peer stats. 5464 * @soc_hdl: soc handle. 5465 * @vdev_id: vdev_id. 5466 * 5467 * Return: None. 5468 */ 5469 void dp_print_per_link_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id); 5470 5471 /** 5472 * dp_get_ring_stats_from_hal(): get hal level ring pointer values 5473 * @soc: DP_SOC handle 5474 * @srng: DP_SRNG handle 5475 * @ring_type: srng src/dst ring 5476 * @_tailp: pointer to tail of ring 5477 * @_headp: pointer to head of ring 5478 * @_hw_headp: pointer to head of ring in HW 5479 * @_hw_tailp: pointer to tail of ring in HW 5480 * 5481 * Return: void 5482 */ 5483 static inline void 5484 dp_get_ring_stats_from_hal(struct dp_soc *soc, struct dp_srng *srng, 5485 enum hal_ring_type ring_type, 5486 uint32_t *_tailp, uint32_t *_headp, 5487 int32_t *_hw_headp, int32_t *_hw_tailp) 5488 { 5489 uint32_t tailp; 5490 uint32_t headp; 5491 int32_t hw_headp = -1; 5492 int32_t hw_tailp = -1; 5493 struct hal_soc *hal_soc; 5494 5495 if (soc && srng && srng->hal_srng) { 5496 hal_soc = (struct hal_soc *)soc->hal_soc; 5497 hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp); 5498 *_headp = headp; 5499 *_tailp = tailp; 5500 5501 hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_headp, 5502 &hw_tailp, ring_type); 5503 *_hw_headp = hw_headp; 5504 *_hw_tailp = hw_tailp; 5505 } 5506 } 5507 5508 #endif /* #ifndef _DP_INTERNAL_H_ */ 5509