/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"
#include "dp_htt.h"
#include "dp_rx_tid.h"

#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

#define DP_PEER_WDS_COUNT_INVALID UINT_MAX

#define DP_BLOCKMEM_SIZE 4096
#define WBM2_SW_PPE_REL_RING_ID 6
#define WBM2_SW_PPE_REL_MAP_ID 11
#define DP_TX_PPEDS_POOL_ID 0xF

/* Alignment for consistent memory for DP rings */
#define DP_RING_BASE_ALIGN 32

#define DP_RSSI_INVAL 0x80
#define DP_RSSI_AVG_WEIGHT 2
/*
 * Formula to derive avg_rssi is taken from wifi2.o firmware
 */
#define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))

/* Macro for NYSM value received in VHT TLV */
#define VHT_SGI_NYSM 3

#define INVALID_WBM_RING_NUM 0xF

#ifdef FEATURE_DIRECT_LINK
#define DIRECT_LINK_REFILL_RING_ENTRIES 64
#ifdef IPA_OFFLOAD
#ifdef IPA_WDI3_VLAN_SUPPORT
#define DIRECT_LINK_REFILL_RING_IDX 4
#else
#define DIRECT_LINK_REFILL_RING_IDX 3
#endif
#else
#define DIRECT_LINK_REFILL_RING_IDX 2
#endif
#endif

#define DP_MAX_VLAN_IDS 4096
#define DP_VLAN_UNTAGGED 0
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2

/**
 * struct htt_dbgfs_cfg - structure to maintain required htt data
 * @msg_word: htt msg sent to upper layer
 * @m: qdf debugfs file pointer
 */
struct htt_dbgfs_cfg {
	uint32_t *msg_word;
	qdf_debugfs_file_t m;
};

/* Cookie MSB bits assigned to the different use cases.
 * Note: callers can't use the last 3 bits, as they are reserved for the
 * pdev_id. More bits will have to be set aside if the number of pdevs
 * ever exceeds 3.
 */
/* Reserved for the default case */
#define DBG_STATS_COOKIE_DEFAULT 0x0

/* Reserved for DP Stats: 3rd bit */
#define DBG_STATS_COOKIE_DP_STATS BIT(3)

/* Reserved for HTT Stats debugfs support: 4th bit */
#define DBG_STATS_COOKIE_HTT_DBGFS BIT(4)

/* Reserved for sysfs stats support: 5th bit */
#define DBG_SYSFS_STATS_COOKIE BIT(5)

/* Reserved for HTT Stats OBSS PD support: 6th bit */
#define DBG_STATS_COOKIE_HTT_OBSS BIT(6)

/*
 * Bitmap of HTT PPDU TLV types for Default mode
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/* PPDU STATS CFG */
#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS \
	(HTT_PPDU_DEFAULT_TLV_BITMAP | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER \
	(HTT_PPDU_DEFAULT_TLV_BITMAP | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR \
	((1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
				   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)

/*
 * Bitmap of HTT PPDU delayed ba TLV types for Default mode
 */
#define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Delayed BA
 */
#define HTT_PPDU_STATUS_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))

/*
 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256
177 */ 178 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \ 179 ((1 << HTT_PPDU_STATS_COMMON_TLV) | \ 180 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 181 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 182 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 183 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 184 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ 185 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \ 186 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV)) 187 188 static const enum cdp_packet_type hal_2_dp_pkt_type_map[HAL_DOT11_MAX] = { 189 [HAL_DOT11A] = DOT11_A, 190 [HAL_DOT11B] = DOT11_B, 191 [HAL_DOT11N_MM] = DOT11_N, 192 [HAL_DOT11AC] = DOT11_AC, 193 [HAL_DOT11AX] = DOT11_AX, 194 [HAL_DOT11BA] = DOT11_MAX, 195 #ifdef WLAN_FEATURE_11BE 196 [HAL_DOT11BE] = DOT11_BE, 197 #else 198 [HAL_DOT11BE] = DOT11_MAX, 199 #endif 200 [HAL_DOT11AZ] = DOT11_MAX, 201 [HAL_DOT11N_GF] = DOT11_MAX, 202 }; 203 204 #ifdef WLAN_FEATURE_11BE 205 /** 206 * dp_get_mcs_array_index_by_pkt_type_mcs() - get the destination mcs index 207 * in array 208 * @pkt_type: host SW pkt type 209 * @mcs: mcs value for TX/RX rate 210 * 211 * Return: succeeded - valid index in mcs array 212 * fail - same value as MCS_MAX 213 */ 214 static inline uint8_t 215 dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs) 216 { 217 uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX; 218 219 switch (pkt_type) { 220 case DOT11_A: 221 dst_mcs_idx = 222 mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs; 223 break; 224 case DOT11_B: 225 dst_mcs_idx = 226 mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs; 227 break; 228 case DOT11_N: 229 dst_mcs_idx = 230 mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs; 231 break; 232 case DOT11_AC: 233 dst_mcs_idx = 234 mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs; 235 break; 236 case DOT11_AX: 237 dst_mcs_idx = 238 mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs; 239 break; 240 case DOT11_BE: 241 dst_mcs_idx = 242 mcs >= MAX_MCS_11BE ? (MAX_MCS - 1) : mcs; 243 break; 244 default: 245 break; 246 } 247 248 return dst_mcs_idx; 249 } 250 #else 251 static inline uint8_t 252 dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs) 253 { 254 uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX; 255 256 switch (pkt_type) { 257 case DOT11_A: 258 dst_mcs_idx = 259 mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs; 260 break; 261 case DOT11_B: 262 dst_mcs_idx = 263 mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs; 264 break; 265 case DOT11_N: 266 dst_mcs_idx = 267 mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs; 268 break; 269 case DOT11_AC: 270 dst_mcs_idx = 271 mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs; 272 break; 273 case DOT11_AX: 274 dst_mcs_idx = 275 mcs >= MAX_MCS_11AX ? 
(MAX_MCS - 1) : mcs; 276 break; 277 default: 278 break; 279 } 280 281 return dst_mcs_idx; 282 } 283 #endif 284 285 #ifdef WIFI_MONITOR_SUPPORT 286 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc); 287 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc); 288 #else 289 static inline 290 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc) 291 { 292 return QDF_STATUS_SUCCESS; 293 } 294 295 static inline 296 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc) 297 { 298 return QDF_STATUS_SUCCESS; 299 } 300 #endif 301 302 /** 303 * dp_rx_err_match_dhost() - function to check whether dest-mac is correct 304 * @eh: Ethernet header of incoming packet 305 * @vdev: dp_vdev object of the VAP on which this data packet is received 306 * 307 * Return: 1 if the destination mac is correct, 308 * 0 if this frame is not correctly destined to this VAP/MLD 309 */ 310 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev); 311 312 #ifdef MONITOR_MODULARIZED_ENABLE 313 static inline bool dp_monitor_modularized_enable(void) 314 { 315 return TRUE; 316 } 317 318 static inline QDF_STATUS 319 dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; } 320 321 static inline QDF_STATUS 322 dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; } 323 #else 324 static inline bool dp_monitor_modularized_enable(void) 325 { 326 return FALSE; 327 } 328 329 static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc) 330 { 331 return dp_mon_soc_attach(soc); 332 } 333 334 static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc) 335 { 336 return dp_mon_soc_detach(soc); 337 } 338 #endif 339 340 #ifndef WIFI_MONITOR_SUPPORT 341 #define MON_BUF_MIN_ENTRIES 64 342 343 static inline QDF_STATUS dp_monitor_pdev_attach(struct dp_pdev *pdev) 344 { 345 return QDF_STATUS_SUCCESS; 346 } 347 348 static inline QDF_STATUS dp_monitor_pdev_detach(struct dp_pdev *pdev) 349 { 350 return QDF_STATUS_SUCCESS; 351 } 352 353 static inline QDF_STATUS dp_monitor_vdev_attach(struct dp_vdev *vdev) 354 { 355 return QDF_STATUS_E_FAILURE; 356 } 357 358 static inline QDF_STATUS dp_monitor_vdev_detach(struct dp_vdev *vdev) 359 { 360 return QDF_STATUS_E_FAILURE; 361 } 362 363 static inline QDF_STATUS dp_monitor_peer_attach(struct dp_soc *soc, 364 struct dp_peer *peer) 365 { 366 return QDF_STATUS_SUCCESS; 367 } 368 369 static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc, 370 struct dp_peer *peer) 371 { 372 return QDF_STATUS_E_FAILURE; 373 } 374 375 static inline struct cdp_peer_rate_stats_ctx* 376 dp_monitor_peer_get_peerstats_ctx(struct dp_soc *soc, struct dp_peer *peer) 377 { 378 return NULL; 379 } 380 381 static inline 382 void dp_monitor_peer_reset_stats(struct dp_soc *soc, struct dp_peer *peer) 383 { 384 } 385 386 static inline 387 void dp_monitor_peer_get_stats(struct dp_soc *soc, struct dp_peer *peer, 388 void *arg, enum cdp_stat_update_type type) 389 { 390 } 391 392 static inline 393 void dp_monitor_invalid_peer_update_pdev_stats(struct dp_soc *soc, 394 struct dp_pdev *pdev) 395 { 396 } 397 398 static inline 399 QDF_STATUS dp_monitor_peer_get_stats_param(struct dp_soc *soc, 400 struct dp_peer *peer, 401 enum cdp_peer_stats_type type, 402 cdp_peer_stats_param_t *buf) 403 { 404 return QDF_STATUS_E_FAILURE; 405 } 406 407 static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev) 408 { 409 return QDF_STATUS_SUCCESS; 410 } 411 412 static inline QDF_STATUS dp_monitor_pdev_deinit(struct dp_pdev *pdev) 413 { 414 return QDF_STATUS_SUCCESS; 415 } 416 417 static inline 
QDF_STATUS dp_monitor_soc_cfg_init(struct dp_soc *soc) 418 { 419 return QDF_STATUS_SUCCESS; 420 } 421 422 static inline QDF_STATUS dp_monitor_config_debug_sniffer(struct dp_pdev *pdev, 423 int val) 424 { 425 return QDF_STATUS_E_FAILURE; 426 } 427 428 static inline void dp_monitor_flush_rings(struct dp_soc *soc) 429 { 430 } 431 432 static inline QDF_STATUS dp_monitor_htt_srng_setup(struct dp_soc *soc, 433 struct dp_pdev *pdev, 434 int mac_id, 435 int mac_for_pdev) 436 { 437 return QDF_STATUS_SUCCESS; 438 } 439 440 static inline void dp_monitor_service_mon_rings(struct dp_soc *soc, 441 uint32_t quota) 442 { 443 } 444 445 static inline 446 uint32_t dp_monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx, 447 uint32_t mac_id, uint32_t quota) 448 { 449 return 0; 450 } 451 452 static inline 453 uint32_t dp_monitor_drop_packets_for_mac(struct dp_pdev *pdev, 454 uint32_t mac_id, uint32_t quota) 455 { 456 return 0; 457 } 458 459 static inline void dp_monitor_peer_tx_init(struct dp_pdev *pdev, 460 struct dp_peer *peer) 461 { 462 } 463 464 static inline void dp_monitor_peer_tx_cleanup(struct dp_vdev *vdev, 465 struct dp_peer *peer) 466 { 467 } 468 469 static inline 470 void dp_monitor_peer_tid_peer_id_update(struct dp_soc *soc, 471 struct dp_peer *peer, 472 uint16_t peer_id) 473 { 474 } 475 476 static inline void dp_monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev) 477 { 478 } 479 480 static inline void dp_monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev) 481 { 482 } 483 484 static inline 485 QDF_STATUS dp_monitor_tx_capture_debugfs_init(struct dp_pdev *pdev) 486 { 487 return QDF_STATUS_SUCCESS; 488 } 489 490 static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev, 491 struct dp_peer *peer) 492 { 493 } 494 495 static inline 496 QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc, 497 struct dp_tx_desc_s *desc, 498 struct hal_tx_completion_status *ts, 499 uint16_t peer_id) 500 { 501 return QDF_STATUS_E_FAILURE; 502 } 503 504 static inline 505 QDF_STATUS monitor_update_msdu_to_list(struct dp_soc *soc, 506 struct dp_pdev *pdev, 507 struct dp_peer *peer, 508 struct hal_tx_completion_status *ts, 509 qdf_nbuf_t netbuf) 510 { 511 return QDF_STATUS_E_FAILURE; 512 } 513 514 static inline bool dp_monitor_ppdu_stats_ind_handler(struct htt_soc *soc, 515 uint32_t *msg_word, 516 qdf_nbuf_t htt_t2h_msg) 517 { 518 return true; 519 } 520 521 static inline QDF_STATUS dp_monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev) 522 { 523 return QDF_STATUS_SUCCESS; 524 } 525 526 static inline void dp_monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev) 527 { 528 } 529 530 static inline void dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev) 531 { 532 } 533 534 static inline QDF_STATUS dp_monitor_config_enh_tx_capture(struct dp_pdev *pdev, 535 uint32_t val) 536 { 537 return QDF_STATUS_E_INVAL; 538 } 539 540 static inline QDF_STATUS dp_monitor_tx_peer_filter(struct dp_pdev *pdev, 541 struct dp_peer *peer, 542 uint8_t is_tx_pkt_cap_enable, 543 uint8_t *peer_mac) 544 { 545 return QDF_STATUS_E_INVAL; 546 } 547 548 static inline QDF_STATUS dp_monitor_config_enh_rx_capture(struct dp_pdev *pdev, 549 uint32_t val) 550 { 551 return QDF_STATUS_E_INVAL; 552 } 553 554 static inline 555 QDF_STATUS dp_monitor_set_bpr_enable(struct dp_pdev *pdev, uint32_t val) 556 { 557 return QDF_STATUS_E_FAILURE; 558 } 559 560 static inline 561 int dp_monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val) 562 { 563 return 0; 564 } 565 566 static inline 567 void dp_monitor_set_atf_stats_enable(struct 
dp_pdev *pdev, bool value) 568 { 569 } 570 571 static inline 572 void dp_monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) 573 { 574 } 575 576 static inline 577 bool dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) 578 { 579 return false; 580 } 581 582 static inline 583 bool dp_monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) 584 { 585 return false; 586 } 587 588 static inline 589 bool dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) 590 { 591 return false; 592 } 593 594 static inline 595 int dp_monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, 596 bool enable) 597 { 598 return 0; 599 } 600 601 static inline void dp_monitor_pktlogmod_exit(struct dp_pdev *pdev) 602 { 603 } 604 605 static inline 606 QDF_STATUS dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev) 607 { 608 return QDF_STATUS_E_FAILURE; 609 } 610 611 static inline 612 void dp_monitor_neighbour_peers_detach(struct dp_pdev *pdev) 613 { 614 } 615 616 static inline QDF_STATUS dp_monitor_filter_neighbour_peer(struct dp_pdev *pdev, 617 uint8_t *rx_pkt_hdr) 618 { 619 return QDF_STATUS_E_FAILURE; 620 } 621 622 static inline void dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev) 623 { 624 } 625 626 static inline 627 void dp_monitor_reap_timer_init(struct dp_soc *soc) 628 { 629 } 630 631 static inline 632 void dp_monitor_reap_timer_deinit(struct dp_soc *soc) 633 { 634 } 635 636 static inline 637 bool dp_monitor_reap_timer_start(struct dp_soc *soc, 638 enum cdp_mon_reap_source source) 639 { 640 return false; 641 } 642 643 static inline 644 bool dp_monitor_reap_timer_stop(struct dp_soc *soc, 645 enum cdp_mon_reap_source source) 646 { 647 return false; 648 } 649 650 static inline void 651 dp_monitor_reap_timer_suspend(struct dp_soc *soc) 652 { 653 } 654 655 static inline 656 void dp_monitor_vdev_timer_init(struct dp_soc *soc) 657 { 658 } 659 660 static inline 661 void dp_monitor_vdev_timer_deinit(struct dp_soc *soc) 662 { 663 } 664 665 static inline 666 void dp_monitor_vdev_timer_start(struct dp_soc *soc) 667 { 668 } 669 670 static inline 671 bool dp_monitor_vdev_timer_stop(struct dp_soc *soc) 672 { 673 return false; 674 } 675 676 static inline struct qdf_mem_multi_page_t* 677 dp_monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id) 678 { 679 return NULL; 680 } 681 682 static inline uint32_t * 683 dp_monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id) 684 { 685 return NULL; 686 } 687 688 static inline QDF_STATUS dp_monitor_drop_inv_peer_pkts(struct dp_vdev *vdev) 689 { 690 return QDF_STATUS_E_FAILURE; 691 } 692 693 static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev) 694 { 695 return false; 696 } 697 698 static inline void dp_monitor_vdev_register_osif(struct dp_vdev *vdev, 699 struct ol_txrx_ops *txrx_ops) 700 { 701 } 702 703 static inline bool dp_monitor_is_vdev_timer_running(struct dp_soc *soc) 704 { 705 return false; 706 } 707 708 static inline 709 void dp_monitor_pdev_set_mon_vdev(struct dp_vdev *vdev) 710 { 711 } 712 713 static inline void dp_monitor_vdev_delete(struct dp_soc *soc, 714 struct dp_vdev *vdev) 715 { 716 } 717 718 static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer) 719 { 720 } 721 722 static inline void dp_monitor_neighbour_peer_add_ast(struct dp_pdev *pdev, 723 struct dp_peer *ta_peer, 724 uint8_t *mac_addr, 725 qdf_nbuf_t nbuf, 726 uint32_t flags) 727 { 728 } 729 730 static inline void 731 dp_monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band 
chan_band) 732 { 733 } 734 735 static inline void 736 dp_monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq) 737 { 738 } 739 740 static inline void dp_monitor_set_chan_num(struct dp_pdev *pdev, int chan_num) 741 { 742 } 743 744 static inline bool dp_monitor_is_enable_mcopy_mode(struct dp_pdev *pdev) 745 { 746 return false; 747 } 748 749 static inline 750 void dp_monitor_neighbour_peer_list_remove(struct dp_pdev *pdev, 751 struct dp_vdev *vdev, 752 struct dp_neighbour_peer *peer) 753 { 754 } 755 756 static inline bool dp_monitor_is_chan_band_known(struct dp_pdev *pdev) 757 { 758 return false; 759 } 760 761 static inline enum reg_wifi_band 762 dp_monitor_get_chan_band(struct dp_pdev *pdev) 763 { 764 return 0; 765 } 766 767 static inline int 768 dp_monitor_get_chan_num(struct dp_pdev *pdev) 769 { 770 return 0; 771 } 772 773 static inline qdf_freq_t 774 dp_monitor_get_chan_freq(struct dp_pdev *pdev) 775 { 776 return 0; 777 } 778 779 static inline void dp_monitor_get_mpdu_status(struct dp_pdev *pdev, 780 struct dp_soc *soc, 781 uint8_t *rx_tlv_hdr) 782 { 783 } 784 785 static inline void dp_monitor_print_tx_stats(struct dp_pdev *pdev) 786 { 787 } 788 789 static inline 790 QDF_STATUS dp_monitor_mcopy_check_deliver(struct dp_pdev *pdev, 791 uint16_t peer_id, uint32_t ppdu_id, 792 uint8_t first_msdu) 793 { 794 return QDF_STATUS_SUCCESS; 795 } 796 797 static inline bool dp_monitor_is_enable_tx_sniffer(struct dp_pdev *pdev) 798 { 799 return false; 800 } 801 802 static inline struct dp_vdev* 803 dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev) 804 { 805 return NULL; 806 } 807 808 static inline QDF_STATUS dp_monitor_check_com_info_ppdu_id(struct dp_pdev *pdev, 809 void *rx_desc) 810 { 811 return QDF_STATUS_E_FAILURE; 812 } 813 814 static inline struct mon_rx_status* 815 dp_monitor_get_rx_status(struct dp_pdev *pdev) 816 { 817 return NULL; 818 } 819 820 static inline 821 void dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev *pdev, bool val) 822 { 823 } 824 825 static inline 826 void dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev *pdev, 827 bool val) 828 { 829 } 830 831 static inline QDF_STATUS 832 dp_monitor_peer_tx_capture_get_stats(struct dp_soc *soc, struct dp_peer *peer, 833 struct cdp_peer_tx_capture_stats *stats) 834 { 835 return QDF_STATUS_E_FAILURE; 836 } 837 838 static inline QDF_STATUS 839 dp_monitor_pdev_tx_capture_get_stats(struct dp_soc *soc, struct dp_pdev *pdev, 840 struct cdp_pdev_tx_capture_stats *stats) 841 { 842 return QDF_STATUS_E_FAILURE; 843 } 844 845 #ifdef DP_POWER_SAVE 846 static inline 847 void dp_monitor_pktlog_reap_pending_frames(struct dp_pdev *pdev) 848 { 849 } 850 851 static inline 852 void dp_monitor_pktlog_start_reap_timer(struct dp_pdev *pdev) 853 { 854 } 855 #endif 856 857 static inline bool dp_monitor_is_configured(struct dp_pdev *pdev) 858 { 859 return false; 860 } 861 862 static inline void 863 dp_mon_rx_hdr_length_set(struct dp_soc *soc, uint32_t *msg_word, 864 struct htt_rx_ring_tlv_filter *tlv_filter) 865 { 866 } 867 868 static inline void dp_monitor_soc_init(struct dp_soc *soc) 869 { 870 } 871 872 static inline void dp_monitor_soc_deinit(struct dp_soc *soc) 873 { 874 } 875 876 static inline 877 QDF_STATUS dp_monitor_config_undecoded_metadata_capture(struct dp_pdev *pdev, 878 int val) 879 { 880 return QDF_STATUS_SUCCESS; 881 } 882 883 static inline QDF_STATUS 884 dp_monitor_config_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev, 885 int mask1, int mask2) 886 { 887 return QDF_STATUS_SUCCESS; 888 } 889 890 
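/*
 * Note on the stubs in this block: when WIFI_MONITOR_SUPPORT is not
 * defined, each dp_monitor_*() entry point compiles to an inline no-op
 * (or a benign default return value), so common-path code can call them
 * unconditionally instead of sprinkling #ifdefs at every call site.
 *
 * Illustrative sketch only (hypothetical caller, not part of this header):
 *
 *	static void dp_example_flush(struct dp_soc *soc, struct dp_pdev *pdev)
 *	{
 *		if (dp_monitor_is_configured(pdev))
 *			dp_monitor_flush_rings(soc);
 *	}
 *
 * With monitor support compiled out, dp_monitor_is_configured() returns
 * false and the body above folds away at compile time.
 */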
static inline QDF_STATUS 891 dp_monitor_get_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev, 892 int *mask, int *mask_cont) 893 { 894 return QDF_STATUS_SUCCESS; 895 } 896 897 static inline QDF_STATUS dp_monitor_soc_htt_srng_setup(struct dp_soc *soc) 898 { 899 return QDF_STATUS_E_FAILURE; 900 } 901 902 static inline bool dp_is_monitor_mode_using_poll(struct dp_soc *soc) 903 { 904 return false; 905 } 906 907 static inline 908 uint32_t dp_tx_mon_buf_refill(struct dp_intr *int_ctx) 909 { 910 return 0; 911 } 912 913 static inline uint32_t 914 dp_tx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx, 915 uint32_t mac_id, uint32_t quota) 916 { 917 return 0; 918 } 919 920 static inline uint32_t 921 dp_print_txmon_ring_stat_from_hal(struct dp_pdev *pdev) 922 { 923 return 0; 924 } 925 926 static inline 927 uint32_t dp_rx_mon_buf_refill(struct dp_intr *int_ctx) 928 { 929 return 0; 930 } 931 932 static inline bool dp_monitor_is_tx_cap_enabled(struct dp_peer *peer) 933 { 934 return 0; 935 } 936 937 static inline bool dp_monitor_is_rx_cap_enabled(struct dp_peer *peer) 938 { 939 return 0; 940 } 941 942 static inline void 943 dp_rx_mon_enable(struct dp_soc *soc, uint32_t *msg_word, 944 struct htt_rx_ring_tlv_filter *tlv_filter) 945 { 946 } 947 948 static inline void 949 dp_mon_rx_packet_length_set(struct dp_soc *soc, uint32_t *msg_word, 950 struct htt_rx_ring_tlv_filter *tlv_filter) 951 { 952 } 953 954 static inline void 955 dp_mon_rx_enable_mpdu_logging(struct dp_soc *soc, uint32_t *msg_word, 956 struct htt_rx_ring_tlv_filter *tlv_filter) 957 { 958 } 959 960 static inline void 961 dp_mon_rx_wmask_subscribe(struct dp_soc *soc, uint32_t *msg_word, 962 struct htt_rx_ring_tlv_filter *tlv_filter) 963 { 964 } 965 966 static inline void 967 dp_mon_rx_mac_filter_set(struct dp_soc *soc, uint32_t *msg_word, 968 struct htt_rx_ring_tlv_filter *tlv_filter) 969 { 970 } 971 972 static inline void 973 dp_mon_rx_enable_pkt_tlv_offset(struct dp_soc *soc, uint32_t *msg_word, 974 struct htt_rx_ring_tlv_filter *tlv_filter) 975 { 976 } 977 978 static inline void 979 dp_mon_rx_enable_fpmo(struct dp_soc *soc, uint32_t *msg_word, 980 struct htt_rx_ring_tlv_filter *tlv_filter) 981 { 982 } 983 984 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 985 static inline 986 void dp_monitor_peer_telemetry_stats(struct dp_peer *peer, 987 struct cdp_peer_telemetry_stats *stats) 988 { 989 } 990 991 static inline 992 void dp_monitor_peer_deter_stats(struct dp_peer *peer, 993 struct cdp_peer_telemetry_stats *stats) 994 { 995 } 996 #endif /* WLAN_CONFIG_TELEMETRY_AGENT */ 997 #endif /* !WIFI_MONITOR_SUPPORT */ 998 999 /** 1000 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to 1001 * dp soc handle 1002 * @psoc: CDP psoc handle 1003 * 1004 * Return: struct dp_soc pointer 1005 */ 1006 static inline 1007 struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc) 1008 { 1009 return (struct dp_soc *)psoc; 1010 } 1011 1012 #define DP_MAX_TIMER_EXEC_TIME_TICKS \ 1013 (QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20) 1014 1015 /** 1016 * enum timer_yield_status - yield status code used in monitor mode timer. 
 * @DP_TIMER_NO_YIELD: do not yield
 * @DP_TIMER_WORK_DONE: yield because work is done
 * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
 * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
 */
enum timer_yield_status {
	DP_TIMER_NO_YIELD,
	DP_TIMER_WORK_DONE,
	DP_TIMER_WORK_EXHAUST,
	DP_TIMER_TIME_EXHAUST,
};

#if DP_PRINT_ENABLE
#include <qdf_types.h> /* qdf_vprint */
#include <cdp_txrx_handle.h>

enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	DP_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	DP_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	DP_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	DP_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	DP_PRINT_LEVEL_INFO2,
};

#define dp_print(level, fmt, ...) do { \
	if (level <= g_txrx_print_level) \
		qdf_print(fmt, ## __VA_ARGS__); \
} while (0)
#define DP_PRINT(level, fmt, ...) do { \
	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */

#define DP_TRACE(LVL, fmt, args ...) \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
		  fmt, ## args)

#ifdef WLAN_SYSFS_DP_STATS
void DP_PRINT_STATS(const char *fmt, ...);
#else /* WLAN_SYSFS_DP_STATS */
#ifdef DP_PRINT_NO_CONSOLE
/* Stat prints should not go to console or kernel logs. */
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, \
		  fmt, ## args)
#else
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\
		  fmt, ## args)
#endif
#endif /* WLAN_SYSFS_DP_STATS */

#define DP_STATS_INIT(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_TXRX_PEER_STATS_INIT(_handle, size) \
	qdf_mem_zero(&((_handle)->stats[0]), size)

#define DP_STATS_CLR(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_TXRX_PEER_STATS_CLR(_handle, size) \
	qdf_mem_zero(&((_handle)->stats[0]), size)

#ifndef DISABLE_DP_STATS
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link) \
{ \
	if (likely(_handle)) \
		_handle->stats[_link]._field += _delta; \
}

#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field += _delta; \
}

#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats[_link]._field += _delta; \
}

#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field -= _delta; \
}

#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field -= _delta; \
}

#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
	if
(likely(_handle)) \ 1138 _handle->stats._field = _delta; \ 1139 } 1140 1141 #define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link) \ 1142 { \ 1143 if (likely(_handle)) \ 1144 _handle->stats[_link]._field = _delta; \ 1145 } 1146 1147 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \ 1148 { \ 1149 DP_STATS_INC(_handle, _field.num, _count); \ 1150 DP_STATS_INC(_handle, _field.bytes, _bytes) \ 1151 } 1152 1153 #define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \ 1154 { \ 1155 DP_PEER_STATS_FLAT_INC(_handle, _field.num, _count); \ 1156 DP_PEER_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \ 1157 } 1158 1159 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \ 1160 { \ 1161 DP_STATS_INCC(_handle, _field.num, _count, _cond); \ 1162 DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \ 1163 } 1164 1165 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) \ 1166 { \ 1167 _handle_a->stats._field += _handle_b->stats._field; \ 1168 } 1169 1170 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \ 1171 { \ 1172 DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \ 1173 DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\ 1174 } 1175 1176 #define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \ 1177 { \ 1178 _handle_a->stats._field = _handle_b->stats._field; \ 1179 } 1180 1181 #else 1182 #define DP_STATS_INC(_handle, _field, _delta) 1183 #define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link) 1184 #define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) 1185 #define DP_STATS_INCC(_handle, _field, _delta, _cond) 1186 #define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link) 1187 #define DP_STATS_DEC(_handle, _field, _delta) 1188 #define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) 1189 #define DP_STATS_UPD(_handle, _field, _delta) 1190 #define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link) 1191 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) 1192 #define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) 1193 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) 1194 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) 1195 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) 1196 #endif 1197 1198 #define DP_PEER_PER_PKT_STATS_INC(_handle, _field, _delta, _link) \ 1199 { \ 1200 DP_PEER_LINK_STATS_INC(_handle, per_pkt_stats._field, _delta, _link); \ 1201 } 1202 1203 #define DP_PEER_PER_PKT_STATS_INCC(_handle, _field, _delta, _cond, _link) \ 1204 { \ 1205 DP_PEER_LINK_STATS_INCC(_handle, per_pkt_stats._field, _delta, _cond, _link); \ 1206 } 1207 1208 #define DP_PEER_PER_PKT_STATS_INC_PKT(_handle, _field, _count, _bytes, _link) \ 1209 { \ 1210 DP_PEER_PER_PKT_STATS_INC(_handle, _field.num, _count, _link); \ 1211 DP_PEER_PER_PKT_STATS_INC(_handle, _field.bytes, _bytes, _link) \ 1212 } 1213 1214 #define DP_PEER_PER_PKT_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond, _link) \ 1215 { \ 1216 DP_PEER_PER_PKT_STATS_INCC(_handle, _field.num, _count, _cond, _link); \ 1217 DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond, _link) \ 1218 } 1219 1220 #define DP_PEER_PER_PKT_STATS_UPD(_handle, _field, _delta, _link) \ 1221 { \ 1222 DP_PEER_LINK_STATS_UPD(_handle, per_pkt_stats._field, _delta, _link); \ 1223 } 1224 1225 #ifndef QCA_ENHANCED_STATS_SUPPORT 1226 #define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta, _link) \ 1227 { \ 1228 DP_PEER_LINK_STATS_INC(_handle, extd_stats._field, _delta, _link); \ 1229 } 1230 1231 #define DP_PEER_EXTD_STATS_INCC(_handle, _field, _delta, _cond, 
_link) \ 1232 { \ 1233 DP_PEER_LINK_STATS_INCC(_handle, extd_stats._field, _delta, _cond, _link); \ 1234 } 1235 1236 #define DP_PEER_EXTD_STATS_UPD(_handle, _field, _delta, _link) \ 1237 { \ 1238 DP_PEER_LINK_STATS_UPD(_handle, extd_stats._field, _delta, _link); \ 1239 } 1240 #endif 1241 1242 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \ 1243 defined(QCA_ENHANCED_STATS_SUPPORT) 1244 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1245 { \ 1246 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1247 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ 1248 } 1249 1250 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1251 { \ 1252 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1253 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ 1254 } 1255 1256 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1257 { \ 1258 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1259 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \ 1260 } 1261 1262 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1263 { \ 1264 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1265 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \ 1266 } 1267 1268 #define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1269 { \ 1270 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1271 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \ 1272 } 1273 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) 1274 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1275 { \ 1276 if (!(_handle->hw_txrx_stats_en)) \ 1277 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ 1278 } 1279 1280 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1281 { \ 1282 if (!(_handle->hw_txrx_stats_en)) \ 1283 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ 1284 } 1285 1286 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1287 { \ 1288 if (!(_handle->hw_txrx_stats_en)) \ 1289 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \ 1290 } 1291 1292 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1293 { \ 1294 if (!(_handle->hw_txrx_stats_en)) \ 1295 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \ 1296 } 1297 1298 #define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1299 { \ 1300 if (!(_handle->hw_txrx_stats_en)) \ 1301 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \ 1302 } 1303 #else 1304 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1305 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); 1306 1307 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1308 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); 1309 1310 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1311 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); 1312 1313 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1314 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); 1315 1316 #define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1317 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); 1318 #endif 1319 1320 #ifdef ENABLE_DP_HIST_STATS 1321 #define DP_HIST_INIT() \ 1322 uint32_t num_of_packets[MAX_PDEV_CNT] = {0}; 1323 1324 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) \ 1325 { \ 1326 ++num_of_packets[_pdev_id]; \ 1327 } 
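/*
 * Typical usage of the histogram helpers (a sketch of the expected call
 * pattern, not a definitive reference): DP_HIST_INIT() declares a local
 * num_of_packets[MAX_PDEV_CNT] array, DP_HIST_PACKET_COUNT_INC() bumps the
 * per-pdev count while a batch of frames is processed, and
 * DP_TX_HIST_STATS_PER_PDEV() / DP_RX_HIST_STATS_PER_PDEV() fold those
 * counts into the pdev tx_comp/rx_ind histogram buckets via DP_STATS_INC().
 * Note the *_PER_PDEV() macros expect a local `soc` pointer in scope, since
 * they walk soc->pdev_list[].
 *
 *	void dp_example_tx_comp_batch(struct dp_soc *soc)  // hypothetical
 *	{
 *		DP_HIST_INIT();
 *		// ... for each completed msdu of a pdev:
 *		//	DP_HIST_PACKET_COUNT_INC(pdev->pdev_id);
 *		DP_TX_HIST_STATS_PER_PDEV();
 *	}
 */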
1328 1329 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 1330 do { \ 1331 if (_p_cntrs == 1) { \ 1332 DP_STATS_INC(_pdev, \ 1333 tx_comp_histogram.pkts_1, 1); \ 1334 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 1335 DP_STATS_INC(_pdev, \ 1336 tx_comp_histogram.pkts_2_20, 1); \ 1337 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 1338 DP_STATS_INC(_pdev, \ 1339 tx_comp_histogram.pkts_21_40, 1); \ 1340 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 1341 DP_STATS_INC(_pdev, \ 1342 tx_comp_histogram.pkts_41_60, 1); \ 1343 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 1344 DP_STATS_INC(_pdev, \ 1345 tx_comp_histogram.pkts_61_80, 1); \ 1346 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 1347 DP_STATS_INC(_pdev, \ 1348 tx_comp_histogram.pkts_81_100, 1); \ 1349 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 1350 DP_STATS_INC(_pdev, \ 1351 tx_comp_histogram.pkts_101_200, 1); \ 1352 } else if (_p_cntrs > 200) { \ 1353 DP_STATS_INC(_pdev, \ 1354 tx_comp_histogram.pkts_201_plus, 1); \ 1355 } \ 1356 } while (0) 1357 1358 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 1359 do { \ 1360 if (_p_cntrs == 1) { \ 1361 DP_STATS_INC(_pdev, \ 1362 rx_ind_histogram.pkts_1, 1); \ 1363 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 1364 DP_STATS_INC(_pdev, \ 1365 rx_ind_histogram.pkts_2_20, 1); \ 1366 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 1367 DP_STATS_INC(_pdev, \ 1368 rx_ind_histogram.pkts_21_40, 1); \ 1369 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 1370 DP_STATS_INC(_pdev, \ 1371 rx_ind_histogram.pkts_41_60, 1); \ 1372 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 1373 DP_STATS_INC(_pdev, \ 1374 rx_ind_histogram.pkts_61_80, 1); \ 1375 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 1376 DP_STATS_INC(_pdev, \ 1377 rx_ind_histogram.pkts_81_100, 1); \ 1378 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 1379 DP_STATS_INC(_pdev, \ 1380 rx_ind_histogram.pkts_101_200, 1); \ 1381 } else if (_p_cntrs > 200) { \ 1382 DP_STATS_INC(_pdev, \ 1383 rx_ind_histogram.pkts_201_plus, 1); \ 1384 } \ 1385 } while (0) 1386 1387 #define DP_TX_HIST_STATS_PER_PDEV() \ 1388 do { \ 1389 uint8_t hist_stats = 0; \ 1390 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 1391 hist_stats++) { \ 1392 DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 1393 num_of_packets[hist_stats]); \ 1394 } \ 1395 } while (0) 1396 1397 1398 #define DP_RX_HIST_STATS_PER_PDEV() \ 1399 do { \ 1400 uint8_t hist_stats = 0; \ 1401 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 1402 hist_stats++) { \ 1403 DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 1404 num_of_packets[hist_stats]); \ 1405 } \ 1406 } while (0) 1407 1408 #else 1409 #define DP_HIST_INIT() 1410 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) 1411 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 1412 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 1413 #define DP_RX_HIST_STATS_PER_PDEV() 1414 #define DP_TX_HIST_STATS_PER_PDEV() 1415 #endif /* DISABLE_DP_STATS */ 1416 1417 #define FRAME_MASK_IPV4_ARP 1 1418 #define FRAME_MASK_IPV4_DHCP 2 1419 #define FRAME_MASK_IPV4_EAPOL 4 1420 #define FRAME_MASK_IPV6_DHCP 8 1421 1422 static inline int dp_log2_ceil(unsigned int value) 1423 { 1424 unsigned int tmp = value; 1425 int log2 = -1; 1426 1427 while (tmp) { 1428 log2++; 1429 tmp >>= 1; 1430 } 1431 if (1 << log2 != value) 1432 log2++; 1433 return log2; 1434 } 1435 1436 #ifdef QCA_SUPPORT_PEER_ISOLATION 1437 #define dp_get_peer_isolation(_peer) ((_peer)->isolation) 1438 1439 static inline void dp_set_peer_isolation(struct dp_txrx_peer *txrx_peer, 1440 bool val) 1441 
{ 1442 txrx_peer->isolation = val; 1443 } 1444 1445 #else 1446 #define dp_get_peer_isolation(_peer) (0) 1447 1448 static inline void dp_set_peer_isolation(struct dp_txrx_peer *peer, bool val) 1449 { 1450 } 1451 #endif /* QCA_SUPPORT_PEER_ISOLATION */ 1452 1453 bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev); 1454 1455 #ifdef QCA_SUPPORT_WDS_EXTENDED 1456 static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer) 1457 { 1458 txrx_peer->wds_ext.osif_peer = NULL; 1459 txrx_peer->wds_ext.init = 0; 1460 } 1461 #else 1462 static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer) 1463 { 1464 } 1465 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 1466 1467 #ifdef QCA_HOST2FW_RXBUF_RING 1468 static inline 1469 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id) 1470 { 1471 return &pdev->rx_mac_buf_ring[lmac_id]; 1472 } 1473 #else 1474 static inline 1475 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id) 1476 { 1477 return &pdev->soc->rx_refill_buf_ring[lmac_id]; 1478 } 1479 #endif 1480 1481 /* 1482 * The lmac ID for a particular channel band is fixed. 1483 * 2.4GHz band uses lmac_id = 1 1484 * 5GHz/6GHz band uses lmac_id=0 1485 */ 1486 #define DP_INVALID_LMAC_ID (-1) 1487 #define DP_MON_INVALID_LMAC_ID (-1) 1488 #define DP_MAC0_LMAC_ID 0 1489 #define DP_MAC1_LMAC_ID 1 1490 1491 #ifdef FEATURE_TSO_STATS 1492 /** 1493 * dp_init_tso_stats() - Clear tso stats 1494 * @pdev: pdev handle 1495 * 1496 * Return: None 1497 */ 1498 static inline 1499 void dp_init_tso_stats(struct dp_pdev *pdev) 1500 { 1501 if (pdev) { 1502 qdf_mem_zero(&((pdev)->stats.tso_stats), 1503 sizeof((pdev)->stats.tso_stats)); 1504 qdf_atomic_init(&pdev->tso_idx); 1505 } 1506 } 1507 1508 /** 1509 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram 1510 * @pdev: pdev handle 1511 * @_p_cntrs: number of tso segments for a tso packet 1512 * 1513 * Return: None 1514 */ 1515 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 1516 uint8_t _p_cntrs); 1517 1518 /** 1519 * dp_tso_segment_update() - Collect tso segment information 1520 * @pdev: pdev handle 1521 * @stats_idx: tso packet number 1522 * @idx: tso segment number 1523 * @seg: tso segment 1524 * 1525 * Return: None 1526 */ 1527 void dp_tso_segment_update(struct dp_pdev *pdev, 1528 uint32_t stats_idx, 1529 uint8_t idx, 1530 struct qdf_tso_seg_t seg); 1531 1532 /** 1533 * dp_tso_packet_update() - TSO Packet information 1534 * @pdev: pdev handle 1535 * @stats_idx: tso packet number 1536 * @msdu: nbuf handle 1537 * @num_segs: tso segments 1538 * 1539 * Return: None 1540 */ 1541 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 1542 qdf_nbuf_t msdu, uint16_t num_segs); 1543 1544 /** 1545 * dp_tso_segment_stats_update() - TSO Segment stats 1546 * @pdev: pdev handle 1547 * @stats_seg: tso segment list 1548 * @stats_idx: tso packet number 1549 * 1550 * Return: None 1551 */ 1552 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 1553 struct qdf_tso_seg_elem_t *stats_seg, 1554 uint32_t stats_idx); 1555 1556 /** 1557 * dp_print_tso_stats() - dump tso statistics 1558 * @soc:soc handle 1559 * @level: verbosity level 1560 * 1561 * Return: None 1562 */ 1563 void dp_print_tso_stats(struct dp_soc *soc, 1564 enum qdf_stats_verbosity_level level); 1565 1566 /** 1567 * dp_txrx_clear_tso_stats() - clear tso stats 1568 * @soc: soc handle 1569 * 1570 * Return: None 1571 */ 1572 void dp_txrx_clear_tso_stats(struct dp_soc *soc); 1573 #else 1574 static inline 1575 void dp_init_tso_stats(struct dp_pdev 
*pdev) 1576 { 1577 } 1578 1579 static inline 1580 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 1581 uint8_t _p_cntrs) 1582 { 1583 } 1584 1585 static inline 1586 void dp_tso_segment_update(struct dp_pdev *pdev, 1587 uint32_t stats_idx, 1588 uint32_t idx, 1589 struct qdf_tso_seg_t seg) 1590 { 1591 } 1592 1593 static inline 1594 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 1595 qdf_nbuf_t msdu, uint16_t num_segs) 1596 { 1597 } 1598 1599 static inline 1600 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 1601 struct qdf_tso_seg_elem_t *stats_seg, 1602 uint32_t stats_idx) 1603 { 1604 } 1605 1606 static inline 1607 void dp_print_tso_stats(struct dp_soc *soc, 1608 enum qdf_stats_verbosity_level level) 1609 { 1610 } 1611 1612 static inline 1613 void dp_txrx_clear_tso_stats(struct dp_soc *soc) 1614 { 1615 } 1616 #endif /* FEATURE_TSO_STATS */ 1617 1618 /** 1619 * dp_txrx_get_peer_per_pkt_stats_param() - Get peer per pkt stats param 1620 * @peer: DP peer handle 1621 * @type: Requested stats type 1622 * @buf: Buffer to hold the value 1623 * 1624 * Return: status success/failure 1625 */ 1626 QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer, 1627 enum cdp_peer_stats_type type, 1628 cdp_peer_stats_param_t *buf); 1629 1630 /** 1631 * dp_txrx_get_peer_extd_stats_param() - Get peer extd stats param 1632 * @peer: DP peer handle 1633 * @type: Requested stats type 1634 * @buf: Buffer to hold the value 1635 * 1636 * Return: status success/failure 1637 */ 1638 QDF_STATUS dp_txrx_get_peer_extd_stats_param(struct dp_peer *peer, 1639 enum cdp_peer_stats_type type, 1640 cdp_peer_stats_param_t *buf); 1641 1642 #define DP_HTT_T2H_HP_PIPE 5 1643 /** 1644 * dp_update_pdev_stats(): Update the pdev stats 1645 * @tgtobj: pdev handle 1646 * @srcobj: vdev stats structure 1647 * 1648 * Update the pdev stats from the specified vdev stats 1649 * 1650 * Return: None 1651 */ 1652 void dp_update_pdev_stats(struct dp_pdev *tgtobj, 1653 struct cdp_vdev_stats *srcobj); 1654 1655 /** 1656 * dp_update_vdev_ingress_stats(): Update the vdev ingress stats 1657 * @tgtobj: vdev handle 1658 * 1659 * Update the vdev ingress stats 1660 * 1661 * Return: None 1662 */ 1663 void dp_update_vdev_ingress_stats(struct dp_vdev *tgtobj); 1664 1665 /** 1666 * dp_update_vdev_rate_stats() - Update the vdev rate stats 1667 * @tgtobj: tgt buffer for vdev stats 1668 * @srcobj: srcobj vdev stats 1669 * 1670 * Return: None 1671 */ 1672 void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj, 1673 struct cdp_vdev_stats *srcobj); 1674 1675 /** 1676 * dp_update_pdev_ingress_stats(): Update the pdev ingress stats 1677 * @tgtobj: pdev handle 1678 * @srcobj: vdev stats structure 1679 * 1680 * Update the pdev ingress stats from the specified vdev stats 1681 * 1682 * Return: None 1683 */ 1684 void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj, 1685 struct dp_vdev *srcobj); 1686 1687 /** 1688 * dp_update_vdev_stats(): Update the vdev stats 1689 * @soc: soc handle 1690 * @srcobj: DP_PEER object 1691 * @arg: point to vdev stats structure 1692 * 1693 * Update the vdev stats from the specified peer stats 1694 * 1695 * Return: None 1696 */ 1697 void dp_update_vdev_stats(struct dp_soc *soc, 1698 struct dp_peer *srcobj, 1699 void *arg); 1700 1701 /** 1702 * dp_update_vdev_stats_on_peer_unmap() - Update the vdev stats on peer unmap 1703 * @vdev: DP_VDEV handle 1704 * @peer: DP_PEER handle 1705 * 1706 * Return: None 1707 */ 1708 void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev, 1709 
struct dp_peer *peer); 1710 1711 #ifdef IPA_OFFLOAD 1712 #define DP_IPA_UPDATE_RX_STATS(__tgtobj, __srcobj) \ 1713 { \ 1714 DP_STATS_AGGR_PKT(__tgtobj, __srcobj, rx.rx_total); \ 1715 } 1716 1717 #define DP_IPA_UPDATE_PER_PKT_RX_STATS(__tgtobj, __srcobj) \ 1718 { \ 1719 (__tgtobj)->rx.rx_total.num += (__srcobj)->rx.rx_total.num; \ 1720 (__tgtobj)->rx.rx_total.bytes += (__srcobj)->rx.rx_total.bytes; \ 1721 } 1722 #else 1723 #define DP_IPA_UPDATE_PER_PKT_RX_STATS(tgtobj, srcobj) \ 1724 1725 #define DP_IPA_UPDATE_RX_STATS(tgtobj, srcobj) 1726 #endif 1727 1728 #define DP_UPDATE_STATS(_tgtobj, _srcobj) \ 1729 do { \ 1730 uint8_t i; \ 1731 uint8_t pream_type; \ 1732 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 1733 for (i = 0; i < MAX_MCS; i++) { \ 1734 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1735 tx.pkt_type[pream_type].mcs_count[i]); \ 1736 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1737 rx.pkt_type[pream_type].mcs_count[i]); \ 1738 } \ 1739 } \ 1740 \ 1741 for (i = 0; i < MAX_BW; i++) { \ 1742 DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \ 1743 DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \ 1744 } \ 1745 \ 1746 for (i = 0; i < SS_COUNT; i++) { \ 1747 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \ 1748 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \ 1749 } \ 1750 for (i = 0; i < WME_AC_MAX; i++) { \ 1751 DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \ 1752 DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \ 1753 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1754 tx.wme_ac_type_bytes[i]); \ 1755 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1756 rx.wme_ac_type_bytes[i]); \ 1757 DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \ 1758 \ 1759 } \ 1760 \ 1761 for (i = 0; i < MAX_GI; i++) { \ 1762 DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \ 1763 DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \ 1764 } \ 1765 \ 1766 for (i = 0; i < MAX_RECEPTION_TYPES; i++) \ 1767 DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \ 1768 \ 1769 if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) { \ 1770 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \ 1771 DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \ 1772 } \ 1773 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \ 1774 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \ 1775 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \ 1776 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \ 1777 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \ 1778 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \ 1779 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \ 1780 DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \ 1781 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \ 1782 DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \ 1783 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \ 1784 DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \ 1785 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \ 1786 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \ 1787 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \ 1788 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \ 1789 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \ 1790 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \ 1791 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \ 1792 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \ 1793 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_queue_disable); \ 1794 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_no_match); \ 1795 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_threshold); \ 1796 DP_STATS_AGGR(_tgtobj, _srcobj, 
tx.dropped.drop_link_desc_na); \ 1797 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_drop); \ 1798 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.mcast_vdev_drop); \ 1799 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_rr); \ 1800 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \ 1801 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_total); \ 1802 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_success); \ 1803 \ 1804 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \ 1805 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \ 1806 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.fcserr); \ 1807 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.pn_err); \ 1808 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.oor_err); \ 1809 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.jump_2k_err); \ 1810 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.rxdma_wifi_parse_err); \ 1811 if (_srcobj->stats.rx.snr != 0) \ 1812 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \ 1813 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \ 1814 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \ 1815 DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \ 1816 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \ 1817 DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \ 1818 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \ 1819 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \ 1820 \ 1821 for (i = 0; i < CDP_MAX_RX_RINGS; i++) \ 1822 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \ 1823 \ 1824 for (i = 0; i < CDP_MAX_LMACS; i++) \ 1825 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rx_lmac[i]); \ 1826 \ 1827 _srcobj->stats.rx.unicast.num = \ 1828 _srcobj->stats.rx.to_stack.num - \ 1829 _srcobj->stats.rx.multicast.num; \ 1830 _srcobj->stats.rx.unicast.bytes = \ 1831 _srcobj->stats.rx.to_stack.bytes - \ 1832 _srcobj->stats.rx.multicast.bytes; \ 1833 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \ 1834 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \ 1835 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \ 1836 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \ 1837 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \ 1838 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \ 1839 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \ 1840 \ 1841 _tgtobj->stats.tx.last_ack_rssi = \ 1842 _srcobj->stats.tx.last_ack_rssi; \ 1843 DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \ 1844 DP_STATS_AGGR(_tgtobj, _srcobj, rx.peer_unauth_rx_pkt_drop); \ 1845 DP_STATS_AGGR(_tgtobj, _srcobj, rx.policy_check_drop); \ 1846 DP_IPA_UPDATE_RX_STATS(_tgtobj, _srcobj); \ 1847 } while (0) 1848 1849 #ifdef VDEV_PEER_PROTOCOL_COUNT 1850 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) \ 1851 { \ 1852 uint8_t j; \ 1853 for (j = 0; j < CDP_TRACE_MAX; j++) { \ 1854 _tgtobj->tx.protocol_trace_cnt[j].egress_cnt += \ 1855 _srcobj->tx.protocol_trace_cnt[j].egress_cnt; \ 1856 _tgtobj->tx.protocol_trace_cnt[j].ingress_cnt += \ 1857 _srcobj->tx.protocol_trace_cnt[j].ingress_cnt; \ 1858 _tgtobj->rx.protocol_trace_cnt[j].egress_cnt += \ 1859 _srcobj->rx.protocol_trace_cnt[j].egress_cnt; \ 1860 _tgtobj->rx.protocol_trace_cnt[j].ingress_cnt += \ 1861 _srcobj->rx.protocol_trace_cnt[j].ingress_cnt; \ 1862 } \ 1863 } 1864 #else 1865 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) 1866 #endif 1867 1868 #ifdef WLAN_FEATURE_11BE 1869 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) \ 1870 do { \ 1871 uint8_t i, mu_type; \ 1872 for (i = 0; i < MAX_MCS; i++) { \ 1873 _tgtobj->tx.su_be_ppdu_cnt.mcs_count[i] += \ 1874 _srcobj->tx.su_be_ppdu_cnt.mcs_count[i]; \ 1875 
_tgtobj->rx.su_be_ppdu_cnt.mcs_count[i] += \ 1876 _srcobj->rx.su_be_ppdu_cnt.mcs_count[i]; \ 1877 } \ 1878 for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 1879 for (i = 0; i < MAX_MCS; i++) { \ 1880 _tgtobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \ 1881 _srcobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \ 1882 _tgtobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \ 1883 _srcobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \ 1884 } \ 1885 } \ 1886 for (i = 0; i < MAX_PUNCTURED_MODE; i++) { \ 1887 _tgtobj->tx.punc_bw[i] += _srcobj->tx.punc_bw[i]; \ 1888 _tgtobj->rx.punc_bw[i] += _srcobj->rx.punc_bw[i]; \ 1889 } \ 1890 } while (0) 1891 #else 1892 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) 1893 #endif 1894 1895 #define DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj) \ 1896 do { \ 1897 _tgtobj->tx.comp_pkt.num += _srcobj->tx.comp_pkt.num; \ 1898 _tgtobj->tx.comp_pkt.bytes += _srcobj->tx.comp_pkt.bytes; \ 1899 _tgtobj->tx.tx_failed += _srcobj->tx.tx_failed; \ 1900 _tgtobj->rx.to_stack.num += _srcobj->rx.to_stack.num; \ 1901 _tgtobj->rx.to_stack.bytes += _srcobj->rx.to_stack.bytes; \ 1902 } while (0) 1903 1904 #define DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj) \ 1905 do { \ 1906 uint8_t i; \ 1907 _tgtobj->tx.ucast.num += _srcobj->tx.ucast.num; \ 1908 _tgtobj->tx.ucast.bytes += _srcobj->tx.ucast.bytes; \ 1909 _tgtobj->tx.mcast.num += _srcobj->tx.mcast.num; \ 1910 _tgtobj->tx.mcast.bytes += _srcobj->tx.mcast.bytes; \ 1911 _tgtobj->tx.bcast.num += _srcobj->tx.bcast.num; \ 1912 _tgtobj->tx.bcast.bytes += _srcobj->tx.bcast.bytes; \ 1913 _tgtobj->tx.nawds_mcast.num += _srcobj->tx.nawds_mcast.num; \ 1914 _tgtobj->tx.nawds_mcast.bytes += \ 1915 _srcobj->tx.nawds_mcast.bytes; \ 1916 _tgtobj->tx.tx_success.num += _srcobj->tx.tx_success.num; \ 1917 _tgtobj->tx.tx_success.bytes += _srcobj->tx.tx_success.bytes; \ 1918 _tgtobj->tx.nawds_mcast_drop += _srcobj->tx.nawds_mcast_drop; \ 1919 _tgtobj->tx.ofdma += _srcobj->tx.ofdma; \ 1920 _tgtobj->tx.non_amsdu_cnt += _srcobj->tx.non_amsdu_cnt; \ 1921 _tgtobj->tx.amsdu_cnt += _srcobj->tx.amsdu_cnt; \ 1922 _tgtobj->tx.dropped.fw_rem.num += \ 1923 _srcobj->tx.dropped.fw_rem.num; \ 1924 _tgtobj->tx.dropped.fw_rem.bytes += \ 1925 _srcobj->tx.dropped.fw_rem.bytes; \ 1926 _tgtobj->tx.dropped.fw_rem_notx += \ 1927 _srcobj->tx.dropped.fw_rem_notx; \ 1928 _tgtobj->tx.dropped.fw_rem_tx += \ 1929 _srcobj->tx.dropped.fw_rem_tx; \ 1930 _tgtobj->tx.dropped.age_out += _srcobj->tx.dropped.age_out; \ 1931 _tgtobj->tx.dropped.fw_reason1 += \ 1932 _srcobj->tx.dropped.fw_reason1; \ 1933 _tgtobj->tx.dropped.fw_reason2 += \ 1934 _srcobj->tx.dropped.fw_reason2; \ 1935 _tgtobj->tx.dropped.fw_reason3 += \ 1936 _srcobj->tx.dropped.fw_reason3; \ 1937 _tgtobj->tx.dropped.fw_rem_queue_disable += \ 1938 _srcobj->tx.dropped.fw_rem_queue_disable; \ 1939 _tgtobj->tx.dropped.fw_rem_no_match += \ 1940 _srcobj->tx.dropped.fw_rem_no_match; \ 1941 _tgtobj->tx.dropped.drop_threshold += \ 1942 _srcobj->tx.dropped.drop_threshold; \ 1943 _tgtobj->tx.dropped.drop_link_desc_na += \ 1944 _srcobj->tx.dropped.drop_link_desc_na; \ 1945 _tgtobj->tx.dropped.invalid_drop += \ 1946 _srcobj->tx.dropped.invalid_drop; \ 1947 _tgtobj->tx.dropped.mcast_vdev_drop += \ 1948 _srcobj->tx.dropped.mcast_vdev_drop; \ 1949 _tgtobj->tx.dropped.invalid_rr += \ 1950 _srcobj->tx.dropped.invalid_rr; \ 1951 _tgtobj->tx.failed_retry_count += \ 1952 _srcobj->tx.failed_retry_count; \ 1953 _tgtobj->tx.retry_count += _srcobj->tx.retry_count; \ 1954 _tgtobj->tx.multiple_retry_count += \ 1955 _srcobj->tx.multiple_retry_count; \ 1956 
_tgtobj->tx.tx_success_twt.num += \ 1957 _srcobj->tx.tx_success_twt.num; \ 1958 _tgtobj->tx.tx_success_twt.bytes += \ 1959 _srcobj->tx.tx_success_twt.bytes; \ 1960 _tgtobj->tx.last_tx_ts = _srcobj->tx.last_tx_ts; \ 1961 _tgtobj->tx.release_src_not_tqm += \ 1962 _srcobj->tx.release_src_not_tqm; \ 1963 for (i = 0; i < QDF_PROTO_SUBTYPE_MAX; i++) { \ 1964 _tgtobj->tx.no_ack_count[i] += \ 1965 _srcobj->tx.no_ack_count[i];\ 1966 } \ 1967 \ 1968 _tgtobj->rx.multicast.num += _srcobj->rx.multicast.num; \ 1969 _tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \ 1970 _tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \ 1971 _tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \ 1972 _tgtobj->rx.unicast.num += _srcobj->rx.unicast.num; \ 1973 _tgtobj->rx.unicast.bytes += _srcobj->rx.unicast.bytes; \ 1974 _tgtobj->rx.raw.num += _srcobj->rx.raw.num; \ 1975 _tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \ 1976 _tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \ 1977 _tgtobj->rx.mcast_3addr_drop += _srcobj->rx.mcast_3addr_drop; \ 1978 _tgtobj->rx.mec_drop.num += _srcobj->rx.mec_drop.num; \ 1979 _tgtobj->rx.mec_drop.bytes += _srcobj->rx.mec_drop.bytes; \ 1980 _tgtobj->rx.ppeds_drop.num += _srcobj->rx.ppeds_drop.num; \ 1981 _tgtobj->rx.ppeds_drop.bytes += _srcobj->rx.ppeds_drop.bytes; \ 1982 _tgtobj->rx.intra_bss.pkts.num += \ 1983 _srcobj->rx.intra_bss.pkts.num; \ 1984 _tgtobj->rx.intra_bss.pkts.bytes += \ 1985 _srcobj->rx.intra_bss.pkts.bytes; \ 1986 _tgtobj->rx.intra_bss.fail.num += \ 1987 _srcobj->rx.intra_bss.fail.num; \ 1988 _tgtobj->rx.intra_bss.fail.bytes += \ 1989 _srcobj->rx.intra_bss.fail.bytes; \ 1990 _tgtobj->rx.intra_bss.mdns_no_fwd += \ 1991 _srcobj->rx.intra_bss.mdns_no_fwd; \ 1992 _tgtobj->rx.err.mic_err += _srcobj->rx.err.mic_err; \ 1993 _tgtobj->rx.err.decrypt_err += _srcobj->rx.err.decrypt_err; \ 1994 _tgtobj->rx.err.fcserr += _srcobj->rx.err.fcserr; \ 1995 _tgtobj->rx.err.pn_err += _srcobj->rx.err.pn_err; \ 1996 _tgtobj->rx.err.oor_err += _srcobj->rx.err.oor_err; \ 1997 _tgtobj->rx.err.jump_2k_err += _srcobj->rx.err.jump_2k_err; \ 1998 _tgtobj->rx.err.rxdma_wifi_parse_err += \ 1999 _srcobj->rx.err.rxdma_wifi_parse_err; \ 2000 _tgtobj->rx.non_amsdu_cnt += _srcobj->rx.non_amsdu_cnt; \ 2001 _tgtobj->rx.amsdu_cnt += _srcobj->rx.amsdu_cnt; \ 2002 _tgtobj->rx.rx_retries += _srcobj->rx.rx_retries; \ 2003 _tgtobj->rx.multipass_rx_pkt_drop += \ 2004 _srcobj->rx.multipass_rx_pkt_drop; \ 2005 _tgtobj->rx.peer_unauth_rx_pkt_drop += \ 2006 _srcobj->rx.peer_unauth_rx_pkt_drop; \ 2007 _tgtobj->rx.policy_check_drop += \ 2008 _srcobj->rx.policy_check_drop; \ 2009 _tgtobj->rx.to_stack_twt.num += _srcobj->rx.to_stack_twt.num; \ 2010 _tgtobj->rx.to_stack_twt.bytes += \ 2011 _srcobj->rx.to_stack_twt.bytes; \ 2012 _tgtobj->rx.last_rx_ts = _srcobj->rx.last_rx_ts; \ 2013 for (i = 0; i < CDP_MAX_RX_RINGS; i++) { \ 2014 _tgtobj->rx.rcvd_reo[i].num += \ 2015 _srcobj->rx.rcvd_reo[i].num; \ 2016 _tgtobj->rx.rcvd_reo[i].bytes += \ 2017 _srcobj->rx.rcvd_reo[i].bytes; \ 2018 } \ 2019 for (i = 0; i < CDP_MAX_LMACS; i++) { \ 2020 _tgtobj->rx.rx_lmac[i].num += \ 2021 _srcobj->rx.rx_lmac[i].num; \ 2022 _tgtobj->rx.rx_lmac[i].bytes += \ 2023 _srcobj->rx.rx_lmac[i].bytes; \ 2024 } \ 2025 DP_IPA_UPDATE_PER_PKT_RX_STATS(_tgtobj, _srcobj); \ 2026 DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj); \ 2027 } while (0) 2028 2029 #define DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj) \ 2030 do { \ 2031 uint8_t i, pream_type, mu_type; \ 2032 _tgtobj->tx.stbc += _srcobj->tx.stbc; \ 2033 _tgtobj->tx.ldpc += _srcobj->tx.ldpc; 
\ 2034 _tgtobj->tx.retries += _srcobj->tx.retries; \ 2035 _tgtobj->tx.ampdu_cnt += _srcobj->tx.ampdu_cnt; \ 2036 _tgtobj->tx.non_ampdu_cnt += _srcobj->tx.non_ampdu_cnt; \ 2037 _tgtobj->tx.num_ppdu_cookie_valid += \ 2038 _srcobj->tx.num_ppdu_cookie_valid; \ 2039 _tgtobj->tx.tx_ppdus += _srcobj->tx.tx_ppdus; \ 2040 _tgtobj->tx.tx_mpdus_success += _srcobj->tx.tx_mpdus_success; \ 2041 _tgtobj->tx.tx_mpdus_tried += _srcobj->tx.tx_mpdus_tried; \ 2042 _tgtobj->tx.tx_rate = _srcobj->tx.tx_rate; \ 2043 _tgtobj->tx.last_tx_rate = _srcobj->tx.last_tx_rate; \ 2044 _tgtobj->tx.last_tx_rate_mcs = _srcobj->tx.last_tx_rate_mcs; \ 2045 _tgtobj->tx.mcast_last_tx_rate = \ 2046 _srcobj->tx.mcast_last_tx_rate; \ 2047 _tgtobj->tx.mcast_last_tx_rate_mcs = \ 2048 _srcobj->tx.mcast_last_tx_rate_mcs; \ 2049 _tgtobj->tx.rnd_avg_tx_rate = _srcobj->tx.rnd_avg_tx_rate; \ 2050 _tgtobj->tx.avg_tx_rate = _srcobj->tx.avg_tx_rate; \ 2051 _tgtobj->tx.tx_ratecode = _srcobj->tx.tx_ratecode; \ 2052 _tgtobj->tx.pream_punct_cnt += _srcobj->tx.pream_punct_cnt; \ 2053 _tgtobj->tx.ru_start = _srcobj->tx.ru_start; \ 2054 _tgtobj->tx.ru_tones = _srcobj->tx.ru_tones; \ 2055 _tgtobj->tx.last_ack_rssi = _srcobj->tx.last_ack_rssi; \ 2056 _tgtobj->tx.nss_info = _srcobj->tx.nss_info; \ 2057 _tgtobj->tx.mcs_info = _srcobj->tx.mcs_info; \ 2058 _tgtobj->tx.bw_info = _srcobj->tx.bw_info; \ 2059 _tgtobj->tx.gi_info = _srcobj->tx.gi_info; \ 2060 _tgtobj->tx.preamble_info = _srcobj->tx.preamble_info; \ 2061 _tgtobj->tx.retries_mpdu += _srcobj->tx.retries_mpdu; \ 2062 _tgtobj->tx.mpdu_success_with_retries += \ 2063 _srcobj->tx.mpdu_success_with_retries; \ 2064 _tgtobj->tx.rts_success = _srcobj->tx.rts_success; \ 2065 _tgtobj->tx.rts_failure = _srcobj->tx.rts_failure; \ 2066 _tgtobj->tx.bar_cnt = _srcobj->tx.bar_cnt; \ 2067 _tgtobj->tx.ndpa_cnt = _srcobj->tx.ndpa_cnt; \ 2068 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 2069 for (i = 0; i < MAX_MCS; i++) \ 2070 _tgtobj->tx.pkt_type[pream_type].mcs_count[i] += \ 2071 _srcobj->tx.pkt_type[pream_type].mcs_count[i]; \ 2072 } \ 2073 for (i = 0; i < WME_AC_MAX; i++) { \ 2074 _tgtobj->tx.wme_ac_type[i] += _srcobj->tx.wme_ac_type[i]; \ 2075 _tgtobj->tx.wme_ac_type_bytes[i] += \ 2076 _srcobj->tx.wme_ac_type_bytes[i]; \ 2077 _tgtobj->tx.excess_retries_per_ac[i] += \ 2078 _srcobj->tx.excess_retries_per_ac[i]; \ 2079 } \ 2080 for (i = 0; i < MAX_GI; i++) { \ 2081 _tgtobj->tx.sgi_count[i] += _srcobj->tx.sgi_count[i]; \ 2082 } \ 2083 for (i = 0; i < SS_COUNT; i++) { \ 2084 _tgtobj->tx.nss[i] += _srcobj->tx.nss[i]; \ 2085 } \ 2086 for (i = 0; i < MAX_BW; i++) { \ 2087 _tgtobj->tx.bw[i] += _srcobj->tx.bw[i]; \ 2088 } \ 2089 for (i = 0; i < MAX_RU_LOCATIONS; i++) { \ 2090 _tgtobj->tx.ru_loc[i].num_msdu += \ 2091 _srcobj->tx.ru_loc[i].num_msdu; \ 2092 _tgtobj->tx.ru_loc[i].num_mpdu += \ 2093 _srcobj->tx.ru_loc[i].num_mpdu; \ 2094 _tgtobj->tx.ru_loc[i].mpdu_tried += \ 2095 _srcobj->tx.ru_loc[i].mpdu_tried; \ 2096 } \ 2097 for (i = 0; i < MAX_TRANSMIT_TYPES; i++) { \ 2098 _tgtobj->tx.transmit_type[i].num_msdu += \ 2099 _srcobj->tx.transmit_type[i].num_msdu; \ 2100 _tgtobj->tx.transmit_type[i].num_mpdu += \ 2101 _srcobj->tx.transmit_type[i].num_mpdu; \ 2102 _tgtobj->tx.transmit_type[i].mpdu_tried += \ 2103 _srcobj->tx.transmit_type[i].mpdu_tried; \ 2104 } \ 2105 for (i = 0; i < MAX_MU_GROUP_ID; i++) { \ 2106 _tgtobj->tx.mu_group_id[i] = _srcobj->tx.mu_group_id[i]; \ 2107 } \ 2108 _tgtobj->tx.tx_ucast_total.num += \ 2109 _srcobj->tx.tx_ucast_total.num;\ 2110 _tgtobj->tx.tx_ucast_total.bytes += \ 2111 
_srcobj->tx.tx_ucast_total.bytes;\ 2112 _tgtobj->tx.tx_ucast_success.num += \ 2113 _srcobj->tx.tx_ucast_success.num; \ 2114 _tgtobj->tx.tx_ucast_success.bytes += \ 2115 _srcobj->tx.tx_ucast_success.bytes; \ 2116 \ 2117 for (i = 0; i < CDP_RSSI_CHAIN_LEN; i++) \ 2118 _tgtobj->tx.rssi_chain[i] = _srcobj->tx.rssi_chain[i]; \ 2119 _tgtobj->rx.mpdu_cnt_fcs_ok += _srcobj->rx.mpdu_cnt_fcs_ok; \ 2120 _tgtobj->rx.mpdu_cnt_fcs_err += _srcobj->rx.mpdu_cnt_fcs_err; \ 2121 _tgtobj->rx.non_ampdu_cnt += _srcobj->rx.non_ampdu_cnt; \ 2122 _tgtobj->rx.ampdu_cnt += _srcobj->rx.ampdu_cnt; \ 2123 _tgtobj->rx.rx_mpdus += _srcobj->rx.rx_mpdus; \ 2124 _tgtobj->rx.rx_ppdus += _srcobj->rx.rx_ppdus; \ 2125 _tgtobj->rx.rx_rate = _srcobj->rx.rx_rate; \ 2126 _tgtobj->rx.last_rx_rate = _srcobj->rx.last_rx_rate; \ 2127 _tgtobj->rx.rnd_avg_rx_rate = _srcobj->rx.rnd_avg_rx_rate; \ 2128 _tgtobj->rx.avg_rx_rate = _srcobj->rx.avg_rx_rate; \ 2129 _tgtobj->rx.rx_ratecode = _srcobj->rx.rx_ratecode; \ 2130 _tgtobj->rx.avg_snr = _srcobj->rx.avg_snr; \ 2131 _tgtobj->rx.rx_snr_measured_time = \ 2132 _srcobj->rx.rx_snr_measured_time; \ 2133 _tgtobj->rx.snr = _srcobj->rx.snr; \ 2134 _tgtobj->rx.last_snr = _srcobj->rx.last_snr; \ 2135 _tgtobj->rx.nss_info = _srcobj->rx.nss_info; \ 2136 _tgtobj->rx.mcs_info = _srcobj->rx.mcs_info; \ 2137 _tgtobj->rx.bw_info = _srcobj->rx.bw_info; \ 2138 _tgtobj->rx.gi_info = _srcobj->rx.gi_info; \ 2139 _tgtobj->rx.preamble_info = _srcobj->rx.preamble_info; \ 2140 _tgtobj->rx.mpdu_retry_cnt += _srcobj->rx.mpdu_retry_cnt; \ 2141 _tgtobj->rx.bar_cnt = _srcobj->rx.bar_cnt; \ 2142 _tgtobj->rx.ndpa_cnt = _srcobj->rx.ndpa_cnt; \ 2143 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 2144 for (i = 0; i < MAX_MCS; i++) { \ 2145 _tgtobj->rx.pkt_type[pream_type].mcs_count[i] += \ 2146 _srcobj->rx.pkt_type[pream_type].mcs_count[i]; \ 2147 } \ 2148 } \ 2149 for (i = 0; i < WME_AC_MAX; i++) { \ 2150 _tgtobj->rx.wme_ac_type[i] += _srcobj->rx.wme_ac_type[i]; \ 2151 _tgtobj->rx.wme_ac_type_bytes[i] += \ 2152 _srcobj->rx.wme_ac_type_bytes[i]; \ 2153 } \ 2154 for (i = 0; i < MAX_MCS; i++) { \ 2155 _tgtobj->rx.su_ax_ppdu_cnt.mcs_count[i] += \ 2156 _srcobj->rx.su_ax_ppdu_cnt.mcs_count[i]; \ 2157 _tgtobj->rx.rx_mpdu_cnt[i] += _srcobj->rx.rx_mpdu_cnt[i]; \ 2158 } \ 2159 for (mu_type = 0 ; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 2160 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok += \ 2161 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok; \ 2162 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err += \ 2163 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err; \ 2164 for (i = 0; i < SS_COUNT; i++) \ 2165 _tgtobj->rx.rx_mu[mu_type].ppdu_nss[i] += \ 2166 _srcobj->rx.rx_mu[mu_type].ppdu_nss[i]; \ 2167 for (i = 0; i < MAX_MCS; i++) \ 2168 _tgtobj->rx.rx_mu[mu_type].ppdu.mcs_count[i] += \ 2169 _srcobj->rx.rx_mu[mu_type].ppdu.mcs_count[i]; \ 2170 } \ 2171 for (i = 0; i < MAX_RECEPTION_TYPES; i++) { \ 2172 _tgtobj->rx.reception_type[i] += \ 2173 _srcobj->rx.reception_type[i]; \ 2174 _tgtobj->rx.ppdu_cnt[i] += _srcobj->rx.ppdu_cnt[i]; \ 2175 } \ 2176 for (i = 0; i < MAX_GI; i++) { \ 2177 _tgtobj->rx.sgi_count[i] += _srcobj->rx.sgi_count[i]; \ 2178 } \ 2179 for (i = 0; i < SS_COUNT; i++) { \ 2180 _tgtobj->rx.nss[i] += _srcobj->rx.nss[i]; \ 2181 _tgtobj->rx.ppdu_nss[i] += _srcobj->rx.ppdu_nss[i]; \ 2182 } \ 2183 for (i = 0; i < MAX_BW; i++) { \ 2184 _tgtobj->rx.bw[i] += _srcobj->rx.bw[i]; \ 2185 } \ 2186 DP_UPDATE_11BE_STATS(_tgtobj, _srcobj); \ 2187 } while (0) 2188 2189 #define DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj) \ 2190 do { 
\ 2191 DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj); \ 2192 DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj); \ 2193 DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj); \ 2194 } while (0) 2195 2196 #define DP_UPDATE_INGRESS_STATS(_tgtobj, _srcobj) \ 2197 do { \ 2198 uint8_t i = 0; \ 2199 _tgtobj->tx_i.rcvd.num += _srcobj->tx_i.rcvd.num; \ 2200 _tgtobj->tx_i.rcvd.bytes += _srcobj->tx_i.rcvd.bytes; \ 2201 _tgtobj->tx_i.rcvd_in_fast_xmit_flow += \ 2202 _srcobj->tx_i.rcvd_in_fast_xmit_flow; \ 2203 for (i = 0; i < CDP_MAX_TX_DATA_RINGS; i++) { \ 2204 _tgtobj->tx_i.rcvd_per_core[i] += \ 2205 _srcobj->tx_i.rcvd_per_core[i]; \ 2206 } \ 2207 _tgtobj->tx_i.processed.num += _srcobj->tx_i.processed.num; \ 2208 _tgtobj->tx_i.processed.bytes += \ 2209 _srcobj->tx_i.processed.bytes; \ 2210 _tgtobj->tx_i.reinject_pkts.num += \ 2211 _srcobj->tx_i.reinject_pkts.num; \ 2212 _tgtobj->tx_i.reinject_pkts.bytes += \ 2213 _srcobj->tx_i.reinject_pkts.bytes; \ 2214 _tgtobj->tx_i.inspect_pkts.num += \ 2215 _srcobj->tx_i.inspect_pkts.num; \ 2216 _tgtobj->tx_i.inspect_pkts.bytes += \ 2217 _srcobj->tx_i.inspect_pkts.bytes; \ 2218 _tgtobj->tx_i.nawds_mcast.num += \ 2219 _srcobj->tx_i.nawds_mcast.num; \ 2220 _tgtobj->tx_i.nawds_mcast.bytes += \ 2221 _srcobj->tx_i.nawds_mcast.bytes; \ 2222 _tgtobj->tx_i.bcast.num += _srcobj->tx_i.bcast.num; \ 2223 _tgtobj->tx_i.bcast.bytes += _srcobj->tx_i.bcast.bytes; \ 2224 _tgtobj->tx_i.raw.raw_pkt.num += \ 2225 _srcobj->tx_i.raw.raw_pkt.num; \ 2226 _tgtobj->tx_i.raw.raw_pkt.bytes += \ 2227 _srcobj->tx_i.raw.raw_pkt.bytes; \ 2228 _tgtobj->tx_i.raw.dma_map_error += \ 2229 _srcobj->tx_i.raw.dma_map_error; \ 2230 _tgtobj->tx_i.raw.invalid_raw_pkt_datatype += \ 2231 _srcobj->tx_i.raw.invalid_raw_pkt_datatype; \ 2232 _tgtobj->tx_i.raw.num_frags_overflow_err += \ 2233 _srcobj->tx_i.raw.num_frags_overflow_err; \ 2234 _tgtobj->tx_i.sg.sg_pkt.num += _srcobj->tx_i.sg.sg_pkt.num; \ 2235 _tgtobj->tx_i.sg.sg_pkt.bytes += \ 2236 _srcobj->tx_i.sg.sg_pkt.bytes; \ 2237 _tgtobj->tx_i.sg.non_sg_pkts.num += \ 2238 _srcobj->tx_i.sg.non_sg_pkts.num; \ 2239 _tgtobj->tx_i.sg.non_sg_pkts.bytes += \ 2240 _srcobj->tx_i.sg.non_sg_pkts.bytes; \ 2241 _tgtobj->tx_i.sg.dropped_host.num += \ 2242 _srcobj->tx_i.sg.dropped_host.num; \ 2243 _tgtobj->tx_i.sg.dropped_host.bytes += \ 2244 _srcobj->tx_i.sg.dropped_host.bytes; \ 2245 _tgtobj->tx_i.sg.dropped_target += \ 2246 _srcobj->tx_i.sg.dropped_target; \ 2247 _tgtobj->tx_i.sg.dma_map_error += \ 2248 _srcobj->tx_i.sg.dma_map_error; \ 2249 _tgtobj->tx_i.mcast_en.mcast_pkt.num += \ 2250 _srcobj->tx_i.mcast_en.mcast_pkt.num; \ 2251 _tgtobj->tx_i.mcast_en.mcast_pkt.bytes += \ 2252 _srcobj->tx_i.mcast_en.mcast_pkt.bytes; \ 2253 _tgtobj->tx_i.mcast_en.dropped_map_error += \ 2254 _srcobj->tx_i.mcast_en.dropped_map_error; \ 2255 _tgtobj->tx_i.mcast_en.dropped_self_mac += \ 2256 _srcobj->tx_i.mcast_en.dropped_self_mac; \ 2257 _tgtobj->tx_i.mcast_en.dropped_send_fail += \ 2258 _srcobj->tx_i.mcast_en.dropped_send_fail; \ 2259 _tgtobj->tx_i.mcast_en.ucast += _srcobj->tx_i.mcast_en.ucast; \ 2260 _tgtobj->tx_i.mcast_en.fail_seg_alloc += \ 2261 _srcobj->tx_i.mcast_en.fail_seg_alloc; \ 2262 _tgtobj->tx_i.mcast_en.clone_fail += \ 2263 _srcobj->tx_i.mcast_en.clone_fail; \ 2264 _tgtobj->tx_i.igmp_mcast_en.igmp_rcvd += \ 2265 _srcobj->tx_i.igmp_mcast_en.igmp_rcvd; \ 2266 _tgtobj->tx_i.igmp_mcast_en.igmp_ucast_converted += \ 2267 _srcobj->tx_i.igmp_mcast_en.igmp_ucast_converted; \ 2268 _tgtobj->tx_i.dropped.desc_na.num += \ 2269 _srcobj->tx_i.dropped.desc_na.num; \ 2270 _tgtobj->tx_i.dropped.desc_na.bytes += \ 2271 
_srcobj->tx_i.dropped.desc_na.bytes; \ 2272 _tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.num += \ 2273 _srcobj->tx_i.dropped.desc_na_exc_alloc_fail.num; \ 2274 _tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes += \ 2275 _srcobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes; \ 2276 _tgtobj->tx_i.dropped.desc_na_exc_outstand.num += \ 2277 _srcobj->tx_i.dropped.desc_na_exc_outstand.num; \ 2278 _tgtobj->tx_i.dropped.desc_na_exc_outstand.bytes += \ 2279 _srcobj->tx_i.dropped.desc_na_exc_outstand.bytes; \ 2280 _tgtobj->tx_i.dropped.exc_desc_na.num += \ 2281 _srcobj->tx_i.dropped.exc_desc_na.num; \ 2282 _tgtobj->tx_i.dropped.exc_desc_na.bytes += \ 2283 _srcobj->tx_i.dropped.exc_desc_na.bytes; \ 2284 _tgtobj->tx_i.dropped.ring_full += \ 2285 _srcobj->tx_i.dropped.ring_full; \ 2286 _tgtobj->tx_i.dropped.enqueue_fail += \ 2287 _srcobj->tx_i.dropped.enqueue_fail; \ 2288 _tgtobj->tx_i.dropped.dma_error += \ 2289 _srcobj->tx_i.dropped.dma_error; \ 2290 _tgtobj->tx_i.dropped.res_full += \ 2291 _srcobj->tx_i.dropped.res_full; \ 2292 _tgtobj->tx_i.dropped.headroom_insufficient += \ 2293 _srcobj->tx_i.dropped.headroom_insufficient; \ 2294 _tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check += \ 2295 _srcobj->tx_i.dropped.fail_per_pkt_vdev_id_check; \ 2296 _tgtobj->tx_i.dropped.drop_ingress += \ 2297 _srcobj->tx_i.dropped.drop_ingress; \ 2298 _tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path += \ 2299 _srcobj->tx_i.dropped.invalid_peer_id_in_exc_path; \ 2300 _tgtobj->tx_i.dropped.tx_mcast_drop += \ 2301 _srcobj->tx_i.dropped.tx_mcast_drop; \ 2302 _tgtobj->tx_i.dropped.fw2wbm_tx_drop += \ 2303 _srcobj->tx_i.dropped.fw2wbm_tx_drop; \ 2304 _tgtobj->tx_i.dropped.dropped_pkt.num = \ 2305 _tgtobj->tx_i.dropped.dma_error + \ 2306 _tgtobj->tx_i.dropped.ring_full + \ 2307 _tgtobj->tx_i.dropped.enqueue_fail + \ 2308 _tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check + \ 2309 _tgtobj->tx_i.dropped.desc_na.num + \ 2310 _tgtobj->tx_i.dropped.res_full + \ 2311 _tgtobj->tx_i.dropped.drop_ingress + \ 2312 _tgtobj->tx_i.dropped.headroom_insufficient + \ 2313 _tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path + \ 2314 _tgtobj->tx_i.dropped.tx_mcast_drop + \ 2315 _tgtobj->tx_i.dropped.fw2wbm_tx_drop; \ 2316 _tgtobj->tx_i.dropped.dropped_pkt.bytes += \ 2317 _srcobj->tx_i.dropped.dropped_pkt.bytes; \ 2318 _tgtobj->tx_i.mesh.exception_fw += \ 2319 _srcobj->tx_i.mesh.exception_fw; \ 2320 _tgtobj->tx_i.mesh.completion_fw += \ 2321 _srcobj->tx_i.mesh.completion_fw; \ 2322 _tgtobj->tx_i.cce_classified += \ 2323 _srcobj->tx_i.cce_classified; \ 2324 _tgtobj->tx_i.cce_classified_raw += \ 2325 _srcobj->tx_i.cce_classified_raw; \ 2326 _tgtobj->tx_i.sniffer_rcvd.num += \ 2327 _srcobj->tx_i.sniffer_rcvd.num; \ 2328 _tgtobj->tx_i.sniffer_rcvd.bytes += \ 2329 _srcobj->tx_i.sniffer_rcvd.bytes; \ 2330 _tgtobj->rx_i.reo_rcvd_pkt.num += \ 2331 _srcobj->rx_i.reo_rcvd_pkt.num; \ 2332 _tgtobj->rx_i.reo_rcvd_pkt.bytes += \ 2333 _srcobj->rx_i.reo_rcvd_pkt.bytes; \ 2334 _tgtobj->rx_i.null_q_desc_pkt.num += \ 2335 _srcobj->rx_i.null_q_desc_pkt.num; \ 2336 _tgtobj->rx_i.null_q_desc_pkt.bytes += \ 2337 _srcobj->rx_i.null_q_desc_pkt.bytes; \ 2338 _tgtobj->rx_i.routed_eapol_pkt.num += \ 2339 _srcobj->rx_i.routed_eapol_pkt.num; \ 2340 _tgtobj->rx_i.routed_eapol_pkt.bytes += \ 2341 _srcobj->rx_i.routed_eapol_pkt.bytes; \ 2342 } while (0) 2343 /** 2344 * dp_peer_find_attach() - Allocates memory for peer objects 2345 * @soc: SoC handle 2346 * 2347 * Return: QDF_STATUS 2348 */ 2349 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc); 2350 2351 /** 2352 * 
dp_peer_find_detach() - Frees memory for peer objects 2353 * @soc: SoC handle 2354 * 2355 * Return: none 2356 */ 2357 void dp_peer_find_detach(struct dp_soc *soc); 2358 2359 /** 2360 * dp_peer_find_hash_add() - add peer to peer_hash_table 2361 * @soc: soc handle 2362 * @peer: peer handle 2363 * 2364 * Return: none 2365 */ 2366 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer); 2367 2368 /** 2369 * dp_peer_find_hash_remove() - remove peer from peer_hash_table 2370 * @soc: soc handle 2371 * @peer: peer handle 2372 * 2373 * Return: none 2374 */ 2375 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer); 2376 2377 /* unused?? */ 2378 void dp_peer_find_hash_erase(struct dp_soc *soc); 2379 2380 /** 2381 * dp_peer_vdev_list_add() - add peer into vdev's peer list 2382 * @soc: soc handle 2383 * @vdev: vdev handle 2384 * @peer: peer handle 2385 * 2386 * Return: none 2387 */ 2388 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev, 2389 struct dp_peer *peer); 2390 2391 /** 2392 * dp_peer_vdev_list_remove() - remove peer from vdev's peer list 2393 * @soc: SoC handle 2394 * @vdev: VDEV handle 2395 * @peer: peer handle 2396 * 2397 * Return: none 2398 */ 2399 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev, 2400 struct dp_peer *peer); 2401 2402 /** 2403 * dp_peer_find_id_to_obj_add() - Add peer into peer_id table 2404 * @soc: SoC handle 2405 * @peer: peer handle 2406 * @peer_id: peer_id 2407 * 2408 * Return: None 2409 */ 2410 void dp_peer_find_id_to_obj_add(struct dp_soc *soc, 2411 struct dp_peer *peer, 2412 uint16_t peer_id); 2413 2414 /** 2415 * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table 2416 * @soc: SoC handle 2417 * @peer: peer handle 2418 * @txrx_peer: txrx peer handle 2419 * 2420 * Return: None 2421 */ 2422 void dp_txrx_peer_attach_add(struct dp_soc *soc, 2423 struct dp_peer *peer, 2424 struct dp_txrx_peer *txrx_peer); 2425 2426 /** 2427 * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table 2428 * @soc: SoC handle 2429 * @peer_id: peer_id 2430 * 2431 * Return: None 2432 */ 2433 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc, 2434 uint16_t peer_id); 2435 2436 /** 2437 * dp_vdev_unref_delete() - check and process vdev delete 2438 * @soc: DP specific soc pointer 2439 * @vdev: DP specific vdev pointer 2440 * @mod_id: module id 2441 * 2442 */ 2443 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev, 2444 enum dp_mod_id mod_id); 2445 2446 /** 2447 * dp_peer_ppdu_delayed_ba_cleanup() - free ppdu allocated in peer 2448 * @peer: Datapath peer 2449 * 2450 * Return: void 2451 */ 2452 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer); 2453 2454 /** 2455 * dp_peer_rx_init() - Initialize receive TID state 2456 * @pdev: Datapath pdev 2457 * @peer: Datapath peer 2458 * 2459 */ 2460 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer); 2461 2462 /** 2463 * dp_peer_cleanup() - Cleanup peer information 2464 * @vdev: Datapath vdev 2465 * @peer: Datapath peer 2466 * 2467 */ 2468 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); 2469 2470 #ifdef DP_PEER_EXTENDED_API 2471 /** 2472 * dp_register_peer() - Register peer into physical device 2473 * @soc_hdl: data path soc handle 2474 * @pdev_id: device instance id 2475 * @sta_desc: peer description 2476 * 2477 * Register peer into physical device 2478 * 2479 * Return: QDF_STATUS_SUCCESS registration success 2480 * QDF_STATUS_E_FAULT peer not found 2481 */ 2482 QDF_STATUS dp_register_peer(struct 
			    cdp_soc_t *soc_hdl, uint8_t pdev_id,
			    struct ol_txrx_desc_type *sta_desc);

/**
 * dp_clear_peer() - remove peer from physical device
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @peer_addr: peer mac address
 *
 * Remove the peer from the physical device.
 *
 * Return: QDF_STATUS_SUCCESS on success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			 struct qdf_mac_addr peer_addr);

/**
 * dp_find_peer_exist_on_vdev() - find if peer exists on the given vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev instance id
 * @peer_addr: peer mac address
 *
 * Return: true or false
 */
bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				uint8_t *peer_addr);

/**
 * dp_find_peer_exist_on_other_vdev() - find if peer exists
 *					on other than the given vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev instance id
 * @peer_addr: peer mac address
 * @max_bssid: max number of bssids
 *
 * Return: true or false
 */
bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, uint8_t *peer_addr,
				      uint16_t max_bssid);

/**
 * dp_peer_state_update() - update peer local state
 * @soc: datapath soc handle
 * @peer_mac: peer mac address
 * @state: new peer local state
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac,
				enum ol_txrx_peer_state state);

/**
 * dp_get_vdevid() - Get id of the virtual interface to which the peer is
 *		     registered
 * @soc_hdl: datapath soc handle
 * @peer_mac: peer mac address
 * @vdev_id: virtual interface id to which the peer is registered
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
			 uint8_t *vdev_id);

struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
					  struct qdf_mac_addr peer_addr);

/**
 * dp_get_vdev_for_peer() - Get virtual interface instance to which the peer
 *			    belongs
 * @peer: peer instance
 *
 * Return: virtual interface instance pointer
 *         NULL if it cannot be found
 */
struct cdp_vdev *dp_get_vdev_for_peer(void *peer);

/**
 * dp_peer_get_peer_mac_addr() - Get peer mac address
 * @peer: peer instance
 *
 * Return: peer mac address pointer
 *         NULL if it cannot be found
 */
uint8_t *dp_peer_get_peer_mac_addr(void *peer);

/**
 * dp_get_peer_state() - Get local peer state
 * @soc: datapath soc handle
 * @vdev_id: vdev id
 * @peer_mac: peer mac addr
 *
 * Return: peer status
 */
int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id,
		      uint8_t *peer_mac);

/**
 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
 * @pdev: data path device instance
 *
 * Return: none
 */
void dp_local_peer_id_pool_init(struct dp_pdev *pdev);

/**
 *
dp_local_peer_id_alloc() - allocate local peer id 2600 * @pdev: data path device instance 2601 * @peer: new peer instance 2602 * 2603 * allocate local peer id 2604 * 2605 * Return: none 2606 */ 2607 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer); 2608 2609 /** 2610 * dp_local_peer_id_free() - remove local peer id 2611 * @pdev: data path device instance 2612 * @peer: peer instance should be removed 2613 * 2614 * remove local peer id 2615 * 2616 * Return: none 2617 */ 2618 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer); 2619 2620 /** 2621 * dp_set_peer_as_tdls_peer() - set tdls peer flag to peer 2622 * @soc_hdl: datapath soc handle 2623 * @vdev_id: vdev_id 2624 * @peer_mac: peer mac addr 2625 * @val: tdls peer flag 2626 * 2627 * Return: none 2628 */ 2629 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2630 uint8_t *peer_mac, bool val); 2631 #else 2632 static inline 2633 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2634 uint8_t *vdev_id) 2635 { 2636 return QDF_STATUS_E_NOSUPPORT; 2637 } 2638 2639 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev) 2640 { 2641 } 2642 2643 static inline 2644 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) 2645 { 2646 } 2647 2648 static inline 2649 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) 2650 { 2651 } 2652 2653 static inline 2654 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2655 uint8_t *peer_mac, bool val) 2656 { 2657 } 2658 #endif 2659 2660 /** 2661 * dp_find_peer_exist - find peer if already exists 2662 * @soc_hdl: datapath soc handle 2663 * @pdev_id: physical device instance id 2664 * @peer_addr: peer mac address 2665 * 2666 * Return: true or false 2667 */ 2668 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2669 uint8_t *peer_addr); 2670 2671 #ifdef DP_UMAC_HW_RESET_SUPPORT 2672 /** 2673 * dp_pause_reo_send_cmd() - Pause Reo send commands. 2674 * @soc: dp soc 2675 * 2676 * Return: none 2677 */ 2678 void dp_pause_reo_send_cmd(struct dp_soc *soc); 2679 2680 /** 2681 * dp_resume_reo_send_cmd() - Resume Reo send commands. 
2682 * @soc: dp soc 2683 * 2684 * Return: none 2685 */ 2686 void dp_resume_reo_send_cmd(struct dp_soc *soc); 2687 2688 /** 2689 * dp_cleanup_reo_cmd_module - Clean up the reo cmd module 2690 * @soc: DP SoC handle 2691 * 2692 * Return: none 2693 */ 2694 void dp_cleanup_reo_cmd_module(struct dp_soc *soc); 2695 2696 /** 2697 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist 2698 * @soc: DP SOC handle 2699 * 2700 * Return: none 2701 */ 2702 void dp_reo_desc_freelist_destroy(struct dp_soc *soc); 2703 2704 /** 2705 * dp_reset_rx_reo_tid_queue() - Reset the reo tid queues 2706 * @soc: dp soc 2707 * @hw_qdesc_vaddr: starting address of the tid queues 2708 * @size: size of the memory pointed to by hw_qdesc_vaddr 2709 * 2710 * Return: none 2711 */ 2712 void dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr, 2713 uint32_t size); 2714 2715 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 2716 /** 2717 * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session 2718 * @soc: dp soc handle 2719 * 2720 * Return: void 2721 */ 2722 void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc); 2723 2724 /** 2725 * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session 2726 * @soc: dp soc handle 2727 * @umac_reset_ctx: Umac reset context 2728 * @rx_event: Rx event received 2729 * @is_target_recovery: Flag to indicate if it is triggered for target recovery 2730 * 2731 * Return: status 2732 */ 2733 QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc, 2734 struct dp_soc_umac_reset_ctx *umac_reset_ctx, 2735 enum umac_reset_rx_event rx_event, 2736 bool is_target_recovery); 2737 2738 /** 2739 * dp_umac_reset_handle_action_cb() - Function to call action callback 2740 * @soc: dp soc handle 2741 * @umac_reset_ctx: Umac reset context 2742 * @action: Action to call the callback for 2743 * 2744 * Return: QDF_STATUS status 2745 */ 2746 QDF_STATUS dp_umac_reset_handle_action_cb(struct dp_soc *soc, 2747 struct dp_soc_umac_reset_ctx *umac_reset_ctx, 2748 enum umac_reset_action action); 2749 2750 /** 2751 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command 2752 * @umac_reset_ctx: UMAC reset context 2753 * @tx_cmd: Tx command to be posted 2754 * 2755 * Return: QDF status of operation 2756 */ 2757 QDF_STATUS 2758 dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx, 2759 enum umac_reset_tx_cmd tx_cmd); 2760 2761 /** 2762 * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator 2763 * @soc: dp soc handle 2764 * 2765 * Return: true if the soc is initiator or false otherwise 2766 */ 2767 bool dp_umac_reset_initiator_check(struct dp_soc *soc); 2768 2769 /** 2770 * dp_umac_reset_target_recovery_check() - Check if this is for target recovery 2771 * @soc: dp soc handle 2772 * 2773 * Return: true if the session is for target recovery or false otherwise 2774 */ 2775 bool dp_umac_reset_target_recovery_check(struct dp_soc *soc); 2776 2777 /** 2778 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored 2779 * @soc: dp soc handle 2780 * 2781 * Return: true if the soc is ignored or false otherwise 2782 */ 2783 bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc); 2784 2785 /** 2786 * dp_mlo_umac_reset_stats_print() - API to print MLO umac reset stats 2787 * @soc: dp soc handle 2788 * 2789 * Return: QDF_STATUS 2790 */ 2791 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc); 2792 #else 2793 static inline 2794 QDF_STATUS dp_mlo_umac_reset_stats_print(struct 
dp_soc *soc) 2795 { 2796 return QDF_STATUS_SUCCESS; 2797 } 2798 #endif 2799 2800 #endif 2801 2802 #if defined(DP_UMAC_HW_RESET_SUPPORT) && defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 2803 /** 2804 * dp_umac_reset_notify_asserted_soc() - API to notify the asserted SOC 2805 * @soc: dp soc 2806 * 2807 * Return: QDF_STATUS 2808 */ 2809 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc); 2810 2811 /** 2812 * dp_umac_reset_is_inprogress() - Check if umac reset is in progress 2813 * @psoc: dp soc handle 2814 * 2815 * Return: true if umac reset is in progress, else false. 2816 */ 2817 bool dp_umac_reset_is_inprogress(struct cdp_soc_t *psoc); 2818 #else 2819 static inline 2820 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc) 2821 { 2822 return QDF_STATUS_SUCCESS; 2823 } 2824 2825 static inline 2826 bool dp_umac_reset_is_inprogress(struct cdp_soc_t *psoc) 2827 { 2828 return false; 2829 } 2830 #endif 2831 2832 #ifndef WLAN_SOFTUMAC_SUPPORT 2833 QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type, 2834 struct hal_reo_cmd_params *params, 2835 void (*callback_fn), void *data); 2836 2837 /** 2838 * dp_reo_cmdlist_destroy() - Free REO commands in the queue 2839 * @soc: DP SoC handle 2840 * 2841 * Return: none 2842 */ 2843 void dp_reo_cmdlist_destroy(struct dp_soc *soc); 2844 2845 /** 2846 * dp_reo_status_ring_handler() - Handler for REO Status ring 2847 * @int_ctx: pointer to DP interrupt context 2848 * @soc: DP Soc handle 2849 * 2850 * Return: Number of descriptors reaped 2851 */ 2852 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, 2853 struct dp_soc *soc); 2854 #endif 2855 2856 /** 2857 * dp_aggregate_vdev_stats() - Consolidate stats at VDEV level 2858 * @vdev: DP VDEV handle 2859 * @vdev_stats: aggregate statistics 2860 * 2861 * return: void 2862 */ 2863 void dp_aggregate_vdev_stats(struct dp_vdev *vdev, 2864 struct cdp_vdev_stats *vdev_stats); 2865 2866 /** 2867 * dp_txrx_get_vdev_stats() - Update buffer with cdp_vdev_stats 2868 * @soc_hdl: CDP SoC handle 2869 * @vdev_id: vdev Id 2870 * @buf: buffer for vdev stats 2871 * @is_aggregate: are aggregate stats being collected 2872 * 2873 * Return: QDF_STATUS 2874 */ 2875 QDF_STATUS 2876 dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2877 void *buf, bool is_aggregate); 2878 2879 /** 2880 * dp_rx_bar_stats_cb() - BAR received stats callback 2881 * @soc: SOC handle 2882 * @cb_ctxt: Call back context 2883 * @reo_status: Reo status 2884 * 2885 * Return: void 2886 */ 2887 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, 2888 union hal_reo_status *reo_status); 2889 2890 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id, 2891 qdf_nbuf_t nbuf, 2892 uint8_t newmac[][QDF_MAC_ADDR_SIZE], 2893 uint8_t new_mac_cnt, uint8_t tid, 2894 bool is_igmp, bool is_dms_pkt); 2895 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 2896 2897 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 2898 2899 /** 2900 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW 2901 * @pdev: DP PDEV handle 2902 * @stats_type_upload_mask: stats type requested by user 2903 * @config_param_0: extra configuration parameters 2904 * @config_param_1: extra configuration parameters 2905 * @config_param_2: extra configuration parameters 2906 * @config_param_3: extra configuration parameters 2907 * @cookie: 2908 * @cookie_msb: 2909 * @mac_id: mac number 2910 * 2911 * Return: QDF STATUS 2912 */ 2913 
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie, int cookie_msb,
		uint8_t mac_id);

/**
 * dp_htt_stats_print_tag() - function to select the tag type and
 * print the corresponding tag structure
 * @pdev: pdev pointer
 * @tag_type: tag type that is to be printed
 * @tag_buf: pointer to the tag structure
 *
 * Return: void
 */
void dp_htt_stats_print_tag(struct dp_pdev *pdev,
			    uint8_t tag_type, uint32_t *tag_buf);

/**
 * dp_htt_stats_copy_tag() - function to select the tag type and
 * copy the corresponding tag structure
 * @pdev: DP_PDEV handle
 * @tag_type: tag type that is to be copied
 * @tag_buf: pointer to the tag structure
 *
 * Return: void
 */
void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf);

/**
 * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration
 * HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @tuple_mask: tuple configuration to report 3 tuple hash value in either
 * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
 *
 * tuple_mask[1:0]:
 * 00 - Do not report 3 tuple hash value
 * 10 - Report 3 tuple hash value in toeplitz_2_or_4
 * 01 - Report 3 tuple hash value in flow_id_toeplitz
 * 11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
 * @mac_id: MAC ID
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask,
				     uint8_t mac_id);

#ifdef IPA_OFFLOAD
/**
 * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo
 * @soc: soc handle
 * @cb_ctxt: combination of peer_id and tid
 * @reo_status: reo status
 *
 * Return: void
 */
void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
				       union hal_reo_status *reo_status);

int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb);
#ifdef IPA_OPT_WIFI_DP
void dp_ipa_wdi_opt_dpath_notify_flt_rlsd(int flt0_rslt,
					  int flt1_rslt);
void dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(int flt0_rslt, int flt1_rslt);
void dp_ipa_wdi_opt_dpath_notify_flt_rsvd(bool is_success);
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_peer_aggregate_tid_stats() - aggregate rx tid stats
 * @peer: Data Path peer
 *
 * Return: void
 */
void dp_peer_aggregate_tid_stats(struct dp_peer *peer);
#endif
#else
static inline void dp_peer_aggregate_tid_stats(struct dp_peer *peer)
{
}
#endif

/**
 * dp_set_key_sec_type_wifi3() - set security mode of key
 * @soc: Datapath soc handle
 * @vdev_id: id of datapath vdev
 * @peer_mac: Datapath peer mac address
 * @sec_type: security type
 * @is_unicast: key type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
			  bool is_unicast);

/**
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: pdev corresponding to the MAC id
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t
			     mac_id);

/**
 * dp_set_michael_key() - Set the Michael key for a peer
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @peer_mac: Datapath peer mac address
 * @is_unicast: true for the unicast key, false for the group key
 * @key: Michael key to be installed
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id,
		   uint8_t *peer_mac,
		   bool is_unicast, uint32_t *key);

/**
 * dp_check_pdev_exists() - Validate pdev before use
 * @soc: dp soc handle
 * @data: pdev handle
 *
 * Return: true if the pdev exists, false otherwise
 */
bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data);

/**
 * dp_update_delay_stats() - Update delay statistics in structure
 *			     and fill min, max and avg delay
 * @tstats: tid tx stats
 * @rstats: tid rx stats
 * @delay: delay value (in ms or us, as indicated by @delay_in_us)
 * @tid: tid value
 * @mode: type of tx delay mode
 * @ring_id: ring number
 * @delay_in_us: flag to indicate whether the delay is in ms or us
 *
 * Return: none
 */
void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
			   uint8_t tid, uint8_t mode, uint8_t ring_id,
			   bool delay_in_us);

/**
 * dp_print_ring_stats(): Print tail and head pointer
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
void dp_print_ring_stats(struct dp_pdev *pdev);

/**
 * dp_print_ring_stat_from_hal(): Print tail and head pointer through hal
 * @soc: soc handle
 * @srng: srng handle
 * @ring_type: ring type
 *
 * Return: void
 */
void
dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
			    enum hal_ring_type ring_type);

/**
 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
 * @pdev: DP pdev handle
 *
 * Return: void
 */
void dp_print_pdev_cfg_params(struct dp_pdev *pdev);

/**
 * dp_print_soc_cfg_params() - Dump soc wlan config parameters
 * @soc: Soc handle
 *
 * Return: void
 */
void dp_print_soc_cfg_params(struct dp_soc *soc);

/**
 * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring
 * @ring_type: Ring type
 *
 * Return: char const pointer
 */
const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type);

/**
 * dp_txrx_path_stats() - Function to display dump stats
 * @soc: soc handle
 *
 * Return: none
 */
void dp_txrx_path_stats(struct dp_soc *soc);

/**
 * dp_print_per_ring_stats(): Packet count per ring
 * @soc: soc handle
 *
 * Return: None
 */
void dp_print_per_ring_stats(struct dp_soc *soc);

/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
void dp_aggregate_pdev_stats(struct dp_pdev *pdev);

/**
 * dp_print_rx_rates(): Print Rx rate stats
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
void dp_print_rx_rates(struct dp_vdev *vdev);

/**
 * dp_print_tx_rates(): Print tx rates
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
void dp_print_tx_rates(struct dp_vdev *vdev);

/**
 * dp_print_peer_stats(): Print peer stats
 * @peer: DP_PEER handle
 * @peer_stats: buffer holding peer stats
 *
 * Return: void
 */
void dp_print_peer_stats(struct dp_peer *peer,
			 struct cdp_peer_stats *peer_stats);

/**
 * dp_print_pdev_tx_stats(): Print Pdev level TX stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
void
dp_print_pdev_tx_stats(struct dp_pdev *pdev);

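/*
 * Illustrative example (not part of the upstream API): a minimal sketch of
 * how the pdev-level helpers declared above are typically chained by debug
 * code - consolidate the per-vdev/peer counters into the pdev stats first,
 * then dump the consolidated Tx counters. The helper name below is
 * hypothetical and is only meant to show the intended call order.
 */
static inline void dp_debug_dump_pdev_tx_stats(struct dp_pdev *pdev)
{
	/* Roll up vdev/peer level counters into the pdev stats */
	dp_aggregate_pdev_stats(pdev);

	/* Print the consolidated pdev-level Tx statistics */
	dp_print_pdev_tx_stats(pdev);
}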
3155 /** 3156 * dp_print_pdev_rx_stats(): Print Pdev level RX stats 3157 * @pdev: DP_PDEV Handle 3158 * 3159 * Return: void 3160 */ 3161 void 3162 dp_print_pdev_rx_stats(struct dp_pdev *pdev); 3163 3164 /** 3165 * dp_print_soc_tx_stats(): Print SOC level stats 3166 * @soc: DP_SOC Handle 3167 * 3168 * Return: void 3169 */ 3170 void dp_print_soc_tx_stats(struct dp_soc *soc); 3171 3172 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX 3173 /** 3174 * dp_print_global_desc_count(): Print global desc in use 3175 * 3176 * Return: void 3177 */ 3178 void dp_print_global_desc_count(void); 3179 #else 3180 /** 3181 * dp_print_global_desc_count(): Print global desc in use 3182 * 3183 * Return: void 3184 */ 3185 static inline 3186 void dp_print_global_desc_count(void) 3187 { 3188 } 3189 #endif 3190 3191 /** 3192 * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc 3193 * @soc: dp_soc handle 3194 * 3195 * Return: None 3196 */ 3197 void dp_print_soc_interrupt_stats(struct dp_soc *soc); 3198 3199 /** 3200 * dp_print_tx_ppeds_stats() - Print Tx in use stats for the soc in DS 3201 * @soc: dp_soc handle 3202 * 3203 * Return: None 3204 */ 3205 3206 void dp_print_tx_ppeds_stats(struct dp_soc *soc); 3207 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING 3208 /** 3209 * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats 3210 * for all SRNGs 3211 * @soc: DP soc handle 3212 * @srng_mask: SRNGs mask for dumping usage watermark stats 3213 * 3214 * Return: None 3215 */ 3216 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask); 3217 #else 3218 static inline 3219 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask) 3220 { 3221 } 3222 #endif 3223 3224 /** 3225 * dp_print_soc_rx_stats() - Print SOC level Rx stats 3226 * @soc: DP_SOC Handle 3227 * 3228 * Return: void 3229 */ 3230 void dp_print_soc_rx_stats(struct dp_soc *soc); 3231 3232 /** 3233 * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac 3234 * 3235 * @mac_id: MAC id 3236 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 3237 * 3238 * Single pdev using both MACs will operate on both MAC rings, 3239 * which is the case for MCL. 3240 * For WIN each PDEV will operate one ring, so index is zero. 3241 * 3242 */ 3243 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id) 3244 { 3245 if (mac_id && pdev_id) { 3246 qdf_print("Both mac_id and pdev_id cannot be non zero"); 3247 QDF_BUG(0); 3248 return 0; 3249 } 3250 return (mac_id + pdev_id); 3251 } 3252 3253 /** 3254 * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id 3255 * @soc: soc pointer 3256 * @mac_id: MAC id 3257 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 3258 * 3259 * For MCL, Single pdev using both MACs will operate on both MAC rings. 3260 * 3261 * For WIN, each PDEV will operate one ring. 3262 * 3263 */ 3264 static inline int 3265 dp_get_lmac_id_for_pdev_id 3266 (struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id) 3267 { 3268 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3269 if (mac_id && pdev_id) { 3270 qdf_print("Both mac_id and pdev_id cannot be non zero"); 3271 QDF_BUG(0); 3272 return 0; 3273 } 3274 return (mac_id + pdev_id); 3275 } 3276 3277 return soc->pdev_list[pdev_id]->lmac_id; 3278 } 3279 3280 /** 3281 * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id 3282 * @soc: soc pointer 3283 * @lmac_id: LMAC id 3284 * 3285 * For MCL, Single pdev exists 3286 * 3287 * For WIN, each PDEV will operate one ring. 
3288 * 3289 */ 3290 static inline struct dp_pdev * 3291 dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id) 3292 { 3293 uint8_t i = 0; 3294 3295 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3296 i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id); 3297 return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL); 3298 } 3299 3300 /* Typically for MCL as there only 1 PDEV*/ 3301 return soc->pdev_list[0]; 3302 } 3303 3304 /** 3305 * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev 3306 * corresponding to host pdev id 3307 * @soc: soc pointer 3308 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 3309 * 3310 * Return: target pdev_id for host pdev id. For WIN, this is derived through 3311 * a two step process: 3312 * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change 3313 * during mode switch) 3314 * 2. Get target pdev_id (set up during WMI ready) from lmac_id 3315 * 3316 * For MCL, return the offset-1 translated mac_id 3317 */ 3318 static inline int 3319 dp_calculate_target_pdev_id_from_host_pdev_id 3320 (struct dp_soc *soc, uint32_t mac_for_pdev) 3321 { 3322 struct dp_pdev *pdev; 3323 3324 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3325 return DP_SW2HW_MACID(mac_for_pdev); 3326 3327 pdev = soc->pdev_list[mac_for_pdev]; 3328 3329 /*non-MCL case, get original target_pdev mapping*/ 3330 return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id); 3331 } 3332 3333 /** 3334 * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding 3335 * to host pdev id 3336 * @soc: soc pointer 3337 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 3338 * 3339 * Return: target pdev_id for host pdev id. 3340 * For WIN, return the value stored in pdev object. 3341 * For MCL, return the offset-1 translated mac_id. 3342 */ 3343 static inline int 3344 dp_get_target_pdev_id_for_host_pdev_id 3345 (struct dp_soc *soc, uint32_t mac_for_pdev) 3346 { 3347 struct dp_pdev *pdev; 3348 3349 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3350 return DP_SW2HW_MACID(mac_for_pdev); 3351 3352 pdev = soc->pdev_list[mac_for_pdev]; 3353 3354 return pdev->target_pdev_id; 3355 } 3356 3357 /** 3358 * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding 3359 * to target pdev id 3360 * @soc: soc pointer 3361 * @pdev_id: pdev_id corresponding to target pdev 3362 * 3363 * Return: host pdev_id for target pdev id. For WIN, this is derived through 3364 * a two step process: 3365 * 1. Get lmac_id corresponding to target pdev_id 3366 * 2. Get host pdev_id (set up during WMI ready) from lmac_id 3367 * 3368 * For MCL, return the 0-offset pdev_id 3369 */ 3370 static inline int 3371 dp_get_host_pdev_id_for_target_pdev_id 3372 (struct dp_soc *soc, uint32_t pdev_id) 3373 { 3374 struct dp_pdev *pdev; 3375 int lmac_id; 3376 3377 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3378 return DP_HW2SW_MACID(pdev_id); 3379 3380 /*non-MCL case, get original target_lmac mapping from target pdev*/ 3381 lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, 3382 DP_HW2SW_MACID(pdev_id)); 3383 3384 /*Get host pdev from lmac*/ 3385 pdev = dp_get_pdev_for_lmac_id(soc, lmac_id); 3386 3387 return pdev ? pdev->pdev_id : INVALID_PDEV_ID; 3388 } 3389 3390 /** 3391 * dp_get_mac_id_for_mac() - Return mac corresponding WIN and MCL mac_ids 3392 * 3393 * @soc: handle to DP soc 3394 * @mac_id: MAC id 3395 * 3396 * Single pdev using both MACs will operate on both MAC rings, 3397 * which is the case for MCL. 
 * For WIN each PDEV will operate one ring, so index is zero.
 *
 */
static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
{
	/*
	 * Single pdev using both MACs will operate on both MAC rings,
	 * which is the case for MCL.
	 */
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return mac_id;

	/* For WIN each PDEV will operate one ring, so index is zero. */
	return 0;
}

/**
 * dp_is_subtype_data() - check if the frame subtype is data
 *
 * @frame_ctrl: Frame control field
 *
 * check the frame control field and verify if the packet
 * is a data packet.
 *
 * Return: true or false
 */
static inline bool dp_is_subtype_data(uint16_t frame_ctrl)
{
	if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) ==
	    QDF_IEEE80211_FC0_TYPE_DATA) &&
	    (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
	    QDF_IEEE80211_FC0_SUBTYPE_DATA) ||
	    ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
	    QDF_IEEE80211_FC0_SUBTYPE_QOS))) {
		return true;
	}

	return false;
}

#ifdef WDI_EVENT_ENABLE
/**
 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @mac_id: Mac id number
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
				     uint32_t stats_type_upload_mask,
				     uint8_t mac_id);

/**
 * dp_wdi_event_unsub() - WDI event unsubscribe
 * @soc: soc handle
 * @pdev_id: id of pdev
 * @event_cb_sub_handle: subscribed event handle
 * @event: Event to be unsubscribed
 *
 * Return: 0 for success. nonzero for failure.
 */
int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id,
		       wdi_event_subscribe *event_cb_sub_handle,
		       uint32_t event);

/**
 * dp_wdi_event_sub() - Subscribe WDI event
 * @soc: soc handle
 * @pdev_id: id of pdev
 * @event_cb_sub_handle: subscriber event handle
 * @event: Event to be subscribed
 *
 * Return: 0 for success. nonzero for failure.
 */
int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id,
		     wdi_event_subscribe *event_cb_sub_handle,
		     uint32_t event);

/**
 * dp_wdi_event_handler() - Event handler for WDI event
 * @event: wdi event number
 * @soc: soc handle
 * @data: pointer to data
 * @peer_id: peer id number
 * @status: HTT rx status
 * @pdev_id: id of pdev
 *
 * Called to deliver a WDI event to the registered subscribers.
 *
 * Return: None
 */
void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc,
			  void *data, u_int16_t peer_id,
			  int status, u_int8_t pdev_id);

/**
 * dp_wdi_event_attach() - Attach wdi event
 * @txrx_pdev: DP pdev handle
 *
 * Return: 0 for success. nonzero for failure.
 */
int dp_wdi_event_attach(struct dp_pdev *txrx_pdev);

/**
 * dp_wdi_event_detach() - Detach WDI event
 * @txrx_pdev: DP pdev handle
 *
 * Return: 0 for success. nonzero for failure.
3507 */ 3508 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev); 3509 3510 static inline void 3511 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, 3512 void *cb_context, 3513 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 3514 uint8_t pipe_id) 3515 { 3516 struct hif_msg_callbacks hif_pipe_callbacks = { 0 }; 3517 3518 /* TODO: Temporary change to bypass HTC connection for this new 3519 * HIF pipe, which will be used for packet log and other high- 3520 * priority HTT messages. Proper HTC connection to be added 3521 * later once required FW changes are available 3522 */ 3523 hif_pipe_callbacks.rxCompletionHandler = callback; 3524 hif_pipe_callbacks.Context = cb_context; 3525 hif_update_pipe_callback(dp_soc->hif_handle, 3526 DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks); 3527 } 3528 #else 3529 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 3530 wdi_event_subscribe *event_cb_sub_handle, 3531 uint32_t event) 3532 { 3533 return 0; 3534 } 3535 3536 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 3537 wdi_event_subscribe *event_cb_sub_handle, 3538 uint32_t event) 3539 { 3540 return 0; 3541 } 3542 3543 static inline 3544 void dp_wdi_event_handler(enum WDI_EVENT event, 3545 struct dp_soc *soc, 3546 void *data, u_int16_t peer_id, 3547 int status, u_int8_t pdev_id) 3548 { 3549 } 3550 3551 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev) 3552 { 3553 return 0; 3554 } 3555 3556 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev) 3557 { 3558 return 0; 3559 } 3560 3561 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 3562 uint32_t stats_type_upload_mask, uint8_t mac_id) 3563 { 3564 return 0; 3565 } 3566 3567 static inline void 3568 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, 3569 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 3570 uint8_t pipe_id) 3571 { 3572 } 3573 #endif 3574 3575 #ifdef VDEV_PEER_PROTOCOL_COUNT 3576 /** 3577 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 3578 * @vdev: VDEV DP object 3579 * @nbuf: data packet 3580 * @txrx_peer: DP TXRX Peer object 3581 * @is_egress: whether egress or ingress 3582 * @is_rx: whether rx or tx 3583 * 3584 * This function updates the per-peer protocol counters 3585 * Return: void 3586 */ 3587 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, 3588 qdf_nbuf_t nbuf, 3589 struct dp_txrx_peer *txrx_peer, 3590 bool is_egress, 3591 bool is_rx); 3592 3593 /** 3594 * dp_peer_stats_update_protocol_cnt() - update per-peer protocol counters 3595 * @soc: SOC DP object 3596 * @vdev_id: vdev_id 3597 * @nbuf: data packet 3598 * @is_egress: whether egress or ingress 3599 * @is_rx: whether rx or tx 3600 * 3601 * This function updates the per-peer protocol counters 3602 * 3603 * Return: void 3604 */ 3605 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc, 3606 int8_t vdev_id, 3607 qdf_nbuf_t nbuf, 3608 bool is_egress, 3609 bool is_rx); 3610 3611 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 3612 qdf_nbuf_t nbuf); 3613 3614 #else 3615 #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, txrx_peer, \ 3616 is_egress, is_rx) 3617 3618 static inline 3619 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 3620 qdf_nbuf_t nbuf) 3621 { 3622 } 3623 3624 #endif 3625 3626 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3627 /** 3628 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info 3629 * @soc_hdl: Handle to struct cdp_soc 3630 * 
3631 * Return: none 3632 */ 3633 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl); 3634 3635 /** 3636 * dp_tx_dump_flow_pool_info_compact() - dump flow pool info 3637 * @soc: DP soc context 3638 * 3639 * Return: none 3640 */ 3641 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc); 3642 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, 3643 bool force); 3644 #else 3645 static inline void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc) 3646 { 3647 } 3648 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 3649 3650 #ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS 3651 static inline int 3652 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3653 { 3654 return hal_srng_access_start_unlocked(soc, hal_ring_hdl); 3655 } 3656 3657 static inline void 3658 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3659 { 3660 hal_srng_access_end_unlocked(soc, hal_ring_hdl); 3661 } 3662 3663 #else 3664 static inline int 3665 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3666 { 3667 return hal_srng_access_start(soc, hal_ring_hdl); 3668 } 3669 3670 static inline void 3671 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3672 { 3673 hal_srng_access_end(soc, hal_ring_hdl); 3674 } 3675 #endif 3676 3677 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 3678 /** 3679 * dp_srng_access_start() - Wrapper function to log access start of a hal ring 3680 * @int_ctx: pointer to DP interrupt context. This should not be NULL 3681 * @dp_soc: DP Soc handle 3682 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be 3683 * serviced 3684 * 3685 * Return: 0 on success; error on failure 3686 */ 3687 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 3688 hal_ring_handle_t hal_ring_hdl); 3689 3690 /** 3691 * dp_srng_access_end() - Wrapper function to log access end of a hal ring 3692 * @int_ctx: pointer to DP interrupt context. 
This should not be NULL 3693 * @dp_soc: DP Soc handle 3694 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be 3695 * serviced 3696 * 3697 * Return: void 3698 */ 3699 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 3700 hal_ring_handle_t hal_ring_hdl); 3701 3702 #else 3703 static inline int dp_srng_access_start(struct dp_intr *int_ctx, 3704 struct dp_soc *dp_soc, 3705 hal_ring_handle_t hal_ring_hdl) 3706 { 3707 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3708 3709 return dp_hal_srng_access_start(hal_soc, hal_ring_hdl); 3710 } 3711 3712 static inline void dp_srng_access_end(struct dp_intr *int_ctx, 3713 struct dp_soc *dp_soc, 3714 hal_ring_handle_t hal_ring_hdl) 3715 { 3716 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3717 3718 return dp_hal_srng_access_end(hal_soc, hal_ring_hdl); 3719 } 3720 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 3721 3722 #ifdef QCA_CACHED_RING_DESC 3723 /** 3724 * dp_srng_dst_get_next() - Wrapper function to get next ring desc 3725 * @dp_soc: DP Soc handle 3726 * @hal_ring_hdl: opaque pointer to the HAL Destination Ring 3727 * 3728 * Return: HAL ring descriptor 3729 */ 3730 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 3731 hal_ring_handle_t hal_ring_hdl) 3732 { 3733 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3734 3735 return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl); 3736 } 3737 3738 /** 3739 * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached 3740 * descriptors 3741 * @dp_soc: DP Soc handle 3742 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 3743 * @num_entries: Entry count 3744 * 3745 * Return: None 3746 */ 3747 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 3748 hal_ring_handle_t hal_ring_hdl, 3749 uint32_t num_entries) 3750 { 3751 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3752 3753 hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries); 3754 } 3755 #else 3756 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 3757 hal_ring_handle_t hal_ring_hdl) 3758 { 3759 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3760 3761 return hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 3762 } 3763 3764 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 3765 hal_ring_handle_t hal_ring_hdl, 3766 uint32_t num_entries) 3767 { 3768 } 3769 #endif /* QCA_CACHED_RING_DESC */ 3770 3771 #if defined(QCA_CACHED_RING_DESC) && \ 3772 (defined(QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH) || \ 3773 defined(QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH)) 3774 /** 3775 * dp_srng_dst_prefetch() - Wrapper function to prefetch descs from dest ring 3776 * @hal_soc: HAL SOC handle 3777 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 3778 * @num_entries: Entry count 3779 * 3780 * Return: None 3781 */ 3782 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 3783 hal_ring_handle_t hal_ring_hdl, 3784 uint32_t num_entries) 3785 { 3786 return hal_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_entries); 3787 } 3788 3789 /** 3790 * dp_srng_dst_prefetch_32_byte_desc() - Wrapper function to prefetch 3791 * 32 byte descriptor starting at 3792 * 64 byte offset 3793 * @hal_soc: HAL SOC handle 3794 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 3795 * @num_entries: Entry count 3796 * 3797 * Return: None 3798 */ 3799 static inline 3800 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 3801 hal_ring_handle_t hal_ring_hdl, 3802 uint32_t num_entries) 3803 { 3804 return 
hal_srng_dst_prefetch_32_byte_desc(hal_soc, hal_ring_hdl, 3805 num_entries); 3806 } 3807 #else 3808 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 3809 hal_ring_handle_t hal_ring_hdl, 3810 uint32_t num_entries) 3811 { 3812 return NULL; 3813 } 3814 3815 static inline 3816 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 3817 hal_ring_handle_t hal_ring_hdl, 3818 uint32_t num_entries) 3819 { 3820 return NULL; 3821 } 3822 #endif 3823 3824 #ifdef QCA_ENH_V3_STATS_SUPPORT 3825 /** 3826 * dp_pdev_print_delay_stats(): Print pdev level delay stats 3827 * @pdev: DP_PDEV handle 3828 * 3829 * Return:void 3830 */ 3831 void dp_pdev_print_delay_stats(struct dp_pdev *pdev); 3832 3833 /** 3834 * dp_pdev_print_tid_stats(): Print pdev level tid stats 3835 * @pdev: DP_PDEV handle 3836 * 3837 * Return:void 3838 */ 3839 void dp_pdev_print_tid_stats(struct dp_pdev *pdev); 3840 3841 /** 3842 * dp_pdev_print_rx_error_stats(): Print pdev level rx error stats 3843 * @pdev: DP_PDEV handle 3844 * 3845 * Return:void 3846 */ 3847 void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev); 3848 #endif /* QCA_ENH_V3_STATS_SUPPORT */ 3849 3850 /** 3851 * dp_pdev_get_tid_stats(): Get accumulated pdev level tid_stats 3852 * @soc_hdl: soc handle 3853 * @pdev_id: id of dp_pdev handle 3854 * @tid_stats: Pointer for cdp_tid_stats_intf 3855 * 3856 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_INVAL 3857 */ 3858 QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3859 struct cdp_tid_stats_intf *tid_stats); 3860 3861 /** 3862 * dp_soc_set_txrx_ring_map() 3863 * @soc: DP handler for soc 3864 * 3865 * Return: Void 3866 */ 3867 void dp_soc_set_txrx_ring_map(struct dp_soc *soc); 3868 3869 /** 3870 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev 3871 * @vdev: DP vdev handle 3872 * 3873 * Return: struct cdp_vdev pointer 3874 */ 3875 static inline 3876 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev) 3877 { 3878 return (struct cdp_vdev *)vdev; 3879 } 3880 3881 /** 3882 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev 3883 * @pdev: DP pdev handle 3884 * 3885 * Return: struct cdp_pdev pointer 3886 */ 3887 static inline 3888 struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev) 3889 { 3890 return (struct cdp_pdev *)pdev; 3891 } 3892 3893 /** 3894 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc 3895 * @psoc: DP psoc handle 3896 * 3897 * Return: struct cdp_soc pointer 3898 */ 3899 static inline 3900 struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc) 3901 { 3902 return (struct cdp_soc *)psoc; 3903 } 3904 3905 /** 3906 * dp_soc_to_cdp_soc_t() - typecast dp psoc to ol txrx soc handle 3907 * @psoc: DP psoc handle 3908 * 3909 * Return: struct cdp_soc_t pointer 3910 */ 3911 static inline 3912 struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc) 3913 { 3914 return (struct cdp_soc_t *)psoc; 3915 } 3916 3917 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA) 3918 /** 3919 * dp_rx_flow_get_fse_stats() - Retrieve a flow's statistics 3920 * @pdev: pdev handle 3921 * @rx_flow_info: flow information in the Rx FST 3922 * @stats: stats to update 3923 * 3924 * Return: Success when flow statistcs is updated, error on failure 3925 */ 3926 QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev, 3927 struct cdp_rx_flow_info *rx_flow_info, 3928 struct cdp_flow_stats *stats); 3929 3930 /** 3931 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table 3932 * @pdev: pdev handle 3933 * @rx_flow_info: DP flow parameters 3934 * 3935 * 
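 * Illustrative add/delete sequence (a sketch only; the cdp_rx_flow_info
 * contents shown are placeholders, not actual field names):
 *   struct cdp_rx_flow_info info = { ...flow tuple and metadata... };
 *   dp_rx_flow_add_entry(pdev, &info);
 *   ...
 *   dp_rx_flow_delete_entry(pdev, &info);
 *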
Return: Success when flow is deleted, error on failure
3936 */
3937 QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev,
3938 struct cdp_rx_flow_info *rx_flow_info);
3939
3940 /**
3941 * dp_rx_flow_add_entry() - Add a flow entry to flow search table
3942 * @pdev: DP pdev instance
3943 * @rx_flow_info: DP flow parameters
3944 *
3945 * Return: Success when the flow is added; no-memory or already-exists error otherwise
3946 */
3947 QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev,
3948 struct cdp_rx_flow_info *rx_flow_info);
3949
3950 /**
3951 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
3952 * @soc: SoC handle
3953 * @pdev: Pdev handle
3954 *
3955 * Return: QDF_STATUS
3956 */
3957 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev);
3958
3959 /**
3960 * dp_rx_fst_detach() - De-initialize Rx FST
3961 * @soc: SoC handle
3962 * @pdev: Pdev handle
3963 *
3964 * Return: None
3965 */
3966 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev);
3967
3968 /**
3969 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach
3970 * @soc: SoC handle
3971 * @pdev: Pdev handle
3972 *
3973 * Return: Success when fst parameters are programmed in FW, error otherwise
3974 */
3975 QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc,
3976 struct dp_pdev *pdev);
3977
3978 /**
3979 * dp_mon_rx_update_rx_flow_tag_stats() - Update a mon flow's statistics
3980 * @pdev: pdev handle
3981 * @flow_id: flow index (truncated hash) in the Rx FST
3982 *
3983 * Return: Success when flow statistics are updated, error on failure
3984 */
3985 QDF_STATUS
3986 dp_mon_rx_update_rx_flow_tag_stats(struct dp_pdev *pdev, uint32_t flow_id);
3987
3988 #else /* !(defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)) */
3989
3990 /**
3991 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
3992 * @soc: SoC handle
3993 * @pdev: Pdev handle
3994 *
3995 * Return: QDF_STATUS
3996 */
3997 static inline
3998 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev)
3999 {
4000 return QDF_STATUS_SUCCESS;
4001 }
4002
4003 /**
4004 * dp_rx_fst_detach() - De-initialize Rx FST
4005 * @soc: SoC handle
4006 * @pdev: Pdev handle
4007 *
4008 * Return: None
4009 */
4010 static inline
4011 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev)
4012 {
4013 }
4014 #endif
4015
4016 /**
4017 * dp_rx_fst_attach_wrapper() - wrapper API for dp_rx_fst_attach
4018 * @soc: SoC handle
4019 * @pdev: Pdev handle
4020 *
4021 * Return: QDF_STATUS
4022 */
4023 extern QDF_STATUS
4024 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev);
4025
4026 /**
4027 * dp_rx_fst_detach_wrapper() - wrapper API for dp_rx_fst_detach
4028 * @soc: SoC handle
4029 * @pdev: Pdev handle
4030 *
4031 * Return: None
4032 */
4033 extern void
4034 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev);
4035
4036 /**
4037 * dp_vdev_get_ref() - API to take a reference for VDEV object
4038 *
4039 * @soc: core DP soc context
4040 * @vdev: DP vdev
4041 * @mod_id: module id
4042 *
4043 * Return: QDF_STATUS_SUCCESS if reference held successfully
4044 * else QDF_STATUS_E_INVAL
4045 */
4046 static inline
4047 QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev,
4048 enum dp_mod_id mod_id)
4049 {
4050 if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt))
4051 return QDF_STATUS_E_INVAL;
4052
4053 qdf_atomic_inc(&vdev->mod_refs[mod_id]);
4054
4055 return QDF_STATUS_SUCCESS;
4056 }
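/*
 * Illustrative vdev reference pattern (a sketch only; dp_vdev_unref_delete()
 * is assumed to be the matching release API declared elsewhere, and
 * DP_MOD_ID_CDP is just an example module id):
 *
 *   struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
 *
 *   if (vdev) {
 *           ...use the vdev...
 *           dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 *   }
 */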
4057 4058 /** 4059 * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id 4060 * @soc: core DP soc context 4061 * @vdev_id: vdev id from vdev object can be retrieved 4062 * @mod_id: module id which is requesting the reference 4063 * 4064 * Return: struct dp_vdev*: Pointer to DP vdev object 4065 */ 4066 static inline struct dp_vdev * 4067 dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id, 4068 enum dp_mod_id mod_id) 4069 { 4070 struct dp_vdev *vdev = NULL; 4071 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) 4072 return NULL; 4073 4074 qdf_spin_lock_bh(&soc->vdev_map_lock); 4075 vdev = soc->vdev_id_map[vdev_id]; 4076 4077 if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) { 4078 qdf_spin_unlock_bh(&soc->vdev_map_lock); 4079 return NULL; 4080 } 4081 qdf_spin_unlock_bh(&soc->vdev_map_lock); 4082 4083 return vdev; 4084 } 4085 4086 /** 4087 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id 4088 * @soc: core DP soc context 4089 * @pdev_id: pdev id from pdev object can be retrieved 4090 * 4091 * Return: struct dp_pdev*: Pointer to DP pdev object 4092 */ 4093 static inline struct dp_pdev * 4094 dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc, 4095 uint8_t pdev_id) 4096 { 4097 if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT)) 4098 return NULL; 4099 4100 return soc->pdev_list[pdev_id]; 4101 } 4102 4103 /** 4104 * dp_get_peer_mac_list(): function to get peer mac list of vdev 4105 * @soc: Datapath soc handle 4106 * @vdev_id: vdev id 4107 * @newmac: Table of the clients mac 4108 * @mac_cnt: No. of MACs required 4109 * @limit: Limit the number of clients 4110 * 4111 * Return: no of clients 4112 */ 4113 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id, 4114 u_int8_t newmac[][QDF_MAC_ADDR_SIZE], 4115 u_int16_t mac_cnt, bool limit); 4116 4117 /** 4118 * dp_update_num_mac_rings_for_dbs() - Update No of MAC rings based on 4119 * DBS check 4120 * @soc: DP SoC context 4121 * @max_mac_rings: Pointer to variable for No of MAC rings 4122 * 4123 * Return: None 4124 */ 4125 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc, 4126 int *max_mac_rings); 4127 4128 4129 #if defined(WLAN_SUPPORT_RX_FISA) 4130 void dp_rx_dump_fisa_table(struct dp_soc *soc); 4131 4132 /** 4133 * dp_print_fisa_stats() - Print FISA stats 4134 * @soc: DP soc handle 4135 * 4136 * Return: None 4137 */ 4138 void dp_print_fisa_stats(struct dp_soc *soc); 4139 4140 /** 4141 * dp_rx_fst_update_cmem_params() - Update CMEM FST params 4142 * @soc: DP SoC context 4143 * @num_entries: Number of flow search entries 4144 * @cmem_ba_lo: CMEM base address low 4145 * @cmem_ba_hi: CMEM base address high 4146 * 4147 * Return: None 4148 */ 4149 void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries, 4150 uint32_t cmem_ba_lo, uint32_t cmem_ba_hi); 4151 4152 void 4153 dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended); 4154 4155 /** 4156 * dp_rx_fst_requeue_wq() - Re-queue pending work queue tasks 4157 * @soc: DP SoC context 4158 * 4159 * Return: None 4160 */ 4161 void dp_rx_fst_requeue_wq(struct dp_soc *soc); 4162 #else 4163 static inline void 4164 dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries, 4165 uint32_t cmem_ba_lo, uint32_t cmem_ba_hi) 4166 { 4167 } 4168 4169 static inline void 4170 dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended) 4171 { 4172 } 4173 4174 static inline void 4175 dp_rx_fst_requeue_wq(struct dp_soc *soc) 4176 { 4177 } 4178 4179 static inline void dp_print_fisa_stats(struct 
dp_soc *soc)
4180 {
4181 }
4182 #endif /* WLAN_SUPPORT_RX_FISA */
4183
4184 #ifdef MAX_ALLOC_PAGE_SIZE
4185 /**
4186 * dp_set_max_page_size() - Set the max page size for hw link desc.
4187 * @pages: link desc page handle
4188 * @max_alloc_size: max_alloc_size
4189 *
4190 * For MCL the page size is set to the OS defined value and for WIN
4191 * the page size is set to the max_alloc_size cfg ini
4192 * param.
4193 * This is to ensure that WIN gets contiguous memory allocations
4194 * as per requirement.
4195 *
4196 * Return: None
4197 */
4198 static inline
4199 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
4200 uint32_t max_alloc_size)
4201 {
4202 pages->page_size = qdf_page_size;
4203 }
4204
4205 #else
4206 static inline
4207 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
4208 uint32_t max_alloc_size)
4209 {
4210 pages->page_size = max_alloc_size;
4211 }
4212 #endif /* MAX_ALLOC_PAGE_SIZE */
4213
4214 /**
4215 * dp_history_get_next_index() - get the index at which the next entry
4216 * is to be recorded in the history.
4217 * @curr_idx: Current index where the last entry is written.
4218 * @max_entries: Max number of entries in the history
4219 *
4220 * This function assumes that the max number of entries is a power of 2.
4221 *
4222 * Return: The index where the next entry is to be written.
4223 */
4224 static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx,
4225 uint32_t max_entries)
4226 {
4227 uint32_t idx = qdf_atomic_inc_return(curr_idx);
4228
4229 return idx & (max_entries - 1);
4230 }
4231
4232 /**
4233 * dp_rx_skip_tlvs() - Skip TLVs len + L3 padding, save in nbuf->cb
4234 * @soc: Datapath soc handle
4235 * @nbuf: nbuf cb to be updated
4236 * @l3_padding: L3 padding
4237 *
4238 * Return: None
4239 */
4240 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding);
4241
4242 #ifndef FEATURE_WDS
4243 static inline void
4244 dp_hmwds_ast_add_notify(struct dp_peer *peer,
4245 uint8_t *mac_addr,
4246 enum cdp_txrx_ast_entry_type type,
4247 QDF_STATUS err,
4248 bool is_peer_map)
4249 {
4250 }
4251 #endif
4252
4253 #ifdef HTT_STATS_DEBUGFS_SUPPORT
4254 /**
4255 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
4256 * debugfs for HTT stats
4257 * @pdev: dp pdev handle
4258 *
4259 * Return: QDF_STATUS
4260 */
4261 QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev);
4262
4263 /**
4264 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
4265 * HTT stats
4266 * @pdev: dp pdev handle
4267 *
4268 * Return: none
4269 */
4270 void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev);
4271 #else
4272
4273 /**
4274 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize
4275 * debugfs for HTT stats
4276 * @pdev: dp pdev handle
4277 *
4278 * Return: QDF_STATUS
4279 */
4280 static inline QDF_STATUS
4281 dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev)
4282 {
4283 return QDF_STATUS_SUCCESS;
4284 }
4285
4286 /**
4287 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for
4288 * HTT stats
4289 * @pdev: dp pdev handle
4290 *
4291 * Return: none
4292 */
4293 static inline void
4294 dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev)
4295 {
4296 }
4297 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
4298
4299 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
4300 /**
4301 * dp_soc_swlm_attach() - attach the software latency manager resources
4302 * @soc: Datapath global soc handle
4303 *
4304 * Return: QDF_STATUS
4305 */
4306 static inline QDF_STATUS dp_soc_swlm_attach(struct
dp_soc *soc) 4307 { 4308 return QDF_STATUS_SUCCESS; 4309 } 4310 4311 /** 4312 * dp_soc_swlm_detach() - detach the software latency manager resources 4313 * @soc: Datapath global soc handle 4314 * 4315 * Return: QDF_STATUS 4316 */ 4317 static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc) 4318 { 4319 return QDF_STATUS_SUCCESS; 4320 } 4321 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */ 4322 4323 #ifndef WLAN_DP_PROFILE_SUPPORT 4324 static inline void wlan_dp_soc_cfg_sync_profile(struct cdp_soc_t *cdp_soc) {} 4325 4326 static inline void wlan_dp_pdev_cfg_sync_profile(struct cdp_soc_t *cdp_soc, 4327 uint8_t pdev_id) {} 4328 #endif 4329 4330 /** 4331 * dp_get_peer_id(): function to get peer id by mac 4332 * @soc: Datapath soc handle 4333 * @vdev_id: vdev id 4334 * @mac: Peer mac address 4335 * 4336 * Return: valid peer id on success 4337 * HTT_INVALID_PEER on failure 4338 */ 4339 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac); 4340 4341 #ifdef QCA_SUPPORT_WDS_EXTENDED 4342 /** 4343 * dp_wds_ext_set_peer_rx(): function to set peer rx handler 4344 * @soc: Datapath soc handle 4345 * @vdev_id: vdev id 4346 * @mac: Peer mac address 4347 * @rx: rx function pointer 4348 * @osif_peer: OSIF peer handle 4349 * 4350 * Return: QDF_STATUS_SUCCESS on success 4351 * QDF_STATUS_E_INVAL if peer is not found 4352 * QDF_STATUS_E_ALREADY if rx is already set/unset 4353 */ 4354 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc, 4355 uint8_t vdev_id, 4356 uint8_t *mac, 4357 ol_txrx_rx_fp rx, 4358 ol_osif_peer_handle osif_peer); 4359 4360 /** 4361 * dp_wds_ext_get_peer_osif_handle(): function to get peer osif handle 4362 * @soc: Datapath soc handle 4363 * @vdev_id: vdev id 4364 * @mac: Peer mac address 4365 * @osif_peer: OSIF peer handle 4366 * 4367 * Return: QDF_STATUS_SUCCESS on success 4368 * QDF_STATUS_E_INVAL if peer is not found 4369 */ 4370 QDF_STATUS dp_wds_ext_get_peer_osif_handle( 4371 ol_txrx_soc_handle soc, 4372 uint8_t vdev_id, 4373 uint8_t *mac, 4374 ol_osif_peer_handle *osif_peer); 4375 4376 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 4377 4378 #ifdef DP_MEM_PRE_ALLOC 4379 4380 /** 4381 * dp_context_alloc_mem() - allocate memory for DP context 4382 * @soc: datapath soc handle 4383 * @ctxt_type: DP context type 4384 * @ctxt_size: DP context size 4385 * 4386 * Return: DP context address 4387 */ 4388 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4389 size_t ctxt_size); 4390 4391 /** 4392 * dp_context_free_mem() - Free memory of DP context 4393 * @soc: datapath soc handle 4394 * @ctxt_type: DP context type 4395 * @vaddr: Address of context memory 4396 * 4397 * Return: None 4398 */ 4399 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4400 void *vaddr); 4401 4402 /** 4403 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages 4404 * @soc: datapath soc handle 4405 * @desc_type: memory request source type 4406 * @pages: multi page information storage 4407 * @element_size: each element size 4408 * @element_num: total number of elements should be allocated 4409 * @memctxt: memory context 4410 * @cacheable: coherent memory or cacheable memory 4411 * 4412 * This function is a wrapper for memory allocation over multiple 4413 * pages, if dp prealloc method is registered, then will try prealloc 4414 * firstly. if prealloc failed, fall back to regular way over 4415 * qdf_mem_multi_pages_alloc(). 
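 *
 * Illustrative call (a sketch only; the pool pointer, element type and
 * count below are hypothetical placeholders, not taken from this file):
 *   dp_desc_multi_pages_mem_alloc(soc, desc_type, &pool->desc_pages,
 *                                 sizeof(elem_t), num_elem, 0, false);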
4416 *
4417 * Return: None
4418 */
4419 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
4420 enum qdf_dp_desc_type desc_type,
4421 struct qdf_mem_multi_page_t *pages,
4422 size_t element_size,
4423 uint32_t element_num,
4424 qdf_dma_context_t memctxt,
4425 bool cacheable);
4426
4427 /**
4428 * dp_desc_multi_pages_mem_free() - free multiple pages memory
4429 * @soc: datapath soc handle
4430 * @desc_type: memory request source type
4431 * @pages: multi page information storage
4432 * @memctxt: memory context
4433 * @cacheable: coherent memory or cacheable memory
4434 *
4435 * This function is a wrapper for freeing memory allocated over multiple
4436 * pages: if the memory came from the prealloc pool, it is returned to the
4437 * pool; otherwise it is freed via qdf_mem_multi_pages_free().
4438 *
4439 * Return: None
4440 */
4441 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
4442 enum qdf_dp_desc_type desc_type,
4443 struct qdf_mem_multi_page_t *pages,
4444 qdf_dma_context_t memctxt,
4445 bool cacheable);
4446
4447 #else
4448 static inline
4449 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
4450 size_t ctxt_size)
4451 {
4452 return qdf_mem_malloc(ctxt_size);
4453 }
4454
4455 static inline
4456 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
4457 void *vaddr)
4458 {
4459 qdf_mem_free(vaddr);
4460 }
4461
4462 static inline
4463 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
4464 enum qdf_dp_desc_type desc_type,
4465 struct qdf_mem_multi_page_t *pages,
4466 size_t element_size,
4467 uint32_t element_num,
4468 qdf_dma_context_t memctxt,
4469 bool cacheable)
4470 {
4471 qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
4472 element_num, memctxt, cacheable);
4473 }
4474
4475 static inline
4476 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
4477 enum qdf_dp_desc_type desc_type,
4478 struct qdf_mem_multi_page_t *pages,
4479 qdf_dma_context_t memctxt,
4480 bool cacheable)
4481 {
4482 qdf_mem_multi_pages_free(soc->osdev, pages,
4483 memctxt, cacheable);
4484 }
4485 #endif
4486
4487 /**
4488 * struct dp_frag_history_opaque_atomic - Opaque struct for adding a fragmented
4489 * history.
4490 * @index: atomic index 4491 * @num_entries_per_slot: Number of entries per slot 4492 * @allocated: is allocated or not 4493 * @entry: pointers to array of records 4494 */ 4495 struct dp_frag_history_opaque_atomic { 4496 qdf_atomic_t index; 4497 uint16_t num_entries_per_slot; 4498 uint16_t allocated; 4499 void *entry[0]; 4500 }; 4501 4502 static inline QDF_STATUS 4503 dp_soc_frag_history_attach(struct dp_soc *soc, void *history_hdl, 4504 uint32_t max_slots, uint32_t max_entries_per_slot, 4505 uint32_t entry_size, 4506 bool attempt_prealloc, enum dp_ctxt_type ctxt_type) 4507 { 4508 struct dp_frag_history_opaque_atomic *history = 4509 (struct dp_frag_history_opaque_atomic *)history_hdl; 4510 size_t alloc_size = max_entries_per_slot * entry_size; 4511 int i; 4512 4513 for (i = 0; i < max_slots; i++) { 4514 if (attempt_prealloc) 4515 history->entry[i] = dp_context_alloc_mem(soc, ctxt_type, 4516 alloc_size); 4517 else 4518 history->entry[i] = qdf_mem_malloc(alloc_size); 4519 4520 if (!history->entry[i]) 4521 goto exit; 4522 } 4523 4524 qdf_atomic_init(&history->index); 4525 history->allocated = 1; 4526 history->num_entries_per_slot = max_entries_per_slot; 4527 4528 return QDF_STATUS_SUCCESS; 4529 exit: 4530 for (i = i - 1; i >= 0; i--) { 4531 if (attempt_prealloc) 4532 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 4533 else 4534 qdf_mem_free(history->entry[i]); 4535 } 4536 4537 return QDF_STATUS_E_NOMEM; 4538 } 4539 4540 static inline 4541 void dp_soc_frag_history_detach(struct dp_soc *soc, 4542 void *history_hdl, uint32_t max_slots, 4543 bool attempt_prealloc, 4544 enum dp_ctxt_type ctxt_type) 4545 { 4546 struct dp_frag_history_opaque_atomic *history = 4547 (struct dp_frag_history_opaque_atomic *)history_hdl; 4548 int i; 4549 4550 for (i = 0; i < max_slots; i++) { 4551 if (attempt_prealloc) 4552 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 4553 else 4554 qdf_mem_free(history->entry[i]); 4555 } 4556 4557 history->allocated = 0; 4558 } 4559 4560 /** 4561 * dp_get_frag_hist_next_atomic_idx() - get the next entry index to record an 4562 * entry in a fragmented history with 4563 * index being atomic. 4564 * @curr_idx: address of the current index where the last entry was written 4565 * @next_idx: pointer to update the next index 4566 * @slot: pointer to update the history slot to be selected 4567 * @slot_shift: BITwise shift mask for slot (in index) 4568 * @max_entries_per_slot: Max number of entries in a slot of history 4569 * @max_entries: Total number of entries in the history (sum of all slots) 4570 * 4571 * This function assumes that the "max_entries_per_slot" and "max_entries" 4572 * are a power-of-2. 4573 * 4574 * Return: None 4575 */ 4576 static inline void 4577 dp_get_frag_hist_next_atomic_idx(qdf_atomic_t *curr_idx, uint32_t *next_idx, 4578 uint16_t *slot, uint32_t slot_shift, 4579 uint32_t max_entries_per_slot, 4580 uint32_t max_entries) 4581 { 4582 uint32_t idx; 4583 4584 idx = qdf_do_div_rem(qdf_atomic_inc_return(curr_idx), max_entries); 4585 4586 *slot = idx >> slot_shift; 4587 *next_idx = idx & (max_entries_per_slot - 1); 4588 } 4589 4590 #ifdef FEATURE_RUNTIME_PM 4591 /** 4592 * dp_runtime_get() - Get dp runtime refcount 4593 * @soc: Datapath soc handle 4594 * 4595 * Get dp runtime refcount by increment of an atomic variable, which can block 4596 * dp runtime resume to wait to flush pending tx by runtime suspend. 
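 *
 * Illustrative pairing with dp_runtime_put() (a sketch only; the
 * surrounding tx logic is hypothetical):
 *   dp_runtime_get(soc);
 *   ...queue the frame to the hardware ring...
 *   dp_runtime_put(soc);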
4597 * 4598 * Return: Current refcount 4599 */ 4600 static inline int32_t dp_runtime_get(struct dp_soc *soc) 4601 { 4602 return qdf_atomic_inc_return(&soc->dp_runtime_refcount); 4603 } 4604 4605 /** 4606 * dp_runtime_put() - Return dp runtime refcount 4607 * @soc: Datapath soc handle 4608 * 4609 * Return dp runtime refcount by decrement of an atomic variable, allow dp 4610 * runtime resume finish. 4611 * 4612 * Return: Current refcount 4613 */ 4614 static inline int32_t dp_runtime_put(struct dp_soc *soc) 4615 { 4616 return qdf_atomic_dec_return(&soc->dp_runtime_refcount); 4617 } 4618 4619 /** 4620 * dp_runtime_get_refcount() - Get dp runtime refcount 4621 * @soc: Datapath soc handle 4622 * 4623 * Get dp runtime refcount by returning an atomic variable 4624 * 4625 * Return: Current refcount 4626 */ 4627 static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc) 4628 { 4629 return qdf_atomic_read(&soc->dp_runtime_refcount); 4630 } 4631 4632 /** 4633 * dp_runtime_init() - Init DP related runtime PM clients and runtime refcount 4634 * @soc: Datapath soc handle 4635 * 4636 * Return: QDF_STATUS 4637 */ 4638 static inline void dp_runtime_init(struct dp_soc *soc) 4639 { 4640 hif_rtpm_register(HIF_RTPM_ID_DP, NULL); 4641 hif_rtpm_register(HIF_RTPM_ID_DP_RING_STATS, NULL); 4642 qdf_atomic_init(&soc->dp_runtime_refcount); 4643 } 4644 4645 /** 4646 * dp_runtime_deinit() - Deinit DP related runtime PM clients 4647 * 4648 * Return: None 4649 */ 4650 static inline void dp_runtime_deinit(void) 4651 { 4652 hif_rtpm_deregister(HIF_RTPM_ID_DP); 4653 hif_rtpm_deregister(HIF_RTPM_ID_DP_RING_STATS); 4654 } 4655 4656 /** 4657 * dp_runtime_pm_mark_last_busy() - Mark last busy when rx path in use 4658 * @soc: Datapath soc handle 4659 * 4660 * Return: None 4661 */ 4662 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 4663 { 4664 soc->rx_last_busy = qdf_get_log_timestamp_usecs(); 4665 4666 hif_rtpm_mark_last_busy(HIF_RTPM_ID_DP); 4667 } 4668 #else 4669 static inline int32_t dp_runtime_get(struct dp_soc *soc) 4670 { 4671 return 0; 4672 } 4673 4674 static inline int32_t dp_runtime_put(struct dp_soc *soc) 4675 { 4676 return 0; 4677 } 4678 4679 static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc) 4680 { 4681 return QDF_STATUS_SUCCESS; 4682 } 4683 4684 static inline void dp_runtime_deinit(void) 4685 { 4686 } 4687 4688 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 4689 { 4690 } 4691 #endif 4692 4693 static inline enum QDF_GLOBAL_MODE dp_soc_get_con_mode(struct dp_soc *soc) 4694 { 4695 if (soc->cdp_soc.ol_ops->get_con_mode) 4696 return soc->cdp_soc.ol_ops->get_con_mode(); 4697 4698 return QDF_GLOBAL_MAX_MODE; 4699 } 4700 4701 /** 4702 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats 4703 * processing 4704 * @pdev: Datapath PDEV handle 4705 * 4706 */ 4707 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev); 4708 4709 /** 4710 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats 4711 * processing 4712 * @pdev: Datapath PDEV handle 4713 * 4714 * Return: QDF_STATUS_SUCCESS: Success 4715 * QDF_STATUS_E_NOMEM: Error 4716 */ 4717 4718 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev); 4719 4720 /** 4721 * dp_peer_flush_frags() - Flush all fragments for a particular 4722 * peer 4723 * @soc_hdl: data path soc handle 4724 * @vdev_id: vdev id 4725 * @peer_mac: peer mac address 4726 * 4727 * Return: None 4728 */ 4729 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 4730 uint8_t *peer_mac); 4731 4732 /** 4733 * 
dp_soc_reset_mon_intr_mask() - reset mon intr mask 4734 * @soc: pointer to dp_soc handle 4735 * 4736 * Return: 4737 */ 4738 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc); 4739 4740 /** 4741 * dp_txrx_get_soc_stats() - will return cdp_soc_stats 4742 * @soc_hdl: soc handle 4743 * @soc_stats: buffer to hold the values 4744 * 4745 * Return: QDF_STATUS_SUCCESS: Success 4746 * QDF_STATUS_E_FAILURE: Error 4747 */ 4748 QDF_STATUS dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl, 4749 struct cdp_soc_stats *soc_stats); 4750 4751 /** 4752 * dp_txrx_get_peer_delay_stats() - to get peer delay stats per TIDs 4753 * @soc_hdl: soc handle 4754 * @vdev_id: id of vdev handle 4755 * @peer_mac: mac of DP_PEER handle 4756 * @delay_stats: pointer to delay stats array 4757 * 4758 * Return: QDF_STATUS_SUCCESS: Success 4759 * QDF_STATUS_E_FAILURE: Error 4760 */ 4761 QDF_STATUS 4762 dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 4763 uint8_t *peer_mac, 4764 struct cdp_delay_tid_stats *delay_stats); 4765 4766 /** 4767 * dp_txrx_get_peer_jitter_stats() - to get peer jitter stats per TIDs 4768 * @soc_hdl: soc handle 4769 * @pdev_id: id of pdev handle 4770 * @vdev_id: id of vdev handle 4771 * @peer_mac: mac of DP_PEER handle 4772 * @tid_stats: pointer to jitter stats array 4773 * 4774 * Return: QDF_STATUS_SUCCESS: Success 4775 * QDF_STATUS_E_FAILURE: Error 4776 */ 4777 QDF_STATUS 4778 dp_txrx_get_peer_jitter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4779 uint8_t vdev_id, uint8_t *peer_mac, 4780 struct cdp_peer_tid_stats *tid_stats); 4781 4782 /** 4783 * dp_peer_get_tx_capture_stats() - to get peer Tx Capture stats 4784 * @soc_hdl: soc handle 4785 * @vdev_id: id of vdev handle 4786 * @peer_mac: mac of DP_PEER handle 4787 * @stats: pointer to peer tx capture stats 4788 * 4789 * Return: QDF_STATUS_SUCCESS: Success 4790 * QDF_STATUS_E_FAILURE: Error 4791 */ 4792 QDF_STATUS 4793 dp_peer_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, 4794 uint8_t vdev_id, uint8_t *peer_mac, 4795 struct cdp_peer_tx_capture_stats *stats); 4796 4797 /** 4798 * dp_pdev_get_tx_capture_stats() - to get pdev Tx Capture stats 4799 * @soc_hdl: soc handle 4800 * @pdev_id: id of pdev handle 4801 * @stats: pointer to pdev tx capture stats 4802 * 4803 * Return: QDF_STATUS_SUCCESS: Success 4804 * QDF_STATUS_E_FAILURE: Error 4805 */ 4806 QDF_STATUS 4807 dp_pdev_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4808 struct cdp_pdev_tx_capture_stats *stats); 4809 4810 #ifdef HW_TX_DELAY_STATS_ENABLE 4811 /** 4812 * dp_is_vdev_tx_delay_stats_enabled(): Check if tx delay stats 4813 * is enabled for vdev 4814 * @vdev: dp vdev 4815 * 4816 * Return: true if tx delay stats is enabled for vdev else false 4817 */ 4818 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 4819 { 4820 return vdev->hw_tx_delay_stats_enabled; 4821 } 4822 4823 /** 4824 * dp_pdev_print_tx_delay_stats(): Print vdev tx delay stats 4825 * for pdev 4826 * @soc: dp soc 4827 * 4828 * Return: None 4829 */ 4830 void dp_pdev_print_tx_delay_stats(struct dp_soc *soc); 4831 4832 /** 4833 * dp_pdev_clear_tx_delay_stats() - clear tx delay stats 4834 * @soc: soc handle 4835 * 4836 * Return: None 4837 */ 4838 void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc); 4839 #else 4840 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 4841 { 4842 return 0; 4843 } 4844 4845 static inline void dp_pdev_print_tx_delay_stats(struct dp_soc *soc) 4846 { 4847 } 4848 4849 static inline void 
dp_pdev_clear_tx_delay_stats(struct dp_soc *soc) 4850 { 4851 } 4852 #endif 4853 4854 static inline void 4855 dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config *lro_hash) 4856 { 4857 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv4, 4858 (sizeof(lro_hash->toeplitz_hash_ipv4[0]) * 4859 LRO_IPV4_SEED_ARR_SZ)); 4860 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv6, 4861 (sizeof(lro_hash->toeplitz_hash_ipv6[0]) * 4862 LRO_IPV6_SEED_ARR_SZ)); 4863 } 4864 4865 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 4866 /** 4867 * dp_get_pdev_telemetry_stats- API to get pdev telemetry stats 4868 * @soc_hdl: soc handle 4869 * @pdev_id: id of pdev handle 4870 * @stats: pointer to pdev telemetry stats 4871 * 4872 * Return: QDF_STATUS_SUCCESS: Success 4873 * QDF_STATUS_E_FAILURE: Error 4874 */ 4875 QDF_STATUS 4876 dp_get_pdev_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4877 struct cdp_pdev_telemetry_stats *stats); 4878 4879 /** 4880 * dp_get_peer_telemetry_stats() - API to get peer telemetry stats 4881 * @soc_hdl: soc handle 4882 * @addr: peer mac 4883 * @stats: pointer to peer telemetry stats 4884 * 4885 * Return: QDF_STATUS_SUCCESS: Success 4886 * QDF_STATUS_E_FAILURE: Error 4887 */ 4888 QDF_STATUS 4889 dp_get_peer_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t *addr, 4890 struct cdp_peer_telemetry_stats *stats); 4891 4892 /** 4893 * dp_get_peer_deter_stats() - API to get peer deterministic stats 4894 * @soc_hdl: soc handle 4895 * @vdev_id: id of vdev handle 4896 * @addr: peer mac 4897 * @stats: pointer to peer deterministic stats 4898 * 4899 * Return: QDF_STATUS_SUCCESS: Success 4900 * QDF_STATUS_E_FAILURE: Error 4901 */ 4902 QDF_STATUS 4903 dp_get_peer_deter_stats(struct cdp_soc_t *soc_hdl, 4904 uint8_t vdev_id, 4905 uint8_t *addr, 4906 struct cdp_peer_deter_stats *stats); 4907 4908 /** 4909 * dp_get_pdev_deter_stats() - API to get pdev deterministic stats 4910 * @soc_hdl: soc handle 4911 * @pdev_id: id of pdev handle 4912 * @stats: pointer to pdev deterministic stats 4913 * 4914 * Return: QDF_STATUS_SUCCESS: Success 4915 * QDF_STATUS_E_FAILURE: Error 4916 */ 4917 QDF_STATUS 4918 dp_get_pdev_deter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4919 struct cdp_pdev_deter_stats *stats); 4920 4921 /** 4922 * dp_update_pdev_chan_util_stats() - API to update channel utilization stats 4923 * @soc_hdl: soc handle 4924 * @pdev_id: id of pdev handle 4925 * @ch_util: Pointer to channel util stats 4926 * 4927 * Return: QDF_STATUS_SUCCESS: Success 4928 * QDF_STATUS_E_FAILURE: Error 4929 */ 4930 QDF_STATUS 4931 dp_update_pdev_chan_util_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4932 struct cdp_pdev_chan_util_stats *ch_util); 4933 #endif /* WLAN_CONFIG_TELEMETRY_AGENT */ 4934 4935 #ifdef CONNECTIVITY_PKTLOG 4936 /** 4937 * dp_tx_send_pktlog() - send tx packet log 4938 * @soc: soc handle 4939 * @pdev: pdev handle 4940 * @tx_desc: TX software descriptor 4941 * @nbuf: nbuf 4942 * @status: status of tx packet 4943 * 4944 * This function is used to send tx packet for logging 4945 * 4946 * Return: None 4947 * 4948 */ 4949 static inline 4950 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 4951 struct dp_tx_desc_s *tx_desc, 4952 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 4953 { 4954 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_tx_packetdump_cb; 4955 4956 if (qdf_unlikely(packetdump_cb) && 4957 dp_tx_frm_std == tx_desc->frm_type) { 4958 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 4959 tx_desc->vdev_id, nbuf, status, QDF_TX_DATA_PKT); 4960 } 4961 } 4962 4963 /** 4964 * 
dp_rx_send_pktlog() - send rx packet log
4965 * @soc: soc handle
4966 * @pdev: pdev handle
4967 * @nbuf: nbuf
4968 * @status: status of rx packet
4969 *
4970 * This function is used to send the rx packet for logging
4971 *
4972 * Return: None
4973 *
4974 */
4975 static inline
4976 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
4977 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
4978 {
4979 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb;
4980
4981 if (qdf_unlikely(packetdump_cb)) {
4982 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
4983 QDF_NBUF_CB_RX_VDEV_ID(nbuf),
4984 nbuf, status, QDF_RX_DATA_PKT);
4985 }
4986 }
4987
4988 /**
4989 * dp_rx_err_send_pktlog() - send rx error packet log
4990 * @soc: soc handle
4991 * @pdev: pdev handle
4992 * @mpdu_desc_info: MPDU descriptor info
4993 * @nbuf: nbuf
4994 * @status: status of rx packet
4995 * @set_pktlen: whether to set packet length
4996 *
4997 * This API should only be called when we have not removed
4998 * Rx TLV from head, and head is pointing to rx_tlv
4999 *
5000 * This function is used to log rx packets from the error path,
5001 * for which the rx packet TLV has not been removed.
5002 *
5003 * Return: None
5004 *
5005 */
5006 static inline
5007 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
5008 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
5009 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status,
5010 bool set_pktlen)
5011 {
5012 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb;
5013 qdf_size_t skip_size;
5014 uint16_t msdu_len, nbuf_len;
5015 uint8_t *rx_tlv_hdr;
5016 struct hal_rx_msdu_metadata msdu_metadata;
5017
5018 if (qdf_unlikely(packetdump_cb)) {
5019 rx_tlv_hdr = qdf_nbuf_data(nbuf);
5020 nbuf_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
5021 rx_tlv_hdr);
5022 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr,
5023 &msdu_metadata);
5024
5025 if (mpdu_desc_info->bar_frame ||
5026 (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_FRAGMENT))
5027 skip_size = soc->rx_pkt_tlv_size;
5028 else
5029 skip_size = soc->rx_pkt_tlv_size +
5030 msdu_metadata.l3_hdr_pad;
5031
5032 if (set_pktlen) {
5033 msdu_len = nbuf_len + skip_size;
5034 qdf_nbuf_set_pktlen(nbuf, qdf_min(msdu_len,
5035 (uint16_t)RX_DATA_BUFFER_SIZE));
5036 }
5037
5038 qdf_nbuf_pull_head(nbuf, skip_size);
5039 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
5040 QDF_NBUF_CB_RX_VDEV_ID(nbuf),
5041 nbuf, status, QDF_RX_DATA_PKT);
5042 qdf_nbuf_push_head(nbuf, skip_size);
5043 }
5044 }
5045
5046 #else
5047 static inline
5048 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
5049 struct dp_tx_desc_s *tx_desc,
5050 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
5051 {
5052 }
5053
5054 static inline
5055 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
5056 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
5057 {
5058 }
5059
5060 static inline
5061 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
5062 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
5063 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status,
5064 bool set_pktlen)
5065 {
5066 }
5067 #endif
5068
5069 /**
5070 * dp_pdev_update_fast_rx_flag() - Update Fast rx flag for a PDEV
5071 * @soc: Data path soc handle
5072 * @pdev: PDEV handle
5073 *
5074 * Return: None
5075 */
5076 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev);
5077
5078 #ifdef FEATURE_DIRECT_LINK
5079 /**
5080 * dp_setup_direct_link_refill_ring(): Setup direct link refill ring for pdev
5081 * @soc_hdl: DP SOC
handle 5082 * @pdev_id: pdev id 5083 * 5084 * Return: Handle to SRNG 5085 */ 5086 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5087 uint8_t pdev_id); 5088 5089 /** 5090 * dp_destroy_direct_link_refill_ring(): Destroy direct link refill ring for 5091 * pdev 5092 * @soc_hdl: DP SOC handle 5093 * @pdev_id: pdev id 5094 * 5095 * Return: None 5096 */ 5097 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5098 uint8_t pdev_id); 5099 #else 5100 static inline 5101 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5102 uint8_t pdev_id) 5103 { 5104 return NULL; 5105 } 5106 5107 static inline 5108 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5109 uint8_t pdev_id) 5110 { 5111 } 5112 #endif 5113 5114 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY 5115 static inline 5116 void dp_cfg_event_record(struct dp_soc *soc, 5117 enum dp_cfg_event_type event, 5118 union dp_cfg_event_desc *cfg_event_desc) 5119 { 5120 struct dp_cfg_event_history *cfg_event_history = 5121 &soc->cfg_event_history; 5122 struct dp_cfg_event *entry; 5123 uint32_t idx; 5124 uint16_t slot; 5125 5126 dp_get_frag_hist_next_atomic_idx(&cfg_event_history->index, &idx, 5127 &slot, 5128 DP_CFG_EVT_HIST_SLOT_SHIFT, 5129 DP_CFG_EVT_HIST_PER_SLOT_MAX, 5130 DP_CFG_EVT_HISTORY_SIZE); 5131 5132 entry = &cfg_event_history->entry[slot][idx]; 5133 5134 entry->timestamp = qdf_get_log_timestamp(); 5135 entry->type = event; 5136 qdf_mem_copy(&entry->event_desc, cfg_event_desc, 5137 sizeof(entry->event_desc)); 5138 } 5139 5140 static inline void 5141 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5142 struct dp_vdev *vdev) 5143 { 5144 union dp_cfg_event_desc cfg_evt_desc = {0}; 5145 struct dp_vdev_attach_detach_desc *vdev_evt = 5146 &cfg_evt_desc.vdev_evt; 5147 5148 if (qdf_unlikely(event != DP_CFG_EVENT_VDEV_ATTACH && 5149 event != DP_CFG_EVENT_VDEV_UNREF_DEL && 5150 event != DP_CFG_EVENT_VDEV_DETACH)) { 5151 qdf_assert_always(0); 5152 return; 5153 } 5154 5155 vdev_evt->vdev = vdev; 5156 vdev_evt->vdev_id = vdev->vdev_id; 5157 vdev_evt->ref_count = qdf_atomic_read(&vdev->ref_cnt); 5158 vdev_evt->mac_addr = vdev->mac_addr; 5159 5160 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5161 } 5162 5163 static inline void 5164 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5165 struct dp_peer *peer, struct dp_vdev *vdev, 5166 uint8_t is_reuse) 5167 { 5168 union dp_cfg_event_desc cfg_evt_desc = {0}; 5169 struct dp_peer_cmn_ops_desc *peer_evt = &cfg_evt_desc.peer_cmn_evt; 5170 5171 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_CREATE && 5172 event != DP_CFG_EVENT_PEER_DELETE && 5173 event != DP_CFG_EVENT_PEER_UNREF_DEL)) { 5174 qdf_assert_always(0); 5175 return; 5176 } 5177 5178 peer_evt->peer = peer; 5179 peer_evt->vdev = vdev; 5180 peer_evt->vdev_id = vdev->vdev_id; 5181 peer_evt->is_reuse = is_reuse; 5182 peer_evt->peer_ref_count = qdf_atomic_read(&peer->ref_cnt); 5183 peer_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt); 5184 peer_evt->mac_addr = peer->mac_addr; 5185 peer_evt->vdev_mac_addr = vdev->mac_addr; 5186 5187 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5188 } 5189 5190 static inline void 5191 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc, 5192 enum dp_cfg_event_type event, 5193 struct dp_peer *mld_peer, 5194 struct dp_peer *link_peer, 5195 uint8_t idx, uint8_t result) 5196 { 5197 union dp_cfg_event_desc cfg_evt_desc = {0}; 5198 struct dp_mlo_add_del_link_desc *mlo_link_delink_evt = 
5199 &cfg_evt_desc.mlo_link_delink_evt;
5200
5201 if (qdf_unlikely(event != DP_CFG_EVENT_MLO_ADD_LINK &&
5202 event != DP_CFG_EVENT_MLO_DEL_LINK)) {
5203 qdf_assert_always(0);
5204 return;
5205 }
5206
5207 mlo_link_delink_evt->link_peer = link_peer;
5208 mlo_link_delink_evt->mld_peer = mld_peer;
5209 mlo_link_delink_evt->link_mac_addr = link_peer->mac_addr;
5210 mlo_link_delink_evt->mld_mac_addr = mld_peer->mac_addr;
5211 mlo_link_delink_evt->num_links = mld_peer->num_links;
5212 mlo_link_delink_evt->action_result = result;
5213 mlo_link_delink_evt->idx = idx;
5214
5215 dp_cfg_event_record(soc, event, &cfg_evt_desc);
5216 }
5217
5218 static inline void
5219 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc,
5220 struct dp_peer *mld_peer,
5221 struct dp_vdev *prev_vdev,
5222 struct dp_vdev *new_vdev)
5223 {
5224 union dp_cfg_event_desc cfg_evt_desc = {0};
5225 struct dp_mlo_setup_vdev_update_desc *vdev_update_evt =
5226 &cfg_evt_desc.mlo_setup_vdev_update;
5227
5228 vdev_update_evt->mld_peer = mld_peer;
5229 vdev_update_evt->prev_vdev = prev_vdev;
5230 vdev_update_evt->new_vdev = new_vdev;
5231
5232 dp_cfg_event_record(soc, DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE,
5233 &cfg_evt_desc);
5234 }
5235
5236 static inline void
5237 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc,
5238 enum dp_cfg_event_type event,
5239 struct dp_peer *peer,
5240 uint8_t *mac_addr,
5241 uint8_t is_ml_peer,
5242 uint16_t peer_id, uint16_t ml_peer_id,
5243 uint16_t hw_peer_id, uint8_t vdev_id)
5244 {
5245 union dp_cfg_event_desc cfg_evt_desc = {0};
5246 struct dp_rx_peer_map_unmap_desc *peer_map_unmap_evt =
5247 &cfg_evt_desc.peer_map_unmap_evt;
5248
5249 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_MAP &&
5250 event != DP_CFG_EVENT_PEER_UNMAP &&
5251 event != DP_CFG_EVENT_MLO_PEER_MAP &&
5252 event != DP_CFG_EVENT_MLO_PEER_UNMAP)) {
5253 qdf_assert_always(0);
5254 return;
5255 }
5256
5257 peer_map_unmap_evt->peer_id = peer_id;
5258 peer_map_unmap_evt->ml_peer_id = ml_peer_id;
5259 peer_map_unmap_evt->hw_peer_id = hw_peer_id;
5260 peer_map_unmap_evt->vdev_id = vdev_id;
5261 /* Peer may be NULL at times, but it's not an issue.
*/ 5262 peer_map_unmap_evt->peer = peer; 5263 peer_map_unmap_evt->is_ml_peer = is_ml_peer; 5264 qdf_mem_copy(&peer_map_unmap_evt->mac_addr.raw, mac_addr, 5265 QDF_MAC_ADDR_SIZE); 5266 5267 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5268 } 5269 5270 static inline void 5271 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc, 5272 enum dp_cfg_event_type event, 5273 struct dp_peer *peer, 5274 struct dp_vdev *vdev, 5275 uint8_t vdev_id, 5276 struct cdp_peer_setup_info *peer_setup_info) 5277 { 5278 union dp_cfg_event_desc cfg_evt_desc = {0}; 5279 struct dp_peer_setup_desc *peer_setup_evt = 5280 &cfg_evt_desc.peer_setup_evt; 5281 5282 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_SETUP && 5283 event != DP_CFG_EVENT_MLO_SETUP)) { 5284 qdf_assert_always(0); 5285 return; 5286 } 5287 5288 peer_setup_evt->peer = peer; 5289 peer_setup_evt->vdev = vdev; 5290 if (vdev) 5291 peer_setup_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt); 5292 peer_setup_evt->mac_addr = peer->mac_addr; 5293 peer_setup_evt->vdev_id = vdev_id; 5294 if (peer_setup_info) { 5295 peer_setup_evt->is_first_link = peer_setup_info->is_first_link; 5296 peer_setup_evt->is_primary_link = peer_setup_info->is_primary_link; 5297 qdf_mem_copy(peer_setup_evt->mld_mac_addr.raw, 5298 peer_setup_info->mld_peer_mac, 5299 QDF_MAC_ADDR_SIZE); 5300 } 5301 5302 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5303 } 5304 #else 5305 5306 static inline void 5307 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5308 struct dp_vdev *vdev) 5309 { 5310 } 5311 5312 static inline void 5313 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5314 struct dp_peer *peer, struct dp_vdev *vdev, 5315 uint8_t is_reuse) 5316 { 5317 } 5318 5319 static inline void 5320 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc, 5321 enum dp_cfg_event_type event, 5322 struct dp_peer *mld_peer, 5323 struct dp_peer *link_peer, 5324 uint8_t idx, uint8_t result) 5325 { 5326 } 5327 5328 static inline void 5329 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc, 5330 struct dp_peer *mld_peer, 5331 struct dp_vdev *prev_vdev, 5332 struct dp_vdev *new_vdev) 5333 { 5334 } 5335 5336 static inline void 5337 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc, 5338 enum dp_cfg_event_type event, 5339 struct dp_peer *peer, 5340 uint8_t *mac_addr, 5341 uint8_t is_ml_peer, 5342 uint16_t peer_id, uint16_t ml_peer_id, 5343 uint16_t hw_peer_id, uint8_t vdev_id) 5344 { 5345 } 5346 5347 static inline void 5348 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc, 5349 enum dp_cfg_event_type event, 5350 struct dp_peer *peer, 5351 struct dp_vdev *vdev, 5352 uint8_t vdev_id, 5353 struct cdp_peer_setup_info *peer_setup_info) 5354 { 5355 } 5356 #endif 5357 5358 #ifndef WLAN_SOFTUMAC_SUPPORT 5359 /** 5360 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts 5361 * @txrx_soc: DP SOC handle 5362 * 5363 * Return: none 5364 */ 5365 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc); 5366 #endif 5367 5368 /** 5369 * dp_get_peer_stats()- Get peer stats 5370 * @peer: Datapath peer 5371 * @peer_stats: buffer for peer stats 5372 * 5373 * Return: none 5374 */ 5375 void dp_get_peer_stats(struct dp_peer *peer, 5376 struct cdp_peer_stats *peer_stats); 5377 5378 /** 5379 * dp_get_per_link_peer_stats()- Get per link peer stats 5380 * @peer: Datapath peer 5381 * @peer_stats: buffer for peer stats 5382 * @peer_type: Peer type 5383 * @num_link: Number of ML links 5384 * 5385 * Return: status 
success/failure 5386 */ 5387 QDF_STATUS dp_get_per_link_peer_stats(struct dp_peer *peer, 5388 struct cdp_peer_stats *peer_stats, 5389 enum cdp_peer_type peer_type, 5390 uint8_t num_link); 5391 /** 5392 * dp_get_peer_hw_link_id() - get peer hardware link id 5393 * @soc: soc handle 5394 * @pdev: data path pdev 5395 * 5396 * Return: link_id 5397 */ 5398 static inline int 5399 dp_get_peer_hw_link_id(struct dp_soc *soc, 5400 struct dp_pdev *pdev) 5401 { 5402 if (wlan_cfg_is_peer_link_stats_enabled(soc->wlan_cfg_ctx)) 5403 return ((soc->arch_ops.get_hw_link_id(pdev)) + 1); 5404 5405 return 0; 5406 } 5407 5408 #ifdef QCA_MULTIPASS_SUPPORT 5409 /** 5410 * dp_tx_remove_vlan_tag() - Remove 4 bytes of vlan tag 5411 * @vdev: DP vdev handle 5412 * @nbuf: network buffer 5413 * 5414 * Return: void 5415 */ 5416 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf); 5417 #endif 5418 5419 /** 5420 * dp_print_per_link_stats() - Print per link peer stats. 5421 * @soc_hdl: soc handle. 5422 * @vdev_id: vdev_id. 5423 * 5424 * Return: None. 5425 */ 5426 void dp_print_per_link_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id); 5427 #endif /* #ifndef _DP_INTERNAL_H_ */ 5428