/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"
#include "dp_htt.h"

#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

#define DP_PEER_WDS_COUNT_INVALID UINT_MAX

#define DP_BLOCKMEM_SIZE 4096

/* Alignment for consistent memory for DP rings */
#define DP_RING_BASE_ALIGN 32

#define DP_RSSI_INVAL 0x80
#define DP_RSSI_AVG_WEIGHT 2
/*
 * Formula to derive avg_rssi is taken from wifi2.o firmware
 */
#define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))

/* Macro for NYSM value received in VHT TLV */
#define VHT_SGI_NYSM 3

#define INVALID_WBM_RING_NUM 0xF

/* struct htt_dbgfs_cfg - structure to maintain required htt data
 * @msg_word: htt msg sent to upper layer
 * @m: qdf debugfs file pointer
 */
struct htt_dbgfs_cfg {
	uint32_t *msg_word;
	qdf_debugfs_file_t m;
};

/* Cookie MSB bits assigned for different use cases.
 * Note: users can't use the last 3 bits, as they are reserved for pdev_id.
 * This assignment must be revisited if the number of pdevs ever exceeds 3.
61 */ 62 /* Reserve for default case */ 63 #define DBG_STATS_COOKIE_DEFAULT 0x0 64 65 /* Reserve for DP Stats: 3rd bit */ 66 #define DBG_STATS_COOKIE_DP_STATS BIT(3) 67 68 /* Reserve for HTT Stats debugfs support: 4th bit */ 69 #define DBG_STATS_COOKIE_HTT_DBGFS BIT(4) 70 71 /*Reserve for HTT Stats debugfs support: 5th bit */ 72 #define DBG_SYSFS_STATS_COOKIE BIT(5) 73 74 /* Reserve for HTT Stats OBSS PD support: 6th bit */ 75 #define DBG_STATS_COOKIE_HTT_OBSS BIT(6) 76 77 /** 78 * Bitmap of HTT PPDU TLV types for Default mode 79 */ 80 #define HTT_PPDU_DEFAULT_TLV_BITMAP \ 81 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 82 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 83 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 84 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 85 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 86 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) 87 88 /* PPDU STATS CFG */ 89 #define DP_PPDU_STATS_CFG_ALL 0xFFFF 90 91 /* PPDU stats mask sent to FW to enable enhanced stats */ 92 #define DP_PPDU_STATS_CFG_ENH_STATS \ 93 (HTT_PPDU_DEFAULT_TLV_BITMAP) | \ 94 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \ 95 (1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \ 96 (1 << HTT_PPDU_STATS_USERS_INFO_TLV) 97 98 /* PPDU stats mask sent to FW to support debug sniffer feature */ 99 #define DP_PPDU_STATS_CFG_SNIFFER \ 100 (HTT_PPDU_DEFAULT_TLV_BITMAP) | \ 101 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \ 102 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \ 103 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \ 104 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \ 105 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \ 106 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \ 107 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \ 108 (1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \ 109 (1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \ 110 (1 << HTT_PPDU_STATS_USERS_INFO_TLV) 111 112 /* PPDU stats mask sent to FW to support BPR feature*/ 113 #define DP_PPDU_STATS_CFG_BPR \ 114 (1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \ 115 (1 << HTT_PPDU_STATS_USERS_INFO_TLV) 116 117 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */ 118 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \ 119 DP_PPDU_STATS_CFG_ENH_STATS) 120 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */ 121 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \ 122 DP_PPDU_TXLITE_STATS_BITMASK_CFG) 123 124 /** 125 * Bitmap of HTT PPDU delayed ba TLV types for Default mode 126 */ 127 #define HTT_PPDU_DELAYED_BA_TLV_BITMAP \ 128 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 129 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 130 (1 << HTT_PPDU_STATS_USR_RATE_TLV) 131 132 /** 133 * Bitmap of HTT PPDU TLV types for Delayed BA 134 */ 135 #define HTT_PPDU_STATUS_TLV_BITMAP \ 136 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 137 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) 138 139 /** 140 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64 141 */ 142 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \ 143 ((1 << HTT_PPDU_STATS_COMMON_TLV) | \ 144 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 145 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 146 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 147 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 148 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ 149 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \ 150 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV)) 151 152 /** 153 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256 154 */ 155 #define 
HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \ 156 ((1 << HTT_PPDU_STATS_COMMON_TLV) | \ 157 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 158 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 159 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 160 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 161 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ 162 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \ 163 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV)) 164 165 static const enum cdp_packet_type hal_2_dp_pkt_type_map[HAL_DOT11_MAX] = { 166 [HAL_DOT11A] = DOT11_A, 167 [HAL_DOT11B] = DOT11_B, 168 [HAL_DOT11N_MM] = DOT11_N, 169 [HAL_DOT11AC] = DOT11_AC, 170 [HAL_DOT11AX] = DOT11_AX, 171 [HAL_DOT11BA] = DOT11_MAX, 172 #ifdef WLAN_FEATURE_11BE 173 [HAL_DOT11BE] = DOT11_BE, 174 #else 175 [HAL_DOT11BE] = DOT11_MAX, 176 #endif 177 [HAL_DOT11AZ] = DOT11_MAX, 178 [HAL_DOT11N_GF] = DOT11_MAX, 179 }; 180 181 #ifdef WLAN_FEATURE_11BE 182 /** 183 * dp_get_mcs_array_index_by_pkt_type_mcs () - get the destination mcs index 184 in array 185 * @pkt_type: host SW pkt type 186 * @mcs: mcs value for TX/RX rate 187 * 188 * Return: succeeded - valid index in mcs array 189 fail - same value as MCS_MAX 190 */ 191 static inline uint8_t 192 dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs) 193 { 194 uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX; 195 196 switch (pkt_type) { 197 case DOT11_A: 198 dst_mcs_idx = 199 mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs; 200 break; 201 case DOT11_B: 202 dst_mcs_idx = 203 mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs; 204 break; 205 case DOT11_N: 206 dst_mcs_idx = 207 mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs; 208 break; 209 case DOT11_AC: 210 dst_mcs_idx = 211 mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs; 212 break; 213 case DOT11_AX: 214 dst_mcs_idx = 215 mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs; 216 break; 217 case DOT11_BE: 218 dst_mcs_idx = 219 mcs >= MAX_MCS_11BE ? (MAX_MCS - 1) : mcs; 220 break; 221 default: 222 break; 223 } 224 225 return dst_mcs_idx; 226 } 227 #else 228 static inline uint8_t 229 dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs) 230 { 231 uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX; 232 233 switch (pkt_type) { 234 case DOT11_A: 235 dst_mcs_idx = 236 mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs; 237 break; 238 case DOT11_B: 239 dst_mcs_idx = 240 mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs; 241 break; 242 case DOT11_N: 243 dst_mcs_idx = 244 mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs; 245 break; 246 case DOT11_AC: 247 dst_mcs_idx = 248 mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs; 249 break; 250 case DOT11_AX: 251 dst_mcs_idx = 252 mcs >= MAX_MCS_11AX ? 
(MAX_MCS - 1) : mcs; 253 break; 254 default: 255 break; 256 } 257 258 return dst_mcs_idx; 259 } 260 #endif 261 262 #ifdef WIFI_MONITOR_SUPPORT 263 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc); 264 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc); 265 #else 266 static inline 267 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc) 268 { 269 return QDF_STATUS_SUCCESS; 270 } 271 272 static inline 273 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc) 274 { 275 return QDF_STATUS_SUCCESS; 276 } 277 #endif 278 279 /* 280 * dp_rx_err_match_dhost() - function to check whether dest-mac is correct 281 * @eh: Ethernet header of incoming packet 282 * @vdev: dp_vdev object of the VAP on which this data packet is received 283 * 284 * Return: 1 if the destination mac is correct, 285 * 0 if this frame is not correctly destined to this VAP/MLD 286 */ 287 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev); 288 289 #ifdef MONITOR_MODULARIZED_ENABLE 290 static inline bool dp_monitor_modularized_enable(void) 291 { 292 return TRUE; 293 } 294 295 static inline QDF_STATUS 296 dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; } 297 298 static inline QDF_STATUS 299 dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; } 300 #else 301 static inline bool dp_monitor_modularized_enable(void) 302 { 303 return FALSE; 304 } 305 306 static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc) 307 { 308 return dp_mon_soc_attach(soc); 309 } 310 311 static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc) 312 { 313 return dp_mon_soc_detach(soc); 314 } 315 #endif 316 317 #ifndef WIFI_MONITOR_SUPPORT 318 #define MON_BUF_MIN_ENTRIES 64 319 320 static inline QDF_STATUS dp_monitor_pdev_attach(struct dp_pdev *pdev) 321 { 322 return QDF_STATUS_SUCCESS; 323 } 324 325 static inline QDF_STATUS dp_monitor_pdev_detach(struct dp_pdev *pdev) 326 { 327 return QDF_STATUS_SUCCESS; 328 } 329 330 static inline QDF_STATUS dp_monitor_vdev_attach(struct dp_vdev *vdev) 331 { 332 return QDF_STATUS_E_FAILURE; 333 } 334 335 static inline QDF_STATUS dp_monitor_vdev_detach(struct dp_vdev *vdev) 336 { 337 return QDF_STATUS_E_FAILURE; 338 } 339 340 static inline QDF_STATUS dp_monitor_peer_attach(struct dp_soc *soc, 341 struct dp_peer *peer) 342 { 343 return QDF_STATUS_SUCCESS; 344 } 345 346 static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc, 347 struct dp_peer *peer) 348 { 349 return QDF_STATUS_E_FAILURE; 350 } 351 352 static inline struct cdp_peer_rate_stats_ctx* 353 dp_monitor_peer_get_peerstats_ctx(struct dp_soc *soc, struct dp_peer *peer) 354 { 355 return NULL; 356 } 357 358 static inline 359 void dp_monitor_peer_reset_stats(struct dp_soc *soc, struct dp_peer *peer) 360 { 361 } 362 363 static inline 364 void dp_monitor_peer_get_stats(struct dp_soc *soc, struct dp_peer *peer, 365 void *arg, enum cdp_stat_update_type type) 366 { 367 } 368 369 static inline 370 void dp_monitor_invalid_peer_update_pdev_stats(struct dp_soc *soc, 371 struct dp_pdev *pdev) 372 { 373 } 374 375 static inline 376 QDF_STATUS dp_monitor_peer_get_stats_param(struct dp_soc *soc, 377 struct dp_peer *peer, 378 enum cdp_peer_stats_type type, 379 cdp_peer_stats_param_t *buf) 380 { 381 return QDF_STATUS_E_FAILURE; 382 } 383 384 static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev) 385 { 386 return QDF_STATUS_SUCCESS; 387 } 388 389 static inline QDF_STATUS dp_monitor_pdev_deinit(struct dp_pdev *pdev) 390 { 391 return QDF_STATUS_SUCCESS; 392 } 393 394 static inline 
QDF_STATUS dp_monitor_soc_cfg_init(struct dp_soc *soc) 395 { 396 return QDF_STATUS_SUCCESS; 397 } 398 399 static inline QDF_STATUS dp_monitor_config_debug_sniffer(struct dp_pdev *pdev, 400 int val) 401 { 402 return QDF_STATUS_E_FAILURE; 403 } 404 405 static inline void dp_monitor_flush_rings(struct dp_soc *soc) 406 { 407 } 408 409 static inline QDF_STATUS dp_monitor_htt_srng_setup(struct dp_soc *soc, 410 struct dp_pdev *pdev, 411 int mac_id, 412 int mac_for_pdev) 413 { 414 return QDF_STATUS_SUCCESS; 415 } 416 417 static inline void dp_monitor_service_mon_rings(struct dp_soc *soc, 418 uint32_t quota) 419 { 420 } 421 422 static inline 423 uint32_t dp_monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx, 424 uint32_t mac_id, uint32_t quota) 425 { 426 return 0; 427 } 428 429 static inline 430 uint32_t dp_monitor_drop_packets_for_mac(struct dp_pdev *pdev, 431 uint32_t mac_id, uint32_t quota) 432 { 433 return 0; 434 } 435 436 static inline void dp_monitor_peer_tx_init(struct dp_pdev *pdev, 437 struct dp_peer *peer) 438 { 439 } 440 441 static inline void dp_monitor_peer_tx_cleanup(struct dp_vdev *vdev, 442 struct dp_peer *peer) 443 { 444 } 445 446 static inline 447 void dp_monitor_peer_tid_peer_id_update(struct dp_soc *soc, 448 struct dp_peer *peer, 449 uint16_t peer_id) 450 { 451 } 452 453 static inline void dp_monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev) 454 { 455 } 456 457 static inline void dp_monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev) 458 { 459 } 460 461 static inline 462 QDF_STATUS dp_monitor_tx_capture_debugfs_init(struct dp_pdev *pdev) 463 { 464 return QDF_STATUS_SUCCESS; 465 } 466 467 static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev, 468 struct dp_peer *peer) 469 { 470 } 471 472 static inline 473 QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc, 474 struct dp_tx_desc_s *desc, 475 struct hal_tx_completion_status *ts, 476 uint16_t peer_id) 477 { 478 return QDF_STATUS_E_FAILURE; 479 } 480 481 static inline 482 QDF_STATUS monitor_update_msdu_to_list(struct dp_soc *soc, 483 struct dp_pdev *pdev, 484 struct dp_peer *peer, 485 struct hal_tx_completion_status *ts, 486 qdf_nbuf_t netbuf) 487 { 488 return QDF_STATUS_E_FAILURE; 489 } 490 491 static inline bool dp_monitor_ppdu_stats_ind_handler(struct htt_soc *soc, 492 uint32_t *msg_word, 493 qdf_nbuf_t htt_t2h_msg) 494 { 495 return true; 496 } 497 498 static inline QDF_STATUS dp_monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev) 499 { 500 return QDF_STATUS_SUCCESS; 501 } 502 503 static inline void dp_monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev) 504 { 505 } 506 507 static inline void dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev) 508 { 509 } 510 511 static inline QDF_STATUS dp_monitor_config_enh_tx_capture(struct dp_pdev *pdev, 512 uint32_t val) 513 { 514 return QDF_STATUS_E_INVAL; 515 } 516 517 static inline QDF_STATUS dp_monitor_tx_peer_filter(struct dp_pdev *pdev, 518 struct dp_peer *peer, 519 uint8_t is_tx_pkt_cap_enable, 520 uint8_t *peer_mac) 521 { 522 return QDF_STATUS_E_INVAL; 523 } 524 525 static inline QDF_STATUS dp_monitor_config_enh_rx_capture(struct dp_pdev *pdev, 526 uint32_t val) 527 { 528 return QDF_STATUS_E_INVAL; 529 } 530 531 static inline 532 QDF_STATUS dp_monitor_set_bpr_enable(struct dp_pdev *pdev, uint32_t val) 533 { 534 return QDF_STATUS_E_FAILURE; 535 } 536 537 static inline 538 int dp_monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val) 539 { 540 return 0; 541 } 542 543 static inline 544 void dp_monitor_set_atf_stats_enable(struct 
dp_pdev *pdev, bool value) 545 { 546 } 547 548 static inline 549 void dp_monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) 550 { 551 } 552 553 static inline 554 bool dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) 555 { 556 return false; 557 } 558 559 static inline 560 bool dp_monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) 561 { 562 return false; 563 } 564 565 static inline 566 bool dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) 567 { 568 return false; 569 } 570 571 static inline 572 int dp_monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, 573 bool enable) 574 { 575 return 0; 576 } 577 578 static inline void dp_monitor_pktlogmod_exit(struct dp_pdev *pdev) 579 { 580 } 581 582 static inline 583 QDF_STATUS dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev) 584 { 585 return QDF_STATUS_E_FAILURE; 586 } 587 588 static inline 589 void dp_monitor_neighbour_peers_detach(struct dp_pdev *pdev) 590 { 591 } 592 593 static inline QDF_STATUS dp_monitor_filter_neighbour_peer(struct dp_pdev *pdev, 594 uint8_t *rx_pkt_hdr) 595 { 596 return QDF_STATUS_E_FAILURE; 597 } 598 599 static inline void dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev) 600 { 601 } 602 603 static inline 604 void dp_monitor_reap_timer_init(struct dp_soc *soc) 605 { 606 } 607 608 static inline 609 void dp_monitor_reap_timer_deinit(struct dp_soc *soc) 610 { 611 } 612 613 static inline 614 bool dp_monitor_reap_timer_start(struct dp_soc *soc, 615 enum cdp_mon_reap_source source) 616 { 617 return false; 618 } 619 620 static inline 621 bool dp_monitor_reap_timer_stop(struct dp_soc *soc, 622 enum cdp_mon_reap_source source) 623 { 624 return false; 625 } 626 627 static inline void 628 dp_monitor_reap_timer_suspend(struct dp_soc *soc) 629 { 630 } 631 632 static inline 633 void dp_monitor_vdev_timer_init(struct dp_soc *soc) 634 { 635 } 636 637 static inline 638 void dp_monitor_vdev_timer_deinit(struct dp_soc *soc) 639 { 640 } 641 642 static inline 643 void dp_monitor_vdev_timer_start(struct dp_soc *soc) 644 { 645 } 646 647 static inline 648 bool dp_monitor_vdev_timer_stop(struct dp_soc *soc) 649 { 650 return false; 651 } 652 653 static inline struct qdf_mem_multi_page_t* 654 dp_monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id) 655 { 656 return NULL; 657 } 658 659 static inline uint32_t * 660 dp_monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id) 661 { 662 return NULL; 663 } 664 665 static inline QDF_STATUS dp_monitor_drop_inv_peer_pkts(struct dp_vdev *vdev) 666 { 667 return QDF_STATUS_E_FAILURE; 668 } 669 670 static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev) 671 { 672 return false; 673 } 674 675 static inline void dp_monitor_vdev_register_osif(struct dp_vdev *vdev, 676 struct ol_txrx_ops *txrx_ops) 677 { 678 } 679 680 static inline bool dp_monitor_is_vdev_timer_running(struct dp_soc *soc) 681 { 682 return false; 683 } 684 685 static inline 686 void dp_monitor_pdev_set_mon_vdev(struct dp_vdev *vdev) 687 { 688 } 689 690 static inline void dp_monitor_vdev_delete(struct dp_soc *soc, 691 struct dp_vdev *vdev) 692 { 693 } 694 695 static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer) 696 { 697 } 698 699 static inline void dp_monitor_neighbour_peer_add_ast(struct dp_pdev *pdev, 700 struct dp_peer *ta_peer, 701 uint8_t *mac_addr, 702 qdf_nbuf_t nbuf, 703 uint32_t flags) 704 { 705 } 706 707 static inline void 708 dp_monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band 
chan_band) 709 { 710 } 711 712 static inline void 713 dp_monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq) 714 { 715 } 716 717 static inline void dp_monitor_set_chan_num(struct dp_pdev *pdev, int chan_num) 718 { 719 } 720 721 static inline bool dp_monitor_is_enable_mcopy_mode(struct dp_pdev *pdev) 722 { 723 return false; 724 } 725 726 static inline 727 void dp_monitor_neighbour_peer_list_remove(struct dp_pdev *pdev, 728 struct dp_vdev *vdev, 729 struct dp_neighbour_peer *peer) 730 { 731 } 732 733 static inline bool dp_monitor_is_chan_band_known(struct dp_pdev *pdev) 734 { 735 return false; 736 } 737 738 static inline enum reg_wifi_band 739 dp_monitor_get_chan_band(struct dp_pdev *pdev) 740 { 741 return 0; 742 } 743 744 static inline int 745 dp_monitor_get_chan_num(struct dp_pdev *pdev) 746 { 747 return 0; 748 } 749 750 static inline qdf_freq_t 751 dp_monitor_get_chan_freq(struct dp_pdev *pdev) 752 { 753 return 0; 754 } 755 756 static inline void dp_monitor_get_mpdu_status(struct dp_pdev *pdev, 757 struct dp_soc *soc, 758 uint8_t *rx_tlv_hdr) 759 { 760 } 761 762 static inline void dp_monitor_print_tx_stats(struct dp_pdev *pdev) 763 { 764 } 765 766 static inline 767 QDF_STATUS dp_monitor_mcopy_check_deliver(struct dp_pdev *pdev, 768 uint16_t peer_id, uint32_t ppdu_id, 769 uint8_t first_msdu) 770 { 771 return QDF_STATUS_SUCCESS; 772 } 773 774 static inline bool dp_monitor_is_enable_tx_sniffer(struct dp_pdev *pdev) 775 { 776 return false; 777 } 778 779 static inline struct dp_vdev* 780 dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev) 781 { 782 return NULL; 783 } 784 785 static inline QDF_STATUS dp_monitor_check_com_info_ppdu_id(struct dp_pdev *pdev, 786 void *rx_desc) 787 { 788 return QDF_STATUS_E_FAILURE; 789 } 790 791 static inline struct mon_rx_status* 792 dp_monitor_get_rx_status(struct dp_pdev *pdev) 793 { 794 return NULL; 795 } 796 797 static inline 798 void dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev *pdev, bool val) 799 { 800 } 801 802 static inline 803 void dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev *pdev, 804 bool val) 805 { 806 } 807 808 static inline QDF_STATUS 809 dp_monitor_peer_tx_capture_get_stats(struct dp_soc *soc, struct dp_peer *peer, 810 struct cdp_peer_tx_capture_stats *stats) 811 { 812 return QDF_STATUS_E_FAILURE; 813 } 814 815 static inline QDF_STATUS 816 dp_monitor_pdev_tx_capture_get_stats(struct dp_soc *soc, struct dp_pdev *pdev, 817 struct cdp_pdev_tx_capture_stats *stats) 818 { 819 return QDF_STATUS_E_FAILURE; 820 } 821 822 #ifdef DP_POWER_SAVE 823 static inline 824 void dp_monitor_pktlog_reap_pending_frames(struct dp_pdev *pdev) 825 { 826 } 827 828 static inline 829 void dp_monitor_pktlog_start_reap_timer(struct dp_pdev *pdev) 830 { 831 } 832 #endif 833 834 static inline bool dp_monitor_is_configured(struct dp_pdev *pdev) 835 { 836 return false; 837 } 838 839 static inline void 840 dp_mon_rx_hdr_length_set(struct dp_soc *soc, uint32_t *msg_word, 841 struct htt_rx_ring_tlv_filter *tlv_filter) 842 { 843 } 844 845 static inline void dp_monitor_soc_init(struct dp_soc *soc) 846 { 847 } 848 849 static inline void dp_monitor_soc_deinit(struct dp_soc *soc) 850 { 851 } 852 853 static inline 854 QDF_STATUS dp_monitor_config_undecoded_metadata_capture(struct dp_pdev *pdev, 855 int val) 856 { 857 return QDF_STATUS_SUCCESS; 858 } 859 860 static inline QDF_STATUS 861 dp_monitor_config_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev, 862 int mask1, int mask2) 863 { 864 return QDF_STATUS_SUCCESS; 865 } 866 867 
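/*
 * Illustrative sketch, not part of the DP driver: the long run of
 * WIFI_MONITOR_SUPPORT stubs above follows the usual "feature compiled out"
 * idiom - when the feature is disabled, every monitor entry point collapses
 * to a static inline no-op that returns a benign status, so call sites stay
 * free of #ifdef clutter and the compiler drops the calls entirely. The
 * #if 0 block below restates the idiom in a self-contained form, using a
 * hypothetical FEATURE_FOO flag and foo_* names.
 */
#if 0
#include <stdio.h>

#ifdef FEATURE_FOO
int foo_attach(void *ctx);	/* real implementation lives elsewhere */
#else
/* feature disabled: a no-op stub, in the spirit of the dp_monitor_* stubs */
static inline int foo_attach(void *ctx)
{
	(void)ctx;
	return 0;		/* benign "success" so callers carry on */
}
#endif

int main(void)
{
	/* the call site is identical whether or not FEATURE_FOO is defined */
	printf("foo_attach -> %d\n", foo_attach(NULL));
	return 0;
}
#endif /* 0 - end of illustrative sketch */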
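/*
 * Illustrative sketch, not part of the DP driver: DP_GET_AVG_RSSI() near the
 * top of this header keeps an exponential moving average - each update keeps
 * 3/4 of the running average and mixes in 1/4 of the newest sample, because
 * DP_RSSI_AVG_WEIGHT == 2 means "right shift by two". The #if 0 block below
 * restates that arithmetic with local copies of the macros and a few sample
 * values; all names and numbers here are illustrative only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define AVG_WEIGHT 2		/* same role as DP_RSSI_AVG_WEIGHT */
#define GET_AVG_RSSI(avg, last) \
	(((avg) - (((uint8_t)(avg)) >> AVG_WEIGHT)) + \
	 (((uint8_t)(last)) >> AVG_WEIGHT))

int main(void)
{
	uint8_t avg = 80;			/* running average so far */
	uint8_t samples[] = { 60, 60, 60, 60 };	/* new RSSI readings */
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++) {
		avg = (uint8_t)GET_AVG_RSSI(avg, samples[i]);
		/* 80 -> 75 -> 72 -> 69 -> 67: decays toward the new level */
		printf("sample %u: avg_rssi = %u\n", i, (unsigned int)avg);
	}
	return 0;
}
#endif /* 0 - end of illustrative sketch */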
static inline QDF_STATUS 868 dp_monitor_get_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev, 869 int *mask, int *mask_cont) 870 { 871 return QDF_STATUS_SUCCESS; 872 } 873 874 static inline QDF_STATUS dp_monitor_soc_htt_srng_setup(struct dp_soc *soc) 875 { 876 return QDF_STATUS_E_FAILURE; 877 } 878 879 static inline bool dp_is_monitor_mode_using_poll(struct dp_soc *soc) 880 { 881 return false; 882 } 883 884 static inline 885 uint32_t dp_tx_mon_buf_refill(struct dp_intr *int_ctx) 886 { 887 return 0; 888 } 889 890 static inline uint32_t 891 dp_tx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx, 892 uint32_t mac_id, uint32_t quota) 893 { 894 return 0; 895 } 896 897 static inline 898 uint32_t dp_rx_mon_buf_refill(struct dp_intr *int_ctx) 899 { 900 return 0; 901 } 902 903 static inline bool dp_monitor_is_tx_cap_enabled(struct dp_peer *peer) 904 { 905 return 0; 906 } 907 908 static inline bool dp_monitor_is_rx_cap_enabled(struct dp_peer *peer) 909 { 910 return 0; 911 } 912 913 static inline void 914 dp_rx_mon_enable(struct dp_soc *soc, uint32_t *msg_word, 915 struct htt_rx_ring_tlv_filter *tlv_filter) 916 { 917 } 918 919 static inline void 920 dp_mon_rx_packet_length_set(struct dp_soc *soc, uint32_t *msg_word, 921 struct htt_rx_ring_tlv_filter *tlv_filter) 922 { 923 } 924 925 static inline void 926 dp_mon_rx_enable_mpdu_logging(struct dp_soc *soc, uint32_t *msg_word, 927 struct htt_rx_ring_tlv_filter *tlv_filter) 928 { 929 } 930 931 static inline void 932 dp_mon_rx_wmask_subscribe(struct dp_soc *soc, uint32_t *msg_word, 933 struct htt_rx_ring_tlv_filter *tlv_filter) 934 { 935 } 936 937 #ifdef WLAN_TELEMETRY_STATS_SUPPORT 938 static inline 939 void dp_monitor_peer_telemetry_stats(struct dp_peer *peer, 940 struct cdp_peer_telemetry_stats *stats) 941 { 942 } 943 #endif /* WLAN_TELEMETRY_STATS_SUPPORT */ 944 #endif /* !WIFI_MONITOR_SUPPORT */ 945 946 /** 947 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to 948 * dp soc handle 949 * @psoc: CDP psoc handle 950 * 951 * Return: struct dp_soc pointer 952 */ 953 static inline 954 struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc) 955 { 956 return (struct dp_soc *)psoc; 957 } 958 959 #define DP_MAX_TIMER_EXEC_TIME_TICKS \ 960 (QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20) 961 962 /** 963 * enum timer_yield_status - yield status code used in monitor mode timer. 964 * @DP_TIMER_NO_YIELD: do not yield 965 * @DP_TIMER_WORK_DONE: yield because work is done 966 * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted 967 * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted 968 */ 969 enum timer_yield_status { 970 DP_TIMER_NO_YIELD, 971 DP_TIMER_WORK_DONE, 972 DP_TIMER_WORK_EXHAUST, 973 DP_TIMER_TIME_EXHAUST, 974 }; 975 976 #if DP_PRINT_ENABLE 977 #include <qdf_types.h> /* qdf_vprint */ 978 #include <cdp_txrx_handle.h> 979 980 enum { 981 /* FATAL_ERR - print only irrecoverable error messages */ 982 DP_PRINT_LEVEL_FATAL_ERR, 983 984 /* ERR - include non-fatal err messages */ 985 DP_PRINT_LEVEL_ERR, 986 987 /* WARN - include warnings */ 988 DP_PRINT_LEVEL_WARN, 989 990 /* INFO1 - include fundamental, infrequent events */ 991 DP_PRINT_LEVEL_INFO1, 992 993 /* INFO2 - include non-fundamental but infrequent events */ 994 DP_PRINT_LEVEL_INFO2, 995 }; 996 997 #define dp_print(level, fmt, ...) do { \ 998 if (level <= g_txrx_print_level) \ 999 qdf_print(fmt, ## __VA_ARGS__); \ 1000 while (0) 1001 #define DP_PRINT(level, fmt, ...) 
do { \ 1002 dp_print(level, "DP: " fmt, ## __VA_ARGS__); \ 1003 while (0) 1004 #else 1005 #define DP_PRINT(level, fmt, ...) 1006 #endif /* DP_PRINT_ENABLE */ 1007 1008 #define DP_TRACE(LVL, fmt, args ...) \ 1009 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \ 1010 fmt, ## args) 1011 1012 #ifdef WLAN_SYSFS_DP_STATS 1013 void DP_PRINT_STATS(const char *fmt, ...); 1014 #else /* WLAN_SYSFS_DP_STATS */ 1015 #ifdef DP_PRINT_NO_CONSOLE 1016 /* Stat prints should not go to console or kernel logs.*/ 1017 #define DP_PRINT_STATS(fmt, args ...)\ 1018 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, \ 1019 fmt, ## args) 1020 #else 1021 #define DP_PRINT_STATS(fmt, args ...)\ 1022 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\ 1023 fmt, ## args) 1024 #endif 1025 #endif /* WLAN_SYSFS_DP_STATS */ 1026 1027 #define DP_STATS_INIT(_handle) \ 1028 qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats)) 1029 1030 #define DP_STATS_CLR(_handle) \ 1031 qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats)) 1032 1033 #ifndef DISABLE_DP_STATS 1034 #define DP_STATS_INC(_handle, _field, _delta) \ 1035 { \ 1036 if (likely(_handle)) \ 1037 _handle->stats._field += _delta; \ 1038 } 1039 1040 #define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) \ 1041 { \ 1042 if (likely(_handle)) \ 1043 _handle->_field += _delta; \ 1044 } 1045 1046 #define DP_STATS_INCC(_handle, _field, _delta, _cond) \ 1047 { \ 1048 if (_cond && likely(_handle)) \ 1049 _handle->stats._field += _delta; \ 1050 } 1051 1052 #define DP_STATS_DEC(_handle, _field, _delta) \ 1053 { \ 1054 if (likely(_handle)) \ 1055 _handle->stats._field -= _delta; \ 1056 } 1057 1058 #define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) \ 1059 { \ 1060 if (likely(_handle)) \ 1061 _handle->_field -= _delta; \ 1062 } 1063 1064 #define DP_STATS_UPD(_handle, _field, _delta) \ 1065 { \ 1066 if (likely(_handle)) \ 1067 _handle->stats._field = _delta; \ 1068 } 1069 1070 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \ 1071 { \ 1072 DP_STATS_INC(_handle, _field.num, _count); \ 1073 DP_STATS_INC(_handle, _field.bytes, _bytes) \ 1074 } 1075 1076 #define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \ 1077 { \ 1078 DP_PEER_STATS_FLAT_INC(_handle, _field.num, _count); \ 1079 DP_PEER_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \ 1080 } 1081 1082 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \ 1083 { \ 1084 DP_STATS_INCC(_handle, _field.num, _count, _cond); \ 1085 DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \ 1086 } 1087 1088 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) \ 1089 { \ 1090 _handle_a->stats._field += _handle_b->stats._field; \ 1091 } 1092 1093 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \ 1094 { \ 1095 DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \ 1096 DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\ 1097 } 1098 1099 #define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \ 1100 { \ 1101 _handle_a->stats._field = _handle_b->stats._field; \ 1102 } 1103 1104 #else 1105 #define DP_STATS_INC(_handle, _field, _delta) 1106 #define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) 1107 #define DP_STATS_INCC(_handle, _field, _delta, _cond) 1108 #define DP_STATS_DEC(_handle, _field, _delta) 1109 #define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) 1110 #define DP_STATS_UPD(_handle, _field, _delta) 1111 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) 1112 #define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) 1113 #define DP_STATS_INCC_PKT(_handle, 
_field, _count, _bytes, _cond) 1114 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) 1115 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) 1116 #endif 1117 1118 #define DP_PEER_PER_PKT_STATS_INC(_handle, _field, _delta) \ 1119 { \ 1120 DP_STATS_INC(_handle, per_pkt_stats._field, _delta); \ 1121 } 1122 1123 #define DP_PEER_PER_PKT_STATS_INCC(_handle, _field, _delta, _cond) \ 1124 { \ 1125 DP_STATS_INCC(_handle, per_pkt_stats._field, _delta, _cond); \ 1126 } 1127 1128 #define DP_PEER_PER_PKT_STATS_INC_PKT(_handle, _field, _count, _bytes) \ 1129 { \ 1130 DP_PEER_PER_PKT_STATS_INC(_handle, _field.num, _count); \ 1131 DP_PEER_PER_PKT_STATS_INC(_handle, _field.bytes, _bytes) \ 1132 } 1133 1134 #define DP_PEER_PER_PKT_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \ 1135 { \ 1136 DP_PEER_PER_PKT_STATS_INCC(_handle, _field.num, _count, _cond); \ 1137 DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \ 1138 } 1139 1140 #ifndef QCA_ENHANCED_STATS_SUPPORT 1141 #define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta) \ 1142 { \ 1143 DP_STATS_INC(_handle, extd_stats._field, _delta); \ 1144 } 1145 1146 #define DP_PEER_EXTD_STATS_INCC(_handle, _field, _delta, _cond) \ 1147 { \ 1148 DP_STATS_INCC(_handle, extd_stats._field, _delta, _cond); \ 1149 } 1150 1151 #define DP_PEER_EXTD_STATS_UPD(_handle, _field, _delta) \ 1152 { \ 1153 DP_STATS_UPD(_handle, extd_stats._field, _delta); \ 1154 } 1155 #endif 1156 1157 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \ 1158 defined(QCA_ENHANCED_STATS_SUPPORT) 1159 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1160 { \ 1161 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1162 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ 1163 } 1164 1165 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1166 { \ 1167 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1168 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ 1169 } 1170 1171 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \ 1172 { \ 1173 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1174 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \ 1175 } 1176 1177 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ 1178 { \ 1179 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1180 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \ 1181 } 1182 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) 1183 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1184 { \ 1185 if (!(_handle->hw_txrx_stats_en)) \ 1186 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ 1187 } 1188 1189 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1190 { \ 1191 if (!(_handle->hw_txrx_stats_en)) \ 1192 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ 1193 } 1194 1195 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \ 1196 { \ 1197 if (!(_handle->hw_txrx_stats_en)) \ 1198 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \ 1199 } 1200 1201 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ 1202 { \ 1203 if (!(_handle->hw_txrx_stats_en)) \ 1204 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \ 1205 } 1206 #else 1207 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1208 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); 1209 1210 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1211 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); 1212 1213 #define DP_PEER_MC_INCC_PKT(_handle, _count, 
_bytes, _cond) \ 1214 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); 1215 1216 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ 1217 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); 1218 #endif 1219 1220 #ifdef ENABLE_DP_HIST_STATS 1221 #define DP_HIST_INIT() \ 1222 uint32_t num_of_packets[MAX_PDEV_CNT] = {0}; 1223 1224 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) \ 1225 { \ 1226 ++num_of_packets[_pdev_id]; \ 1227 } 1228 1229 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 1230 do { \ 1231 if (_p_cntrs == 1) { \ 1232 DP_STATS_INC(_pdev, \ 1233 tx_comp_histogram.pkts_1, 1); \ 1234 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 1235 DP_STATS_INC(_pdev, \ 1236 tx_comp_histogram.pkts_2_20, 1); \ 1237 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 1238 DP_STATS_INC(_pdev, \ 1239 tx_comp_histogram.pkts_21_40, 1); \ 1240 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 1241 DP_STATS_INC(_pdev, \ 1242 tx_comp_histogram.pkts_41_60, 1); \ 1243 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 1244 DP_STATS_INC(_pdev, \ 1245 tx_comp_histogram.pkts_61_80, 1); \ 1246 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 1247 DP_STATS_INC(_pdev, \ 1248 tx_comp_histogram.pkts_81_100, 1); \ 1249 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 1250 DP_STATS_INC(_pdev, \ 1251 tx_comp_histogram.pkts_101_200, 1); \ 1252 } else if (_p_cntrs > 200) { \ 1253 DP_STATS_INC(_pdev, \ 1254 tx_comp_histogram.pkts_201_plus, 1); \ 1255 } \ 1256 } while (0) 1257 1258 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 1259 do { \ 1260 if (_p_cntrs == 1) { \ 1261 DP_STATS_INC(_pdev, \ 1262 rx_ind_histogram.pkts_1, 1); \ 1263 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 1264 DP_STATS_INC(_pdev, \ 1265 rx_ind_histogram.pkts_2_20, 1); \ 1266 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 1267 DP_STATS_INC(_pdev, \ 1268 rx_ind_histogram.pkts_21_40, 1); \ 1269 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 1270 DP_STATS_INC(_pdev, \ 1271 rx_ind_histogram.pkts_41_60, 1); \ 1272 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 1273 DP_STATS_INC(_pdev, \ 1274 rx_ind_histogram.pkts_61_80, 1); \ 1275 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 1276 DP_STATS_INC(_pdev, \ 1277 rx_ind_histogram.pkts_81_100, 1); \ 1278 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 1279 DP_STATS_INC(_pdev, \ 1280 rx_ind_histogram.pkts_101_200, 1); \ 1281 } else if (_p_cntrs > 200) { \ 1282 DP_STATS_INC(_pdev, \ 1283 rx_ind_histogram.pkts_201_plus, 1); \ 1284 } \ 1285 } while (0) 1286 1287 #define DP_TX_HIST_STATS_PER_PDEV() \ 1288 do { \ 1289 uint8_t hist_stats = 0; \ 1290 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 1291 hist_stats++) { \ 1292 DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 1293 num_of_packets[hist_stats]); \ 1294 } \ 1295 } while (0) 1296 1297 1298 #define DP_RX_HIST_STATS_PER_PDEV() \ 1299 do { \ 1300 uint8_t hist_stats = 0; \ 1301 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 1302 hist_stats++) { \ 1303 DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 1304 num_of_packets[hist_stats]); \ 1305 } \ 1306 } while (0) 1307 1308 #else 1309 #define DP_HIST_INIT() 1310 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) 1311 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 1312 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 1313 #define DP_RX_HIST_STATS_PER_PDEV() 1314 #define DP_TX_HIST_STATS_PER_PDEV() 1315 #endif /* DISABLE_DP_STATS */ 1316 1317 #define FRAME_MASK_IPV4_ARP 1 1318 #define FRAME_MASK_IPV4_DHCP 2 1319 #define FRAME_MASK_IPV4_EAPOL 4 1320 #define 
FRAME_MASK_IPV6_DHCP 8 1321 1322 static inline int dp_log2_ceil(unsigned int value) 1323 { 1324 unsigned int tmp = value; 1325 int log2 = -1; 1326 1327 while (tmp) { 1328 log2++; 1329 tmp >>= 1; 1330 } 1331 if (1 << log2 != value) 1332 log2++; 1333 return log2; 1334 } 1335 1336 #ifdef QCA_SUPPORT_PEER_ISOLATION 1337 #define dp_get_peer_isolation(_peer) ((_peer)->isolation) 1338 1339 static inline void dp_set_peer_isolation(struct dp_txrx_peer *txrx_peer, 1340 bool val) 1341 { 1342 txrx_peer->isolation = val; 1343 } 1344 1345 #else 1346 #define dp_get_peer_isolation(_peer) (0) 1347 1348 static inline void dp_set_peer_isolation(struct dp_txrx_peer *peer, bool val) 1349 { 1350 } 1351 #endif /* QCA_SUPPORT_PEER_ISOLATION */ 1352 1353 #ifdef QCA_SUPPORT_WDS_EXTENDED 1354 static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer) 1355 { 1356 txrx_peer->wds_ext.init = 0; 1357 } 1358 #else 1359 static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer) 1360 { 1361 } 1362 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 1363 1364 #ifdef QCA_HOST2FW_RXBUF_RING 1365 static inline 1366 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id) 1367 { 1368 return &pdev->rx_mac_buf_ring[lmac_id]; 1369 } 1370 #else 1371 static inline 1372 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id) 1373 { 1374 return &pdev->soc->rx_refill_buf_ring[lmac_id]; 1375 } 1376 #endif 1377 1378 /** 1379 * The lmac ID for a particular channel band is fixed. 1380 * 2.4GHz band uses lmac_id = 1 1381 * 5GHz/6GHz band uses lmac_id=0 1382 */ 1383 #define DP_INVALID_LMAC_ID (-1) 1384 #define DP_MON_INVALID_LMAC_ID (-1) 1385 #define DP_MAC0_LMAC_ID 0 1386 #define DP_MAC1_LMAC_ID 1 1387 1388 #ifdef FEATURE_TSO_STATS 1389 /** 1390 * dp_init_tso_stats() - Clear tso stats 1391 * @pdev: pdev handle 1392 * 1393 * Return: None 1394 */ 1395 static inline 1396 void dp_init_tso_stats(struct dp_pdev *pdev) 1397 { 1398 if (pdev) { 1399 qdf_mem_zero(&((pdev)->stats.tso_stats), 1400 sizeof((pdev)->stats.tso_stats)); 1401 qdf_atomic_init(&pdev->tso_idx); 1402 } 1403 } 1404 1405 /** 1406 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram 1407 * @pdev: pdev handle 1408 * @_p_cntrs: number of tso segments for a tso packet 1409 * 1410 * Return: None 1411 */ 1412 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 1413 uint8_t _p_cntrs); 1414 1415 /** 1416 * dp_tso_segment_update() - Collect tso segment information 1417 * @pdev: pdev handle 1418 * @stats_idx: tso packet number 1419 * @idx: tso segment number 1420 * @seg: tso segment 1421 * 1422 * Return: None 1423 */ 1424 void dp_tso_segment_update(struct dp_pdev *pdev, 1425 uint32_t stats_idx, 1426 uint8_t idx, 1427 struct qdf_tso_seg_t seg); 1428 1429 /** 1430 * dp_tso_packet_update() - TSO Packet information 1431 * @pdev: pdev handle 1432 * @stats_idx: tso packet number 1433 * @msdu: nbuf handle 1434 * @num_segs: tso segments 1435 * 1436 * Return: None 1437 */ 1438 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 1439 qdf_nbuf_t msdu, uint16_t num_segs); 1440 1441 /** 1442 * dp_tso_segment_stats_update() - TSO Segment stats 1443 * @pdev: pdev handle 1444 * @stats_seg: tso segment list 1445 * @stats_idx: tso packet number 1446 * 1447 * Return: None 1448 */ 1449 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 1450 struct qdf_tso_seg_elem_t *stats_seg, 1451 uint32_t stats_idx); 1452 1453 /** 1454 * dp_print_tso_stats() - dump tso statistics 1455 * @soc:soc handle 1456 * @level: verbosity level 1457 * 1458 
* Return: None 1459 */ 1460 void dp_print_tso_stats(struct dp_soc *soc, 1461 enum qdf_stats_verbosity_level level); 1462 1463 /** 1464 * dp_txrx_clear_tso_stats() - clear tso stats 1465 * @soc: soc handle 1466 * 1467 * Return: None 1468 */ 1469 void dp_txrx_clear_tso_stats(struct dp_soc *soc); 1470 #else 1471 static inline 1472 void dp_init_tso_stats(struct dp_pdev *pdev) 1473 { 1474 } 1475 1476 static inline 1477 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 1478 uint8_t _p_cntrs) 1479 { 1480 } 1481 1482 static inline 1483 void dp_tso_segment_update(struct dp_pdev *pdev, 1484 uint32_t stats_idx, 1485 uint32_t idx, 1486 struct qdf_tso_seg_t seg) 1487 { 1488 } 1489 1490 static inline 1491 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 1492 qdf_nbuf_t msdu, uint16_t num_segs) 1493 { 1494 } 1495 1496 static inline 1497 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 1498 struct qdf_tso_seg_elem_t *stats_seg, 1499 uint32_t stats_idx) 1500 { 1501 } 1502 1503 static inline 1504 void dp_print_tso_stats(struct dp_soc *soc, 1505 enum qdf_stats_verbosity_level level) 1506 { 1507 } 1508 1509 static inline 1510 void dp_txrx_clear_tso_stats(struct dp_soc *soc) 1511 { 1512 } 1513 #endif /* FEATURE_TSO_STATS */ 1514 1515 /* dp_txrx_get_peer_per_pkt_stats_param() - Get peer per pkt stats param 1516 * @peer: DP peer handle 1517 * @type: Requested stats type 1518 * @ buf: Buffer to hold the value 1519 * 1520 * Return: status success/failure 1521 */ 1522 QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer, 1523 enum cdp_peer_stats_type type, 1524 cdp_peer_stats_param_t *buf); 1525 1526 /* dp_txrx_get_peer_extd_stats_param() - Get peer extd stats param 1527 * @peer: DP peer handle 1528 * @type: Requested stats type 1529 * @ buf: Buffer to hold the value 1530 * 1531 * Return: status success/failure 1532 */ 1533 QDF_STATUS dp_txrx_get_peer_extd_stats_param(struct dp_peer *peer, 1534 enum cdp_peer_stats_type type, 1535 cdp_peer_stats_param_t *buf); 1536 1537 #define DP_HTT_T2H_HP_PIPE 5 1538 /** 1539 * dp_update_pdev_stats(): Update the pdev stats 1540 * @tgtobj: pdev handle 1541 * @srcobj: vdev stats structure 1542 * 1543 * Update the pdev stats from the specified vdev stats 1544 * 1545 * return: None 1546 */ 1547 void dp_update_pdev_stats(struct dp_pdev *tgtobj, 1548 struct cdp_vdev_stats *srcobj); 1549 1550 /** 1551 * dp_update_vdev_ingress_stats(): Update the vdev ingress stats 1552 * @tgtobj: vdev handle 1553 * 1554 * Update the vdev ingress stats 1555 * 1556 * return: None 1557 */ 1558 void dp_update_vdev_ingress_stats(struct dp_vdev *tgtobj); 1559 1560 /** 1561 * dp_update_vdev_rate_stats() - Update the vdev rate stats 1562 * @tgtobj: tgt buffer for vdev stats 1563 * @srcobj: srcobj vdev stats 1564 * 1565 * Return: None 1566 */ 1567 void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj, 1568 struct cdp_vdev_stats *srcobj); 1569 1570 /** 1571 * dp_update_pdev_ingress_stats(): Update the pdev ingress stats 1572 * @tgtobj: pdev handle 1573 * @srcobj: vdev stats structure 1574 * 1575 * Update the pdev ingress stats from the specified vdev stats 1576 * 1577 * return: None 1578 */ 1579 void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj, 1580 struct dp_vdev *srcobj); 1581 1582 /** 1583 * dp_update_vdev_stats(): Update the vdev stats 1584 * @soc: soc handle 1585 * @srcobj: DP_PEER object 1586 * @arg: point to vdev stats structure 1587 * 1588 * Update the vdev stats from the specified peer stats 1589 * 1590 * return: None 1591 */ 1592 
void dp_update_vdev_stats(struct dp_soc *soc, 1593 struct dp_peer *srcobj, 1594 void *arg); 1595 1596 /** 1597 * dp_update_vdev_stats_on_peer_unmap() - Update the vdev stats on peer unmap 1598 * @vdev: DP_VDEV handle 1599 * @peer: DP_PEER handle 1600 * 1601 * Return: None 1602 */ 1603 void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev, 1604 struct dp_peer *peer); 1605 1606 #define DP_UPDATE_STATS(_tgtobj, _srcobj) \ 1607 do { \ 1608 uint8_t i; \ 1609 uint8_t pream_type; \ 1610 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 1611 for (i = 0; i < MAX_MCS; i++) { \ 1612 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1613 tx.pkt_type[pream_type].mcs_count[i]); \ 1614 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1615 rx.pkt_type[pream_type].mcs_count[i]); \ 1616 } \ 1617 } \ 1618 \ 1619 for (i = 0; i < MAX_BW; i++) { \ 1620 DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \ 1621 DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \ 1622 } \ 1623 \ 1624 for (i = 0; i < SS_COUNT; i++) { \ 1625 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \ 1626 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \ 1627 } \ 1628 for (i = 0; i < WME_AC_MAX; i++) { \ 1629 DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \ 1630 DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \ 1631 DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \ 1632 \ 1633 } \ 1634 \ 1635 for (i = 0; i < MAX_GI; i++) { \ 1636 DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \ 1637 DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \ 1638 } \ 1639 \ 1640 for (i = 0; i < MAX_RECEPTION_TYPES; i++) \ 1641 DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \ 1642 \ 1643 if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) { \ 1644 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \ 1645 DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \ 1646 } \ 1647 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \ 1648 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \ 1649 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \ 1650 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \ 1651 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \ 1652 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \ 1653 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \ 1654 DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \ 1655 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \ 1656 DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \ 1657 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \ 1658 DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \ 1659 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \ 1660 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \ 1661 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \ 1662 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \ 1663 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \ 1664 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \ 1665 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \ 1666 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \ 1667 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_queue_disable); \ 1668 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_no_match); \ 1669 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_threshold); \ 1670 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_link_desc_na); \ 1671 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_drop); \ 1672 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.mcast_vdev_drop); \ 1673 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_rr); \ 1674 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \ 1675 \ 1676 DP_STATS_AGGR(_tgtobj, _srcobj, 
rx.err.mic_err); \ 1677 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \ 1678 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.fcserr); \ 1679 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.pn_err); \ 1680 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.oor_err); \ 1681 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.jump_2k_err); \ 1682 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.rxdma_wifi_parse_err); \ 1683 if (_srcobj->stats.rx.snr != 0) \ 1684 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \ 1685 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \ 1686 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \ 1687 DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \ 1688 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \ 1689 DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \ 1690 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \ 1691 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \ 1692 \ 1693 for (i = 0; i < CDP_MAX_RX_RINGS; i++) \ 1694 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \ 1695 \ 1696 for (i = 0; i < CDP_MAX_LMACS; i++) \ 1697 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rx_lmac[i]); \ 1698 \ 1699 _srcobj->stats.rx.unicast.num = \ 1700 _srcobj->stats.rx.to_stack.num - \ 1701 _srcobj->stats.rx.multicast.num; \ 1702 _srcobj->stats.rx.unicast.bytes = \ 1703 _srcobj->stats.rx.to_stack.bytes - \ 1704 _srcobj->stats.rx.multicast.bytes; \ 1705 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \ 1706 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \ 1707 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \ 1708 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \ 1709 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \ 1710 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \ 1711 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \ 1712 \ 1713 _tgtobj->stats.tx.last_ack_rssi = \ 1714 _srcobj->stats.tx.last_ack_rssi; \ 1715 DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \ 1716 DP_STATS_AGGR(_tgtobj, _srcobj, rx.peer_unauth_rx_pkt_drop); \ 1717 DP_STATS_AGGR(_tgtobj, _srcobj, rx.policy_check_drop); \ 1718 } while (0) 1719 1720 #ifdef VDEV_PEER_PROTOCOL_COUNT 1721 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) \ 1722 { \ 1723 uint8_t j; \ 1724 for (j = 0; j < CDP_TRACE_MAX; j++) { \ 1725 _tgtobj->tx.protocol_trace_cnt[j].egress_cnt += \ 1726 _srcobj->tx.protocol_trace_cnt[j].egress_cnt; \ 1727 _tgtobj->tx.protocol_trace_cnt[j].ingress_cnt += \ 1728 _srcobj->tx.protocol_trace_cnt[j].ingress_cnt; \ 1729 _tgtobj->rx.protocol_trace_cnt[j].egress_cnt += \ 1730 _srcobj->rx.protocol_trace_cnt[j].egress_cnt; \ 1731 _tgtobj->rx.protocol_trace_cnt[j].ingress_cnt += \ 1732 _srcobj->rx.protocol_trace_cnt[j].ingress_cnt; \ 1733 } \ 1734 } 1735 #else 1736 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) 1737 #endif 1738 1739 #ifdef WLAN_FEATURE_11BE 1740 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) \ 1741 do { \ 1742 uint8_t i, mu_type; \ 1743 for (i = 0; i < MAX_MCS; i++) { \ 1744 _tgtobj->tx.su_be_ppdu_cnt.mcs_count[i] += \ 1745 _srcobj->tx.su_be_ppdu_cnt.mcs_count[i]; \ 1746 _tgtobj->rx.su_be_ppdu_cnt.mcs_count[i] += \ 1747 _srcobj->rx.su_be_ppdu_cnt.mcs_count[i]; \ 1748 } \ 1749 for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 1750 for (i = 0; i < MAX_MCS; i++) { \ 1751 _tgtobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \ 1752 _srcobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \ 1753 _tgtobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \ 1754 _srcobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \ 1755 } \ 1756 } \ 1757 for (i = 0; i < MAX_PUNCTURED_MODE; i++) { \ 1758 
_tgtobj->tx.punc_bw[i] += _srcobj->tx.punc_bw[i]; \ 1759 _tgtobj->rx.punc_bw[i] += _srcobj->rx.punc_bw[i]; \ 1760 } \ 1761 } while (0) 1762 #else 1763 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) 1764 #endif 1765 1766 #define DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj) \ 1767 do { \ 1768 uint8_t i; \ 1769 _tgtobj->tx.ucast.num += _srcobj->tx.ucast.num; \ 1770 _tgtobj->tx.ucast.bytes += _srcobj->tx.ucast.bytes; \ 1771 _tgtobj->tx.mcast.num += _srcobj->tx.mcast.num; \ 1772 _tgtobj->tx.mcast.bytes += _srcobj->tx.mcast.bytes; \ 1773 _tgtobj->tx.bcast.num += _srcobj->tx.bcast.num; \ 1774 _tgtobj->tx.bcast.bytes += _srcobj->tx.bcast.bytes; \ 1775 _tgtobj->tx.nawds_mcast.num += _srcobj->tx.nawds_mcast.num; \ 1776 _tgtobj->tx.nawds_mcast.bytes += \ 1777 _srcobj->tx.nawds_mcast.bytes; \ 1778 _tgtobj->tx.tx_success.num += _srcobj->tx.tx_success.num; \ 1779 _tgtobj->tx.tx_success.bytes += _srcobj->tx.tx_success.bytes; \ 1780 _tgtobj->tx.nawds_mcast_drop += _srcobj->tx.nawds_mcast_drop; \ 1781 _tgtobj->tx.ofdma += _srcobj->tx.ofdma; \ 1782 _tgtobj->tx.non_amsdu_cnt += _srcobj->tx.non_amsdu_cnt; \ 1783 _tgtobj->tx.amsdu_cnt += _srcobj->tx.amsdu_cnt; \ 1784 _tgtobj->tx.dropped.fw_rem.num += \ 1785 _srcobj->tx.dropped.fw_rem.num; \ 1786 _tgtobj->tx.dropped.fw_rem.bytes += \ 1787 _srcobj->tx.dropped.fw_rem.bytes; \ 1788 _tgtobj->tx.dropped.fw_rem_notx += \ 1789 _srcobj->tx.dropped.fw_rem_notx; \ 1790 _tgtobj->tx.dropped.fw_rem_tx += \ 1791 _srcobj->tx.dropped.fw_rem_tx; \ 1792 _tgtobj->tx.dropped.age_out += _srcobj->tx.dropped.age_out; \ 1793 _tgtobj->tx.dropped.fw_reason1 += \ 1794 _srcobj->tx.dropped.fw_reason1; \ 1795 _tgtobj->tx.dropped.fw_reason2 += \ 1796 _srcobj->tx.dropped.fw_reason2; \ 1797 _tgtobj->tx.dropped.fw_reason3 += \ 1798 _srcobj->tx.dropped.fw_reason3; \ 1799 _tgtobj->tx.dropped.fw_rem_queue_disable += \ 1800 _srcobj->tx.dropped.fw_rem_queue_disable; \ 1801 _tgtobj->tx.dropped.fw_rem_no_match += \ 1802 _srcobj->tx.dropped.fw_rem_no_match; \ 1803 _tgtobj->tx.dropped.drop_threshold += \ 1804 _srcobj->tx.dropped.drop_threshold; \ 1805 _tgtobj->tx.dropped.drop_link_desc_na += \ 1806 _srcobj->tx.dropped.drop_link_desc_na; \ 1807 _tgtobj->tx.dropped.invalid_drop += \ 1808 _srcobj->tx.dropped.invalid_drop; \ 1809 _tgtobj->tx.dropped.mcast_vdev_drop += \ 1810 _srcobj->tx.dropped.mcast_vdev_drop; \ 1811 _tgtobj->tx.dropped.invalid_rr += \ 1812 _srcobj->tx.dropped.invalid_rr; \ 1813 _tgtobj->tx.failed_retry_count += \ 1814 _srcobj->tx.failed_retry_count; \ 1815 _tgtobj->tx.retry_count += _srcobj->tx.retry_count; \ 1816 _tgtobj->tx.multiple_retry_count += \ 1817 _srcobj->tx.multiple_retry_count; \ 1818 _tgtobj->tx.tx_success_twt.num += \ 1819 _srcobj->tx.tx_success_twt.num; \ 1820 _tgtobj->tx.tx_success_twt.bytes += \ 1821 _srcobj->tx.tx_success_twt.bytes; \ 1822 _tgtobj->tx.last_tx_ts = _srcobj->tx.last_tx_ts; \ 1823 _tgtobj->tx.release_src_not_tqm += \ 1824 _srcobj->tx.release_src_not_tqm; \ 1825 for (i = 0; i < QDF_PROTO_SUBTYPE_MAX; i++) { \ 1826 _tgtobj->tx.no_ack_count[i] += \ 1827 _srcobj->tx.no_ack_count[i];\ 1828 } \ 1829 \ 1830 _tgtobj->rx.multicast.num += _srcobj->rx.multicast.num; \ 1831 _tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \ 1832 _tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \ 1833 _tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \ 1834 if (_tgtobj->rx.to_stack.num >= _tgtobj->rx.multicast.num) \ 1835 _tgtobj->rx.unicast.num = \ 1836 _tgtobj->rx.to_stack.num - _tgtobj->rx.multicast.num; \ 1837 if (_tgtobj->rx.to_stack.bytes >= _tgtobj->rx.multicast.bytes) \ 
1838 _tgtobj->rx.unicast.bytes = \ 1839 _tgtobj->rx.to_stack.bytes - _tgtobj->rx.multicast.bytes; \ 1840 _tgtobj->rx.raw.num += _srcobj->rx.raw.num; \ 1841 _tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \ 1842 _tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \ 1843 _tgtobj->rx.mcast_3addr_drop += _srcobj->rx.mcast_3addr_drop; \ 1844 _tgtobj->rx.mec_drop.num += _srcobj->rx.mec_drop.num; \ 1845 _tgtobj->rx.mec_drop.bytes += _srcobj->rx.mec_drop.bytes; \ 1846 _tgtobj->rx.intra_bss.pkts.num += \ 1847 _srcobj->rx.intra_bss.pkts.num; \ 1848 _tgtobj->rx.intra_bss.pkts.bytes += \ 1849 _srcobj->rx.intra_bss.pkts.bytes; \ 1850 _tgtobj->rx.intra_bss.fail.num += \ 1851 _srcobj->rx.intra_bss.fail.num; \ 1852 _tgtobj->rx.intra_bss.fail.bytes += \ 1853 _srcobj->rx.intra_bss.fail.bytes; \ 1854 _tgtobj->rx.intra_bss.mdns_no_fwd += \ 1855 _srcobj->rx.intra_bss.mdns_no_fwd; \ 1856 _tgtobj->rx.err.mic_err += _srcobj->rx.err.mic_err; \ 1857 _tgtobj->rx.err.decrypt_err += _srcobj->rx.err.decrypt_err; \ 1858 _tgtobj->rx.err.fcserr += _srcobj->rx.err.fcserr; \ 1859 _tgtobj->rx.err.pn_err += _srcobj->rx.err.pn_err; \ 1860 _tgtobj->rx.err.oor_err += _srcobj->rx.err.oor_err; \ 1861 _tgtobj->rx.err.jump_2k_err += _srcobj->rx.err.jump_2k_err; \ 1862 _tgtobj->rx.err.rxdma_wifi_parse_err += \ 1863 _srcobj->rx.err.rxdma_wifi_parse_err; \ 1864 _tgtobj->rx.non_amsdu_cnt += _srcobj->rx.non_amsdu_cnt; \ 1865 _tgtobj->rx.amsdu_cnt += _srcobj->rx.amsdu_cnt; \ 1866 _tgtobj->rx.rx_retries += _srcobj->rx.rx_retries; \ 1867 _tgtobj->rx.multipass_rx_pkt_drop += \ 1868 _srcobj->rx.multipass_rx_pkt_drop; \ 1869 _tgtobj->rx.peer_unauth_rx_pkt_drop += \ 1870 _srcobj->rx.peer_unauth_rx_pkt_drop; \ 1871 _tgtobj->rx.policy_check_drop += \ 1872 _srcobj->rx.policy_check_drop; \ 1873 _tgtobj->rx.to_stack_twt.num += _srcobj->rx.to_stack_twt.num; \ 1874 _tgtobj->rx.to_stack_twt.bytes += \ 1875 _srcobj->rx.to_stack_twt.bytes; \ 1876 _tgtobj->rx.last_rx_ts = _srcobj->rx.last_rx_ts; \ 1877 for (i = 0; i < CDP_MAX_RX_RINGS; i++) { \ 1878 _tgtobj->rx.rcvd_reo[i].num += \ 1879 _srcobj->rx.rcvd_reo[i].num; \ 1880 _tgtobj->rx.rcvd_reo[i].bytes += \ 1881 _srcobj->rx.rcvd_reo[i].bytes; \ 1882 } \ 1883 for (i = 0; i < CDP_MAX_LMACS; i++) { \ 1884 _tgtobj->rx.rx_lmac[i].num += \ 1885 _srcobj->rx.rx_lmac[i].num; \ 1886 _tgtobj->rx.rx_lmac[i].bytes += \ 1887 _srcobj->rx.rx_lmac[i].bytes; \ 1888 } \ 1889 DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj); \ 1890 } while (0) 1891 1892 #define DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj) \ 1893 do { \ 1894 uint8_t i, pream_type, mu_type; \ 1895 _tgtobj->tx.stbc += _srcobj->tx.stbc; \ 1896 _tgtobj->tx.ldpc += _srcobj->tx.ldpc; \ 1897 _tgtobj->tx.retries += _srcobj->tx.retries; \ 1898 _tgtobj->tx.ampdu_cnt += _srcobj->tx.ampdu_cnt; \ 1899 _tgtobj->tx.non_ampdu_cnt += _srcobj->tx.non_ampdu_cnt; \ 1900 _tgtobj->tx.num_ppdu_cookie_valid += \ 1901 _srcobj->tx.num_ppdu_cookie_valid; \ 1902 _tgtobj->tx.tx_ppdus += _srcobj->tx.tx_ppdus; \ 1903 _tgtobj->tx.tx_mpdus_success += _srcobj->tx.tx_mpdus_success; \ 1904 _tgtobj->tx.tx_mpdus_tried += _srcobj->tx.tx_mpdus_tried; \ 1905 _tgtobj->tx.tx_rate = _srcobj->tx.tx_rate; \ 1906 _tgtobj->tx.last_tx_rate = _srcobj->tx.last_tx_rate; \ 1907 _tgtobj->tx.last_tx_rate_mcs = _srcobj->tx.last_tx_rate_mcs; \ 1908 _tgtobj->tx.mcast_last_tx_rate = \ 1909 _srcobj->tx.mcast_last_tx_rate; \ 1910 _tgtobj->tx.mcast_last_tx_rate_mcs = \ 1911 _srcobj->tx.mcast_last_tx_rate_mcs; \ 1912 _tgtobj->tx.rnd_avg_tx_rate = _srcobj->tx.rnd_avg_tx_rate; \ 1913 _tgtobj->tx.avg_tx_rate = 
_srcobj->tx.avg_tx_rate; \ 1914 _tgtobj->tx.tx_ratecode = _srcobj->tx.tx_ratecode; \ 1915 _tgtobj->tx.pream_punct_cnt += _srcobj->tx.pream_punct_cnt; \ 1916 _tgtobj->tx.ru_start = _srcobj->tx.ru_start; \ 1917 _tgtobj->tx.ru_tones = _srcobj->tx.ru_tones; \ 1918 _tgtobj->tx.last_ack_rssi = _srcobj->tx.last_ack_rssi; \ 1919 _tgtobj->tx.nss_info = _srcobj->tx.nss_info; \ 1920 _tgtobj->tx.mcs_info = _srcobj->tx.mcs_info; \ 1921 _tgtobj->tx.bw_info = _srcobj->tx.bw_info; \ 1922 _tgtobj->tx.gi_info = _srcobj->tx.gi_info; \ 1923 _tgtobj->tx.preamble_info = _srcobj->tx.preamble_info; \ 1924 _tgtobj->tx.retries_mpdu += _srcobj->tx.retries_mpdu; \ 1925 _tgtobj->tx.mpdu_success_with_retries += \ 1926 _srcobj->tx.mpdu_success_with_retries; \ 1927 _tgtobj->tx.rts_success = _srcobj->tx.rts_success; \ 1928 _tgtobj->tx.rts_failure = _srcobj->tx.rts_failure; \ 1929 _tgtobj->tx.bar_cnt = _srcobj->tx.bar_cnt; \ 1930 _tgtobj->tx.ndpa_cnt = _srcobj->tx.ndpa_cnt; \ 1931 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 1932 for (i = 0; i < MAX_MCS; i++) \ 1933 _tgtobj->tx.pkt_type[pream_type].mcs_count[i] += \ 1934 _srcobj->tx.pkt_type[pream_type].mcs_count[i]; \ 1935 } \ 1936 for (i = 0; i < WME_AC_MAX; i++) { \ 1937 _tgtobj->tx.wme_ac_type[i] += _srcobj->tx.wme_ac_type[i]; \ 1938 _tgtobj->tx.excess_retries_per_ac[i] += \ 1939 _srcobj->tx.excess_retries_per_ac[i]; \ 1940 } \ 1941 for (i = 0; i < MAX_GI; i++) { \ 1942 _tgtobj->tx.sgi_count[i] += _srcobj->tx.sgi_count[i]; \ 1943 } \ 1944 for (i = 0; i < SS_COUNT; i++) { \ 1945 _tgtobj->tx.nss[i] += _srcobj->tx.nss[i]; \ 1946 } \ 1947 for (i = 0; i < MAX_BW; i++) { \ 1948 _tgtobj->tx.bw[i] += _srcobj->tx.bw[i]; \ 1949 } \ 1950 for (i = 0; i < MAX_RU_LOCATIONS; i++) { \ 1951 _tgtobj->tx.ru_loc[i].num_msdu += \ 1952 _srcobj->tx.ru_loc[i].num_msdu; \ 1953 _tgtobj->tx.ru_loc[i].num_mpdu += \ 1954 _srcobj->tx.ru_loc[i].num_mpdu; \ 1955 _tgtobj->tx.ru_loc[i].mpdu_tried += \ 1956 _srcobj->tx.ru_loc[i].mpdu_tried; \ 1957 } \ 1958 for (i = 0; i < MAX_TRANSMIT_TYPES; i++) { \ 1959 _tgtobj->tx.transmit_type[i].num_msdu += \ 1960 _srcobj->tx.transmit_type[i].num_msdu; \ 1961 _tgtobj->tx.transmit_type[i].num_mpdu += \ 1962 _srcobj->tx.transmit_type[i].num_mpdu; \ 1963 _tgtobj->tx.transmit_type[i].mpdu_tried += \ 1964 _srcobj->tx.transmit_type[i].mpdu_tried; \ 1965 } \ 1966 for (i = 0; i < MAX_MU_GROUP_ID; i++) { \ 1967 _tgtobj->tx.mu_group_id[i] = _srcobj->tx.mu_group_id[i]; \ 1968 } \ 1969 \ 1970 _tgtobj->rx.mpdu_cnt_fcs_ok += _srcobj->rx.mpdu_cnt_fcs_ok; \ 1971 _tgtobj->rx.mpdu_cnt_fcs_err += _srcobj->rx.mpdu_cnt_fcs_err; \ 1972 _tgtobj->rx.non_ampdu_cnt += _srcobj->rx.non_ampdu_cnt; \ 1973 _tgtobj->rx.ampdu_cnt += _srcobj->rx.ampdu_cnt; \ 1974 _tgtobj->rx.rx_mpdus += _srcobj->rx.rx_mpdus; \ 1975 _tgtobj->rx.rx_ppdus += _srcobj->rx.rx_ppdus; \ 1976 _tgtobj->rx.rx_rate = _srcobj->rx.rx_rate; \ 1977 _tgtobj->rx.last_rx_rate = _srcobj->rx.last_rx_rate; \ 1978 _tgtobj->rx.rnd_avg_rx_rate = _srcobj->rx.rnd_avg_rx_rate; \ 1979 _tgtobj->rx.avg_rx_rate = _srcobj->rx.avg_rx_rate; \ 1980 _tgtobj->rx.rx_ratecode = _srcobj->rx.rx_ratecode; \ 1981 _tgtobj->rx.avg_snr = _srcobj->rx.avg_snr; \ 1982 _tgtobj->rx.rx_snr_measured_time = \ 1983 _srcobj->rx.rx_snr_measured_time; \ 1984 _tgtobj->rx.snr = _srcobj->rx.snr; \ 1985 _tgtobj->rx.last_snr = _srcobj->rx.last_snr; \ 1986 _tgtobj->rx.nss_info = _srcobj->rx.nss_info; \ 1987 _tgtobj->rx.mcs_info = _srcobj->rx.mcs_info; \ 1988 _tgtobj->rx.bw_info = _srcobj->rx.bw_info; \ 1989 _tgtobj->rx.gi_info = _srcobj->rx.gi_info; \ 1990 
_tgtobj->rx.preamble_info = _srcobj->rx.preamble_info; \ 1991 _tgtobj->rx.mpdu_retry_cnt += _srcobj->rx.mpdu_retry_cnt; \ 1992 _tgtobj->rx.bar_cnt = _srcobj->rx.bar_cnt; \ 1993 _tgtobj->rx.ndpa_cnt = _srcobj->rx.ndpa_cnt; \ 1994 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 1995 for (i = 0; i < MAX_MCS; i++) { \ 1996 _tgtobj->rx.pkt_type[pream_type].mcs_count[i] += \ 1997 _srcobj->rx.pkt_type[pream_type].mcs_count[i]; \ 1998 } \ 1999 } \ 2000 for (i = 0; i < WME_AC_MAX; i++) { \ 2001 _tgtobj->rx.wme_ac_type[i] += _srcobj->rx.wme_ac_type[i]; \ 2002 } \ 2003 for (i = 0; i < MAX_MCS; i++) { \ 2004 _tgtobj->rx.su_ax_ppdu_cnt.mcs_count[i] += \ 2005 _srcobj->rx.su_ax_ppdu_cnt.mcs_count[i]; \ 2006 _tgtobj->rx.rx_mpdu_cnt[i] += _srcobj->rx.rx_mpdu_cnt[i]; \ 2007 } \ 2008 for (mu_type = 0 ; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 2009 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok += \ 2010 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok; \ 2011 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err += \ 2012 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err; \ 2013 for (i = 0; i < SS_COUNT; i++) \ 2014 _tgtobj->rx.rx_mu[mu_type].ppdu_nss[i] += \ 2015 _srcobj->rx.rx_mu[mu_type].ppdu_nss[i]; \ 2016 for (i = 0; i < MAX_MCS; i++) \ 2017 _tgtobj->rx.rx_mu[mu_type].ppdu.mcs_count[i] += \ 2018 _srcobj->rx.rx_mu[mu_type].ppdu.mcs_count[i]; \ 2019 } \ 2020 for (i = 0; i < MAX_RECEPTION_TYPES; i++) { \ 2021 _tgtobj->rx.reception_type[i] += \ 2022 _srcobj->rx.reception_type[i]; \ 2023 _tgtobj->rx.ppdu_cnt[i] += _srcobj->rx.ppdu_cnt[i]; \ 2024 } \ 2025 for (i = 0; i < MAX_GI; i++) { \ 2026 _tgtobj->rx.sgi_count[i] += _srcobj->rx.sgi_count[i]; \ 2027 } \ 2028 for (i = 0; i < SS_COUNT; i++) { \ 2029 _tgtobj->rx.nss[i] += _srcobj->rx.nss[i]; \ 2030 _tgtobj->rx.ppdu_nss[i] += _srcobj->rx.ppdu_nss[i]; \ 2031 } \ 2032 for (i = 0; i < MAX_BW; i++) { \ 2033 _tgtobj->rx.bw[i] += _srcobj->rx.bw[i]; \ 2034 } \ 2035 DP_UPDATE_11BE_STATS(_tgtobj, _srcobj); \ 2036 } while (0) 2037 2038 /** 2039 * dp_peer_find_attach() - Allocates memory for peer objects 2040 * @soc: SoC handle 2041 * 2042 * Return: QDF_STATUS 2043 */ 2044 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc); 2045 extern void dp_peer_find_detach(struct dp_soc *soc); 2046 extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer); 2047 extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer); 2048 extern void dp_peer_find_hash_erase(struct dp_soc *soc); 2049 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev, 2050 struct dp_peer *peer); 2051 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev, 2052 struct dp_peer *peer); 2053 void dp_peer_find_id_to_obj_add(struct dp_soc *soc, 2054 struct dp_peer *peer, 2055 uint16_t peer_id); 2056 void dp_txrx_peer_attach_add(struct dp_soc *soc, 2057 struct dp_peer *peer, 2058 struct dp_txrx_peer *txrx_peer); 2059 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc, 2060 uint16_t peer_id); 2061 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev, 2062 enum dp_mod_id mod_id); 2063 2064 /* 2065 * dp_peer_ppdu_delayed_ba_cleanup() free ppdu allocated in peer 2066 * @peer: Datapath peer 2067 * 2068 * return: void 2069 */ 2070 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer); 2071 2072 extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer); 2073 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); 2074 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); 2075 2076 #ifdef DP_PEER_EXTENDED_API 
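/*
 * Illustrative usage sketch for the DP_UPDATE_PER_PKT_STATS and
 * DP_UPDATE_EXTD_STATS macros defined above. The names below are
 * hypothetical stand-ins, not part of this header: struct example_stats
 * represents any object that exposes the tx/rx stats members these macros
 * dereference. The macros accumulate the source counters into the target,
 * e.g. when folding per-link stats into an aggregate:
 *
 *	static void example_fold_stats(struct example_stats *tgt,
 *				       struct example_stats *src)
 *	{
 *		DP_UPDATE_PER_PKT_STATS(tgt, src);
 *		DP_UPDATE_EXTD_STATS(tgt, src);
 *	}
 *
 * Note that counter fields are added, while rate/snapshot fields (for
 * example tx.tx_rate or rx.last_rx_ts) are overwritten with the source
 * value.
 */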
2077 /** 2078 * dp_register_peer() - Register peer into physical device 2079 * @soc_hdl - data path soc handle 2080 * @pdev_id - device instance id 2081 * @sta_desc - peer description 2082 * 2083 * Register peer into physical device 2084 * 2085 * Return: QDF_STATUS_SUCCESS registration success 2086 * QDF_STATUS_E_FAULT peer not found 2087 */ 2088 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2089 struct ol_txrx_desc_type *sta_desc); 2090 2091 /** 2092 * dp_clear_peer() - remove peer from physical device 2093 * @soc_hdl - data path soc handle 2094 * @pdev_id - device instance id 2095 * @peer_addr - peer mac address 2096 * 2097 * remove peer from physical device 2098 * 2099 * Return: QDF_STATUS_SUCCESS registration success 2100 * QDF_STATUS_E_FAULT peer not found 2101 */ 2102 QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2103 struct qdf_mac_addr peer_addr); 2104 2105 /* 2106 * dp_find_peer_exist - find peer if already exists 2107 * @soc: datapath soc handle 2108 * @pdev_id: physical device instance id 2109 * @peer_mac_addr: peer mac address 2110 * 2111 * Return: true or false 2112 */ 2113 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2114 uint8_t *peer_addr); 2115 2116 /* 2117 * dp_find_peer_exist_on_vdev - find if peer exists on the given vdev 2118 * @soc: datapath soc handle 2119 * @vdev_id: vdev instance id 2120 * @peer_mac_addr: peer mac address 2121 * 2122 * Return: true or false 2123 */ 2124 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2125 uint8_t *peer_addr); 2126 2127 /* 2128 * dp_find_peer_exist_on_other_vdev - find if peer exists 2129 * on other than the given vdev 2130 * @soc: datapath soc handle 2131 * @vdev_id: vdev instance id 2132 * @peer_mac_addr: peer mac address 2133 * @max_bssid: max number of bssids 2134 * 2135 * Return: true or false 2136 */ 2137 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl, 2138 uint8_t vdev_id, uint8_t *peer_addr, 2139 uint16_t max_bssid); 2140 2141 /** 2142 * dp_peer_state_update() - update peer local state 2143 * @pdev - data path device instance 2144 * @peer_addr - peer mac address 2145 * @state - new peer local state 2146 * 2147 * update peer local state 2148 * 2149 * Return: QDF_STATUS_SUCCESS registration success 2150 */ 2151 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac, 2152 enum ol_txrx_peer_state state); 2153 2154 /** 2155 * dp_get_vdevid() - Get virtual interface id which peer registered 2156 * @soc - datapath soc handle 2157 * @peer_mac - peer mac address 2158 * @vdev_id - virtual interface id which peer registered 2159 * 2160 * Get virtual interface id which peer registered 2161 * 2162 * Return: QDF_STATUS_SUCCESS registration success 2163 */ 2164 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2165 uint8_t *vdev_id); 2166 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle, 2167 struct qdf_mac_addr peer_addr); 2168 struct cdp_vdev *dp_get_vdev_for_peer(void *peer); 2169 uint8_t *dp_peer_get_peer_mac_addr(void *peer); 2170 2171 /** 2172 * dp_get_peer_state() - Get local peer state 2173 * @soc - datapath soc handle 2174 * @vdev_id - vdev id 2175 * @peer_mac - peer mac addr 2176 * 2177 * Get local peer state 2178 * 2179 * Return: peer status 2180 */ 2181 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id, 2182 uint8_t *peer_mac); 2183 void dp_local_peer_id_pool_init(struct dp_pdev *pdev); 2184 void dp_local_peer_id_alloc(struct dp_pdev *pdev, 
struct dp_peer *peer); 2185 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer); 2186 /** 2187 * dp_set_peer_as_tdls_peer() - set tdls peer flag to peer 2188 * @soc_hdl: datapath soc handle 2189 * @vdev_id: vdev_id 2190 * @peer_mac: peer mac addr 2191 * @val: tdls peer flag 2192 * 2193 * Return: none 2194 */ 2195 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2196 uint8_t *peer_mac, bool val); 2197 #else 2198 /** 2199 * dp_get_vdevid() - Get virtual interface id which peer registered 2200 * @soc - datapath soc handle 2201 * @peer_mac - peer mac address 2202 * @vdev_id - virtual interface id which peer registered 2203 * 2204 * Get virtual interface id which peer registered 2205 * 2206 * Return: QDF_STATUS_SUCCESS registration success 2207 */ 2208 static inline 2209 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2210 uint8_t *vdev_id) 2211 { 2212 return QDF_STATUS_E_NOSUPPORT; 2213 } 2214 2215 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev) 2216 { 2217 } 2218 2219 static inline 2220 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) 2221 { 2222 } 2223 2224 static inline 2225 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) 2226 { 2227 } 2228 2229 static inline 2230 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2231 uint8_t *peer_mac, bool val) 2232 { 2233 } 2234 #endif 2235 2236 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, 2237 uint8_t *peer_mac, uint16_t vdev_id, 2238 uint8_t tid, 2239 int status); 2240 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc, 2241 uint8_t *peer_mac, uint16_t vdev_id, 2242 uint8_t dialogtoken, uint16_t tid, 2243 uint16_t batimeout, 2244 uint16_t buffersize, 2245 uint16_t startseqnum); 2246 QDF_STATUS dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, 2247 uint8_t *peer_mac, uint16_t vdev_id, 2248 uint8_t tid, uint8_t *dialogtoken, 2249 uint16_t *statuscode, 2250 uint16_t *buffersize, 2251 uint16_t *batimeout); 2252 QDF_STATUS dp_set_addba_response(struct cdp_soc_t *cdp_soc, 2253 uint8_t *peer_mac, 2254 uint16_t vdev_id, uint8_t tid, 2255 uint16_t statuscode); 2256 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, 2257 uint16_t vdev_id, int tid, 2258 uint16_t reasoncode); 2259 2260 /** 2261 * dp_rx_tid_update_ba_win_size() - Update the DP tid BA window size 2262 * @soc: soc handle 2263 * @peer_mac: mac address of peer handle 2264 * @vdev_id: id of vdev handle 2265 * @tid: tid 2266 * @buffersize: BA window size 2267 * 2268 * Return: success/failure of tid update 2269 */ 2270 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc, 2271 uint8_t *peer_mac, uint16_t vdev_id, 2272 uint8_t tid, uint16_t buffersize); 2273 2274 /* 2275 * dp_delba_tx_completion_wifi3() - Handle delba tx completion 2276 * 2277 * @cdp_soc: soc handle 2278 * @vdev_id: id of the vdev handle 2279 * @peer_mac: peer mac address 2280 * @tid: Tid number 2281 * @status: Tx completion status 2282 * Indicate status of delba Tx to DP for stats update and retry 2283 * delba if tx failed. 
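 *
 * Illustrative flow (the surrounding caller and variables are hypothetical):
 * the control path typically drives a BA session with the declarations
 * above, e.g.
 *
 *	status = dp_addba_requestprocess_wifi3(cdp_soc, peer_mac, vdev_id,
 *					       dialogtoken, tid, batimeout,
 *					       buffersize, startseqnum);
 *	...
 *	dp_delba_process_wifi3(cdp_soc, peer_mac, vdev_id, tid, reasoncode);
 *
 * and then reports the over-the-air DELBA result back through
 * dp_delba_tx_completion_wifi3() so stats are updated and the DELBA can be
 * retried on failure.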
2284 * 2285 */ 2286 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, 2287 uint16_t vdev_id, uint8_t tid, 2288 int status); 2289 extern QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid, 2290 uint32_t ba_window_size, 2291 uint32_t start_seq); 2292 2293 #ifdef DP_UMAC_HW_RESET_SUPPORT 2294 void dp_pause_reo_send_cmd(struct dp_soc *soc); 2295 2296 void dp_resume_reo_send_cmd(struct dp_soc *soc); 2297 void dp_cleanup_reo_cmd_module(struct dp_soc *soc); 2298 void dp_reo_desc_freelist_destroy(struct dp_soc *soc); 2299 void dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr, 2300 uint32_t size); 2301 #endif 2302 2303 extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, 2304 enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params, 2305 void (*callback_fn), void *data); 2306 2307 extern void dp_reo_cmdlist_destroy(struct dp_soc *soc); 2308 2309 /** 2310 * dp_reo_status_ring_handler - Handler for REO Status ring 2311 * @int_ctx: pointer to DP interrupt context 2312 * @soc: DP Soc handle 2313 * 2314 * Returns: Number of descriptors reaped 2315 */ 2316 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, 2317 struct dp_soc *soc); 2318 void dp_aggregate_vdev_stats(struct dp_vdev *vdev, 2319 struct cdp_vdev_stats *vdev_stats); 2320 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt, 2321 union hal_reo_status *reo_status); 2322 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, 2323 union hal_reo_status *reo_status); 2324 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id, 2325 qdf_nbuf_t nbuf, 2326 uint8_t newmac[][QDF_MAC_ADDR_SIZE], 2327 uint8_t new_mac_cnt, uint8_t tid, 2328 bool is_igmp, bool is_dms_pkt); 2329 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 2330 2331 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 2332 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, 2333 uint32_t stats_type_upload_mask, uint32_t config_param_0, 2334 uint32_t config_param_1, uint32_t config_param_2, 2335 uint32_t config_param_3, int cookie, int cookie_msb, 2336 uint8_t mac_id); 2337 void dp_htt_stats_print_tag(struct dp_pdev *pdev, 2338 uint8_t tag_type, uint32_t *tag_buf); 2339 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf); 2340 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask, 2341 uint8_t mac_id); 2342 /** 2343 * dp_rxtid_stats_cmd_cb - function pointer for peer 2344 * rx tid stats cmd call_back 2345 */ 2346 typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt, 2347 union hal_reo_status *reo_status); 2348 int dp_peer_rxtid_stats(struct dp_peer *peer, 2349 dp_rxtid_stats_cmd_cb dp_stats_cmd_cb, 2350 void *cb_ctxt); 2351 #ifdef IPA_OFFLOAD 2352 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt, 2353 union hal_reo_status *reo_status); 2354 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer, 2355 dp_rxtid_stats_cmd_cb dp_stats_cmd_cb); 2356 #endif 2357 QDF_STATUS 2358 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, 2359 uint8_t *peer_mac, enum cdp_sec_type sec_type, 2360 uint32_t *rx_pn); 2361 2362 QDF_STATUS 2363 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, 2364 uint8_t *peer_mac, enum cdp_sec_type sec_type, 2365 bool is_unicast); 2366 2367 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id); 2368 2369 QDF_STATUS 2370 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id, 2371 uint8_t 
*peer_mac, 2372 bool is_unicast, uint32_t *key); 2373 2374 /** 2375 * dp_check_pdev_exists() - Validate pdev before use 2376 * @soc - dp soc handle 2377 * @data - pdev handle 2378 * 2379 * Return: 0 - success/invalid - failure 2380 */ 2381 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data); 2382 2383 /** 2384 * dp_update_delay_stats() - Update delay statistics in structure 2385 * and fill min, max and avg delay 2386 * @tstats: tid tx stats 2387 * @rstats: tid rx stats 2388 * @delay: delay in ms 2389 * @tid: tid value 2390 * @mode: type of tx delay mode 2391 * @ring id: ring number 2392 * @delay_in_us: flag to indicate whether the delay is in ms or us 2393 * 2394 * Return: none 2395 */ 2396 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats, 2397 struct cdp_tid_rx_stats *rstats, uint32_t delay, 2398 uint8_t tid, uint8_t mode, uint8_t ring_id, 2399 bool delay_in_us); 2400 2401 /** 2402 * dp_print_ring_stats(): Print tail and head pointer 2403 * @pdev: DP_PDEV handle 2404 * 2405 * Return:void 2406 */ 2407 void dp_print_ring_stats(struct dp_pdev *pdev); 2408 2409 /** 2410 * dp_print_ring_stat_from_hal(): Print tail and head pointer through hal 2411 * @soc: soc handle 2412 * @srng: srng handle 2413 * @ring_type: ring type 2414 * 2415 * Return:void 2416 */ 2417 void 2418 dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng, 2419 enum hal_ring_type ring_type); 2420 /** 2421 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters 2422 * @pdev_handle: DP pdev handle 2423 * 2424 * Return - void 2425 */ 2426 void dp_print_pdev_cfg_params(struct dp_pdev *pdev); 2427 2428 /** 2429 * dp_print_soc_cfg_params()- Dump soc wlan config parameters 2430 * @soc_handle: Soc handle 2431 * 2432 * Return: void 2433 */ 2434 void dp_print_soc_cfg_params(struct dp_soc *soc); 2435 2436 /** 2437 * dp_srng_get_str_from_ring_type() - Return string name for a ring 2438 * @ring_type: Ring 2439 * 2440 * Return: char const pointer 2441 */ 2442 const 2443 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type); 2444 2445 /* 2446 * dp_txrx_path_stats() - Function to display dump stats 2447 * @soc - soc handle 2448 * 2449 * return: none 2450 */ 2451 void dp_txrx_path_stats(struct dp_soc *soc); 2452 2453 /* 2454 * dp_print_per_ring_stats(): Packet count per ring 2455 * @soc - soc handle 2456 * 2457 * Return - None 2458 */ 2459 void dp_print_per_ring_stats(struct dp_soc *soc); 2460 2461 /** 2462 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level 2463 * @pdev: DP PDEV handle 2464 * 2465 * return: void 2466 */ 2467 void dp_aggregate_pdev_stats(struct dp_pdev *pdev); 2468 2469 /** 2470 * dp_print_rx_rates(): Print Rx rate stats 2471 * @vdev: DP_VDEV handle 2472 * 2473 * Return:void 2474 */ 2475 void dp_print_rx_rates(struct dp_vdev *vdev); 2476 2477 /** 2478 * dp_print_tx_rates(): Print tx rates 2479 * @vdev: DP_VDEV handle 2480 * 2481 * Return:void 2482 */ 2483 void dp_print_tx_rates(struct dp_vdev *vdev); 2484 2485 /** 2486 * dp_print_peer_stats():print peer stats 2487 * @peer: DP_PEER handle 2488 * @peer_stats: buffer holding peer stats 2489 * 2490 * return void 2491 */ 2492 void dp_print_peer_stats(struct dp_peer *peer, 2493 struct cdp_peer_stats *peer_stats); 2494 2495 /** 2496 * dp_print_pdev_tx_stats(): Print Pdev level TX stats 2497 * @pdev: DP_PDEV Handle 2498 * 2499 * Return:void 2500 */ 2501 void 2502 dp_print_pdev_tx_stats(struct dp_pdev *pdev); 2503 2504 /** 2505 * dp_print_pdev_rx_stats(): Print Pdev level RX stats 2506 * @pdev: DP_PDEV Handle 2507 * 
2508 * Return: void 2509 */ 2510 void 2511 dp_print_pdev_rx_stats(struct dp_pdev *pdev); 2512 2513 /** 2514 * dp_print_soc_tx_stats(): Print SOC level stats 2515 * @soc DP_SOC Handle 2516 * 2517 * Return: void 2518 */ 2519 void dp_print_soc_tx_stats(struct dp_soc *soc); 2520 2521 /** 2522 * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc 2523 * @soc: dp_soc handle 2524 * 2525 * Return: None 2526 */ 2527 void dp_print_soc_interrupt_stats(struct dp_soc *soc); 2528 2529 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING 2530 /** 2531 * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats 2532 * for all SRNGs 2533 * @soc: DP soc handle 2534 * @srng_mask: SRNGs mask for dumping usage watermark stats 2535 * 2536 * Return: None 2537 */ 2538 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask); 2539 #else 2540 /** 2541 * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats 2542 * for all SRNGs 2543 * @soc: DP soc handle 2544 * @srng_mask: SRNGs mask for dumping usage watermark stats 2545 * 2546 * Return: None 2547 */ 2548 static inline 2549 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask) 2550 { 2551 } 2552 #endif 2553 2554 /** 2555 * dp_print_soc_rx_stats: Print SOC level Rx stats 2556 * @soc: DP_SOC Handle 2557 * 2558 * Return:void 2559 */ 2560 void dp_print_soc_rx_stats(struct dp_soc *soc); 2561 2562 /** 2563 * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac 2564 * 2565 * @mac_id: MAC id 2566 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 2567 * 2568 * Single pdev using both MACs will operate on both MAC rings, 2569 * which is the case for MCL. 2570 * For WIN each PDEV will operate one ring, so index is zero. 2571 * 2572 */ 2573 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id) 2574 { 2575 if (mac_id && pdev_id) { 2576 qdf_print("Both mac_id and pdev_id cannot be non zero"); 2577 QDF_BUG(0); 2578 return 0; 2579 } 2580 return (mac_id + pdev_id); 2581 } 2582 2583 /** 2584 * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id 2585 * @soc: soc pointer 2586 * @mac_id: MAC id 2587 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 2588 * 2589 * For MCL, Single pdev using both MACs will operate on both MAC rings. 2590 * 2591 * For WIN, each PDEV will operate one ring. 2592 * 2593 */ 2594 static inline int 2595 dp_get_lmac_id_for_pdev_id 2596 (struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id) 2597 { 2598 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 2599 if (mac_id && pdev_id) { 2600 qdf_print("Both mac_id and pdev_id cannot be non zero"); 2601 QDF_BUG(0); 2602 return 0; 2603 } 2604 return (mac_id + pdev_id); 2605 } 2606 2607 return soc->pdev_list[pdev_id]->lmac_id; 2608 } 2609 2610 /** 2611 * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id 2612 * @soc: soc pointer 2613 * @lmac_id: LMAC id 2614 * 2615 * For MCL, Single pdev exists 2616 * 2617 * For WIN, each PDEV will operate one ring. 2618 * 2619 */ 2620 static inline struct dp_pdev * 2621 dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id) 2622 { 2623 uint8_t i = 0; 2624 2625 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 2626 i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id); 2627 return ((i < MAX_PDEV_CNT) ? 
soc->pdev_list[i] : NULL); 2628 } 2629 2630 /* Typically for MCL as there only 1 PDEV*/ 2631 return soc->pdev_list[0]; 2632 } 2633 2634 /** 2635 * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev 2636 * corresponding to host pdev id 2637 * @soc: soc pointer 2638 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 2639 * 2640 * returns target pdev_id for host pdev id. For WIN, this is derived through 2641 * a two step process: 2642 * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change 2643 * during mode switch) 2644 * 2. Get target pdev_id (set up during WMI ready) from lmac_id 2645 * 2646 * For MCL, return the offset-1 translated mac_id 2647 */ 2648 static inline int 2649 dp_calculate_target_pdev_id_from_host_pdev_id 2650 (struct dp_soc *soc, uint32_t mac_for_pdev) 2651 { 2652 struct dp_pdev *pdev; 2653 2654 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 2655 return DP_SW2HW_MACID(mac_for_pdev); 2656 2657 pdev = soc->pdev_list[mac_for_pdev]; 2658 2659 /*non-MCL case, get original target_pdev mapping*/ 2660 return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id); 2661 } 2662 2663 /** 2664 * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding 2665 * to host pdev id 2666 * @soc: soc pointer 2667 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 2668 * 2669 * returns target pdev_id for host pdev id. 2670 * For WIN, return the value stored in pdev object. 2671 * For MCL, return the offset-1 translated mac_id. 2672 */ 2673 static inline int 2674 dp_get_target_pdev_id_for_host_pdev_id 2675 (struct dp_soc *soc, uint32_t mac_for_pdev) 2676 { 2677 struct dp_pdev *pdev; 2678 2679 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 2680 return DP_SW2HW_MACID(mac_for_pdev); 2681 2682 pdev = soc->pdev_list[mac_for_pdev]; 2683 2684 return pdev->target_pdev_id; 2685 } 2686 2687 /** 2688 * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding 2689 * to target pdev id 2690 * @soc: soc pointer 2691 * @pdev_id: pdev_id corresponding to target pdev 2692 * 2693 * returns host pdev_id for target pdev id. For WIN, this is derived through 2694 * a two step process: 2695 * 1. Get lmac_id corresponding to target pdev_id 2696 * 2. Get host pdev_id (set up during WMI ready) from lmac_id 2697 * 2698 * For MCL, return the 0-offset pdev_id 2699 */ 2700 static inline int 2701 dp_get_host_pdev_id_for_target_pdev_id 2702 (struct dp_soc *soc, uint32_t pdev_id) 2703 { 2704 struct dp_pdev *pdev; 2705 int lmac_id; 2706 2707 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 2708 return DP_HW2SW_MACID(pdev_id); 2709 2710 /*non-MCL case, get original target_lmac mapping from target pdev*/ 2711 lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, 2712 DP_HW2SW_MACID(pdev_id)); 2713 2714 /*Get host pdev from lmac*/ 2715 pdev = dp_get_pdev_for_lmac_id(soc, lmac_id); 2716 2717 return pdev ? pdev->pdev_id : INVALID_PDEV_ID; 2718 } 2719 2720 /* 2721 * dp_get_mac_id_for_mac() - Return mac corresponding WIN and MCL mac_ids 2722 * 2723 * @soc: handle to DP soc 2724 * @mac_id: MAC id 2725 * 2726 * Single pdev using both MACs will operate on both MAC rings, 2727 * which is the case for MCL. 2728 * For WIN each PDEV will operate one ring, so index is zero. 2729 * 2730 */ 2731 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id) 2732 { 2733 /* 2734 * Single pdev using both MACs will operate on both MAC rings, 2735 * which is the case for MCL. 
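	 *
	 * Illustrative mapping (not exhaustive): on an MCL DBS target both
	 * mac_id 0 and mac_id 1 belong to the single pdev, so mac_id is
	 * returned unchanged below; on WIN targets each pdev owns exactly
	 * one ring, so the index collapses to 0.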
2736 */ 2737 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 2738 return mac_id; 2739 2740 /* For WIN each PDEV will operate one ring, so index is zero. */ 2741 return 0; 2742 } 2743 2744 /* 2745 * dp_is_subtype_data() - check if the frame subtype is data 2746 * 2747 * @frame_ctrl: Frame control field 2748 * 2749 * check the frame control field and verify if the packet 2750 * is a data packet. 2751 * 2752 * Return: true or false 2753 */ 2754 static inline bool dp_is_subtype_data(uint16_t frame_ctrl) 2755 { 2756 if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) == 2757 QDF_IEEE80211_FC0_TYPE_DATA) && 2758 (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 2759 QDF_IEEE80211_FC0_SUBTYPE_DATA) || 2760 ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 2761 QDF_IEEE80211_FC0_SUBTYPE_QOS))) { 2762 return true; 2763 } 2764 2765 return false; 2766 } 2767 2768 #ifdef WDI_EVENT_ENABLE 2769 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 2770 uint32_t stats_type_upload_mask, 2771 uint8_t mac_id); 2772 2773 int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 2774 wdi_event_subscribe *event_cb_sub_handle, 2775 uint32_t event); 2776 2777 int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 2778 wdi_event_subscribe *event_cb_sub_handle, 2779 uint32_t event); 2780 2781 void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc, 2782 void *data, u_int16_t peer_id, 2783 int status, u_int8_t pdev_id); 2784 2785 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev); 2786 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev); 2787 2788 static inline void 2789 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, 2790 void *cb_context, 2791 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 2792 uint8_t pipe_id) 2793 { 2794 struct hif_msg_callbacks hif_pipe_callbacks = { 0 }; 2795 2796 /* TODO: Temporary change to bypass HTC connection for this new 2797 * HIF pipe, which will be used for packet log and other high- 2798 * priority HTT messages. 
Proper HTC connection to be added 2799 * later once required FW changes are available 2800 */ 2801 hif_pipe_callbacks.rxCompletionHandler = callback; 2802 hif_pipe_callbacks.Context = cb_context; 2803 hif_update_pipe_callback(dp_soc->hif_handle, 2804 DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks); 2805 } 2806 #else 2807 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 2808 wdi_event_subscribe *event_cb_sub_handle, 2809 uint32_t event) 2810 { 2811 return 0; 2812 } 2813 2814 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 2815 wdi_event_subscribe *event_cb_sub_handle, 2816 uint32_t event) 2817 { 2818 return 0; 2819 } 2820 2821 static inline 2822 void dp_wdi_event_handler(enum WDI_EVENT event, 2823 struct dp_soc *soc, 2824 void *data, u_int16_t peer_id, 2825 int status, u_int8_t pdev_id) 2826 { 2827 } 2828 2829 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev) 2830 { 2831 return 0; 2832 } 2833 2834 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev) 2835 { 2836 return 0; 2837 } 2838 2839 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 2840 uint32_t stats_type_upload_mask, uint8_t mac_id) 2841 { 2842 return 0; 2843 } 2844 2845 static inline void 2846 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, 2847 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 2848 uint8_t pipe_id) 2849 { 2850 } 2851 #endif /* CONFIG_WIN */ 2852 2853 #ifdef VDEV_PEER_PROTOCOL_COUNT 2854 /** 2855 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 2856 * @vdev: VDEV DP object 2857 * @nbuf: data packet 2858 * @peer: DP TXRX Peer object 2859 * @is_egress: whether egress or ingress 2860 * @is_rx: whether rx or tx 2861 * 2862 * This function updates the per-peer protocol counters 2863 * Return: void 2864 */ 2865 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, 2866 qdf_nbuf_t nbuf, 2867 struct dp_txrx_peer *txrx_peer, 2868 bool is_egress, 2869 bool is_rx); 2870 2871 /** 2872 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 2873 * @soc: SOC DP object 2874 * @vdev_id: vdev_id 2875 * @nbuf: data packet 2876 * @is_egress: whether egress or ingress 2877 * @is_rx: whether rx or tx 2878 * 2879 * This function updates the per-peer protocol counters 2880 * Return: void 2881 */ 2882 2883 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc, 2884 int8_t vdev_id, 2885 qdf_nbuf_t nbuf, 2886 bool is_egress, 2887 bool is_rx); 2888 2889 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 2890 qdf_nbuf_t nbuf); 2891 2892 #else 2893 #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, txrx_peer, \ 2894 is_egress, is_rx) 2895 2896 static inline 2897 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 2898 qdf_nbuf_t nbuf) 2899 { 2900 } 2901 2902 #endif 2903 2904 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 2905 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl); 2906 2907 /** 2908 * dp_tx_dump_flow_pool_info_compact() - dump flow pool info 2909 * @soc: DP soc context 2910 * 2911 * Return: none 2912 */ 2913 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc); 2914 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, 2915 bool force); 2916 #else 2917 static inline void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc) 2918 { 2919 } 2920 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 2921 2922 #ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS 2923 static inline int 2924 
dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 2925 { 2926 return hal_srng_access_start_unlocked(soc, hal_ring_hdl); 2927 } 2928 2929 static inline void 2930 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 2931 { 2932 hal_srng_access_end_unlocked(soc, hal_ring_hdl); 2933 } 2934 2935 #else 2936 static inline int 2937 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 2938 { 2939 return hal_srng_access_start(soc, hal_ring_hdl); 2940 } 2941 2942 static inline void 2943 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 2944 { 2945 hal_srng_access_end(soc, hal_ring_hdl); 2946 } 2947 #endif 2948 2949 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 2950 /** 2951 * dp_srng_access_start() - Wrapper function to log access start of a hal ring 2952 * @int_ctx: pointer to DP interrupt context. This should not be NULL 2953 * @soc: DP Soc handle 2954 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 2955 * 2956 * Return: 0 on success; error on failure 2957 */ 2958 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 2959 hal_ring_handle_t hal_ring_hdl); 2960 2961 /** 2962 * dp_srng_access_end() - Wrapper function to log access end of a hal ring 2963 * @int_ctx: pointer to DP interrupt context. This should not be NULL 2964 * @soc: DP Soc handle 2965 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 2966 * 2967 * Return: void 2968 */ 2969 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 2970 hal_ring_handle_t hal_ring_hdl); 2971 2972 #else 2973 static inline int dp_srng_access_start(struct dp_intr *int_ctx, 2974 struct dp_soc *dp_soc, 2975 hal_ring_handle_t hal_ring_hdl) 2976 { 2977 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 2978 2979 return dp_hal_srng_access_start(hal_soc, hal_ring_hdl); 2980 } 2981 2982 static inline void dp_srng_access_end(struct dp_intr *int_ctx, 2983 struct dp_soc *dp_soc, 2984 hal_ring_handle_t hal_ring_hdl) 2985 { 2986 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 2987 2988 return dp_hal_srng_access_end(hal_soc, hal_ring_hdl); 2989 } 2990 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 2991 2992 #ifdef QCA_CACHED_RING_DESC 2993 /** 2994 * dp_srng_dst_get_next() - Wrapper function to get next ring desc 2995 * @dp_socsoc: DP Soc handle 2996 * @hal_ring: opaque pointer to the HAL Destination Ring 2997 * 2998 * Return: HAL ring descriptor 2999 */ 3000 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 3001 hal_ring_handle_t hal_ring_hdl) 3002 { 3003 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3004 3005 return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl); 3006 } 3007 3008 /** 3009 * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached 3010 * descriptors 3011 * @dp_socsoc: DP Soc handle 3012 * @hal_ring: opaque pointer to the HAL Rx Destination ring 3013 * @num_entries: Entry count 3014 * 3015 * Return: None 3016 */ 3017 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 3018 hal_ring_handle_t hal_ring_hdl, 3019 uint32_t num_entries) 3020 { 3021 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3022 3023 hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries); 3024 } 3025 #else 3026 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 3027 hal_ring_handle_t hal_ring_hdl) 3028 { 3029 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 3030 3031 return hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 3032 } 3033 3034 static inline void 
dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 3035 hal_ring_handle_t hal_ring_hdl, 3036 uint32_t num_entries) 3037 { 3038 } 3039 #endif /* QCA_CACHED_RING_DESC */ 3040 3041 #if defined(QCA_CACHED_RING_DESC) && \ 3042 (defined(QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH) || \ 3043 defined(QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH)) 3044 /** 3045 * dp_srng_dst_prefetch() - Wrapper function to prefetch descs from dest ring 3046 * @hal_soc_hdl: HAL SOC handle 3047 * @hal_ring: opaque pointer to the HAL Rx Destination ring 3048 * @num_entries: Entry count 3049 * 3050 * Return: None 3051 */ 3052 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 3053 hal_ring_handle_t hal_ring_hdl, 3054 uint32_t num_entries) 3055 { 3056 return hal_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_entries); 3057 } 3058 3059 /** 3060 * dp_srng_dst_prefetch_32_byte_desc() - Wrapper function to prefetch 3061 * 32 byte descriptor starting at 3062 * 64 byte offset 3063 * @hal_soc_hdl: HAL SOC handle 3064 * @hal_ring: opaque pointer to the HAL Rx Destination ring 3065 * @num_entries: Entry count 3066 * 3067 * Return: None 3068 */ 3069 static inline 3070 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 3071 hal_ring_handle_t hal_ring_hdl, 3072 uint32_t num_entries) 3073 { 3074 return hal_srng_dst_prefetch_32_byte_desc(hal_soc, hal_ring_hdl, 3075 num_entries); 3076 } 3077 #else 3078 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 3079 hal_ring_handle_t hal_ring_hdl, 3080 uint32_t num_entries) 3081 { 3082 return NULL; 3083 } 3084 3085 static inline 3086 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 3087 hal_ring_handle_t hal_ring_hdl, 3088 uint32_t num_entries) 3089 { 3090 return NULL; 3091 } 3092 #endif 3093 3094 #ifdef QCA_ENH_V3_STATS_SUPPORT 3095 /** 3096 * dp_pdev_print_delay_stats(): Print pdev level delay stats 3097 * @pdev: DP_PDEV handle 3098 * 3099 * Return:void 3100 */ 3101 void dp_pdev_print_delay_stats(struct dp_pdev *pdev); 3102 3103 /** 3104 * dp_pdev_print_tid_stats(): Print pdev level tid stats 3105 * @pdev: DP_PDEV handle 3106 * 3107 * Return:void 3108 */ 3109 void dp_pdev_print_tid_stats(struct dp_pdev *pdev); 3110 3111 /** 3112 * dp_pdev_print_rx_error_stats(): Print pdev level rx error stats 3113 * @pdev: DP_PDEV handle 3114 * 3115 * Return:void 3116 */ 3117 void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev); 3118 #endif /* QCA_ENH_V3_STATS_SUPPORT */ 3119 3120 /** 3121 * dp_pdev_get_tid_stats(): Get accumulated pdev level tid_stats 3122 * @soc_hdl: soc handle 3123 * @pdev_id: id of dp_pdev handle 3124 * @tid_stats: Pointer for cdp_tid_stats_intf 3125 * 3126 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_INVAL 3127 */ 3128 QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3129 struct cdp_tid_stats_intf *tid_stats); 3130 3131 void dp_soc_set_txrx_ring_map(struct dp_soc *soc); 3132 3133 /** 3134 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev 3135 * @vdev: DP vdev handle 3136 * 3137 * Return: struct cdp_vdev pointer 3138 */ 3139 static inline 3140 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev) 3141 { 3142 return (struct cdp_vdev *)vdev; 3143 } 3144 3145 /** 3146 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev 3147 * @pdev: DP pdev handle 3148 * 3149 * Return: struct cdp_pdev pointer 3150 */ 3151 static inline 3152 struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev) 3153 { 3154 return (struct cdp_pdev *)pdev; 3155 } 3156 3157 /** 3158 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc 
3159 * @psoc: DP psoc handle 3160 * 3161 * Return: struct cdp_soc pointer 3162 */ 3163 static inline 3164 struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc) 3165 { 3166 return (struct cdp_soc *)psoc; 3167 } 3168 3169 /** 3170 * dp_soc_to_cdp_soc_t() - typecast dp psoc to 3171 * ol txrx soc handle 3172 * @psoc: DP psoc handle 3173 * 3174 * Return: struct cdp_soc_t pointer 3175 */ 3176 static inline 3177 struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc) 3178 { 3179 return (struct cdp_soc_t *)psoc; 3180 } 3181 3182 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA) 3183 /** 3184 * dp_rx_flow_update_fse_stats() - Update a flow's statistics 3185 * @pdev: pdev handle 3186 * @flow_id: flow index (truncated hash) in the Rx FST 3187 * 3188 * Return: Success when flow statistcs is updated, error on failure 3189 */ 3190 QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev, 3191 struct cdp_rx_flow_info *rx_flow_info, 3192 struct cdp_flow_stats *stats); 3193 3194 /** 3195 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table 3196 * @pdev: pdev handle 3197 * @rx_flow_info: DP flow parameters 3198 * 3199 * Return: Success when flow is deleted, error on failure 3200 */ 3201 QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev, 3202 struct cdp_rx_flow_info *rx_flow_info); 3203 3204 /** 3205 * dp_rx_flow_add_entry() - Add a flow entry to flow search table 3206 * @pdev: DP pdev instance 3207 * @rx_flow_info: DP flow parameters 3208 * 3209 * Return: Success when flow is added, no-memory or already exists on error 3210 */ 3211 QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev, 3212 struct cdp_rx_flow_info *rx_flow_info); 3213 3214 /** 3215 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters 3216 * @soc: SoC handle 3217 * @pdev: Pdev handle 3218 * 3219 * Return: Handle to flow search table entry 3220 */ 3221 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev); 3222 3223 /** 3224 * dp_rx_fst_detach() - De-initialize Rx FST 3225 * @soc: SoC handle 3226 * @pdev: Pdev handle 3227 * 3228 * Return: None 3229 */ 3230 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev); 3231 3232 /** 3233 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach 3234 * @soc: SoC handle 3235 * @pdev: Pdev handle 3236 * 3237 * Return: Success when fst parameters are programmed in FW, error otherwise 3238 */ 3239 QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc, 3240 struct dp_pdev *pdev); 3241 3242 /** dp_mon_rx_update_rx_flow_tag_stats() - Update a mon flow's statistics 3243 * @pdev: pdev handle 3244 * @flow_id: flow index (truncated hash) in the Rx FST 3245 * 3246 * Return: Success when flow statistcs is updated, error on failure 3247 */ 3248 QDF_STATUS 3249 dp_mon_rx_update_rx_flow_tag_stats(struct dp_pdev *pdev, uint32_t flow_id); 3250 3251 #else /* !((WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)) */ 3252 3253 /** 3254 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters 3255 * @soc: SoC handle 3256 * @pdev: Pdev handle 3257 * 3258 * Return: Handle to flow search table entry 3259 */ 3260 static inline 3261 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev) 3262 { 3263 return QDF_STATUS_SUCCESS; 3264 } 3265 3266 /** 3267 * dp_rx_fst_detach() - De-initialize Rx FST 3268 * @soc: SoC handle 3269 * @pdev: Pdev handle 3270 * 3271 * Return: None 3272 */ 3273 static inline 3274 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev) 3275 { 
3276 } 3277 #endif 3278 3279 /** 3280 * dp_vdev_get_ref() - API to take a reference for VDEV object 3281 * 3282 * @soc : core DP soc context 3283 * @vdev : DP vdev 3284 * @mod_id : module id 3285 * 3286 * Return: QDF_STATUS_SUCCESS if reference held successfully 3287 * else QDF_STATUS_E_INVAL 3288 */ 3289 static inline 3290 QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev, 3291 enum dp_mod_id mod_id) 3292 { 3293 if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt)) 3294 return QDF_STATUS_E_INVAL; 3295 3296 qdf_atomic_inc(&vdev->mod_refs[mod_id]); 3297 3298 return QDF_STATUS_SUCCESS; 3299 } 3300 3301 /** 3302 * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id 3303 * @soc: core DP soc context 3304 * @vdev_id: vdev id from vdev object can be retrieved 3305 * @mod_id: module id which is requesting the reference 3306 * 3307 * Return: struct dp_vdev*: Pointer to DP vdev object 3308 */ 3309 static inline struct dp_vdev * 3310 dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id, 3311 enum dp_mod_id mod_id) 3312 { 3313 struct dp_vdev *vdev = NULL; 3314 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) 3315 return NULL; 3316 3317 qdf_spin_lock_bh(&soc->vdev_map_lock); 3318 vdev = soc->vdev_id_map[vdev_id]; 3319 3320 if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) { 3321 qdf_spin_unlock_bh(&soc->vdev_map_lock); 3322 return NULL; 3323 } 3324 qdf_spin_unlock_bh(&soc->vdev_map_lock); 3325 3326 return vdev; 3327 } 3328 3329 /** 3330 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id 3331 * @soc: core DP soc context 3332 * @pdev_id: pdev id from pdev object can be retrieved 3333 * 3334 * Return: struct dp_pdev*: Pointer to DP pdev object 3335 */ 3336 static inline struct dp_pdev * 3337 dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc, 3338 uint8_t pdev_id) 3339 { 3340 if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT)) 3341 return NULL; 3342 3343 return soc->pdev_list[pdev_id]; 3344 } 3345 3346 /* 3347 * dp_rx_tid_update_wifi3() – Update receive TID state 3348 * @peer: Datapath peer handle 3349 * @tid: TID 3350 * @ba_window_size: BlockAck window size 3351 * @start_seq: Starting sequence number 3352 * @bar_update: BAR update triggered 3353 * 3354 * Return: QDF_STATUS code 3355 */ 3356 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t 3357 ba_window_size, uint32_t start_seq, 3358 bool bar_update); 3359 3360 /** 3361 * dp_get_peer_mac_list(): function to get peer mac list of vdev 3362 * @soc: Datapath soc handle 3363 * @vdev_id: vdev id 3364 * @newmac: Table of the clients mac 3365 * @mac_cnt: No. 
of MACs required
 * @limit: Limit the number of clients
 *
 * Return: number of clients
 */
uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
			      u_int16_t mac_cnt, bool limit);

/**
 * dp_update_num_mac_rings_for_dbs() - Update the number of MAC rings based
 *				       on the DBS check
 * @soc: DP SoC context
 * @max_mac_rings: Pointer to the number of MAC rings
 *
 * Return: None
 */
void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
				     int *max_mac_rings);

#if defined(WLAN_SUPPORT_RX_FISA)
void dp_rx_dump_fisa_table(struct dp_soc *soc);

/**
 * dp_print_fisa_stats() - Print FISA stats
 * @soc: DP soc handle
 *
 * Return: None
 */
void dp_print_fisa_stats(struct dp_soc *soc);

/**
 * dp_rx_fst_update_cmem_params() - Update CMEM FST params
 * @soc: DP SoC context
 * @num_entries: Number of flow search entries
 * @cmem_ba_lo: CMEM base address low
 * @cmem_ba_hi: CMEM base address high
 *
 * Return: None
 */
void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
				  uint32_t cmem_ba_lo, uint32_t cmem_ba_hi);

void
dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended);
#else
static inline void
dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries,
			     uint32_t cmem_ba_lo, uint32_t cmem_ba_hi)
{
}

static inline void
dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended)
{
}

static inline void dp_print_fisa_stats(struct dp_soc *soc)
{
}
#endif /* WLAN_SUPPORT_RX_FISA */

#ifdef MAX_ALLOC_PAGE_SIZE
/**
 * dp_set_max_page_size() - Set the max page size for hw link desc.
 * For MCL the page size is set to the OS defined value and for WIN
 * the page size is set to the max_alloc_size cfg ini param.
 * This ensures that WIN gets contiguous memory allocations as per
 * requirement.
 * @pages: link desc page handle
 * @max_alloc_size: max_alloc_size
 *
 * Return: None
 */
static inline
void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
			  uint32_t max_alloc_size)
{
	pages->page_size = qdf_page_size;
}

#else
static inline
void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
			  uint32_t max_alloc_size)
{
	pages->page_size = max_alloc_size;
}
#endif /* MAX_ALLOC_PAGE_SIZE */

/**
 * dp_history_get_next_index() - get the next entry to record an entry
 *				 in the history.
 * @curr_idx: Current index where the last entry is written.
 * @max_entries: Max number of entries in the history
 *
 * This function assumes that the max number of entries is a power of 2.
 *
 * Return: The index where the next entry is to be written.
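 *	   For example (illustrative): with max_entries = 64 and the atomic
 *	   index previously at 63, qdf_atomic_inc_return() yields 64 and the
 *	   mask gives 64 & 63 == 0, so recording wraps back to slot 0.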
3466 */ 3467 static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx, 3468 uint32_t max_entries) 3469 { 3470 uint32_t idx = qdf_atomic_inc_return(curr_idx); 3471 3472 return idx & (max_entries - 1); 3473 } 3474 3475 /** 3476 * dp_rx_skip_tlvs() - Skip TLVs len + L2 hdr_offset, save in nbuf->cb 3477 * @nbuf: nbuf cb to be updated 3478 * @l2_hdr_offset: l2_hdr_offset 3479 * 3480 * Return: None 3481 */ 3482 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding); 3483 3484 #ifndef FEATURE_WDS 3485 static inline void 3486 dp_hmwds_ast_add_notify(struct dp_peer *peer, 3487 uint8_t *mac_addr, 3488 enum cdp_txrx_ast_entry_type type, 3489 QDF_STATUS err, 3490 bool is_peer_map) 3491 { 3492 } 3493 #endif 3494 3495 #ifdef HTT_STATS_DEBUGFS_SUPPORT 3496 /* dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 3497 * debugfs for HTT stats 3498 * @pdev: dp pdev handle 3499 * 3500 * Return: QDF_STATUS 3501 */ 3502 QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev); 3503 3504 /* dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 3505 * HTT stats 3506 * @pdev: dp pdev handle 3507 * 3508 * Return: none 3509 */ 3510 void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev); 3511 #else 3512 3513 /* dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 3514 * debugfs for HTT stats 3515 * @pdev: dp pdev handle 3516 * 3517 * Return: QDF_STATUS 3518 */ 3519 static inline QDF_STATUS 3520 dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev) 3521 { 3522 return QDF_STATUS_SUCCESS; 3523 } 3524 3525 /* dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 3526 * HTT stats 3527 * @pdev: dp pdev handle 3528 * 3529 * Return: none 3530 */ 3531 static inline void 3532 dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev) 3533 { 3534 } 3535 #endif /* HTT_STATS_DEBUGFS_SUPPORT */ 3536 3537 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR 3538 /** 3539 * dp_soc_swlm_attach() - attach the software latency manager resources 3540 * @soc: Datapath global soc handle 3541 * 3542 * Returns: QDF_STATUS 3543 */ 3544 static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc) 3545 { 3546 return QDF_STATUS_SUCCESS; 3547 } 3548 3549 /** 3550 * dp_soc_swlm_detach() - detach the software latency manager resources 3551 * @soc: Datapath global soc handle 3552 * 3553 * Returns: QDF_STATUS 3554 */ 3555 static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc) 3556 { 3557 return QDF_STATUS_SUCCESS; 3558 } 3559 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */ 3560 3561 /** 3562 * dp_get_peer_id(): function to get peer id by mac 3563 * @soc: Datapath soc handle 3564 * @vdev_id: vdev id 3565 * @mac: Peer mac address 3566 * 3567 * return: valid peer id on success 3568 * HTT_INVALID_PEER on failure 3569 */ 3570 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac); 3571 3572 #ifdef QCA_SUPPORT_WDS_EXTENDED 3573 /** 3574 * dp_wds_ext_set_peer_state(): function to set peer state 3575 * @soc: Datapath soc handle 3576 * @vdev_id: vdev id 3577 * @mac: Peer mac address 3578 * @rx: rx function pointer 3579 * 3580 * return: QDF_STATUS_SUCCESS on success 3581 * QDF_STATUS_E_INVAL if peer is not found 3582 * QDF_STATUS_E_ALREADY if rx is already set/unset 3583 */ 3584 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc, 3585 uint8_t vdev_id, 3586 uint8_t *mac, 3587 ol_txrx_rx_fp rx, 3588 ol_osif_peer_handle osif_peer); 3589 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 3590 3591 #ifdef DP_MEM_PRE_ALLOC 3592 3593 
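/*
 * Illustrative usage of the pre-allocation aware wrappers declared below
 * (the context-type value and the surrounding error handling are examples
 * only, not a definitive caller):
 *
 *	void *ctxt;
 *
 *	ctxt = dp_context_alloc_mem(soc, ctxt_type, ctxt_size);
 *	if (!ctxt)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	dp_context_free_mem(soc, ctxt_type, ctxt);
 *
 * When DP_MEM_PRE_ALLOC is not defined, the same calls fall back to plain
 * qdf_mem_malloc()/qdf_mem_free() (see the #else variants further below),
 * so callers do not need to special-case the pre-allocated path.
 */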
/** 3594 * dp_context_alloc_mem() - allocate memory for DP context 3595 * @soc: datapath soc handle 3596 * @ctxt_type: DP context type 3597 * @ctxt_size: DP context size 3598 * 3599 * Return: DP context address 3600 */ 3601 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 3602 size_t ctxt_size); 3603 3604 /** 3605 * dp_context_free_mem() - Free memory of DP context 3606 * @soc: datapath soc handle 3607 * @ctxt_type: DP context type 3608 * @vaddr: Address of context memory 3609 * 3610 * Return: None 3611 */ 3612 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 3613 void *vaddr); 3614 3615 /** 3616 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages 3617 * @soc: datapath soc handle 3618 * @desc_type: memory request source type 3619 * @pages: multi page information storage 3620 * @element_size: each element size 3621 * @element_num: total number of elements should be allocated 3622 * @memctxt: memory context 3623 * @cacheable: coherent memory or cacheable memory 3624 * 3625 * This function is a wrapper for memory allocation over multiple 3626 * pages, if dp prealloc method is registered, then will try prealloc 3627 * firstly. if prealloc failed, fall back to regular way over 3628 * qdf_mem_multi_pages_alloc(). 3629 * 3630 * Return: None 3631 */ 3632 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 3633 enum dp_desc_type desc_type, 3634 struct qdf_mem_multi_page_t *pages, 3635 size_t element_size, 3636 uint32_t element_num, 3637 qdf_dma_context_t memctxt, 3638 bool cacheable); 3639 3640 /** 3641 * dp_desc_multi_pages_mem_free() - free multiple pages memory 3642 * @soc: datapath soc handle 3643 * @desc_type: memory request source type 3644 * @pages: multi page information storage 3645 * @memctxt: memory context 3646 * @cacheable: coherent memory or cacheable memory 3647 * 3648 * This function is a wrapper for multiple pages memory free, 3649 * if memory is got from prealloc pool, put it back to pool. 3650 * otherwise free by qdf_mem_multi_pages_free(). 3651 * 3652 * Return: None 3653 */ 3654 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 3655 enum dp_desc_type desc_type, 3656 struct qdf_mem_multi_page_t *pages, 3657 qdf_dma_context_t memctxt, 3658 bool cacheable); 3659 3660 #else 3661 static inline 3662 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 3663 size_t ctxt_size) 3664 { 3665 return qdf_mem_malloc(ctxt_size); 3666 } 3667 3668 static inline 3669 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 3670 void *vaddr) 3671 { 3672 qdf_mem_free(vaddr); 3673 } 3674 3675 static inline 3676 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 3677 enum dp_desc_type desc_type, 3678 struct qdf_mem_multi_page_t *pages, 3679 size_t element_size, 3680 uint32_t element_num, 3681 qdf_dma_context_t memctxt, 3682 bool cacheable) 3683 { 3684 qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size, 3685 element_num, memctxt, cacheable); 3686 } 3687 3688 static inline 3689 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 3690 enum dp_desc_type desc_type, 3691 struct qdf_mem_multi_page_t *pages, 3692 qdf_dma_context_t memctxt, 3693 bool cacheable) 3694 { 3695 qdf_mem_multi_pages_free(soc->osdev, pages, 3696 memctxt, cacheable); 3697 } 3698 #endif 3699 3700 /** 3701 * struct dp_frag_history_opaque_atomic - Opaque struct for adding a fragmented 3702 * history. 
3703 * @index: atomic index 3704 * @num_entries_per_slot: Number of entries per slot 3705 * @allocated: is allocated or not 3706 * @entry: pointers to array of records 3707 */ 3708 struct dp_frag_history_opaque_atomic { 3709 qdf_atomic_t index; 3710 uint16_t num_entries_per_slot; 3711 uint16_t allocated; 3712 void *entry[0]; 3713 }; 3714 3715 static inline QDF_STATUS 3716 dp_soc_frag_history_attach(struct dp_soc *soc, void *history_hdl, 3717 uint32_t max_slots, uint32_t max_entries_per_slot, 3718 uint32_t entry_size, 3719 bool attempt_prealloc, enum dp_ctxt_type ctxt_type) 3720 { 3721 struct dp_frag_history_opaque_atomic *history = 3722 (struct dp_frag_history_opaque_atomic *)history_hdl; 3723 size_t alloc_size = max_entries_per_slot * entry_size; 3724 int i; 3725 3726 for (i = 0; i < max_slots; i++) { 3727 if (attempt_prealloc) 3728 history->entry[i] = dp_context_alloc_mem(soc, ctxt_type, 3729 alloc_size); 3730 else 3731 history->entry[i] = qdf_mem_malloc(alloc_size); 3732 3733 if (!history->entry[i]) 3734 goto exit; 3735 } 3736 3737 qdf_atomic_init(&history->index); 3738 history->allocated = 1; 3739 history->num_entries_per_slot = max_entries_per_slot; 3740 3741 return QDF_STATUS_SUCCESS; 3742 exit: 3743 for (i = i - 1; i >= 0; i--) { 3744 if (attempt_prealloc) 3745 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 3746 else 3747 qdf_mem_free(history->entry[i]); 3748 } 3749 3750 return QDF_STATUS_E_NOMEM; 3751 } 3752 3753 static inline 3754 void dp_soc_frag_history_detach(struct dp_soc *soc, 3755 void *history_hdl, uint32_t max_slots, 3756 bool attempt_prealloc, 3757 enum dp_ctxt_type ctxt_type) 3758 { 3759 struct dp_frag_history_opaque_atomic *history = 3760 (struct dp_frag_history_opaque_atomic *)history_hdl; 3761 int i; 3762 3763 for (i = 0; i < max_slots; i++) { 3764 if (attempt_prealloc) 3765 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 3766 else 3767 qdf_mem_free(history->entry[i]); 3768 } 3769 3770 history->allocated = 0; 3771 } 3772 3773 /** 3774 * dp_get_frag_hist_next_atomic_idx() - get the next entry index to record an 3775 * entry in a fragmented history with 3776 * index being atomic. 3777 * @curr_idx: address of the current index where the last entry was written 3778 * @next_idx: pointer to update the next index 3779 * @slot: pointer to update the history slot to be selected 3780 * @slot_shift: BITwise shift mask for slot (in index) 3781 * @max_entries_per_slot: Max number of entries in a slot of history 3782 * @max_entries: Total number of entries in the history (sum of all slots) 3783 * 3784 * This function assumes that the "max_entries_per_slot" and "max_entries" 3785 * are a power-of-2. 3786 * 3787 * Return: None 3788 */ 3789 static inline void 3790 dp_get_frag_hist_next_atomic_idx(qdf_atomic_t *curr_idx, uint32_t *next_idx, 3791 uint16_t *slot, uint32_t slot_shift, 3792 uint32_t max_entries_per_slot, 3793 uint32_t max_entries) 3794 { 3795 uint32_t idx; 3796 3797 idx = qdf_do_div_rem(qdf_atomic_inc_return(curr_idx), max_entries); 3798 3799 *slot = idx >> slot_shift; 3800 *next_idx = idx & (max_entries_per_slot - 1); 3801 } 3802 3803 #ifdef FEATURE_RUNTIME_PM 3804 /** 3805 * dp_runtime_get() - Get dp runtime refcount 3806 * @soc: Datapath soc handle 3807 * 3808 * Get dp runtime refcount by increment of an atomic variable, which can block 3809 * dp runtime resume to wait to flush pending tx by runtime suspend. 
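 *
 * Illustrative pairing (the caller is hypothetical): the tx path brackets
 * ring access with this refcount so that runtime suspend is held off while
 * work is in flight, e.g.
 *
 *	dp_runtime_get(soc);
 *	... enqueue to the hardware ring ...
 *	dp_runtime_put(soc);
 *
 * and a suspend path can check dp_runtime_get_refcount() and wait until it
 * returns to zero.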
3810 * 3811 * Return: Current refcount 3812 */ 3813 static inline int32_t dp_runtime_get(struct dp_soc *soc) 3814 { 3815 return qdf_atomic_inc_return(&soc->dp_runtime_refcount); 3816 } 3817 3818 /** 3819 * dp_runtime_put() - Return dp runtime refcount 3820 * @soc: Datapath soc handle 3821 * 3822 * Return dp runtime refcount by decrement of an atomic variable, allow dp 3823 * runtime resume finish. 3824 * 3825 * Return: Current refcount 3826 */ 3827 static inline int32_t dp_runtime_put(struct dp_soc *soc) 3828 { 3829 return qdf_atomic_dec_return(&soc->dp_runtime_refcount); 3830 } 3831 3832 /** 3833 * dp_runtime_get_refcount() - Get dp runtime refcount 3834 * @soc: Datapath soc handle 3835 * 3836 * Get dp runtime refcount by returning an atomic variable 3837 * 3838 * Return: Current refcount 3839 */ 3840 static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc) 3841 { 3842 return qdf_atomic_read(&soc->dp_runtime_refcount); 3843 } 3844 3845 /** 3846 * dp_runtime_init() - Init DP related runtime PM clients and runtime refcount 3847 * @soc: Datapath soc handle 3848 * 3849 * Return: QDF_STATUS 3850 */ 3851 static inline void dp_runtime_init(struct dp_soc *soc) 3852 { 3853 hif_rtpm_register(HIF_RTPM_ID_DP, NULL); 3854 hif_rtpm_register(HIF_RTPM_ID_DP_RING_STATS, NULL); 3855 qdf_atomic_init(&soc->dp_runtime_refcount); 3856 } 3857 3858 /** 3859 * dp_runtime_deinit() - Deinit DP related runtime PM clients 3860 * 3861 * Return: None 3862 */ 3863 static inline void dp_runtime_deinit(void) 3864 { 3865 hif_rtpm_deregister(HIF_RTPM_ID_DP); 3866 hif_rtpm_deregister(HIF_RTPM_ID_DP_RING_STATS); 3867 } 3868 3869 /** 3870 * dp_runtime_pm_mark_last_busy() - Mark last busy when rx path in use 3871 * @soc: Datapath soc handle 3872 * 3873 * Return: None 3874 */ 3875 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 3876 { 3877 soc->rx_last_busy = qdf_get_log_timestamp_usecs(); 3878 3879 hif_rtpm_mark_last_busy(HIF_RTPM_ID_DP); 3880 } 3881 #else 3882 static inline int32_t dp_runtime_get(struct dp_soc *soc) 3883 { 3884 return 0; 3885 } 3886 3887 static inline int32_t dp_runtime_put(struct dp_soc *soc) 3888 { 3889 return 0; 3890 } 3891 3892 static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc) 3893 { 3894 return QDF_STATUS_SUCCESS; 3895 } 3896 3897 static inline void dp_runtime_deinit(void) 3898 { 3899 } 3900 3901 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 3902 { 3903 } 3904 #endif 3905 3906 static inline enum QDF_GLOBAL_MODE dp_soc_get_con_mode(struct dp_soc *soc) 3907 { 3908 if (soc->cdp_soc.ol_ops->get_con_mode) 3909 return soc->cdp_soc.ol_ops->get_con_mode(); 3910 3911 return QDF_GLOBAL_MAX_MODE; 3912 } 3913 3914 /* 3915 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats 3916 * processing 3917 * @pdev: Datapath PDEV handle 3918 * 3919 */ 3920 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev); 3921 3922 /* 3923 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats 3924 * processing 3925 * @pdev: Datapath PDEV handle 3926 * 3927 * Return: QDF_STATUS_SUCCESS: Success 3928 * QDF_STATUS_E_NOMEM: Error 3929 */ 3930 3931 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev); 3932 3933 /** 3934 * dp_peer_flush_frags() - Flush all fragments for a particular 3935 * peer 3936 * @soc_hdl - data path soc handle 3937 * @vdev_id - vdev id 3938 * @peer_addr - peer mac address 3939 * 3940 * Return: None 3941 */ 3942 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3943 uint8_t *peer_mac); 3944 3945 /** 3946 
/**
 * dp_soc_reset_mon_intr_mask() - reset mon intr mask
 * @soc: pointer to dp_soc handle
 *
 * Return: None
 */
void dp_soc_reset_mon_intr_mask(struct dp_soc *soc);

/**
 * dp_txrx_get_soc_stats() - retrieve soc stats into cdp_soc_stats
 * @soc_hdl: soc handle
 * @soc_stats: buffer to hold the values
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl,
				 struct cdp_soc_stats *soc_stats);

/**
 * dp_txrx_get_peer_delay_stats() - get peer delay stats per TID
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 * @delay_stats: pointer to delay stats array
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			     uint8_t *peer_mac,
			     struct cdp_delay_tid_stats *delay_stats);

/**
 * dp_txrx_get_peer_jitter_stats() - get peer jitter stats per TID
 * @soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 * @tid_stats: pointer to jitter stats array
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_txrx_get_peer_jitter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			      uint8_t vdev_id, uint8_t *peer_mac,
			      struct cdp_peer_tid_stats *tid_stats);

/**
 * dp_peer_get_tx_capture_stats() - get peer Tx capture stats
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 * @stats: pointer to peer tx capture stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_peer_get_tx_capture_stats(struct cdp_soc_t *soc_hdl,
			     uint8_t vdev_id, uint8_t *peer_mac,
			     struct cdp_peer_tx_capture_stats *stats);

/**
 * dp_pdev_get_tx_capture_stats() - get pdev Tx capture stats
 * @soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @stats: pointer to pdev tx capture stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_pdev_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			     struct cdp_pdev_tx_capture_stats *stats);

#ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_is_vdev_tx_delay_stats_enabled() - Check if tx delay stats are enabled
 *					 for a vdev
 * @vdev: dp vdev
 *
 * Return: true if tx delay stats are enabled for the vdev, else false
 */
static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev)
{
	return vdev->hw_tx_delay_stats_enabled;
}

/**
 * dp_pdev_print_tx_delay_stats() - Print vdev tx delay stats for the pdev
 * @soc: dp soc handle
 *
 * Return: None
 */
void dp_pdev_print_tx_delay_stats(struct dp_soc *soc);

/**
 * dp_pdev_clear_tx_delay_stats() - clear tx delay stats
 * @soc: soc handle
 *
 * Return: None
 */
void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc);
#else
static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev)
{
	return 0;
}

static inline void dp_pdev_print_tx_delay_stats(struct dp_soc *soc)
{
}

static inline void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc)
{
}
#endif
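
/*
 * Illustrative sketch (not part of this header): callers in the TX
 * completion path are expected to gate the relatively expensive HW tx
 * delay accounting on the per-vdev flag read by
 * dp_is_vdev_tx_delay_stats_enabled(). The helper name
 * dp_example_tx_compl_update() and the accounting step are hypothetical.
 *
 *	static void dp_example_tx_compl_update(struct dp_vdev *vdev)
 *	{
 *		if (!dp_is_vdev_tx_delay_stats_enabled(vdev))
 *			return;
 *
 *		// ... compute and accumulate the HW tx delay here ...
 *	}
 */
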
static inline void
dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config *lro_hash)
{
	qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv4,
			     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv6,
			     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));
}

#ifdef WLAN_TELEMETRY_STATS_SUPPORT
/**
 * dp_get_pdev_telemetry_stats() - API to get pdev telemetry stats
 * @soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @stats: pointer to pdev telemetry stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_get_pdev_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			    struct cdp_pdev_telemetry_stats *stats);

/**
 * dp_get_peer_telemetry_stats() - API to get peer telemetry stats
 * @soc_hdl: soc handle
 * @addr: peer mac
 * @stats: pointer to peer telemetry stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_get_peer_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t *addr,
			    struct cdp_peer_telemetry_stats *stats);
#endif /* WLAN_TELEMETRY_STATS_SUPPORT */

#ifdef CONNECTIVITY_PKTLOG
/**
 * dp_tx_send_pktlog() - send tx packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @tx_desc: TX software descriptor
 * @nbuf: nbuf
 * @status: status of tx packet
 *
 * This function is used to send a tx packet for logging.
 *
 * Return: None
 */
static inline
void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_tx_packetdump_cb;

	if (qdf_unlikely(packetdump_cb) &&
	    dp_tx_frm_std == tx_desc->frm_type) {
		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
			      QDF_NBUF_CB_TX_VDEV_CTX(nbuf),
			      nbuf, status, QDF_TX_DATA_PKT);
	}
}

/**
 * dp_rx_send_pktlog() - send rx packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @nbuf: nbuf
 * @status: status of rx packet
 *
 * This function is used to send an rx packet for logging.
 *
 * Return: None
 */
static inline
void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb;

	if (qdf_unlikely(packetdump_cb)) {
		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
			      QDF_NBUF_CB_RX_VDEV_ID(nbuf),
			      nbuf, status, QDF_RX_DATA_PKT);
	}
}
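
/*
 * Illustrative sketch (not part of this header): once a packetdump callback
 * has been registered on the pdev (pdev->dp_rx_packetdump_cb), the RX path
 * can log a delivered frame with dp_rx_send_pktlog(). The helper name
 * dp_example_rx_deliver() is hypothetical, and QDF_TX_RX_STATUS_OK is used
 * here only as an example of a qdf_dp_tx_rx_status value.
 *
 *	static void dp_example_rx_deliver(struct dp_soc *soc,
 *					  struct dp_pdev *pdev,
 *					  qdf_nbuf_t nbuf)
 *	{
 *		// No-op when CONNECTIVITY_PKTLOG is disabled or no callback
 *		// is registered.
 *		dp_rx_send_pktlog(soc, pdev, nbuf, QDF_TX_RX_STATUS_OK);
 *
 *		// ... hand the nbuf up the stack here ...
 *	}
 */
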
4172 * 4173 * Return: None 4174 * 4175 */ 4176 static inline 4177 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 4178 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 4179 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status, 4180 bool set_pktlen) 4181 { 4182 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb; 4183 qdf_size_t skip_size; 4184 uint16_t msdu_len, nbuf_len; 4185 uint8_t *rx_tlv_hdr; 4186 struct hal_rx_msdu_metadata msdu_metadata; 4187 4188 if (qdf_unlikely(packetdump_cb)) { 4189 rx_tlv_hdr = qdf_nbuf_data(nbuf); 4190 nbuf_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, 4191 rx_tlv_hdr); 4192 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, 4193 &msdu_metadata); 4194 4195 if (mpdu_desc_info->bar_frame || 4196 (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_FRAGMENT)) 4197 skip_size = soc->rx_pkt_tlv_size; 4198 else 4199 skip_size = soc->rx_pkt_tlv_size + 4200 msdu_metadata.l3_hdr_pad; 4201 4202 if (set_pktlen) { 4203 msdu_len = nbuf_len + skip_size; 4204 qdf_nbuf_set_pktlen(nbuf, qdf_min(msdu_len, 4205 (uint16_t)RX_DATA_BUFFER_SIZE)); 4206 } 4207 4208 qdf_nbuf_pull_head(nbuf, skip_size); 4209 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 4210 QDF_NBUF_CB_RX_VDEV_ID(nbuf), 4211 nbuf, status, QDF_RX_DATA_PKT); 4212 qdf_nbuf_push_head(nbuf, skip_size); 4213 } 4214 } 4215 4216 #else 4217 static inline 4218 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 4219 struct dp_tx_desc_s *tx_desc, 4220 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 4221 { 4222 } 4223 4224 static inline 4225 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 4226 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 4227 { 4228 } 4229 4230 static inline 4231 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 4232 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 4233 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status, 4234 bool set_pktlen) 4235 { 4236 } 4237 #endif 4238 4239 /* 4240 * dp_pdev_update_fast_rx_flag() - Update Fast rx flag for a PDEV 4241 * @soc : Data path soc handle 4242 * @pdev : PDEV handle 4243 * 4244 * return: None 4245 */ 4246 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev); 4247 #endif /* #ifndef _DP_INTERNAL_H_ */ 4248