/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_INTERNAL_H_
#define _DP_INTERNAL_H_

#include "dp_types.h"

#define RX_BUFFER_SIZE_PKTLOG_LITE 1024

#define DP_PEER_WDS_COUNT_INVALID UINT_MAX

#define DP_BLOCKMEM_SIZE 4096

/* Alignment for consistent memory for DP rings */
#define DP_RING_BASE_ALIGN 32

#define DP_RSSI_INVAL 0x80
#define DP_RSSI_AVG_WEIGHT 2

/*
 * Formula to derive avg_rssi is taken from wifi2.o firmware
 */
#define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \
	(((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \
	+ ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT)))
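/*
 * Illustrative sketch (not part of the driver API): DP_GET_AVG_RSSI()
 * keeps a simple exponentially weighted moving average with
 * DP_RSSI_AVG_WEIGHT = 2, i.e. new_avg = avg - avg/4 + last/4 on 8-bit
 * samples. The helper below only demonstrates the arithmetic; its name
 * is hypothetical and the block is not compiled.
 */
#if 0
static inline uint8_t dp_example_avg_rssi(uint8_t avg_rssi, uint8_t last_rssi)
{
	/* e.g. running average 80 and new sample 96: 80 - 20 + 24 = 84 */
	return DP_GET_AVG_RSSI(avg_rssi, last_rssi);
}
#endif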
/* Macro For NYSM value received in VHT TLV */
#define VHT_SGI_NYSM 3

#define INVALID_WBM_RING_NUM 0xF

/* struct htt_dbgfs_cfg - structure to maintain required htt data
 * @msg_word: htt msg sent to upper layer
 * @m: qdf debugfs file pointer
 */
struct htt_dbgfs_cfg {
	uint32_t *msg_word;
	qdf_debugfs_file_t m;
};

/* Cookie MSB bits assigned for different use cases.
 * Note: The last 3 bits are reserved for pdev_id and must not be used.
 * Revisit this assignment if the number of pdevs ever exceeds 3.
 */
/* Reserve for default case */
#define DBG_STATS_COOKIE_DEFAULT 0x0

/* Reserve for DP Stats: 3rd bit */
#define DBG_STATS_COOKIE_DP_STATS BIT(3)

/* Reserve for HTT Stats debugfs support: 4th bit */
#define DBG_STATS_COOKIE_HTT_DBGFS BIT(4)

/* Reserve for sysfs stats debugfs support: 5th bit */
#define DBG_SYSFS_STATS_COOKIE BIT(5)

/* Bitmap of HTT PPDU TLV types for Default mode */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/* PPDU STATS CFG */
#define DP_PPDU_STATS_CFG_ALL 0xFFFF

/* PPDU stats mask sent to FW to enable enhanced stats */
#define DP_PPDU_STATS_CFG_ENH_STATS \
	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support debug sniffer feature */
#define DP_PPDU_STATS_CFG_SNIFFER \
	((HTT_PPDU_DEFAULT_TLV_BITMAP) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \
	(1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR feature */
#define DP_PPDU_STATS_CFG_BPR \
	((1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \
	(1 << HTT_PPDU_STATS_USERS_INFO_TLV))

/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
				   DP_PPDU_STATS_CFG_ENH_STATS)
/* PPDU stats mask sent to FW to support BPR and pktlog stats feature */
#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)

/* Bitmap of HTT PPDU delayed ba TLV types for Default mode */
#define HTT_PPDU_DELAYED_BA_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV))

/* Bitmap of HTT PPDU TLV types for Delayed BA */
#define HTT_PPDU_STATUS_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/* Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV))

/* Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV))
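/*
 * Illustrative sketch (not part of the driver API): the masks above are
 * plain bitmaps indexed by HTT_PPDU_STATS_*_TLV type, so whether a given
 * TLV type is enabled in a mask can be tested with a shift and a bitwise
 * AND before parsing it. The helper name is hypothetical and the block
 * is not compiled.
 */
#if 0
static inline bool dp_example_ppdu_tlv_enabled(uint32_t tlv_bitmap,
					       uint32_t tlv_type)
{
	return !!(tlv_bitmap & (1 << tlv_type));
}

/*
 * e.g. dp_example_ppdu_tlv_enabled(DP_PPDU_STATS_CFG_ENH_STATS,
 * HTT_PPDU_STATS_USR_RATE_TLV) evaluates to true, while the same check
 * against DP_PPDU_STATS_CFG_BPR evaluates to false.
 */
#endif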

static const enum cdp_packet_type hal_2_dp_pkt_type_map[HAL_DOT11_MAX] = {
	[HAL_DOT11A] = DOT11_A,
	[HAL_DOT11B] = DOT11_B,
	[HAL_DOT11N_MM] = DOT11_N,
	[HAL_DOT11AC] = DOT11_AC,
	[HAL_DOT11AX] = DOT11_AX,
	[HAL_DOT11BA] = DOT11_MAX,
#ifdef WLAN_FEATURE_11BE
	[HAL_DOT11BE] = DOT11_BE,
#else
	[HAL_DOT11BE] = DOT11_MAX,
#endif
	[HAL_DOT11AZ] = DOT11_MAX,
	[HAL_DOT11N_GF] = DOT11_MAX,
};

#ifdef WLAN_FEATURE_11BE
/**
 * dp_get_mcs_array_index_by_pkt_type_mcs() - get the destination mcs index
 *		in the mcs array
 * @pkt_type: host SW pkt type
 * @mcs: mcs value for TX/RX rate
 *
 * Return: succeeded - valid index in mcs array
 *	   fail - MCS_INVALID_ARRAY_INDEX
 */
static inline uint8_t
dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs)
{
	uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX;

	switch (pkt_type) {
	case DOT11_A:
		dst_mcs_idx =
			mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_B:
		dst_mcs_idx =
			mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_N:
		dst_mcs_idx =
			mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AC:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AX:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_BE:
		dst_mcs_idx =
			mcs >= MAX_MCS_11BE ? (MAX_MCS - 1) : mcs;
		break;
	default:
		break;
	}

	return dst_mcs_idx;
}
#else
static inline uint8_t
dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs)
{
	uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX;

	switch (pkt_type) {
	case DOT11_A:
		dst_mcs_idx =
			mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_B:
		dst_mcs_idx =
			mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_N:
		dst_mcs_idx =
			mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AC:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs;
		break;
	case DOT11_AX:
		dst_mcs_idx =
			mcs >= MAX_MCS_11AX ?
(MAX_MCS - 1) : mcs; 249 break; 250 default: 251 break; 252 } 253 254 return dst_mcs_idx; 255 } 256 #endif 257 258 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc); 259 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc); 260 261 /* 262 * dp_rx_err_match_dhost() - function to check whether dest-mac is correct 263 * @eh: Ethernet header of incoming packet 264 * @vdev: dp_vdev object of the VAP on which this data packet is received 265 * 266 * Return: 1 if the destination mac is correct, 267 * 0 if this frame is not correctly destined to this VAP/MLD 268 */ 269 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev); 270 271 #ifdef MONITOR_MODULARIZED_ENABLE 272 static inline bool dp_monitor_modularized_enable(void) 273 { 274 return TRUE; 275 } 276 277 static inline QDF_STATUS 278 dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; } 279 280 static inline QDF_STATUS 281 dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; } 282 #else 283 static inline bool dp_monitor_modularized_enable(void) 284 { 285 return FALSE; 286 } 287 288 static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc) 289 { 290 return dp_mon_soc_attach(soc); 291 } 292 293 static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc) 294 { 295 return dp_mon_soc_detach(soc); 296 } 297 #endif 298 299 #ifndef WIFI_MONITOR_SUPPORT 300 #define MON_BUF_MIN_ENTRIES 64 301 302 static inline QDF_STATUS dp_monitor_pdev_attach(struct dp_pdev *pdev) 303 { 304 return QDF_STATUS_SUCCESS; 305 } 306 307 static inline QDF_STATUS dp_monitor_pdev_detach(struct dp_pdev *pdev) 308 { 309 return QDF_STATUS_SUCCESS; 310 } 311 312 static inline QDF_STATUS dp_monitor_vdev_attach(struct dp_vdev *vdev) 313 { 314 return QDF_STATUS_E_FAILURE; 315 } 316 317 static inline QDF_STATUS dp_monitor_vdev_detach(struct dp_vdev *vdev) 318 { 319 return QDF_STATUS_E_FAILURE; 320 } 321 322 static inline QDF_STATUS dp_monitor_peer_attach(struct dp_soc *soc, 323 struct dp_peer *peer) 324 { 325 return QDF_STATUS_SUCCESS; 326 } 327 328 static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc, 329 struct dp_peer *peer) 330 { 331 return QDF_STATUS_E_FAILURE; 332 } 333 334 static inline struct cdp_peer_rate_stats_ctx* 335 dp_monitor_peer_get_peerstats_ctx(struct dp_soc *soc, struct dp_peer *peer) 336 { 337 return NULL; 338 } 339 340 static inline 341 void dp_monitor_peer_reset_stats(struct dp_soc *soc, struct dp_peer *peer) 342 { 343 } 344 345 static inline 346 void dp_monitor_peer_get_stats(struct dp_soc *soc, struct dp_peer *peer, 347 void *arg, enum cdp_stat_update_type type) 348 { 349 } 350 351 static inline 352 void dp_monitor_invalid_peer_update_pdev_stats(struct dp_soc *soc, 353 struct dp_pdev *pdev) 354 { 355 } 356 357 static inline 358 QDF_STATUS dp_monitor_peer_get_stats_param(struct dp_soc *soc, 359 struct dp_peer *peer, 360 enum cdp_peer_stats_type type, 361 cdp_peer_stats_param_t *buf) 362 { 363 return QDF_STATUS_E_FAILURE; 364 } 365 366 static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev) 367 { 368 return QDF_STATUS_SUCCESS; 369 } 370 371 static inline QDF_STATUS dp_monitor_pdev_deinit(struct dp_pdev *pdev) 372 { 373 return QDF_STATUS_SUCCESS; 374 } 375 376 static inline QDF_STATUS dp_monitor_soc_cfg_init(struct dp_soc *soc) 377 { 378 return QDF_STATUS_SUCCESS; 379 } 380 381 static inline QDF_STATUS dp_monitor_config_debug_sniffer(struct dp_pdev *pdev, 382 int val) 383 { 384 return QDF_STATUS_E_FAILURE; 385 } 386 387 static inline void 
dp_monitor_flush_rings(struct dp_soc *soc) 388 { 389 } 390 391 static inline QDF_STATUS dp_monitor_htt_srng_setup(struct dp_soc *soc, 392 struct dp_pdev *pdev, 393 int mac_id, 394 int mac_for_pdev) 395 { 396 return QDF_STATUS_SUCCESS; 397 } 398 399 static inline void dp_monitor_service_mon_rings(struct dp_soc *soc, 400 uint32_t quota) 401 { 402 } 403 404 static inline 405 uint32_t dp_monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx, 406 uint32_t mac_id, uint32_t quota) 407 { 408 return 0; 409 } 410 411 static inline 412 uint32_t dp_monitor_drop_packets_for_mac(struct dp_pdev *pdev, 413 uint32_t mac_id, uint32_t quota) 414 { 415 return 0; 416 } 417 418 static inline void dp_monitor_peer_tx_init(struct dp_pdev *pdev, 419 struct dp_peer *peer) 420 { 421 } 422 423 static inline void dp_monitor_peer_tx_cleanup(struct dp_vdev *vdev, 424 struct dp_peer *peer) 425 { 426 } 427 428 static inline 429 void dp_monitor_peer_tid_peer_id_update(struct dp_soc *soc, 430 struct dp_peer *peer, 431 uint16_t peer_id) 432 { 433 } 434 435 static inline void dp_monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev) 436 { 437 } 438 439 static inline void dp_monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev) 440 { 441 } 442 443 static inline 444 QDF_STATUS dp_monitor_tx_capture_debugfs_init(struct dp_pdev *pdev) 445 { 446 return QDF_STATUS_SUCCESS; 447 } 448 449 static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev, 450 struct dp_peer *peer) 451 { 452 } 453 454 static inline 455 QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc, 456 struct dp_tx_desc_s *desc, 457 struct hal_tx_completion_status *ts, 458 uint16_t peer_id) 459 { 460 return QDF_STATUS_E_FAILURE; 461 } 462 463 static inline 464 QDF_STATUS monitor_update_msdu_to_list(struct dp_soc *soc, 465 struct dp_pdev *pdev, 466 struct dp_peer *peer, 467 struct hal_tx_completion_status *ts, 468 qdf_nbuf_t netbuf) 469 { 470 return QDF_STATUS_E_FAILURE; 471 } 472 473 static inline bool dp_monitor_ppdu_stats_ind_handler(struct htt_soc *soc, 474 uint32_t *msg_word, 475 qdf_nbuf_t htt_t2h_msg) 476 { 477 return true; 478 } 479 480 static inline QDF_STATUS dp_monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev) 481 { 482 return QDF_STATUS_SUCCESS; 483 } 484 485 static inline void dp_monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev) 486 { 487 } 488 489 static inline void dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev) 490 { 491 } 492 493 static inline QDF_STATUS dp_monitor_config_enh_tx_capture(struct dp_pdev *pdev, 494 uint32_t val) 495 { 496 return QDF_STATUS_E_INVAL; 497 } 498 499 static inline QDF_STATUS dp_monitor_tx_peer_filter(struct dp_pdev *pdev, 500 struct dp_peer *peer, 501 uint8_t is_tx_pkt_cap_enable, 502 uint8_t *peer_mac) 503 { 504 return QDF_STATUS_E_INVAL; 505 } 506 507 static inline QDF_STATUS dp_monitor_config_enh_rx_capture(struct dp_pdev *pdev, 508 uint32_t val) 509 { 510 return QDF_STATUS_E_INVAL; 511 } 512 513 static inline 514 QDF_STATUS dp_monitor_set_bpr_enable(struct dp_pdev *pdev, uint32_t val) 515 { 516 return QDF_STATUS_E_FAILURE; 517 } 518 519 static inline 520 int dp_monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val) 521 { 522 return 0; 523 } 524 525 static inline 526 void dp_monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value) 527 { 528 } 529 530 static inline 531 void dp_monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) 532 { 533 } 534 535 static inline 536 bool dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) 537 { 538 return false; 
539 } 540 541 static inline 542 bool dp_monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) 543 { 544 return false; 545 } 546 547 static inline 548 bool dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) 549 { 550 return false; 551 } 552 553 static inline 554 int dp_monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, 555 bool enable) 556 { 557 return 0; 558 } 559 560 static inline void dp_monitor_pktlogmod_exit(struct dp_pdev *pdev) 561 { 562 } 563 564 static inline 565 void dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev) 566 { 567 } 568 569 static inline 570 void dp_monitor_neighbour_peers_detach(struct dp_pdev *pdev) 571 { 572 } 573 574 static inline QDF_STATUS dp_monitor_filter_neighbour_peer(struct dp_pdev *pdev, 575 uint8_t *rx_pkt_hdr) 576 { 577 return QDF_STATUS_E_FAILURE; 578 } 579 580 static inline void dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev) 581 { 582 } 583 584 static inline 585 void dp_monitor_reap_timer_init(struct dp_soc *soc) 586 { 587 } 588 589 static inline 590 void dp_monitor_reap_timer_deinit(struct dp_soc *soc) 591 { 592 } 593 594 static inline 595 bool dp_monitor_reap_timer_start(struct dp_soc *soc, 596 enum cdp_mon_reap_source source) 597 { 598 return false; 599 } 600 601 static inline 602 bool dp_monitor_reap_timer_stop(struct dp_soc *soc, 603 enum cdp_mon_reap_source source) 604 { 605 return false; 606 } 607 608 static inline 609 void dp_monitor_vdev_timer_init(struct dp_soc *soc) 610 { 611 } 612 613 static inline 614 void dp_monitor_vdev_timer_deinit(struct dp_soc *soc) 615 { 616 } 617 618 static inline 619 void dp_monitor_vdev_timer_start(struct dp_soc *soc) 620 { 621 } 622 623 static inline 624 bool dp_monitor_vdev_timer_stop(struct dp_soc *soc) 625 { 626 return false; 627 } 628 629 static inline struct qdf_mem_multi_page_t* 630 dp_monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id) 631 { 632 return NULL; 633 } 634 635 static inline uint32_t * 636 dp_monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id) 637 { 638 return NULL; 639 } 640 641 static inline QDF_STATUS dp_monitor_drop_inv_peer_pkts(struct dp_vdev *vdev) 642 { 643 return QDF_STATUS_E_FAILURE; 644 } 645 646 static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev) 647 { 648 return false; 649 } 650 651 static inline void dp_monitor_vdev_register_osif(struct dp_vdev *vdev, 652 struct ol_txrx_ops *txrx_ops) 653 { 654 } 655 656 static inline bool dp_monitor_is_vdev_timer_running(struct dp_soc *soc) 657 { 658 return false; 659 } 660 661 static inline 662 void dp_monitor_pdev_set_mon_vdev(struct dp_vdev *vdev) 663 { 664 } 665 666 static inline void dp_monitor_vdev_delete(struct dp_soc *soc, 667 struct dp_vdev *vdev) 668 { 669 } 670 671 static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer) 672 { 673 } 674 675 static inline void dp_monitor_neighbour_peer_add_ast(struct dp_pdev *pdev, 676 struct dp_peer *ta_peer, 677 uint8_t *mac_addr, 678 qdf_nbuf_t nbuf, 679 uint32_t flags) 680 { 681 } 682 683 static inline void 684 dp_monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band chan_band) 685 { 686 } 687 688 static inline void 689 dp_monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq) 690 { 691 } 692 693 static inline void dp_monitor_set_chan_num(struct dp_pdev *pdev, int chan_num) 694 { 695 } 696 697 static inline bool dp_monitor_is_enable_mcopy_mode(struct dp_pdev *pdev) 698 { 699 return false; 700 } 701 702 static inline 703 void 
dp_monitor_neighbour_peer_list_remove(struct dp_pdev *pdev, 704 struct dp_vdev *vdev, 705 struct dp_neighbour_peer *peer) 706 { 707 } 708 709 static inline bool dp_monitor_is_chan_band_known(struct dp_pdev *pdev) 710 { 711 return false; 712 } 713 714 static inline enum reg_wifi_band 715 dp_monitor_get_chan_band(struct dp_pdev *pdev) 716 { 717 return 0; 718 } 719 720 static inline void dp_monitor_get_mpdu_status(struct dp_pdev *pdev, 721 struct dp_soc *soc, 722 uint8_t *rx_tlv_hdr) 723 { 724 } 725 726 static inline void dp_monitor_print_tx_stats(struct dp_pdev *pdev) 727 { 728 } 729 730 static inline 731 QDF_STATUS dp_monitor_mcopy_check_deliver(struct dp_pdev *pdev, 732 uint16_t peer_id, uint32_t ppdu_id, 733 uint8_t first_msdu) 734 { 735 return QDF_STATUS_SUCCESS; 736 } 737 738 static inline bool dp_monitor_is_enable_tx_sniffer(struct dp_pdev *pdev) 739 { 740 return false; 741 } 742 743 static inline struct dp_vdev* 744 dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev) 745 { 746 return NULL; 747 } 748 749 static inline QDF_STATUS dp_monitor_check_com_info_ppdu_id(struct dp_pdev *pdev, 750 void *rx_desc) 751 { 752 return QDF_STATUS_E_FAILURE; 753 } 754 755 static inline struct mon_rx_status* 756 dp_monitor_get_rx_status(struct dp_pdev *pdev) 757 { 758 return NULL; 759 } 760 761 static inline 762 void dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev *pdev, bool val) 763 { 764 } 765 766 static inline 767 void dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev *pdev, 768 bool val) 769 { 770 } 771 772 static inline QDF_STATUS 773 dp_monitor_peer_tx_capture_get_stats(struct dp_soc *soc, struct dp_peer *peer, 774 struct cdp_peer_tx_capture_stats *stats) 775 { 776 return QDF_STATUS_E_FAILURE; 777 } 778 779 static inline QDF_STATUS 780 dp_monitor_pdev_tx_capture_get_stats(struct dp_soc *soc, struct dp_pdev *pdev, 781 struct cdp_pdev_tx_capture_stats *stats) 782 { 783 return QDF_STATUS_E_FAILURE; 784 } 785 786 #ifdef DP_POWER_SAVE 787 static inline 788 void dp_monitor_pktlog_reap_pending_frames(struct dp_pdev *pdev) 789 { 790 } 791 792 static inline 793 void dp_monitor_pktlog_start_reap_timer(struct dp_pdev *pdev) 794 { 795 } 796 #endif 797 798 static inline bool dp_monitor_is_configured(struct dp_pdev *pdev) 799 { 800 return false; 801 } 802 803 static inline void 804 dp_mon_rx_hdr_length_set(struct dp_soc *soc, uint32_t *msg_word, 805 struct htt_rx_ring_tlv_filter *tlv_filter) 806 { 807 } 808 #endif 809 810 /** 811 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to 812 * dp soc handle 813 * @psoc: CDP psoc handle 814 * 815 * Return: struct dp_soc pointer 816 */ 817 static inline 818 struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc) 819 { 820 return (struct dp_soc *)psoc; 821 } 822 823 #define DP_MAX_TIMER_EXEC_TIME_TICKS \ 824 (QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20) 825 826 /** 827 * enum timer_yield_status - yield status code used in monitor mode timer. 
 * @DP_TIMER_NO_YIELD: do not yield
 * @DP_TIMER_WORK_DONE: yield because work is done
 * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted
 * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted
 */
enum timer_yield_status {
	DP_TIMER_NO_YIELD,
	DP_TIMER_WORK_DONE,
	DP_TIMER_WORK_EXHAUST,
	DP_TIMER_TIME_EXHAUST,
};

#if DP_PRINT_ENABLE
#include <stdarg.h>	/* va_list */
#include <qdf_types.h>	/* qdf_vprint */
#include <cdp_txrx_handle.h>

enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	DP_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	DP_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	DP_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	DP_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	DP_PRINT_LEVEL_INFO2,
};

#define dp_print(level, fmt, ...) do { \
	if (level <= g_txrx_print_level) \
		qdf_print(fmt, ## __VA_ARGS__); \
} while (0)

#define DP_PRINT(level, fmt, ...) do { \
	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#define DP_PRINT(level, fmt, ...)
#endif /* DP_PRINT_ENABLE */

#define DP_TRACE(LVL, fmt, args ...)                             \
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL,       \
		  fmt, ## args)

#ifdef WLAN_SYSFS_DP_STATS
void DP_PRINT_STATS(const char *fmt, ...);
#else /* WLAN_SYSFS_DP_STATS */
#ifdef DP_PRINT_NO_CONSOLE
/* Stat prints should not go to console or kernel logs. */
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, \
		  fmt, ## args)
#else
#define DP_PRINT_STATS(fmt, args ...)\
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\
		  fmt, ## args)
#endif
#endif /* WLAN_SYSFS_DP_STATS */

#define DP_STATS_INIT(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#define DP_STATS_CLR(_handle) \
	qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats))

#ifndef DISABLE_DP_STATS
#define DP_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field += _delta; \
}

#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	if (_cond && likely(_handle)) \
		_handle->stats._field += _delta; \
}

#define DP_STATS_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field -= _delta; \
}

#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->_field -= _delta; \
}

#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field = _delta; \
}

#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_STATS_INC(_handle, _field.num, _count); \
	DP_STATS_INC(_handle, _field.bytes, _bytes) \
}

#define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_PEER_STATS_FLAT_INC(_handle, _field.num, _count); \
	DP_PEER_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \
}

#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
	DP_STATS_INCC(_handle, _field.num, _count, _cond); \
	DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}
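/*
 * Illustrative sketch (not part of the driver API): typical use of the
 * counter helpers above. DP_STATS_INC_PKT() updates a {num, bytes} pair
 * on the object's embedded stats structure and DP_STATS_INCC_PKT() does
 * the same only when the condition holds; all of them compile away when
 * DISABLE_DP_STATS is defined. The helper name and the exact stats
 * fields are hypothetical, and the block is not compiled.
 */
#if 0
static inline void dp_example_rx_counters(struct dp_peer *peer,
					  qdf_nbuf_t nbuf, bool is_mcast)
{
	/* one delivered packet and its byte count */
	DP_STATS_INC_PKT(peer, rx.to_stack, 1, qdf_nbuf_len(nbuf));

	/* counted only when the frame is multicast */
	DP_STATS_INCC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf), is_mcast);
}
#endif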

#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field += _handle_b->stats._field; \
}

#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
{ \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
	DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
}

#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \
{ \
	_handle_a->stats._field = _handle_b->stats._field; \
}

#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#endif

#define DP_PEER_PER_PKT_STATS_INC(_handle, _field, _delta) \
{ \
	DP_STATS_INC(_handle, per_pkt_stats._field, _delta); \
}

#define DP_PEER_PER_PKT_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	DP_STATS_INCC(_handle, per_pkt_stats._field, _delta, _cond); \
}

#define DP_PEER_PER_PKT_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_PEER_PER_PKT_STATS_INC(_handle, _field.num, _count); \
	DP_PEER_PER_PKT_STATS_INC(_handle, _field.bytes, _bytes) \
}

#define DP_PEER_PER_PKT_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
	DP_PEER_PER_PKT_STATS_INCC(_handle, _field.num, _count, _cond); \
	DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}

#ifndef QCA_ENHANCED_STATS_SUPPORT
#define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta) \
{ \
	DP_STATS_INC(_handle, extd_stats._field, _delta); \
}

#define DP_PEER_EXTD_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	DP_STATS_INCC(_handle, extd_stats._field, _delta, _cond); \
}

#define DP_PEER_EXTD_STATS_UPD(_handle, _field, _delta) \
{ \
	DP_STATS_UPD(_handle, extd_stats._field, _delta); \
}
#endif

#if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
	defined(QCA_ENHANCED_STATS_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en) || _cond) \
		DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}

#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en) || _cond) \
		DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}

#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en) || _cond) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \
}

#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en) || _cond) \
		DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \
}
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
	if (!(_handle->hw_txrx_stats_en)) \
DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ 1052 } 1053 1054 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1055 { \ 1056 if (!(_handle->hw_txrx_stats_en)) \ 1057 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ 1058 } 1059 1060 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \ 1061 { \ 1062 if (!(_handle->hw_txrx_stats_en)) \ 1063 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \ 1064 } 1065 1066 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ 1067 { \ 1068 if (!(_handle->hw_txrx_stats_en)) \ 1069 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \ 1070 } 1071 #else 1072 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1073 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); 1074 1075 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1076 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); 1077 1078 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \ 1079 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); 1080 1081 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ 1082 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); 1083 #endif 1084 1085 #ifdef ENABLE_DP_HIST_STATS 1086 #define DP_HIST_INIT() \ 1087 uint32_t num_of_packets[MAX_PDEV_CNT] = {0}; 1088 1089 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) \ 1090 { \ 1091 ++num_of_packets[_pdev_id]; \ 1092 } 1093 1094 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 1095 do { \ 1096 if (_p_cntrs == 1) { \ 1097 DP_STATS_INC(_pdev, \ 1098 tx_comp_histogram.pkts_1, 1); \ 1099 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 1100 DP_STATS_INC(_pdev, \ 1101 tx_comp_histogram.pkts_2_20, 1); \ 1102 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 1103 DP_STATS_INC(_pdev, \ 1104 tx_comp_histogram.pkts_21_40, 1); \ 1105 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 1106 DP_STATS_INC(_pdev, \ 1107 tx_comp_histogram.pkts_41_60, 1); \ 1108 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 1109 DP_STATS_INC(_pdev, \ 1110 tx_comp_histogram.pkts_61_80, 1); \ 1111 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 1112 DP_STATS_INC(_pdev, \ 1113 tx_comp_histogram.pkts_81_100, 1); \ 1114 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 1115 DP_STATS_INC(_pdev, \ 1116 tx_comp_histogram.pkts_101_200, 1); \ 1117 } else if (_p_cntrs > 200) { \ 1118 DP_STATS_INC(_pdev, \ 1119 tx_comp_histogram.pkts_201_plus, 1); \ 1120 } \ 1121 } while (0) 1122 1123 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 1124 do { \ 1125 if (_p_cntrs == 1) { \ 1126 DP_STATS_INC(_pdev, \ 1127 rx_ind_histogram.pkts_1, 1); \ 1128 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 1129 DP_STATS_INC(_pdev, \ 1130 rx_ind_histogram.pkts_2_20, 1); \ 1131 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 1132 DP_STATS_INC(_pdev, \ 1133 rx_ind_histogram.pkts_21_40, 1); \ 1134 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 1135 DP_STATS_INC(_pdev, \ 1136 rx_ind_histogram.pkts_41_60, 1); \ 1137 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 1138 DP_STATS_INC(_pdev, \ 1139 rx_ind_histogram.pkts_61_80, 1); \ 1140 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 1141 DP_STATS_INC(_pdev, \ 1142 rx_ind_histogram.pkts_81_100, 1); \ 1143 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 1144 DP_STATS_INC(_pdev, \ 1145 rx_ind_histogram.pkts_101_200, 1); \ 1146 } else if (_p_cntrs > 200) { \ 1147 DP_STATS_INC(_pdev, \ 1148 rx_ind_histogram.pkts_201_plus, 1); \ 1149 } \ 1150 } while (0) 1151 1152 #define 
DP_TX_HIST_STATS_PER_PDEV() \ 1153 do { \ 1154 uint8_t hist_stats = 0; \ 1155 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 1156 hist_stats++) { \ 1157 DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 1158 num_of_packets[hist_stats]); \ 1159 } \ 1160 } while (0) 1161 1162 1163 #define DP_RX_HIST_STATS_PER_PDEV() \ 1164 do { \ 1165 uint8_t hist_stats = 0; \ 1166 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 1167 hist_stats++) { \ 1168 DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 1169 num_of_packets[hist_stats]); \ 1170 } \ 1171 } while (0) 1172 1173 #else 1174 #define DP_HIST_INIT() 1175 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) 1176 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 1177 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 1178 #define DP_RX_HIST_STATS_PER_PDEV() 1179 #define DP_TX_HIST_STATS_PER_PDEV() 1180 #endif /* DISABLE_DP_STATS */ 1181 1182 #define FRAME_MASK_IPV4_ARP 1 1183 #define FRAME_MASK_IPV4_DHCP 2 1184 #define FRAME_MASK_IPV4_EAPOL 4 1185 #define FRAME_MASK_IPV6_DHCP 8 1186 1187 static inline int dp_log2_ceil(unsigned int value) 1188 { 1189 unsigned int tmp = value; 1190 int log2 = -1; 1191 1192 while (tmp) { 1193 log2++; 1194 tmp >>= 1; 1195 } 1196 if (1 << log2 != value) 1197 log2++; 1198 return log2; 1199 } 1200 1201 #ifdef QCA_SUPPORT_PEER_ISOLATION 1202 #define dp_get_peer_isolation(_peer) ((_peer)->isolation) 1203 1204 static inline void dp_set_peer_isolation(struct dp_txrx_peer *txrx_peer, 1205 bool val) 1206 { 1207 txrx_peer->isolation = val; 1208 } 1209 1210 #else 1211 #define dp_get_peer_isolation(_peer) (0) 1212 1213 static inline void dp_set_peer_isolation(struct dp_txrx_peer *peer, bool val) 1214 { 1215 } 1216 #endif /* QCA_SUPPORT_PEER_ISOLATION */ 1217 1218 #ifdef QCA_SUPPORT_WDS_EXTENDED 1219 static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer) 1220 { 1221 txrx_peer->wds_ext.init = 0; 1222 } 1223 #else 1224 static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer) 1225 { 1226 } 1227 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 1228 1229 #ifdef QCA_HOST2FW_RXBUF_RING 1230 static inline 1231 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id) 1232 { 1233 return &pdev->rx_mac_buf_ring[lmac_id]; 1234 } 1235 #else 1236 static inline 1237 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id) 1238 { 1239 return &pdev->soc->rx_refill_buf_ring[lmac_id]; 1240 } 1241 #endif 1242 1243 /** 1244 * The lmac ID for a particular channel band is fixed. 
1245 * 2.4GHz band uses lmac_id = 1 1246 * 5GHz/6GHz band uses lmac_id=0 1247 */ 1248 #define DP_INVALID_LMAC_ID (-1) 1249 #define DP_MON_INVALID_LMAC_ID (-1) 1250 #define DP_MAC0_LMAC_ID 0 1251 #define DP_MAC1_LMAC_ID 1 1252 1253 #ifdef FEATURE_TSO_STATS 1254 /** 1255 * dp_init_tso_stats() - Clear tso stats 1256 * @pdev: pdev handle 1257 * 1258 * Return: None 1259 */ 1260 static inline 1261 void dp_init_tso_stats(struct dp_pdev *pdev) 1262 { 1263 if (pdev) { 1264 qdf_mem_zero(&((pdev)->stats.tso_stats), 1265 sizeof((pdev)->stats.tso_stats)); 1266 qdf_atomic_init(&pdev->tso_idx); 1267 } 1268 } 1269 1270 /** 1271 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram 1272 * @pdev: pdev handle 1273 * @_p_cntrs: number of tso segments for a tso packet 1274 * 1275 * Return: None 1276 */ 1277 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 1278 uint8_t _p_cntrs); 1279 1280 /** 1281 * dp_tso_segment_update() - Collect tso segment information 1282 * @pdev: pdev handle 1283 * @stats_idx: tso packet number 1284 * @idx: tso segment number 1285 * @seg: tso segment 1286 * 1287 * Return: None 1288 */ 1289 void dp_tso_segment_update(struct dp_pdev *pdev, 1290 uint32_t stats_idx, 1291 uint8_t idx, 1292 struct qdf_tso_seg_t seg); 1293 1294 /** 1295 * dp_tso_packet_update() - TSO Packet information 1296 * @pdev: pdev handle 1297 * @stats_idx: tso packet number 1298 * @msdu: nbuf handle 1299 * @num_segs: tso segments 1300 * 1301 * Return: None 1302 */ 1303 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 1304 qdf_nbuf_t msdu, uint16_t num_segs); 1305 1306 /** 1307 * dp_tso_segment_stats_update() - TSO Segment stats 1308 * @pdev: pdev handle 1309 * @stats_seg: tso segment list 1310 * @stats_idx: tso packet number 1311 * 1312 * Return: None 1313 */ 1314 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 1315 struct qdf_tso_seg_elem_t *stats_seg, 1316 uint32_t stats_idx); 1317 1318 /** 1319 * dp_print_tso_stats() - dump tso statistics 1320 * @soc:soc handle 1321 * @level: verbosity level 1322 * 1323 * Return: None 1324 */ 1325 void dp_print_tso_stats(struct dp_soc *soc, 1326 enum qdf_stats_verbosity_level level); 1327 1328 /** 1329 * dp_txrx_clear_tso_stats() - clear tso stats 1330 * @soc: soc handle 1331 * 1332 * Return: None 1333 */ 1334 void dp_txrx_clear_tso_stats(struct dp_soc *soc); 1335 #else 1336 static inline 1337 void dp_init_tso_stats(struct dp_pdev *pdev) 1338 { 1339 } 1340 1341 static inline 1342 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 1343 uint8_t _p_cntrs) 1344 { 1345 } 1346 1347 static inline 1348 void dp_tso_segment_update(struct dp_pdev *pdev, 1349 uint32_t stats_idx, 1350 uint32_t idx, 1351 struct qdf_tso_seg_t seg) 1352 { 1353 } 1354 1355 static inline 1356 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 1357 qdf_nbuf_t msdu, uint16_t num_segs) 1358 { 1359 } 1360 1361 static inline 1362 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 1363 struct qdf_tso_seg_elem_t *stats_seg, 1364 uint32_t stats_idx) 1365 { 1366 } 1367 1368 static inline 1369 void dp_print_tso_stats(struct dp_soc *soc, 1370 enum qdf_stats_verbosity_level level) 1371 { 1372 } 1373 1374 static inline 1375 void dp_txrx_clear_tso_stats(struct dp_soc *soc) 1376 { 1377 } 1378 #endif /* FEATURE_TSO_STATS */ 1379 1380 /* dp_txrx_get_peer_per_pkt_stats_param() - Get peer per pkt stats param 1381 * @peer: DP peer handle 1382 * @type: Requested stats type 1383 * @ buf: Buffer to hold the value 1384 * 1385 * Return: status 
success/failure 1386 */ 1387 QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer, 1388 enum cdp_peer_stats_type type, 1389 cdp_peer_stats_param_t *buf); 1390 1391 /* dp_txrx_get_peer_extd_stats_param() - Get peer extd stats param 1392 * @peer: DP peer handle 1393 * @type: Requested stats type 1394 * @ buf: Buffer to hold the value 1395 * 1396 * Return: status success/failure 1397 */ 1398 QDF_STATUS dp_txrx_get_peer_extd_stats_param(struct dp_peer *peer, 1399 enum cdp_peer_stats_type type, 1400 cdp_peer_stats_param_t *buf); 1401 1402 #define DP_HTT_T2H_HP_PIPE 5 1403 /** 1404 * dp_update_pdev_stats(): Update the pdev stats 1405 * @tgtobj: pdev handle 1406 * @srcobj: vdev stats structure 1407 * 1408 * Update the pdev stats from the specified vdev stats 1409 * 1410 * return: None 1411 */ 1412 void dp_update_pdev_stats(struct dp_pdev *tgtobj, 1413 struct cdp_vdev_stats *srcobj); 1414 1415 /** 1416 * dp_update_vdev_ingress_stats(): Update the vdev ingress stats 1417 * @tgtobj: vdev handle 1418 * 1419 * Update the vdev ingress stats 1420 * 1421 * return: None 1422 */ 1423 void dp_update_vdev_ingress_stats(struct dp_vdev *tgtobj); 1424 1425 /** 1426 * dp_update_vdev_rate_stats() - Update the vdev rate stats 1427 * @tgtobj: tgt buffer for vdev stats 1428 * @srcobj: srcobj vdev stats 1429 * 1430 * Return: None 1431 */ 1432 void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj, 1433 struct cdp_vdev_stats *srcobj); 1434 1435 /** 1436 * dp_update_pdev_ingress_stats(): Update the pdev ingress stats 1437 * @tgtobj: pdev handle 1438 * @srcobj: vdev stats structure 1439 * 1440 * Update the pdev ingress stats from the specified vdev stats 1441 * 1442 * return: None 1443 */ 1444 void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj, 1445 struct dp_vdev *srcobj); 1446 1447 /** 1448 * dp_update_vdev_stats(): Update the vdev stats 1449 * @soc: soc handle 1450 * @srcobj: DP_PEER object 1451 * @arg: point to vdev stats structure 1452 * 1453 * Update the vdev stats from the specified peer stats 1454 * 1455 * return: None 1456 */ 1457 void dp_update_vdev_stats(struct dp_soc *soc, 1458 struct dp_peer *srcobj, 1459 void *arg); 1460 1461 /** 1462 * dp_update_vdev_stats_on_peer_unmap() - Update the vdev stats on peer unmap 1463 * @vdev: DP_VDEV handle 1464 * @peer: DP_PEER handle 1465 * 1466 * Return: None 1467 */ 1468 void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev, 1469 struct dp_peer *peer); 1470 1471 #define DP_UPDATE_STATS(_tgtobj, _srcobj) \ 1472 do { \ 1473 uint8_t i; \ 1474 uint8_t pream_type; \ 1475 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 1476 for (i = 0; i < MAX_MCS; i++) { \ 1477 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1478 tx.pkt_type[pream_type].mcs_count[i]); \ 1479 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1480 rx.pkt_type[pream_type].mcs_count[i]); \ 1481 } \ 1482 } \ 1483 \ 1484 for (i = 0; i < MAX_BW; i++) { \ 1485 DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \ 1486 DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \ 1487 } \ 1488 \ 1489 for (i = 0; i < SS_COUNT; i++) { \ 1490 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \ 1491 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \ 1492 } \ 1493 for (i = 0; i < WME_AC_MAX; i++) { \ 1494 DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \ 1495 DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \ 1496 DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \ 1497 \ 1498 } \ 1499 \ 1500 for (i = 0; i < MAX_GI; i++) { \ 1501 DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \ 1502 DP_STATS_AGGR(_tgtobj, _srcobj, 
rx.sgi_count[i]); \ 1503 } \ 1504 \ 1505 for (i = 0; i < MAX_RECEPTION_TYPES; i++) \ 1506 DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \ 1507 \ 1508 if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) { \ 1509 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \ 1510 DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \ 1511 } \ 1512 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \ 1513 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \ 1514 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \ 1515 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \ 1516 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \ 1517 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \ 1518 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \ 1519 DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \ 1520 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \ 1521 DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \ 1522 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \ 1523 DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \ 1524 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \ 1525 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \ 1526 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \ 1527 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \ 1528 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \ 1529 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \ 1530 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \ 1531 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \ 1532 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_queue_disable); \ 1533 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_no_match); \ 1534 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_threshold); \ 1535 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_link_desc_na); \ 1536 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_drop); \ 1537 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.mcast_vdev_drop); \ 1538 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_rr); \ 1539 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \ 1540 \ 1541 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \ 1542 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \ 1543 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.fcserr); \ 1544 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.pn_err); \ 1545 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.oor_err); \ 1546 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.jump_2k_err); \ 1547 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.rxdma_wifi_parse_err); \ 1548 if (_srcobj->stats.rx.snr != 0) \ 1549 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \ 1550 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \ 1551 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \ 1552 DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \ 1553 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \ 1554 DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \ 1555 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \ 1556 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \ 1557 \ 1558 for (i = 0; i < CDP_MAX_RX_RINGS; i++) \ 1559 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \ 1560 \ 1561 for (i = 0; i < CDP_MAX_LMACS; i++) \ 1562 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rx_lmac[i]); \ 1563 \ 1564 _srcobj->stats.rx.unicast.num = \ 1565 _srcobj->stats.rx.to_stack.num - \ 1566 _srcobj->stats.rx.multicast.num; \ 1567 _srcobj->stats.rx.unicast.bytes = \ 1568 _srcobj->stats.rx.to_stack.bytes - \ 1569 _srcobj->stats.rx.multicast.bytes; \ 1570 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \ 1571 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \ 1572 
DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \ 1573 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \ 1574 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \ 1575 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \ 1576 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \ 1577 \ 1578 _tgtobj->stats.tx.last_ack_rssi = \ 1579 _srcobj->stats.tx.last_ack_rssi; \ 1580 DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \ 1581 DP_STATS_AGGR(_tgtobj, _srcobj, rx.peer_unauth_rx_pkt_drop); \ 1582 DP_STATS_AGGR(_tgtobj, _srcobj, rx.policy_check_drop); \ 1583 } while (0) 1584 1585 #ifdef VDEV_PEER_PROTOCOL_COUNT 1586 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) \ 1587 { \ 1588 uint8_t j; \ 1589 for (j = 0; j < CDP_TRACE_MAX; j++) { \ 1590 _tgtobj->tx.protocol_trace_cnt[j].egress_cnt += \ 1591 _srcobj->tx.protocol_trace_cnt[j].egress_cnt; \ 1592 _tgtobj->tx.protocol_trace_cnt[j].ingress_cnt += \ 1593 _srcobj->tx.protocol_trace_cnt[j].ingress_cnt; \ 1594 _tgtobj->rx.protocol_trace_cnt[j].egress_cnt += \ 1595 _srcobj->rx.protocol_trace_cnt[j].egress_cnt; \ 1596 _tgtobj->rx.protocol_trace_cnt[j].ingress_cnt += \ 1597 _srcobj->rx.protocol_trace_cnt[j].ingress_cnt; \ 1598 } \ 1599 } 1600 #else 1601 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) 1602 #endif 1603 1604 #ifdef WLAN_FEATURE_11BE 1605 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) \ 1606 do { \ 1607 uint8_t i, mu_type; \ 1608 for (i = 0; i < MAX_MCS; i++) { \ 1609 _tgtobj->tx.su_be_ppdu_cnt.mcs_count[i] += \ 1610 _srcobj->tx.su_be_ppdu_cnt.mcs_count[i]; \ 1611 _tgtobj->rx.su_be_ppdu_cnt.mcs_count[i] += \ 1612 _srcobj->rx.su_be_ppdu_cnt.mcs_count[i]; \ 1613 } \ 1614 for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 1615 for (i = 0; i < MAX_MCS; i++) { \ 1616 _tgtobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \ 1617 _srcobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \ 1618 _tgtobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \ 1619 _srcobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \ 1620 } \ 1621 } \ 1622 for (i = 0; i < MAX_PUNCTURED_MODE; i++) { \ 1623 _tgtobj->tx.punc_bw[i] += _srcobj->tx.punc_bw[i]; \ 1624 _tgtobj->rx.punc_bw[i] += _srcobj->rx.punc_bw[i]; \ 1625 } \ 1626 } while (0) 1627 #else 1628 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) 1629 #endif 1630 1631 #define DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj) \ 1632 do { \ 1633 uint8_t i; \ 1634 _tgtobj->tx.ucast.num += _srcobj->tx.ucast.num; \ 1635 _tgtobj->tx.ucast.bytes += _srcobj->tx.ucast.bytes; \ 1636 _tgtobj->tx.mcast.num += _srcobj->tx.mcast.num; \ 1637 _tgtobj->tx.mcast.bytes += _srcobj->tx.mcast.bytes; \ 1638 _tgtobj->tx.bcast.num += _srcobj->tx.bcast.num; \ 1639 _tgtobj->tx.bcast.bytes += _srcobj->tx.bcast.bytes; \ 1640 _tgtobj->tx.nawds_mcast.num += _srcobj->tx.nawds_mcast.num; \ 1641 _tgtobj->tx.nawds_mcast.bytes += \ 1642 _srcobj->tx.nawds_mcast.bytes; \ 1643 _tgtobj->tx.tx_success.num += _srcobj->tx.tx_success.num; \ 1644 _tgtobj->tx.tx_success.bytes += _srcobj->tx.tx_success.bytes; \ 1645 _tgtobj->tx.nawds_mcast_drop += _srcobj->tx.nawds_mcast_drop; \ 1646 _tgtobj->tx.ofdma += _srcobj->tx.ofdma; \ 1647 _tgtobj->tx.non_amsdu_cnt += _srcobj->tx.non_amsdu_cnt; \ 1648 _tgtobj->tx.amsdu_cnt += _srcobj->tx.amsdu_cnt; \ 1649 _tgtobj->tx.dropped.fw_rem.num += \ 1650 _srcobj->tx.dropped.fw_rem.num; \ 1651 _tgtobj->tx.dropped.fw_rem.bytes += \ 1652 _srcobj->tx.dropped.fw_rem.bytes; \ 1653 _tgtobj->tx.dropped.fw_rem_notx += \ 1654 _srcobj->tx.dropped.fw_rem_notx; \ 1655 _tgtobj->tx.dropped.fw_rem_tx += \ 1656 
_srcobj->tx.dropped.fw_rem_tx; \ 1657 _tgtobj->tx.dropped.age_out += _srcobj->tx.dropped.age_out; \ 1658 _tgtobj->tx.dropped.fw_reason1 += \ 1659 _srcobj->tx.dropped.fw_reason1; \ 1660 _tgtobj->tx.dropped.fw_reason2 += \ 1661 _srcobj->tx.dropped.fw_reason2; \ 1662 _tgtobj->tx.dropped.fw_reason3 += \ 1663 _srcobj->tx.dropped.fw_reason3; \ 1664 _tgtobj->tx.dropped.fw_rem_queue_disable += \ 1665 _srcobj->tx.dropped.fw_rem_queue_disable; \ 1666 _tgtobj->tx.dropped.fw_rem_no_match += \ 1667 _srcobj->tx.dropped.fw_rem_no_match; \ 1668 _tgtobj->tx.dropped.drop_threshold += \ 1669 _srcobj->tx.dropped.drop_threshold; \ 1670 _tgtobj->tx.dropped.drop_link_desc_na += \ 1671 _srcobj->tx.dropped.drop_link_desc_na; \ 1672 _tgtobj->tx.dropped.invalid_drop += \ 1673 _srcobj->tx.dropped.invalid_drop; \ 1674 _tgtobj->tx.dropped.mcast_vdev_drop += \ 1675 _srcobj->tx.dropped.mcast_vdev_drop; \ 1676 _tgtobj->tx.dropped.invalid_rr += \ 1677 _srcobj->tx.dropped.invalid_rr; \ 1678 _tgtobj->tx.failed_retry_count += \ 1679 _srcobj->tx.failed_retry_count; \ 1680 _tgtobj->tx.retry_count += _srcobj->tx.retry_count; \ 1681 _tgtobj->tx.multiple_retry_count += \ 1682 _srcobj->tx.multiple_retry_count; \ 1683 _tgtobj->tx.tx_success_twt.num += \ 1684 _srcobj->tx.tx_success_twt.num; \ 1685 _tgtobj->tx.tx_success_twt.bytes += \ 1686 _srcobj->tx.tx_success_twt.bytes; \ 1687 _tgtobj->tx.last_tx_ts = _srcobj->tx.last_tx_ts; \ 1688 _tgtobj->tx.release_src_not_tqm += \ 1689 _srcobj->tx.release_src_not_tqm; \ 1690 for (i = 0; i < QDF_PROTO_SUBTYPE_MAX; i++) { \ 1691 _tgtobj->tx.no_ack_count[i] += \ 1692 _srcobj->tx.no_ack_count[i];\ 1693 } \ 1694 \ 1695 _tgtobj->rx.multicast.num += _srcobj->rx.multicast.num; \ 1696 _tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \ 1697 _tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \ 1698 _tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \ 1699 if (_tgtobj->rx.to_stack.num >= _tgtobj->rx.multicast.num) \ 1700 _tgtobj->rx.unicast.num = \ 1701 _tgtobj->rx.to_stack.num - _tgtobj->rx.multicast.num; \ 1702 if (_tgtobj->rx.to_stack.bytes >= _tgtobj->rx.multicast.bytes) \ 1703 _tgtobj->rx.unicast.bytes = \ 1704 _tgtobj->rx.to_stack.bytes - _tgtobj->rx.multicast.bytes; \ 1705 _tgtobj->rx.raw.num += _srcobj->rx.raw.num; \ 1706 _tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \ 1707 _tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \ 1708 _tgtobj->rx.mcast_3addr_drop += _srcobj->rx.mcast_3addr_drop; \ 1709 _tgtobj->rx.mec_drop.num += _srcobj->rx.mec_drop.num; \ 1710 _tgtobj->rx.mec_drop.bytes += _srcobj->rx.mec_drop.bytes; \ 1711 _tgtobj->rx.intra_bss.pkts.num += \ 1712 _srcobj->rx.intra_bss.pkts.num; \ 1713 _tgtobj->rx.intra_bss.pkts.bytes += \ 1714 _srcobj->rx.intra_bss.pkts.bytes; \ 1715 _tgtobj->rx.intra_bss.fail.num += \ 1716 _srcobj->rx.intra_bss.fail.num; \ 1717 _tgtobj->rx.intra_bss.fail.bytes += \ 1718 _srcobj->rx.intra_bss.fail.bytes; \ 1719 _tgtobj->rx.intra_bss.mdns_no_fwd += \ 1720 _srcobj->rx.intra_bss.mdns_no_fwd; \ 1721 _tgtobj->rx.err.mic_err += _srcobj->rx.err.mic_err; \ 1722 _tgtobj->rx.err.decrypt_err += _srcobj->rx.err.decrypt_err; \ 1723 _tgtobj->rx.err.fcserr += _srcobj->rx.err.fcserr; \ 1724 _tgtobj->rx.err.pn_err += _srcobj->rx.err.pn_err; \ 1725 _tgtobj->rx.err.oor_err += _srcobj->rx.err.oor_err; \ 1726 _tgtobj->rx.err.jump_2k_err += _srcobj->rx.err.jump_2k_err; \ 1727 _tgtobj->rx.err.rxdma_wifi_parse_err += \ 1728 _srcobj->rx.err.rxdma_wifi_parse_err; \ 1729 _tgtobj->rx.non_amsdu_cnt += _srcobj->rx.non_amsdu_cnt; \ 1730 _tgtobj->rx.amsdu_cnt += 
_srcobj->rx.amsdu_cnt; \ 1731 _tgtobj->rx.rx_retries += _srcobj->rx.rx_retries; \ 1732 _tgtobj->rx.multipass_rx_pkt_drop += \ 1733 _srcobj->rx.multipass_rx_pkt_drop; \ 1734 _tgtobj->rx.peer_unauth_rx_pkt_drop += \ 1735 _srcobj->rx.peer_unauth_rx_pkt_drop; \ 1736 _tgtobj->rx.policy_check_drop += \ 1737 _srcobj->rx.policy_check_drop; \ 1738 _tgtobj->rx.to_stack_twt.num += _srcobj->rx.to_stack_twt.num; \ 1739 _tgtobj->rx.to_stack_twt.bytes += \ 1740 _srcobj->rx.to_stack_twt.bytes; \ 1741 _tgtobj->rx.last_rx_ts = _srcobj->rx.last_rx_ts; \ 1742 for (i = 0; i < CDP_MAX_RX_RINGS; i++) { \ 1743 _tgtobj->rx.rcvd_reo[i].num += \ 1744 _srcobj->rx.rcvd_reo[i].num; \ 1745 _tgtobj->rx.rcvd_reo[i].bytes += \ 1746 _srcobj->rx.rcvd_reo[i].bytes; \ 1747 } \ 1748 for (i = 0; i < CDP_MAX_LMACS; i++) { \ 1749 _tgtobj->rx.rx_lmac[i].num += \ 1750 _srcobj->rx.rx_lmac[i].num; \ 1751 _tgtobj->rx.rx_lmac[i].bytes += \ 1752 _srcobj->rx.rx_lmac[i].bytes; \ 1753 } \ 1754 DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj); \ 1755 } while (0) 1756 1757 #define DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj) \ 1758 do { \ 1759 uint8_t i, pream_type, mu_type; \ 1760 _tgtobj->tx.stbc += _srcobj->tx.stbc; \ 1761 _tgtobj->tx.ldpc += _srcobj->tx.ldpc; \ 1762 _tgtobj->tx.retries += _srcobj->tx.retries; \ 1763 _tgtobj->tx.ampdu_cnt += _srcobj->tx.ampdu_cnt; \ 1764 _tgtobj->tx.non_ampdu_cnt += _srcobj->tx.non_ampdu_cnt; \ 1765 _tgtobj->tx.num_ppdu_cookie_valid += \ 1766 _srcobj->tx.num_ppdu_cookie_valid; \ 1767 _tgtobj->tx.tx_ppdus += _srcobj->tx.tx_ppdus; \ 1768 _tgtobj->tx.tx_mpdus_success += _srcobj->tx.tx_mpdus_success; \ 1769 _tgtobj->tx.tx_mpdus_tried += _srcobj->tx.tx_mpdus_tried; \ 1770 _tgtobj->tx.tx_rate = _srcobj->tx.tx_rate; \ 1771 _tgtobj->tx.last_tx_rate = _srcobj->tx.last_tx_rate; \ 1772 _tgtobj->tx.last_tx_rate_mcs = _srcobj->tx.last_tx_rate_mcs; \ 1773 _tgtobj->tx.mcast_last_tx_rate = \ 1774 _srcobj->tx.mcast_last_tx_rate; \ 1775 _tgtobj->tx.mcast_last_tx_rate_mcs = \ 1776 _srcobj->tx.mcast_last_tx_rate_mcs; \ 1777 _tgtobj->tx.rnd_avg_tx_rate = _srcobj->tx.rnd_avg_tx_rate; \ 1778 _tgtobj->tx.avg_tx_rate = _srcobj->tx.avg_tx_rate; \ 1779 _tgtobj->tx.tx_ratecode = _srcobj->tx.tx_ratecode; \ 1780 _tgtobj->tx.pream_punct_cnt += _srcobj->tx.pream_punct_cnt; \ 1781 _tgtobj->tx.ru_start = _srcobj->tx.ru_start; \ 1782 _tgtobj->tx.ru_tones = _srcobj->tx.ru_tones; \ 1783 _tgtobj->tx.last_ack_rssi = _srcobj->tx.last_ack_rssi; \ 1784 _tgtobj->tx.nss_info = _srcobj->tx.nss_info; \ 1785 _tgtobj->tx.mcs_info = _srcobj->tx.mcs_info; \ 1786 _tgtobj->tx.bw_info = _srcobj->tx.bw_info; \ 1787 _tgtobj->tx.gi_info = _srcobj->tx.gi_info; \ 1788 _tgtobj->tx.preamble_info = _srcobj->tx.preamble_info; \ 1789 _tgtobj->tx.retries_mpdu += _srcobj->tx.retries_mpdu; \ 1790 _tgtobj->tx.mpdu_success_with_retries += \ 1791 _srcobj->tx.mpdu_success_with_retries; \ 1792 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 1793 for (i = 0; i < MAX_MCS; i++) \ 1794 _tgtobj->tx.pkt_type[pream_type].mcs_count[i] += \ 1795 _srcobj->tx.pkt_type[pream_type].mcs_count[i]; \ 1796 } \ 1797 for (i = 0; i < WME_AC_MAX; i++) { \ 1798 _tgtobj->tx.wme_ac_type[i] += _srcobj->tx.wme_ac_type[i]; \ 1799 _tgtobj->tx.excess_retries_per_ac[i] += \ 1800 _srcobj->tx.excess_retries_per_ac[i]; \ 1801 } \ 1802 for (i = 0; i < MAX_GI; i++) { \ 1803 _tgtobj->tx.sgi_count[i] += _srcobj->tx.sgi_count[i]; \ 1804 } \ 1805 for (i = 0; i < SS_COUNT; i++) { \ 1806 _tgtobj->tx.nss[i] += _srcobj->tx.nss[i]; \ 1807 } \ 1808 for (i = 0; i < MAX_BW; i++) { \ 1809 _tgtobj->tx.bw[i] += 
_srcobj->tx.bw[i]; \ 1810 } \ 1811 for (i = 0; i < MAX_RU_LOCATIONS; i++) { \ 1812 _tgtobj->tx.ru_loc[i].num_msdu += \ 1813 _srcobj->tx.ru_loc[i].num_msdu; \ 1814 _tgtobj->tx.ru_loc[i].num_mpdu += \ 1815 _srcobj->tx.ru_loc[i].num_mpdu; \ 1816 _tgtobj->tx.ru_loc[i].mpdu_tried += \ 1817 _srcobj->tx.ru_loc[i].mpdu_tried; \ 1818 } \ 1819 for (i = 0; i < MAX_TRANSMIT_TYPES; i++) { \ 1820 _tgtobj->tx.transmit_type[i].num_msdu += \ 1821 _srcobj->tx.transmit_type[i].num_msdu; \ 1822 _tgtobj->tx.transmit_type[i].num_mpdu += \ 1823 _srcobj->tx.transmit_type[i].num_mpdu; \ 1824 _tgtobj->tx.transmit_type[i].mpdu_tried += \ 1825 _srcobj->tx.transmit_type[i].mpdu_tried; \ 1826 } \ 1827 for (i = 0; i < MAX_MU_GROUP_ID; i++) { \ 1828 _tgtobj->tx.mu_group_id[i] = _srcobj->tx.mu_group_id[i]; \ 1829 } \ 1830 \ 1831 _tgtobj->rx.mpdu_cnt_fcs_ok += _srcobj->rx.mpdu_cnt_fcs_ok; \ 1832 _tgtobj->rx.mpdu_cnt_fcs_err += _srcobj->rx.mpdu_cnt_fcs_err; \ 1833 _tgtobj->rx.non_ampdu_cnt += _srcobj->rx.non_ampdu_cnt; \ 1834 _tgtobj->rx.ampdu_cnt += _srcobj->rx.ampdu_cnt; \ 1835 _tgtobj->rx.rx_mpdus += _srcobj->rx.rx_mpdus; \ 1836 _tgtobj->rx.rx_ppdus += _srcobj->rx.rx_ppdus; \ 1837 _tgtobj->rx.rx_rate = _srcobj->rx.rx_rate; \ 1838 _tgtobj->rx.last_rx_rate = _srcobj->rx.last_rx_rate; \ 1839 _tgtobj->rx.rnd_avg_rx_rate = _srcobj->rx.rnd_avg_rx_rate; \ 1840 _tgtobj->rx.avg_rx_rate = _srcobj->rx.avg_rx_rate; \ 1841 _tgtobj->rx.rx_ratecode = _srcobj->rx.rx_ratecode; \ 1842 _tgtobj->rx.avg_snr = _srcobj->rx.avg_snr; \ 1843 _tgtobj->rx.rx_snr_measured_time = \ 1844 _srcobj->rx.rx_snr_measured_time; \ 1845 _tgtobj->rx.snr = _srcobj->rx.snr; \ 1846 _tgtobj->rx.last_snr = _srcobj->rx.last_snr; \ 1847 _tgtobj->rx.nss_info = _srcobj->rx.nss_info; \ 1848 _tgtobj->rx.mcs_info = _srcobj->rx.mcs_info; \ 1849 _tgtobj->rx.bw_info = _srcobj->rx.bw_info; \ 1850 _tgtobj->rx.gi_info = _srcobj->rx.gi_info; \ 1851 _tgtobj->rx.preamble_info = _srcobj->rx.preamble_info; \ 1852 _tgtobj->rx.mpdu_retry_cnt += _srcobj->rx.mpdu_retry_cnt; \ 1853 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 1854 for (i = 0; i < MAX_MCS; i++) { \ 1855 _tgtobj->rx.pkt_type[pream_type].mcs_count[i] += \ 1856 _srcobj->rx.pkt_type[pream_type].mcs_count[i]; \ 1857 } \ 1858 } \ 1859 for (i = 0; i < WME_AC_MAX; i++) { \ 1860 _tgtobj->rx.wme_ac_type[i] += _srcobj->rx.wme_ac_type[i]; \ 1861 } \ 1862 for (i = 0; i < MAX_MCS; i++) { \ 1863 _tgtobj->rx.su_ax_ppdu_cnt.mcs_count[i] += \ 1864 _srcobj->rx.su_ax_ppdu_cnt.mcs_count[i]; \ 1865 _tgtobj->rx.rx_mpdu_cnt[i] += _srcobj->rx.rx_mpdu_cnt[i]; \ 1866 } \ 1867 for (mu_type = 0 ; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 1868 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok += \ 1869 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok; \ 1870 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err += \ 1871 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err; \ 1872 for (i = 0; i < SS_COUNT; i++) \ 1873 _tgtobj->rx.rx_mu[mu_type].ppdu_nss[i] += \ 1874 _srcobj->rx.rx_mu[mu_type].ppdu_nss[i]; \ 1875 for (i = 0; i < MAX_MCS; i++) \ 1876 _tgtobj->rx.rx_mu[mu_type].ppdu.mcs_count[i] += \ 1877 _srcobj->rx.rx_mu[mu_type].ppdu.mcs_count[i]; \ 1878 } \ 1879 for (i = 0; i < MAX_RECEPTION_TYPES; i++) { \ 1880 _tgtobj->rx.reception_type[i] += \ 1881 _srcobj->rx.reception_type[i]; \ 1882 _tgtobj->rx.ppdu_cnt[i] += _srcobj->rx.ppdu_cnt[i]; \ 1883 } \ 1884 for (i = 0; i < MAX_GI; i++) { \ 1885 _tgtobj->rx.sgi_count[i] += _srcobj->rx.sgi_count[i]; \ 1886 } \ 1887 for (i = 0; i < SS_COUNT; i++) { \ 1888 _tgtobj->rx.nss[i] += _srcobj->rx.nss[i]; \ 1889 
_tgtobj->rx.ppdu_nss[i] += _srcobj->rx.ppdu_nss[i]; \ 1890 } \ 1891 for (i = 0; i < MAX_BW; i++) { \ 1892 _tgtobj->rx.bw[i] += _srcobj->rx.bw[i]; \ 1893 } \ 1894 DP_UPDATE_11BE_STATS(_tgtobj, _srcobj); \ 1895 } while (0) 1896 1897 /** 1898 * dp_peer_find_attach() - Allocates memory for peer objects 1899 * @soc: SoC handle 1900 * 1901 * Return: QDF_STATUS 1902 */ 1903 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc); 1904 extern void dp_peer_find_detach(struct dp_soc *soc); 1905 extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer); 1906 extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer); 1907 extern void dp_peer_find_hash_erase(struct dp_soc *soc); 1908 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev, 1909 struct dp_peer *peer); 1910 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev, 1911 struct dp_peer *peer); 1912 void dp_peer_find_id_to_obj_add(struct dp_soc *soc, 1913 struct dp_peer *peer, 1914 uint16_t peer_id); 1915 void dp_txrx_peer_attach_add(struct dp_soc *soc, 1916 struct dp_peer *peer, 1917 struct dp_txrx_peer *txrx_peer); 1918 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc, 1919 uint16_t peer_id); 1920 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev, 1921 enum dp_mod_id mod_id); 1922 1923 /* 1924 * dp_peer_ppdu_delayed_ba_cleanup() free ppdu allocated in peer 1925 * @peer: Datapath peer 1926 * 1927 * return: void 1928 */ 1929 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer); 1930 1931 extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer); 1932 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); 1933 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); 1934 1935 #ifdef DP_PEER_EXTENDED_API 1936 /** 1937 * dp_register_peer() - Register peer into physical device 1938 * @soc_hdl - data path soc handle 1939 * @pdev_id - device instance id 1940 * @sta_desc - peer description 1941 * 1942 * Register peer into physical device 1943 * 1944 * Return: QDF_STATUS_SUCCESS registration success 1945 * QDF_STATUS_E_FAULT peer not found 1946 */ 1947 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 1948 struct ol_txrx_desc_type *sta_desc); 1949 1950 /** 1951 * dp_clear_peer() - remove peer from physical device 1952 * @soc_hdl - data path soc handle 1953 * @pdev_id - device instance id 1954 * @peer_addr - peer mac address 1955 * 1956 * remove peer from physical device 1957 * 1958 * Return: QDF_STATUS_SUCCESS registration success 1959 * QDF_STATUS_E_FAULT peer not found 1960 */ 1961 QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 1962 struct qdf_mac_addr peer_addr); 1963 1964 /* 1965 * dp_find_peer_exist - find peer if already exists 1966 * @soc: datapath soc handle 1967 * @pdev_id: physical device instance id 1968 * @peer_mac_addr: peer mac address 1969 * 1970 * Return: true or false 1971 */ 1972 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 1973 uint8_t *peer_addr); 1974 1975 /* 1976 * dp_find_peer_exist_on_vdev - find if peer exists on the given vdev 1977 * @soc: datapath soc handle 1978 * @vdev_id: vdev instance id 1979 * @peer_mac_addr: peer mac address 1980 * 1981 * Return: true or false 1982 */ 1983 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 1984 uint8_t *peer_addr); 1985 1986 /* 1987 * dp_find_peer_exist_on_other_vdev - find if peer exists 1988 * on other than the given vdev 1989 * @soc: datapath soc handle 1990 * 
@vdev_id: vdev instance id 1991 * @peer_mac_addr: peer mac address 1992 * @max_bssid: max number of bssids 1993 * 1994 * Return: true or false 1995 */ 1996 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl, 1997 uint8_t vdev_id, uint8_t *peer_addr, 1998 uint16_t max_bssid); 1999 2000 /** 2001 * dp_peer_state_update() - update peer local state 2002 * @pdev - data path device instance 2003 * @peer_addr - peer mac address 2004 * @state - new peer local state 2005 * 2006 * update peer local state 2007 * 2008 * Return: QDF_STATUS_SUCCESS registration success 2009 */ 2010 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac, 2011 enum ol_txrx_peer_state state); 2012 2013 /** 2014 * dp_get_vdevid() - Get virtual interface id which peer registered 2015 * @soc - datapath soc handle 2016 * @peer_mac - peer mac address 2017 * @vdev_id - virtual interface id which peer registered 2018 * 2019 * Get virtual interface id which peer registered 2020 * 2021 * Return: QDF_STATUS_SUCCESS registration success 2022 */ 2023 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2024 uint8_t *vdev_id); 2025 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle, 2026 struct qdf_mac_addr peer_addr); 2027 struct cdp_vdev *dp_get_vdev_for_peer(void *peer); 2028 uint8_t *dp_peer_get_peer_mac_addr(void *peer); 2029 2030 /** 2031 * dp_get_peer_state() - Get local peer state 2032 * @soc - datapath soc handle 2033 * @vdev_id - vdev id 2034 * @peer_mac - peer mac addr 2035 * 2036 * Get local peer state 2037 * 2038 * Return: peer status 2039 */ 2040 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id, 2041 uint8_t *peer_mac); 2042 void dp_local_peer_id_pool_init(struct dp_pdev *pdev); 2043 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer); 2044 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer); 2045 /** 2046 * dp_set_peer_as_tdls_peer() - set tdls peer flag to peer 2047 * @soc_hdl: datapath soc handle 2048 * @vdev_id: vdev_id 2049 * @peer_mac: peer mac addr 2050 * @val: tdls peer flag 2051 * 2052 * Return: none 2053 */ 2054 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2055 uint8_t *peer_mac, bool val); 2056 #else 2057 /** 2058 * dp_get_vdevid() - Get virtual interface id which peer registered 2059 * @soc - datapath soc handle 2060 * @peer_mac - peer mac address 2061 * @vdev_id - virtual interface id which peer registered 2062 * 2063 * Get virtual interface id which peer registered 2064 * 2065 * Return: QDF_STATUS_SUCCESS registration success 2066 */ 2067 static inline 2068 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2069 uint8_t *vdev_id) 2070 { 2071 return QDF_STATUS_E_NOSUPPORT; 2072 } 2073 2074 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev) 2075 { 2076 } 2077 2078 static inline 2079 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) 2080 { 2081 } 2082 2083 static inline 2084 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) 2085 { 2086 } 2087 2088 static inline 2089 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2090 uint8_t *peer_mac, bool val) 2091 { 2092 } 2093 #endif 2094 2095 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, 2096 uint8_t *peer_mac, uint16_t vdev_id, 2097 uint8_t tid, 2098 int status); 2099 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc, 2100 uint8_t *peer_mac, uint16_t vdev_id, 2101 uint8_t dialogtoken, uint16_t 
tid, 2102 uint16_t batimeout, 2103 uint16_t buffersize, 2104 uint16_t startseqnum); 2105 QDF_STATUS dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, 2106 uint8_t *peer_mac, uint16_t vdev_id, 2107 uint8_t tid, uint8_t *dialogtoken, 2108 uint16_t *statuscode, 2109 uint16_t *buffersize, 2110 uint16_t *batimeout); 2111 QDF_STATUS dp_set_addba_response(struct cdp_soc_t *cdp_soc, 2112 uint8_t *peer_mac, 2113 uint16_t vdev_id, uint8_t tid, 2114 uint16_t statuscode); 2115 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, 2116 uint16_t vdev_id, int tid, 2117 uint16_t reasoncode); 2118 2119 /** 2120 * dp_rx_tid_update_ba_win_size() - Update the DP tid BA window size 2121 * @soc: soc handle 2122 * @peer_mac: mac address of peer handle 2123 * @vdev_id: id of vdev handle 2124 * @tid: tid 2125 * @buffersize: BA window size 2126 * 2127 * Return: success/failure of tid update 2128 */ 2129 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc, 2130 uint8_t *peer_mac, uint16_t vdev_id, 2131 uint8_t tid, uint16_t buffersize); 2132 2133 /* 2134 * dp_delba_tx_completion_wifi3() - Handle delba tx completion 2135 * 2136 * @cdp_soc: soc handle 2137 * @vdev_id: id of the vdev handle 2138 * @peer_mac: peer mac address 2139 * @tid: Tid number 2140 * @status: Tx completion status 2141 * Indicate status of delba Tx to DP for stats update and retry 2142 * delba if tx failed. 2143 * 2144 */ 2145 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, 2146 uint16_t vdev_id, uint8_t tid, 2147 int status); 2148 extern QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid, 2149 uint32_t ba_window_size, 2150 uint32_t start_seq); 2151 2152 extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, 2153 enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params, 2154 void (*callback_fn), void *data); 2155 2156 extern void dp_reo_cmdlist_destroy(struct dp_soc *soc); 2157 2158 /** 2159 * dp_reo_status_ring_handler - Handler for REO Status ring 2160 * @int_ctx: pointer to DP interrupt context 2161 * @soc: DP Soc handle 2162 * 2163 * Returns: Number of descriptors reaped 2164 */ 2165 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, 2166 struct dp_soc *soc); 2167 void dp_aggregate_vdev_stats(struct dp_vdev *vdev, 2168 struct cdp_vdev_stats *vdev_stats); 2169 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt, 2170 union hal_reo_status *reo_status); 2171 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, 2172 union hal_reo_status *reo_status); 2173 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id, 2174 qdf_nbuf_t nbuf, 2175 uint8_t newmac[][QDF_MAC_ADDR_SIZE], 2176 uint8_t new_mac_cnt, uint8_t tid, 2177 bool is_igmp, bool is_dms_pkt); 2178 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 2179 2180 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 2181 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, 2182 uint32_t stats_type_upload_mask, uint32_t config_param_0, 2183 uint32_t config_param_1, uint32_t config_param_2, 2184 uint32_t config_param_3, int cookie, int cookie_msb, 2185 uint8_t mac_id); 2186 void dp_htt_stats_print_tag(struct dp_pdev *pdev, 2187 uint8_t tag_type, uint32_t *tag_buf); 2188 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf); 2189 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask, 2190 uint8_t mac_id); 2191 /** 2192 * dp_rxtid_stats_cmd_cb - function pointer for peer 2193 * 
rx tid stats cmd call_back 2194 */ 2195 typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt, 2196 union hal_reo_status *reo_status); 2197 int dp_peer_rxtid_stats(struct dp_peer *peer, 2198 dp_rxtid_stats_cmd_cb dp_stats_cmd_cb, 2199 void *cb_ctxt); 2200 #ifdef IPA_OFFLOAD 2201 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt, 2202 union hal_reo_status *reo_status); 2203 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer, 2204 dp_rxtid_stats_cmd_cb dp_stats_cmd_cb); 2205 #endif 2206 QDF_STATUS 2207 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, 2208 uint8_t *peer_mac, enum cdp_sec_type sec_type, 2209 uint32_t *rx_pn); 2210 2211 QDF_STATUS 2212 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, 2213 uint8_t *peer_mac, enum cdp_sec_type sec_type, 2214 bool is_unicast); 2215 2216 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id); 2217 2218 QDF_STATUS 2219 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id, 2220 uint8_t *peer_mac, 2221 bool is_unicast, uint32_t *key); 2222 2223 /** 2224 * dp_check_pdev_exists() - Validate pdev before use 2225 * @soc - dp soc handle 2226 * @data - pdev handle 2227 * 2228 * Return: 0 - success/invalid - failure 2229 */ 2230 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data); 2231 2232 /** 2233 * dp_update_delay_stats() - Update delay statistics in structure 2234 * and fill min, max and avg delay 2235 * @tstats: tid tx stats 2236 * @rstats: tid rx stats 2237 * @delay: delay in ms 2238 * @tid: tid value 2239 * @mode: type of tx delay mode 2240 * @ring id: ring number 2241 * @delay_in_us: flag to indicate whether the delay is in ms or us 2242 * 2243 * Return: none 2244 */ 2245 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats, 2246 struct cdp_tid_rx_stats *rstats, uint32_t delay, 2247 uint8_t tid, uint8_t mode, uint8_t ring_id, 2248 bool delay_in_us); 2249 2250 /** 2251 * dp_print_ring_stats(): Print tail and head pointer 2252 * @pdev: DP_PDEV handle 2253 * 2254 * Return:void 2255 */ 2256 void dp_print_ring_stats(struct dp_pdev *pdev); 2257 2258 /** 2259 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters 2260 * @pdev_handle: DP pdev handle 2261 * 2262 * Return - void 2263 */ 2264 void dp_print_pdev_cfg_params(struct dp_pdev *pdev); 2265 2266 /** 2267 * dp_print_soc_cfg_params()- Dump soc wlan config parameters 2268 * @soc_handle: Soc handle 2269 * 2270 * Return: void 2271 */ 2272 void dp_print_soc_cfg_params(struct dp_soc *soc); 2273 2274 /** 2275 * dp_srng_get_str_from_ring_type() - Return string name for a ring 2276 * @ring_type: Ring 2277 * 2278 * Return: char const pointer 2279 */ 2280 const 2281 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type); 2282 2283 /* 2284 * dp_txrx_path_stats() - Function to display dump stats 2285 * @soc - soc handle 2286 * 2287 * return: none 2288 */ 2289 void dp_txrx_path_stats(struct dp_soc *soc); 2290 2291 /* 2292 * dp_print_per_ring_stats(): Packet count per ring 2293 * @soc - soc handle 2294 * 2295 * Return - None 2296 */ 2297 void dp_print_per_ring_stats(struct dp_soc *soc); 2298 2299 /** 2300 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level 2301 * @pdev: DP PDEV handle 2302 * 2303 * return: void 2304 */ 2305 void dp_aggregate_pdev_stats(struct dp_pdev *pdev); 2306 2307 /** 2308 * dp_print_rx_rates(): Print Rx rate stats 2309 * @vdev: DP_VDEV handle 2310 * 2311 * Return:void 2312 */ 2313 void dp_print_rx_rates(struct dp_vdev *vdev); 2314 2315 /** 2316 * 
dp_print_tx_rates(): Print tx rates 2317 * @vdev: DP_VDEV handle 2318 * 2319 * Return:void 2320 */ 2321 void dp_print_tx_rates(struct dp_vdev *vdev); 2322 2323 /** 2324 * dp_print_peer_stats():print peer stats 2325 * @peer: DP_PEER handle 2326 * @peer_stats: buffer holding peer stats 2327 * 2328 * return void 2329 */ 2330 void dp_print_peer_stats(struct dp_peer *peer, 2331 struct cdp_peer_stats *peer_stats); 2332 2333 /** 2334 * dp_print_pdev_tx_stats(): Print Pdev level TX stats 2335 * @pdev: DP_PDEV Handle 2336 * 2337 * Return:void 2338 */ 2339 void 2340 dp_print_pdev_tx_stats(struct dp_pdev *pdev); 2341 2342 /** 2343 * dp_print_pdev_rx_stats(): Print Pdev level RX stats 2344 * @pdev: DP_PDEV Handle 2345 * 2346 * Return: void 2347 */ 2348 void 2349 dp_print_pdev_rx_stats(struct dp_pdev *pdev); 2350 2351 /** 2352 * dp_print_soc_tx_stats(): Print SOC level stats 2353 * @soc DP_SOC Handle 2354 * 2355 * Return: void 2356 */ 2357 void dp_print_soc_tx_stats(struct dp_soc *soc); 2358 2359 /** 2360 * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc 2361 * @soc: dp_soc handle 2362 * 2363 * Return: None 2364 */ 2365 void dp_print_soc_interrupt_stats(struct dp_soc *soc); 2366 2367 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING 2368 /** 2369 * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats 2370 * for all SRNGs 2371 * @soc: DP soc handle 2372 * @srng_mask: SRNGs mask for dumping usage watermark stats 2373 * 2374 * Return: None 2375 */ 2376 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask); 2377 #else 2378 /** 2379 * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats 2380 * for all SRNGs 2381 * @soc: DP soc handle 2382 * @srng_mask: SRNGs mask for dumping usage watermark stats 2383 * 2384 * Return: None 2385 */ 2386 static inline 2387 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask) 2388 { 2389 } 2390 #endif 2391 2392 /** 2393 * dp_print_soc_rx_stats: Print SOC level Rx stats 2394 * @soc: DP_SOC Handle 2395 * 2396 * Return:void 2397 */ 2398 void dp_print_soc_rx_stats(struct dp_soc *soc); 2399 2400 /** 2401 * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac 2402 * 2403 * @mac_id: MAC id 2404 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 2405 * 2406 * Single pdev using both MACs will operate on both MAC rings, 2407 * which is the case for MCL. 2408 * For WIN each PDEV will operate one ring, so index is zero. 2409 * 2410 */ 2411 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id) 2412 { 2413 if (mac_id && pdev_id) { 2414 qdf_print("Both mac_id and pdev_id cannot be non zero"); 2415 QDF_BUG(0); 2416 return 0; 2417 } 2418 return (mac_id + pdev_id); 2419 } 2420 2421 /** 2422 * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id 2423 * @soc: soc pointer 2424 * @mac_id: MAC id 2425 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 2426 * 2427 * For MCL, Single pdev using both MACs will operate on both MAC rings. 2428 * 2429 * For WIN, each PDEV will operate one ring. 
2430 * 2431 */ 2432 static inline int 2433 dp_get_lmac_id_for_pdev_id 2434 (struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id) 2435 { 2436 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 2437 if (mac_id && pdev_id) { 2438 qdf_print("Both mac_id and pdev_id cannot be non zero"); 2439 QDF_BUG(0); 2440 return 0; 2441 } 2442 return (mac_id + pdev_id); 2443 } 2444 2445 return soc->pdev_list[pdev_id]->lmac_id; 2446 } 2447 2448 /** 2449 * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id 2450 * @soc: soc pointer 2451 * @lmac_id: LMAC id 2452 * 2453 * For MCL, Single pdev exists 2454 * 2455 * For WIN, each PDEV will operate one ring. 2456 * 2457 */ 2458 static inline struct dp_pdev * 2459 dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id) 2460 { 2461 uint8_t i = 0; 2462 2463 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 2464 i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id); 2465 return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL); 2466 } 2467 2468 /* Typically for MCL as there only 1 PDEV*/ 2469 return soc->pdev_list[0]; 2470 } 2471 2472 /** 2473 * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev 2474 * corresponding to host pdev id 2475 * @soc: soc pointer 2476 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 2477 * 2478 * returns target pdev_id for host pdev id. For WIN, this is derived through 2479 * a two step process: 2480 * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change 2481 * during mode switch) 2482 * 2. Get target pdev_id (set up during WMI ready) from lmac_id 2483 * 2484 * For MCL, return the offset-1 translated mac_id 2485 */ 2486 static inline int 2487 dp_calculate_target_pdev_id_from_host_pdev_id 2488 (struct dp_soc *soc, uint32_t mac_for_pdev) 2489 { 2490 struct dp_pdev *pdev; 2491 2492 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 2493 return DP_SW2HW_MACID(mac_for_pdev); 2494 2495 pdev = soc->pdev_list[mac_for_pdev]; 2496 2497 /*non-MCL case, get original target_pdev mapping*/ 2498 return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id); 2499 } 2500 2501 /** 2502 * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding 2503 * to host pdev id 2504 * @soc: soc pointer 2505 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 2506 * 2507 * returns target pdev_id for host pdev id. 2508 * For WIN, return the value stored in pdev object. 2509 * For MCL, return the offset-1 translated mac_id. 2510 */ 2511 static inline int 2512 dp_get_target_pdev_id_for_host_pdev_id 2513 (struct dp_soc *soc, uint32_t mac_for_pdev) 2514 { 2515 struct dp_pdev *pdev; 2516 2517 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 2518 return DP_SW2HW_MACID(mac_for_pdev); 2519 2520 pdev = soc->pdev_list[mac_for_pdev]; 2521 2522 return pdev->target_pdev_id; 2523 } 2524 2525 /** 2526 * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding 2527 * to target pdev id 2528 * @soc: soc pointer 2529 * @pdev_id: pdev_id corresponding to target pdev 2530 * 2531 * returns host pdev_id for target pdev id. For WIN, this is derived through 2532 * a two step process: 2533 * 1. Get lmac_id corresponding to target pdev_id 2534 * 2. 
Get host pdev_id (set up during WMI ready) from lmac_id 2535 * 2536 * For MCL, return the 0-offset pdev_id 2537 */ 2538 static inline int 2539 dp_get_host_pdev_id_for_target_pdev_id 2540 (struct dp_soc *soc, uint32_t pdev_id) 2541 { 2542 struct dp_pdev *pdev; 2543 int lmac_id; 2544 2545 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 2546 return DP_HW2SW_MACID(pdev_id); 2547 2548 /*non-MCL case, get original target_lmac mapping from target pdev*/ 2549 lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, 2550 DP_HW2SW_MACID(pdev_id)); 2551 2552 /*Get host pdev from lmac*/ 2553 pdev = dp_get_pdev_for_lmac_id(soc, lmac_id); 2554 2555 return pdev ? pdev->pdev_id : INVALID_PDEV_ID; 2556 } 2557 2558 /* 2559 * dp_get_mac_id_for_mac() - Return mac corresponding WIN and MCL mac_ids 2560 * 2561 * @soc: handle to DP soc 2562 * @mac_id: MAC id 2563 * 2564 * Single pdev using both MACs will operate on both MAC rings, 2565 * which is the case for MCL. 2566 * For WIN each PDEV will operate one ring, so index is zero. 2567 * 2568 */ 2569 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id) 2570 { 2571 /* 2572 * Single pdev using both MACs will operate on both MAC rings, 2573 * which is the case for MCL. 2574 */ 2575 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 2576 return mac_id; 2577 2578 /* For WIN each PDEV will operate one ring, so index is zero. */ 2579 return 0; 2580 } 2581 2582 /* 2583 * dp_is_subtype_data() - check if the frame subtype is data 2584 * 2585 * @frame_ctrl: Frame control field 2586 * 2587 * check the frame control field and verify if the packet 2588 * is a data packet. 2589 * 2590 * Return: true or false 2591 */ 2592 static inline bool dp_is_subtype_data(uint16_t frame_ctrl) 2593 { 2594 if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) == 2595 QDF_IEEE80211_FC0_TYPE_DATA) && 2596 (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 2597 QDF_IEEE80211_FC0_SUBTYPE_DATA) || 2598 ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 2599 QDF_IEEE80211_FC0_SUBTYPE_QOS))) { 2600 return true; 2601 } 2602 2603 return false; 2604 } 2605 2606 #ifdef WDI_EVENT_ENABLE 2607 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 2608 uint32_t stats_type_upload_mask, 2609 uint8_t mac_id); 2610 2611 int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 2612 wdi_event_subscribe *event_cb_sub_handle, 2613 uint32_t event); 2614 2615 int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 2616 wdi_event_subscribe *event_cb_sub_handle, 2617 uint32_t event); 2618 2619 void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc, 2620 void *data, u_int16_t peer_id, 2621 int status, u_int8_t pdev_id); 2622 2623 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev); 2624 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev); 2625 2626 static inline void 2627 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, 2628 void *cb_context, 2629 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 2630 uint8_t pipe_id) 2631 { 2632 struct hif_msg_callbacks hif_pipe_callbacks; 2633 2634 /* TODO: Temporary change to bypass HTC connection for this new 2635 * HIF pipe, which will be used for packet log and other high- 2636 * priority HTT messages. 
Proper HTC connection to be added 2637 * later once required FW changes are available 2638 */ 2639 hif_pipe_callbacks.rxCompletionHandler = callback; 2640 hif_pipe_callbacks.Context = cb_context; 2641 hif_update_pipe_callback(dp_soc->hif_handle, 2642 DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks); 2643 } 2644 #else 2645 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 2646 wdi_event_subscribe *event_cb_sub_handle, 2647 uint32_t event) 2648 { 2649 return 0; 2650 } 2651 2652 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 2653 wdi_event_subscribe *event_cb_sub_handle, 2654 uint32_t event) 2655 { 2656 return 0; 2657 } 2658 2659 static inline 2660 void dp_wdi_event_handler(enum WDI_EVENT event, 2661 struct dp_soc *soc, 2662 void *data, u_int16_t peer_id, 2663 int status, u_int8_t pdev_id) 2664 { 2665 } 2666 2667 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev) 2668 { 2669 return 0; 2670 } 2671 2672 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev) 2673 { 2674 return 0; 2675 } 2676 2677 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 2678 uint32_t stats_type_upload_mask, uint8_t mac_id) 2679 { 2680 return 0; 2681 } 2682 2683 static inline void 2684 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, 2685 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 2686 uint8_t pipe_id) 2687 { 2688 } 2689 #endif /* CONFIG_WIN */ 2690 2691 #ifdef VDEV_PEER_PROTOCOL_COUNT 2692 /** 2693 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 2694 * @vdev: VDEV DP object 2695 * @nbuf: data packet 2696 * @peer: DP TXRX Peer object 2697 * @is_egress: whether egress or ingress 2698 * @is_rx: whether rx or tx 2699 * 2700 * This function updates the per-peer protocol counters 2701 * Return: void 2702 */ 2703 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, 2704 qdf_nbuf_t nbuf, 2705 struct dp_txrx_peer *txrx_peer, 2706 bool is_egress, 2707 bool is_rx); 2708 2709 /** 2710 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 2711 * @soc: SOC DP object 2712 * @vdev_id: vdev_id 2713 * @nbuf: data packet 2714 * @is_egress: whether egress or ingress 2715 * @is_rx: whether rx or tx 2716 * 2717 * This function updates the per-peer protocol counters 2718 * Return: void 2719 */ 2720 2721 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc, 2722 int8_t vdev_id, 2723 qdf_nbuf_t nbuf, 2724 bool is_egress, 2725 bool is_rx); 2726 2727 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 2728 qdf_nbuf_t nbuf); 2729 2730 #else 2731 #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, txrx_peer, \ 2732 is_egress, is_rx) 2733 2734 static inline 2735 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 2736 qdf_nbuf_t nbuf) 2737 { 2738 } 2739 2740 #endif 2741 2742 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 2743 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl); 2744 2745 /** 2746 * dp_tx_dump_flow_pool_info_compact() - dump flow pool info 2747 * @soc: DP soc context 2748 * 2749 * Return: none 2750 */ 2751 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc); 2752 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, 2753 bool force); 2754 #else 2755 static inline void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc) 2756 { 2757 } 2758 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 2759 2760 #ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS 2761 static inline int 2762 
dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 2763 { 2764 return hal_srng_access_start_unlocked(soc, hal_ring_hdl); 2765 } 2766 2767 static inline void 2768 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 2769 { 2770 hal_srng_access_end_unlocked(soc, hal_ring_hdl); 2771 } 2772 2773 #else 2774 static inline int 2775 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 2776 { 2777 return hal_srng_access_start(soc, hal_ring_hdl); 2778 } 2779 2780 static inline void 2781 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 2782 { 2783 hal_srng_access_end(soc, hal_ring_hdl); 2784 } 2785 #endif 2786 2787 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 2788 /** 2789 * dp_srng_access_start() - Wrapper function to log access start of a hal ring 2790 * @int_ctx: pointer to DP interrupt context. This should not be NULL 2791 * @soc: DP Soc handle 2792 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 2793 * 2794 * Return: 0 on success; error on failure 2795 */ 2796 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 2797 hal_ring_handle_t hal_ring_hdl); 2798 2799 /** 2800 * dp_srng_access_end() - Wrapper function to log access end of a hal ring 2801 * @int_ctx: pointer to DP interrupt context. This should not be NULL 2802 * @soc: DP Soc handle 2803 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 2804 * 2805 * Return: void 2806 */ 2807 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 2808 hal_ring_handle_t hal_ring_hdl); 2809 2810 #else 2811 static inline int dp_srng_access_start(struct dp_intr *int_ctx, 2812 struct dp_soc *dp_soc, 2813 hal_ring_handle_t hal_ring_hdl) 2814 { 2815 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 2816 2817 return dp_hal_srng_access_start(hal_soc, hal_ring_hdl); 2818 } 2819 2820 static inline void dp_srng_access_end(struct dp_intr *int_ctx, 2821 struct dp_soc *dp_soc, 2822 hal_ring_handle_t hal_ring_hdl) 2823 { 2824 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 2825 2826 return dp_hal_srng_access_end(hal_soc, hal_ring_hdl); 2827 } 2828 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 2829 2830 #ifdef QCA_CACHED_RING_DESC 2831 /** 2832 * dp_srng_dst_get_next() - Wrapper function to get next ring desc 2833 * @dp_socsoc: DP Soc handle 2834 * @hal_ring: opaque pointer to the HAL Destination Ring 2835 * 2836 * Return: HAL ring descriptor 2837 */ 2838 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 2839 hal_ring_handle_t hal_ring_hdl) 2840 { 2841 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 2842 2843 return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl); 2844 } 2845 2846 /** 2847 * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached 2848 * descriptors 2849 * @dp_socsoc: DP Soc handle 2850 * @hal_ring: opaque pointer to the HAL Rx Destination ring 2851 * @num_entries: Entry count 2852 * 2853 * Return: None 2854 */ 2855 static inline void dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 2856 hal_ring_handle_t hal_ring_hdl, 2857 uint32_t num_entries) 2858 { 2859 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 2860 2861 hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_entries); 2862 } 2863 #else 2864 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 2865 hal_ring_handle_t hal_ring_hdl) 2866 { 2867 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 2868 2869 return hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 2870 } 2871 2872 static inline void 
dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 2873 hal_ring_handle_t hal_ring_hdl, 2874 uint32_t num_entries) 2875 { 2876 } 2877 #endif /* QCA_CACHED_RING_DESC */ 2878
2879 #if defined(QCA_CACHED_RING_DESC) && \ 2880 (defined(QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH) || \ 2881 defined(QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH))
2882 /** 2883 * dp_srng_dst_prefetch() - Wrapper function to prefetch descs from dest ring 2884 * @hal_soc: HAL SOC handle 2885 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 2886 * @num_entries: Entry count 2887 * 2888 * Return: None 2889 */
2890 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 2891 hal_ring_handle_t hal_ring_hdl, 2892 uint32_t num_entries) 2893 { 2894 return hal_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_entries); 2895 } 2896 #else
2897 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 2898 hal_ring_handle_t hal_ring_hdl, 2899 uint32_t num_entries) 2900 { 2901 return NULL; 2902 } 2903 #endif 2904
2905 #ifdef QCA_ENH_V3_STATS_SUPPORT
2906 /** 2907 * dp_pdev_print_delay_stats(): Print pdev level delay stats 2908 * @pdev: DP_PDEV handle 2909 * 2910 * Return:void 2911 */ 2912 void dp_pdev_print_delay_stats(struct dp_pdev *pdev); 2913
2914 /** 2915 * dp_pdev_print_tid_stats(): Print pdev level tid stats 2916 * @pdev: DP_PDEV handle 2917 * 2918 * Return:void 2919 */ 2920 void dp_pdev_print_tid_stats(struct dp_pdev *pdev); 2921
2922 /** 2923 * dp_pdev_print_rx_error_stats(): Print pdev level rx error stats 2924 * @pdev: DP_PDEV handle 2925 * 2926 * Return:void 2927 */ 2928 void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev); 2929 #endif /* QCA_ENH_V3_STATS_SUPPORT */ 2930
2931 /** 2932 * dp_pdev_get_tid_stats(): Get accumulated pdev level tid_stats 2933 * @soc_hdl: soc handle 2934 * @pdev_id: id of dp_pdev handle 2935 * @tid_stats: Pointer for cdp_tid_stats_intf 2936 * 2937 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_INVAL 2938 */ 2939 QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2940 struct cdp_tid_stats_intf *tid_stats); 2941
2942 void dp_soc_set_txrx_ring_map(struct dp_soc *soc); 2943
2944 /** 2945 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev 2946 * @vdev: DP vdev handle 2947 * 2948 * Return: struct cdp_vdev pointer 2949 */ 2950 static inline 2951 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev) 2952 { 2953 return (struct cdp_vdev *)vdev; 2954 } 2955
2956 /** 2957 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev 2958 * @pdev: DP pdev handle 2959 * 2960 * Return: struct cdp_pdev pointer 2961 */ 2962 static inline 2963 struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev) 2964 { 2965 return (struct cdp_pdev *)pdev; 2966 } 2967
2968 /** 2969 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc 2970 * @psoc: DP psoc handle 2971 * 2972 * Return: struct cdp_soc pointer 2973 */ 2974 static inline 2975 struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc) 2976 { 2977 return (struct cdp_soc *)psoc; 2978 } 2979
2980 /** 2981 * dp_soc_to_cdp_soc_t() - typecast dp psoc to 2982 * ol txrx soc handle 2983 * @psoc: DP psoc handle 2984 * 2985 * Return: struct cdp_soc_t pointer 2986 */ 2987 static inline 2988 struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc) 2989 { 2990 return (struct cdp_soc_t *)psoc; 2991 } 2992
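/*
 * Illustrative example (comment only, not compiled): the typecast
 * helpers above are what DP uses when one of its private objects has to
 * be handed across the CDP boundary as an opaque handle. The wrapper
 * name dp_get_vdev_handle_example() is an assumption made up for this
 * sketch; dp_vdev_to_cdp_vdev() and dp_soc_to_cdp_soc_t() are the real
 * helpers defined above.
 *
 *	static struct cdp_vdev *
 *	dp_get_vdev_handle_example(struct dp_soc *soc, struct dp_vdev *vdev)
 *	{
 *		struct cdp_soc_t *cdp_soc = dp_soc_to_cdp_soc_t(soc);
 *
 *		// cdp_soc can be passed to cdp/ol callbacks; the vdev is
 *		// likewise exported as an opaque cdp_vdev pointer.
 *		(void)cdp_soc;
 *		return dp_vdev_to_cdp_vdev(vdev);
 *	}
 */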
2993 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2994 /** 2995 * dp_rx_flow_get_fse_stats() - Get a flow's statistics from the Rx FST 2996 * @pdev: pdev handle 2997 * @rx_flow_info: DP flow parameters identifying the flow 2998 * @stats: buffer to hold the flow statistics 2999 * Return: Success when flow statistics are fetched, error on failure 3000 */ 3001 QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev, 3002 struct cdp_rx_flow_info *rx_flow_info, 3003 struct cdp_flow_stats *stats); 3004
3005 /** 3006 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table 3007 * @pdev: pdev handle 3008 * @rx_flow_info: DP flow parameters 3009 * 3010 * Return: Success when flow is deleted, error on failure 3011 */ 3012 QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev, 3013 struct cdp_rx_flow_info *rx_flow_info); 3014
3015 /** 3016 * dp_rx_flow_add_entry() - Add a flow entry to flow search table 3017 * @pdev: DP pdev instance 3018 * @rx_flow_info: DP flow parameters 3019 * 3020 * Return: Success when flow is added, no-memory or already exists on error 3021 */ 3022 QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev, 3023 struct cdp_rx_flow_info *rx_flow_info); 3024
3025 /** 3026 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters 3027 * @soc: SoC handle 3028 * @pdev: Pdev handle 3029 * 3030 * Return: Handle to flow search table entry 3031 */ 3032 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev); 3033
3034 /** 3035 * dp_rx_fst_detach() - De-initialize Rx FST 3036 * @soc: SoC handle 3037 * @pdev: Pdev handle 3038 * 3039 * Return: None 3040 */ 3041 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev); 3042
3043 /** 3044 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach 3045 * @soc: SoC handle 3046 * @pdev: Pdev handle 3047 * 3048 * Return: Success when fst parameters are programmed in FW, error otherwise 3049 */ 3050 QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc, 3051 struct dp_pdev *pdev); 3052
3053 /** dp_mon_rx_update_rx_flow_tag_stats() - Update a mon flow's statistics 3054 * @pdev: pdev handle 3055 * @flow_id: flow index (truncated hash) in the Rx FST 3056 * 3057 * Return: Success when flow statistics are updated, error on failure 3058 */ 3059 QDF_STATUS 3060 dp_mon_rx_update_rx_flow_tag_stats(struct dp_pdev *pdev, uint32_t flow_id); 3061
3062 #else /* !(defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)) */ 3063
3064 /** 3065 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters 3066 * @soc: SoC handle 3067 * @pdev: Pdev handle 3068 * 3069 * Return: Handle to flow search table entry 3070 */ 3071 static inline 3072 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev) 3073 { 3074 return QDF_STATUS_SUCCESS; 3075 } 3076
3077 /** 3078 * dp_rx_fst_detach() - De-initialize Rx FST 3079 * @soc: SoC handle 3080 * @pdev: Pdev handle 3081 * 3082 * Return: None 3083 */ 3084 static inline 3085 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev) 3086 { 3087 } 3088 #endif 3089
3090 /** 3091 * dp_vdev_get_ref() - API to take a reference for VDEV object 3092 * 3093 * @soc: core DP soc context 3094 * @vdev: DP vdev 3095 * @mod_id: module id 3096 * 3097 * Return: QDF_STATUS_SUCCESS if reference held successfully 3098 * else QDF_STATUS_E_INVAL 3099 */ 3100 static inline 3101 QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev, 3102 enum dp_mod_id mod_id) 3103 { 3104 if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt)) 3105 return QDF_STATUS_E_INVAL; 3106 3107 qdf_atomic_inc(&vdev->mod_refs[mod_id]); 3108 3109 return QDF_STATUS_SUCCESS; 3110 } 3111
3112 /** 3113 * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id 3114 * @soc: core DP soc context 3115 * @vdev_id: vdev id from vdev 
object can be retrieved 3116 * @mod_id: module id which is requesting the reference 3117 * 3118 * Return: struct dp_vdev*: Pointer to DP vdev object 3119 */ 3120 static inline struct dp_vdev * 3121 dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id, 3122 enum dp_mod_id mod_id) 3123 { 3124 struct dp_vdev *vdev = NULL; 3125 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) 3126 return NULL; 3127 3128 qdf_spin_lock_bh(&soc->vdev_map_lock); 3129 vdev = soc->vdev_id_map[vdev_id]; 3130 3131 if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) { 3132 qdf_spin_unlock_bh(&soc->vdev_map_lock); 3133 return NULL; 3134 } 3135 qdf_spin_unlock_bh(&soc->vdev_map_lock); 3136 3137 return vdev; 3138 } 3139 3140 /** 3141 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id 3142 * @soc: core DP soc context 3143 * @pdev_id: pdev id from pdev object can be retrieved 3144 * 3145 * Return: struct dp_pdev*: Pointer to DP pdev object 3146 */ 3147 static inline struct dp_pdev * 3148 dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc, 3149 uint8_t pdev_id) 3150 { 3151 if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT)) 3152 return NULL; 3153 3154 return soc->pdev_list[pdev_id]; 3155 } 3156 3157 /* 3158 * dp_rx_tid_update_wifi3() – Update receive TID state 3159 * @peer: Datapath peer handle 3160 * @tid: TID 3161 * @ba_window_size: BlockAck window size 3162 * @start_seq: Starting sequence number 3163 * @bar_update: BAR update triggered 3164 * 3165 * Return: QDF_STATUS code 3166 */ 3167 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t 3168 ba_window_size, uint32_t start_seq, 3169 bool bar_update); 3170 3171 /** 3172 * dp_get_peer_mac_list(): function to get peer mac list of vdev 3173 * @soc: Datapath soc handle 3174 * @vdev_id: vdev id 3175 * @newmac: Table of the clients mac 3176 * @mac_cnt: No. of MACs required 3177 * @limit: Limit the number of clients 3178 * 3179 * return: no of clients 3180 */ 3181 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id, 3182 u_int8_t newmac[][QDF_MAC_ADDR_SIZE], 3183 u_int16_t mac_cnt, bool limit); 3184 3185 /* 3186 * dp_update_num_mac_rings_for_dbs() - Update No of MAC rings based on 3187 * DBS check 3188 * @soc: DP SoC context 3189 * @max_mac_rings: Pointer to variable for No of MAC rings 3190 * 3191 * Return: None 3192 */ 3193 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc, 3194 int *max_mac_rings); 3195 3196 3197 #if defined(WLAN_SUPPORT_RX_FISA) 3198 void dp_rx_dump_fisa_table(struct dp_soc *soc); 3199 3200 /* 3201 * dp_rx_fst_update_cmem_params() - Update CMEM FST params 3202 * @soc: DP SoC context 3203 * @num_entries: Number of flow search entries 3204 * @cmem_ba_lo: CMEM base address low 3205 * @cmem_ba_hi: CMEM base address high 3206 * 3207 * Return: None 3208 */ 3209 void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries, 3210 uint32_t cmem_ba_lo, uint32_t cmem_ba_hi); 3211 3212 void 3213 dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended); 3214 #else 3215 static inline void 3216 dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries, 3217 uint32_t cmem_ba_lo, uint32_t cmem_ba_hi) 3218 { 3219 } 3220 3221 static inline void 3222 dp_rx_fst_update_pm_suspend_status(struct dp_soc *soc, bool suspended) 3223 { 3224 } 3225 #endif /* WLAN_SUPPORT_RX_FISA */ 3226 3227 #ifdef MAX_ALLOC_PAGE_SIZE 3228 /** 3229 * dp_set_page_size() - Set the max page size for hw link desc. 
3230 * For MCL the page size is set to OS defined value and for WIN 3231 * the page size is set to the max_alloc_size cfg ini 3232 * param. 3233 * This is to ensure that WIN gets contiguous memory allocations 3234 * as per requirement. 3235 * @pages: link desc page handle 3236 * @max_alloc_size: max_alloc_size 3237 * 3238 * Return: None 3239 */ 3240 static inline 3241 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages, 3242 uint32_t max_alloc_size) 3243 { 3244 pages->page_size = qdf_page_size; 3245 } 3246 3247 #else 3248 static inline 3249 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages, 3250 uint32_t max_alloc_size) 3251 { 3252 pages->page_size = max_alloc_size; 3253 } 3254 #endif /* MAX_ALLOC_PAGE_SIZE */ 3255 3256 /** 3257 * dp_history_get_next_index() - get the next entry to record an entry 3258 * in the history. 3259 * @curr_idx: Current index where the last entry is written. 3260 * @max_entries: Max number of entries in the history 3261 * 3262 * This function assumes that the max number os entries is a power of 2. 3263 * 3264 * Returns: The index where the next entry is to be written. 3265 */ 3266 static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx, 3267 uint32_t max_entries) 3268 { 3269 uint32_t idx = qdf_atomic_inc_return(curr_idx); 3270 3271 return idx & (max_entries - 1); 3272 } 3273 3274 /** 3275 * dp_rx_skip_tlvs() - Skip TLVs len + L2 hdr_offset, save in nbuf->cb 3276 * @nbuf: nbuf cb to be updated 3277 * @l2_hdr_offset: l2_hdr_offset 3278 * 3279 * Return: None 3280 */ 3281 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding); 3282 3283 #ifndef FEATURE_WDS 3284 static inline void 3285 dp_hmwds_ast_add_notify(struct dp_peer *peer, 3286 uint8_t *mac_addr, 3287 enum cdp_txrx_ast_entry_type type, 3288 QDF_STATUS err, 3289 bool is_peer_map) 3290 { 3291 } 3292 #endif 3293 3294 #ifdef HTT_STATS_DEBUGFS_SUPPORT 3295 /* dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 3296 * debugfs for HTT stats 3297 * @pdev: dp pdev handle 3298 * 3299 * Return: QDF_STATUS 3300 */ 3301 QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev); 3302 3303 /* dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 3304 * HTT stats 3305 * @pdev: dp pdev handle 3306 * 3307 * Return: none 3308 */ 3309 void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev); 3310 #else 3311 3312 /* dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 3313 * debugfs for HTT stats 3314 * @pdev: dp pdev handle 3315 * 3316 * Return: QDF_STATUS 3317 */ 3318 static inline QDF_STATUS 3319 dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev) 3320 { 3321 return QDF_STATUS_SUCCESS; 3322 } 3323 3324 /* dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 3325 * HTT stats 3326 * @pdev: dp pdev handle 3327 * 3328 * Return: none 3329 */ 3330 static inline void 3331 dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev) 3332 { 3333 } 3334 #endif /* HTT_STATS_DEBUGFS_SUPPORT */ 3335 3336 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR 3337 /** 3338 * dp_soc_swlm_attach() - attach the software latency manager resources 3339 * @soc: Datapath global soc handle 3340 * 3341 * Returns: QDF_STATUS 3342 */ 3343 static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc) 3344 { 3345 return QDF_STATUS_SUCCESS; 3346 } 3347 3348 /** 3349 * dp_soc_swlm_detach() - detach the software latency manager resources 3350 * @soc: Datapath global soc handle 3351 * 3352 * Returns: QDF_STATUS 3353 */ 3354 static 
inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc) 3355 { 3356 return QDF_STATUS_SUCCESS; 3357 } 3358 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */ 3359 3360 /** 3361 * dp_get_peer_id(): function to get peer id by mac 3362 * @soc: Datapath soc handle 3363 * @vdev_id: vdev id 3364 * @mac: Peer mac address 3365 * 3366 * return: valid peer id on success 3367 * HTT_INVALID_PEER on failure 3368 */ 3369 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac); 3370 3371 #ifdef QCA_SUPPORT_WDS_EXTENDED 3372 /** 3373 * dp_wds_ext_set_peer_state(): function to set peer state 3374 * @soc: Datapath soc handle 3375 * @vdev_id: vdev id 3376 * @mac: Peer mac address 3377 * @rx: rx function pointer 3378 * 3379 * return: QDF_STATUS_SUCCESS on success 3380 * QDF_STATUS_E_INVAL if peer is not found 3381 * QDF_STATUS_E_ALREADY if rx is already set/unset 3382 */ 3383 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc, 3384 uint8_t vdev_id, 3385 uint8_t *mac, 3386 ol_txrx_rx_fp rx, 3387 ol_osif_peer_handle osif_peer); 3388 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 3389 3390 #ifdef DP_MEM_PRE_ALLOC 3391 3392 /** 3393 * dp_context_alloc_mem() - allocate memory for DP context 3394 * @soc: datapath soc handle 3395 * @ctxt_type: DP context type 3396 * @ctxt_size: DP context size 3397 * 3398 * Return: DP context address 3399 */ 3400 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 3401 size_t ctxt_size); 3402 3403 /** 3404 * dp_context_free_mem() - Free memory of DP context 3405 * @soc: datapath soc handle 3406 * @ctxt_type: DP context type 3407 * @vaddr: Address of context memory 3408 * 3409 * Return: None 3410 */ 3411 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 3412 void *vaddr); 3413 3414 /** 3415 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages 3416 * @soc: datapath soc handle 3417 * @desc_type: memory request source type 3418 * @pages: multi page information storage 3419 * @element_size: each element size 3420 * @element_num: total number of elements should be allocated 3421 * @memctxt: memory context 3422 * @cacheable: coherent memory or cacheable memory 3423 * 3424 * This function is a wrapper for memory allocation over multiple 3425 * pages, if dp prealloc method is registered, then will try prealloc 3426 * firstly. if prealloc failed, fall back to regular way over 3427 * qdf_mem_multi_pages_alloc(). 3428 * 3429 * Return: None 3430 */ 3431 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 3432 enum dp_desc_type desc_type, 3433 struct qdf_mem_multi_page_t *pages, 3434 size_t element_size, 3435 uint32_t element_num, 3436 qdf_dma_context_t memctxt, 3437 bool cacheable); 3438 3439 /** 3440 * dp_desc_multi_pages_mem_free() - free multiple pages memory 3441 * @soc: datapath soc handle 3442 * @desc_type: memory request source type 3443 * @pages: multi page information storage 3444 * @memctxt: memory context 3445 * @cacheable: coherent memory or cacheable memory 3446 * 3447 * This function is a wrapper for multiple pages memory free, 3448 * if memory is got from prealloc pool, put it back to pool. 3449 * otherwise free by qdf_mem_multi_pages_free(). 
3450 * 3451 * Return: None 3452 */ 3453 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 3454 enum dp_desc_type desc_type, 3455 struct qdf_mem_multi_page_t *pages, 3456 qdf_dma_context_t memctxt, 3457 bool cacheable); 3458 3459 #else 3460 static inline 3461 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 3462 size_t ctxt_size) 3463 { 3464 return qdf_mem_malloc(ctxt_size); 3465 } 3466 3467 static inline 3468 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 3469 void *vaddr) 3470 { 3471 qdf_mem_free(vaddr); 3472 } 3473 3474 static inline 3475 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 3476 enum dp_desc_type desc_type, 3477 struct qdf_mem_multi_page_t *pages, 3478 size_t element_size, 3479 uint32_t element_num, 3480 qdf_dma_context_t memctxt, 3481 bool cacheable) 3482 { 3483 qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size, 3484 element_num, memctxt, cacheable); 3485 } 3486 3487 static inline 3488 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 3489 enum dp_desc_type desc_type, 3490 struct qdf_mem_multi_page_t *pages, 3491 qdf_dma_context_t memctxt, 3492 bool cacheable) 3493 { 3494 qdf_mem_multi_pages_free(soc->osdev, pages, 3495 memctxt, cacheable); 3496 } 3497 #endif 3498 3499 #ifdef FEATURE_RUNTIME_PM 3500 /** 3501 * dp_runtime_get() - Get dp runtime refcount 3502 * @soc: Datapath soc handle 3503 * 3504 * Get dp runtime refcount by increment of an atomic variable, which can block 3505 * dp runtime resume to wait to flush pending tx by runtime suspend. 3506 * 3507 * Return: Current refcount 3508 */ 3509 static inline int32_t dp_runtime_get(struct dp_soc *soc) 3510 { 3511 return qdf_atomic_inc_return(&soc->dp_runtime_refcount); 3512 } 3513 3514 /** 3515 * dp_runtime_put() - Return dp runtime refcount 3516 * @soc: Datapath soc handle 3517 * 3518 * Return dp runtime refcount by decrement of an atomic variable, allow dp 3519 * runtime resume finish. 
3520 * 3521 * Return: Current refcount 3522 */ 3523 static inline int32_t dp_runtime_put(struct dp_soc *soc) 3524 { 3525 return qdf_atomic_dec_return(&soc->dp_runtime_refcount); 3526 } 3527 3528 /** 3529 * dp_runtime_get_refcount() - Get dp runtime refcount 3530 * @soc: Datapath soc handle 3531 * 3532 * Get dp runtime refcount by returning an atomic variable 3533 * 3534 * Return: Current refcount 3535 */ 3536 static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc) 3537 { 3538 return qdf_atomic_read(&soc->dp_runtime_refcount); 3539 } 3540 3541 /** 3542 * dp_runtime_init() - Init DP related runtime PM clients and runtime refcount 3543 * @soc: Datapath soc handle 3544 * 3545 * Return: QDF_STATUS 3546 */ 3547 static inline void dp_runtime_init(struct dp_soc *soc) 3548 { 3549 hif_rtpm_register(HIF_RTPM_ID_DP, NULL); 3550 hif_rtpm_register(HIF_RTPM_ID_DP_RING_STATS, NULL); 3551 qdf_atomic_init(&soc->dp_runtime_refcount); 3552 } 3553 3554 /** 3555 * dp_runtime_deinit() - Deinit DP related runtime PM clients 3556 * 3557 * Return: None 3558 */ 3559 static inline void dp_runtime_deinit(void) 3560 { 3561 hif_rtpm_deregister(HIF_RTPM_ID_DP); 3562 hif_rtpm_deregister(HIF_RTPM_ID_DP_RING_STATS); 3563 } 3564 3565 /** 3566 * dp_runtime_pm_mark_last_busy() - Mark last busy when rx path in use 3567 * @soc: Datapath soc handle 3568 * 3569 * Return: None 3570 */ 3571 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 3572 { 3573 soc->rx_last_busy = qdf_get_log_timestamp_usecs(); 3574 3575 hif_rtpm_mark_last_busy(HIF_RTPM_ID_DP); 3576 } 3577 #else 3578 static inline int32_t dp_runtime_get(struct dp_soc *soc) 3579 { 3580 return 0; 3581 } 3582 3583 static inline int32_t dp_runtime_put(struct dp_soc *soc) 3584 { 3585 return 0; 3586 } 3587 3588 static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc) 3589 { 3590 return QDF_STATUS_SUCCESS; 3591 } 3592 3593 static inline void dp_runtime_deinit(void) 3594 { 3595 } 3596 3597 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 3598 { 3599 } 3600 #endif 3601 3602 static inline enum QDF_GLOBAL_MODE dp_soc_get_con_mode(struct dp_soc *soc) 3603 { 3604 if (soc->cdp_soc.ol_ops->get_con_mode) 3605 return soc->cdp_soc.ol_ops->get_con_mode(); 3606 3607 return QDF_GLOBAL_MAX_MODE; 3608 } 3609 3610 /* 3611 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats 3612 * processing 3613 * @pdev: Datapath PDEV handle 3614 * 3615 */ 3616 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev); 3617 3618 /* 3619 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats 3620 * processing 3621 * @pdev: Datapath PDEV handle 3622 * 3623 * Return: QDF_STATUS_SUCCESS: Success 3624 * QDF_STATUS_E_NOMEM: Error 3625 */ 3626 3627 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev); 3628 3629 /** 3630 * dp_peer_flush_frags() - Flush all fragments for a particular 3631 * peer 3632 * @soc_hdl - data path soc handle 3633 * @vdev_id - vdev id 3634 * @peer_addr - peer mac address 3635 * 3636 * Return: None 3637 */ 3638 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3639 uint8_t *peer_mac); 3640 3641 /** 3642 * dp_soc_reset_mon_intr_mask() - reset mon intr mask 3643 * @soc: pointer to dp_soc handle 3644 * 3645 * Return: 3646 */ 3647 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc); 3648 3649 /** 3650 * dp_txrx_get_soc_stats() - will return cdp_soc_stats 3651 * @soc_hdl: soc handle 3652 * @soc_stats: buffer to hold the values 3653 * 3654 * Return: QDF_STATUS_SUCCESS: Success 3655 * 
QDF_STATUS_E_FAILURE: Error 3656 */ 3657 QDF_STATUS dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl, 3658 struct cdp_soc_stats *soc_stats); 3659 3660 /** 3661 * dp_txrx_get_peer_delay_stats() - to get peer delay stats per TIDs 3662 * @soc: soc handle 3663 * @vdev_id: id of vdev handle 3664 * @peer_mac: mac of DP_PEER handle 3665 * @delay_stats: pointer to delay stats array 3666 * 3667 * Return: QDF_STATUS_SUCCESS: Success 3668 * QDF_STATUS_E_FAILURE: Error 3669 */ 3670 QDF_STATUS 3671 dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3672 uint8_t *peer_mac, 3673 struct cdp_delay_tid_stats *delay_stats); 3674 3675 /** 3676 * dp_txrx_get_peer_jitter_stats() - to get peer jitter stats per TIDs 3677 * @soc: soc handle 3678 * @pdev_id: id of pdev handle 3679 * @vdev_id: id of vdev handle 3680 * @peer_mac: mac of DP_PEER handle 3681 * @tid_stats: pointer to jitter stats array 3682 * 3683 * Return: QDF_STATUS_SUCCESS: Success 3684 * QDF_STATUS_E_FAILURE: Error 3685 */ 3686 QDF_STATUS 3687 dp_txrx_get_peer_jitter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3688 uint8_t vdev_id, uint8_t *peer_mac, 3689 struct cdp_peer_tid_stats *tid_stats); 3690 3691 /* dp_peer_get_tx_capture_stats - to get peer Tx Capture stats 3692 * @soc_hdl: soc handle 3693 * @vdev_id: id of vdev handle 3694 * @peer_mac: mac of DP_PEER handle 3695 * @stats: pointer to peer tx capture stats 3696 * 3697 * Return: QDF_STATUS_SUCCESS: Success 3698 * QDF_STATUS_E_FAILURE: Error 3699 */ 3700 QDF_STATUS 3701 dp_peer_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, 3702 uint8_t vdev_id, uint8_t *peer_mac, 3703 struct cdp_peer_tx_capture_stats *stats); 3704 3705 /* dp_pdev_get_tx_capture_stats - to get pdev Tx Capture stats 3706 * @soc_hdl: soc handle 3707 * @pdev_id: id of pdev handle 3708 * @stats: pointer to pdev tx capture stats 3709 * 3710 * Return: QDF_STATUS_SUCCESS: Success 3711 * QDF_STATUS_E_FAILURE: Error 3712 */ 3713 QDF_STATUS 3714 dp_pdev_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 3715 struct cdp_pdev_tx_capture_stats *stats); 3716 3717 #ifdef HW_TX_DELAY_STATS_ENABLE 3718 /* 3719 * dp_is_vdev_tx_delay_stats_enabled(): Check if tx delay stats 3720 * is enabled for vdev 3721 * @vdev: dp vdev 3722 * 3723 * Return: true if tx delay stats is enabled for vdev else false 3724 */ 3725 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 3726 { 3727 return vdev->hw_tx_delay_stats_enabled; 3728 } 3729 3730 /* 3731 * dp_pdev_print_tx_delay_stats(): Print vdev tx delay stats 3732 * for pdev 3733 * @soc: dp soc 3734 * 3735 * Return: None 3736 */ 3737 void dp_pdev_print_tx_delay_stats(struct dp_soc *soc); 3738 3739 /** 3740 * dp_pdev_clear_tx_delay_stats() - clear tx delay stats 3741 * @soc: soc handle 3742 * 3743 * Return: None 3744 */ 3745 void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc); 3746 #else 3747 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 3748 { 3749 return 0; 3750 } 3751 3752 static inline void dp_pdev_print_tx_delay_stats(struct dp_soc *soc) 3753 { 3754 } 3755 3756 static inline void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc) 3757 { 3758 } 3759 #endif 3760 3761 static inline void 3762 dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config *lro_hash) 3763 { 3764 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv4, 3765 (sizeof(lro_hash->toeplitz_hash_ipv4[0]) * 3766 LRO_IPV4_SEED_ARR_SZ)); 3767 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv6, 3768 (sizeof(lro_hash->toeplitz_hash_ipv6[0]) * 3769 

/**
 * dp_get_rx_hash_key_bytes() - fill LRO/RX hash config with random
 *				Toeplitz hash seeds
 * @lro_hash: LRO/RX hash config whose IPv4 and IPv6 seed arrays are filled
 *
 * Return: None
 */
static inline void
dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config *lro_hash)
{
	qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv4,
			     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv6,
			     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));
}

#ifdef WLAN_TELEMETRY_STATS_SUPPORT
/**
 * dp_get_pdev_telemetry_stats() - API to get pdev telemetry stats
 * @soc_hdl: soc handle
 * @pdev_id: id of pdev handle
 * @stats: pointer to pdev telemetry stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_get_pdev_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			    struct cdp_pdev_telemetry_stats *stats);

/**
 * dp_get_peer_telemetry_stats() - API to get peer telemetry stats
 * @soc_hdl: soc handle
 * @addr: peer mac address
 * @stats: pointer to peer telemetry stats
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Error
 */
QDF_STATUS
dp_get_peer_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t *addr,
			    struct cdp_peer_telemetry_stats *stats);
#endif /* WLAN_TELEMETRY_STATS_SUPPORT */

#ifdef CONNECTIVITY_PKTLOG
/**
 * dp_tx_send_pktlog() - send tx packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @tx_desc: TX software descriptor
 * @nbuf: nbuf
 * @status: status of tx packet
 *
 * This function is used to send a tx packet for logging.
 *
 * Return: None
 */
static inline
void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_tx_packetdump_cb;

	if (qdf_unlikely(packetdump_cb) &&
	    dp_tx_frm_std == tx_desc->frm_type) {
		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
			      QDF_NBUF_CB_TX_VDEV_CTX(nbuf),
			      nbuf, status, QDF_TX_DATA_PKT);
	}
}

/**
 * dp_rx_send_pktlog() - send rx packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @nbuf: nbuf
 * @status: status of rx packet
 *
 * This function is used to send an rx packet for logging.
 *
 * Return: None
 */
static inline
void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb;

	if (qdf_unlikely(packetdump_cb)) {
		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
			      QDF_NBUF_CB_RX_VDEV_ID(nbuf),
			      nbuf, status, QDF_RX_DATA_PKT);
	}
}
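
/*
 * Example (illustrative only, not driver code): a minimal sketch of how the
 * data path might hand frames to the helpers above, assuming the per-pdev
 * packetdump callbacks have been registered elsewhere; the surrounding
 * handler context is hypothetical and "status" is a value from
 * enum qdf_dp_tx_rx_status.
 *
 *	On TX completion:
 *		dp_tx_send_pktlog(soc, pdev, tx_desc, nbuf, status);
 *
 *	Before delivering an RX frame to the stack:
 *		dp_rx_send_pktlog(soc, pdev, nbuf, status);
 *
 * Both helpers are no-ops unless CONNECTIVITY_PKTLOG is enabled and a
 * callback has been installed, so they are safe to call unconditionally
 * from the data path.
 */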

/**
 * dp_rx_err_send_pktlog() - send rx error packet log
 * @soc: soc handle
 * @pdev: pdev handle
 * @mpdu_desc_info: MPDU descriptor info
 * @nbuf: nbuf
 * @status: status of rx packet
 * @set_pktlen: whether to set the packet length
 *
 * This API should only be called when we have not removed the
 * Rx TLV from the head, and the head is pointing to rx_tlv.
 *
 * This function is used to send an rx packet from the error path
 * for logging, for which the rx packet TLV is not removed.
 *
 * Return: None
 */
static inline
void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			   qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status,
			   bool set_pktlen)
{
	ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb;
	qdf_size_t skip_size;
	uint16_t msdu_len, nbuf_len;
	uint8_t *rx_tlv_hdr;
	struct hal_rx_msdu_metadata msdu_metadata;

	if (qdf_unlikely(packetdump_cb)) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		nbuf_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
							  rx_tlv_hdr);
		hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr,
					 &msdu_metadata);

		if (mpdu_desc_info->bar_frame ||
		    (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_FRAGMENT))
			skip_size = soc->rx_pkt_tlv_size;
		else
			skip_size = soc->rx_pkt_tlv_size +
					msdu_metadata.l3_hdr_pad;

		if (set_pktlen) {
			msdu_len = nbuf_len + skip_size;
			qdf_nbuf_set_pktlen(nbuf, qdf_min(msdu_len,
					    (uint16_t)RX_DATA_BUFFER_SIZE));
		}

		/* Temporarily strip the Rx TLV (and any L3 header pad) so
		 * the callback sees the frame payload, then restore the
		 * head so the caller's view of the nbuf is unchanged.
		 */
		qdf_nbuf_pull_head(nbuf, skip_size);
		packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id,
			      QDF_NBUF_CB_RX_VDEV_ID(nbuf),
			      nbuf, status, QDF_RX_DATA_PKT);
		qdf_nbuf_push_head(nbuf, skip_size);
	}
}

#else
static inline
void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       struct dp_tx_desc_s *tx_desc,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
}

static inline
void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
		       qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status)
{
}

static inline
void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			   qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status,
			   bool set_pktlen)
{
}
#endif
#endif /* #ifndef _DP_INTERNAL_H_ */