1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #ifndef _DP_INTERNAL_H_ 21 #define _DP_INTERNAL_H_ 22 23 #include "dp_types.h" 24 #include "dp_htt.h" 25 #include "dp_rx_tid.h" 26 27 #define RX_BUFFER_SIZE_PKTLOG_LITE 1024 28 29 #define DP_PEER_WDS_COUNT_INVALID UINT_MAX 30 31 #define DP_BLOCKMEM_SIZE 4096 32 #define WBM2_SW_PPE_REL_RING_ID 6 33 #define WBM2_SW_PPE_REL_MAP_ID 11 34 #define DP_TX_PPEDS_POOL_ID 0xF 35 36 /* Alignment for consistent memory for DP rings*/ 37 #define DP_RING_BASE_ALIGN 32 38 39 #define DP_RSSI_INVAL 0x80 40 #define DP_RSSI_AVG_WEIGHT 2 41 /* 42 * Formula to derive avg_rssi is taken from wifi2.o firmware 43 */ 44 #define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \ 45 (((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \ 46 + ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT))) 47 48 /* Macro For NYSM value received in VHT TLV */ 49 #define VHT_SGI_NYSM 3 50 51 #define INVALID_WBM_RING_NUM 0xF 52 53 #ifdef FEATURE_DIRECT_LINK 54 #define DIRECT_LINK_REFILL_RING_ENTRIES 64 55 #ifdef IPA_OFFLOAD 56 #ifdef IPA_WDI3_VLAN_SUPPORT 57 #define DIRECT_LINK_REFILL_RING_IDX 4 58 #else 59 #define DIRECT_LINK_REFILL_RING_IDX 3 60 #endif 61 #else 62 #define DIRECT_LINK_REFILL_RING_IDX 2 63 #endif 64 #endif 65 66 #define DP_MAX_VLAN_IDS 4096 67 #define DP_VLAN_UNTAGGED 0 68 #define DP_VLAN_TAGGED_MULTICAST 1 69 #define DP_VLAN_TAGGED_UNICAST 2 70 71 /** 72 * struct htt_dbgfs_cfg - structure to maintain required htt data 73 * @msg_word: htt msg sent to upper layer 74 * @m: qdf debugfs file pointer 75 */ 76 struct htt_dbgfs_cfg { 77 uint32_t *msg_word; 78 qdf_debugfs_file_t m; 79 }; 80 81 /* Cookie MSB bits assigned for different use case. 82 * Note: User can't use last 3 bits, as it is reserved for pdev_id. 83 * If in future number of pdev are more than 3. 84 */ 85 /* Reserve for default case */ 86 #define DBG_STATS_COOKIE_DEFAULT 0x0 87 88 /* Reserve for DP Stats: 3rd bit */ 89 #define DBG_STATS_COOKIE_DP_STATS BIT(3) 90 91 /* Reserve for HTT Stats debugfs support: 4th bit */ 92 #define DBG_STATS_COOKIE_HTT_DBGFS BIT(4) 93 94 /*Reserve for HTT Stats debugfs support: 5th bit */ 95 #define DBG_SYSFS_STATS_COOKIE BIT(5) 96 97 /* Reserve for HTT Stats OBSS PD support: 6th bit */ 98 #define DBG_STATS_COOKIE_HTT_OBSS BIT(6) 99 100 /* 101 * Bitmap of HTT PPDU TLV types for Default mode 102 */ 103 #define HTT_PPDU_DEFAULT_TLV_BITMAP \ 104 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 105 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 106 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 107 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 108 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 109 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) 110 111 /* PPDU STATS CFG */ 112 #define DP_PPDU_STATS_CFG_ALL 0xFFFF 113 114 /* PPDU stats mask sent to FW to enable enhanced stats */ 115 #define DP_PPDU_STATS_CFG_ENH_STATS \ 116 (HTT_PPDU_DEFAULT_TLV_BITMAP) | \ 117 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \ 118 (1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \ 119 (1 << HTT_PPDU_STATS_USERS_INFO_TLV) 120 121 /* PPDU stats mask sent to FW to support debug sniffer feature */ 122 #define DP_PPDU_STATS_CFG_SNIFFER \ 123 (HTT_PPDU_DEFAULT_TLV_BITMAP) | \ 124 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \ 125 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV) | \ 126 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \ 127 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \ 128 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \ 129 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \ 130 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) | \ 131 (1 << HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV) | \ 132 (1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \ 133 (1 << HTT_PPDU_STATS_USERS_INFO_TLV) 134 135 /* PPDU stats mask sent to FW to support BPR feature*/ 136 #define DP_PPDU_STATS_CFG_BPR \ 137 (1 << HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) | \ 138 (1 << HTT_PPDU_STATS_USERS_INFO_TLV) 139 140 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */ 141 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \ 142 DP_PPDU_STATS_CFG_ENH_STATS) 143 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */ 144 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \ 145 DP_PPDU_TXLITE_STATS_BITMASK_CFG) 146 147 /* 148 * Bitmap of HTT PPDU delayed ba TLV types for Default mode 149 */ 150 #define HTT_PPDU_DELAYED_BA_TLV_BITMAP \ 151 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 152 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 153 (1 << HTT_PPDU_STATS_USR_RATE_TLV) 154 155 /* 156 * Bitmap of HTT PPDU TLV types for Delayed BA 157 */ 158 #define HTT_PPDU_STATUS_TLV_BITMAP \ 159 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 160 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) 161 162 /* 163 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64 164 */ 165 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \ 166 ((1 << HTT_PPDU_STATS_COMMON_TLV) | \ 167 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 168 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 169 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 170 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 171 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ 172 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \ 173 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV)) 174 175 /* 176 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256 177 */ 178 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \ 179 ((1 << HTT_PPDU_STATS_COMMON_TLV) | \ 180 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 181 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 182 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 183 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 184 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ 185 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \ 186 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV)) 187 188 static const enum cdp_packet_type hal_2_dp_pkt_type_map[HAL_DOT11_MAX] = { 189 [HAL_DOT11A] = DOT11_A, 190 [HAL_DOT11B] = DOT11_B, 191 [HAL_DOT11N_MM] = DOT11_N, 192 [HAL_DOT11AC] = DOT11_AC, 193 [HAL_DOT11AX] = DOT11_AX, 194 [HAL_DOT11BA] = DOT11_MAX, 195 #ifdef WLAN_FEATURE_11BE 196 [HAL_DOT11BE] = DOT11_BE, 197 #else 198 [HAL_DOT11BE] = DOT11_MAX, 199 #endif 200 [HAL_DOT11AZ] = DOT11_MAX, 201 [HAL_DOT11N_GF] = DOT11_MAX, 202 }; 203 204 #ifdef GLOBAL_ASSERT_AVOIDANCE 205 #define dp_assert_always_internal_stat(_expr, _handle, _field) \ 206 (qdf_unlikely(!(_expr)) ? ((_handle)->stats._field++, true) : false) 207 208 #define dp_assert_always_internal_ds_stat(_expr, _handle, _field) \ 209 ((_handle)->ppeds_stats._field++) 210 dp_assert_always_internal(bool expr)211 static inline bool dp_assert_always_internal(bool expr) 212 { 213 return !expr; 214 } 215 #else __dp_assert_always_internal(bool expr)216 static inline bool __dp_assert_always_internal(bool expr) 217 { 218 qdf_assert_always(expr); 219 220 return false; 221 } 222 223 #define dp_assert_always_internal(_expr) __dp_assert_always_internal(_expr) 224 225 #define dp_assert_always_internal_stat(_expr, _handle, _field) \ 226 dp_assert_always_internal(_expr) 227 228 #define dp_assert_always_internal_ds_stat(_expr, _handle, _field) \ 229 dp_assert_always_internal(_expr) 230 #endif 231 232 #ifdef WLAN_FEATURE_11BE 233 /** 234 * dp_get_mcs_array_index_by_pkt_type_mcs() - get the destination mcs index 235 * in array 236 * @pkt_type: host SW pkt type 237 * @mcs: mcs value for TX/RX rate 238 * 239 * Return: succeeded - valid index in mcs array 240 * fail - same value as MCS_MAX 241 */ 242 static inline uint8_t dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type,uint32_t mcs)243 dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs) 244 { 245 uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX; 246 247 switch (pkt_type) { 248 case DOT11_A: 249 dst_mcs_idx = 250 mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs; 251 break; 252 case DOT11_B: 253 dst_mcs_idx = 254 mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs; 255 break; 256 case DOT11_N: 257 dst_mcs_idx = 258 mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs; 259 break; 260 case DOT11_AC: 261 dst_mcs_idx = 262 mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs; 263 break; 264 case DOT11_AX: 265 dst_mcs_idx = 266 mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs; 267 break; 268 case DOT11_BE: 269 dst_mcs_idx = 270 mcs >= MAX_MCS_11BE ? (MAX_MCS - 1) : mcs; 271 break; 272 default: 273 break; 274 } 275 276 return dst_mcs_idx; 277 } 278 #else 279 static inline uint8_t dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type,uint32_t mcs)280 dp_get_mcs_array_index_by_pkt_type_mcs(uint32_t pkt_type, uint32_t mcs) 281 { 282 uint8_t dst_mcs_idx = MCS_INVALID_ARRAY_INDEX; 283 284 switch (pkt_type) { 285 case DOT11_A: 286 dst_mcs_idx = 287 mcs >= MAX_MCS_11A ? (MAX_MCS - 1) : mcs; 288 break; 289 case DOT11_B: 290 dst_mcs_idx = 291 mcs >= MAX_MCS_11B ? (MAX_MCS - 1) : mcs; 292 break; 293 case DOT11_N: 294 dst_mcs_idx = 295 mcs >= MAX_MCS_11N ? (MAX_MCS - 1) : mcs; 296 break; 297 case DOT11_AC: 298 dst_mcs_idx = 299 mcs >= MAX_MCS_11AC ? (MAX_MCS - 1) : mcs; 300 break; 301 case DOT11_AX: 302 dst_mcs_idx = 303 mcs >= MAX_MCS_11AX ? (MAX_MCS - 1) : mcs; 304 break; 305 default: 306 break; 307 } 308 309 return dst_mcs_idx; 310 } 311 #endif 312 313 #ifdef WIFI_MONITOR_SUPPORT 314 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc); 315 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc); 316 #else 317 static inline dp_mon_soc_attach(struct dp_soc * soc)318 QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc) 319 { 320 return QDF_STATUS_SUCCESS; 321 } 322 323 static inline dp_mon_soc_detach(struct dp_soc * soc)324 QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc) 325 { 326 return QDF_STATUS_SUCCESS; 327 } 328 #endif 329 330 /** 331 * dp_rx_err_match_dhost() - function to check whether dest-mac is correct 332 * @eh: Ethernet header of incoming packet 333 * @vdev: dp_vdev object of the VAP on which this data packet is received 334 * 335 * Return: 1 if the destination mac is correct, 336 * 0 if this frame is not correctly destined to this VAP/MLD 337 */ 338 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev); 339 340 #ifdef MONITOR_MODULARIZED_ENABLE dp_monitor_modularized_enable(void)341 static inline bool dp_monitor_modularized_enable(void) 342 { 343 return TRUE; 344 } 345 346 static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc * soc)347 dp_mon_soc_attach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; } 348 349 static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc * soc)350 dp_mon_soc_detach_wrapper(struct dp_soc *soc) { return QDF_STATUS_SUCCESS; } 351 #else dp_monitor_modularized_enable(void)352 static inline bool dp_monitor_modularized_enable(void) 353 { 354 return FALSE; 355 } 356 dp_mon_soc_attach_wrapper(struct dp_soc * soc)357 static inline QDF_STATUS dp_mon_soc_attach_wrapper(struct dp_soc *soc) 358 { 359 return dp_mon_soc_attach(soc); 360 } 361 dp_mon_soc_detach_wrapper(struct dp_soc * soc)362 static inline QDF_STATUS dp_mon_soc_detach_wrapper(struct dp_soc *soc) 363 { 364 return dp_mon_soc_detach(soc); 365 } 366 #endif 367 368 #ifndef WIFI_MONITOR_SUPPORT 369 #define MON_BUF_MIN_ENTRIES 64 370 dp_monitor_pdev_attach(struct dp_pdev * pdev)371 static inline QDF_STATUS dp_monitor_pdev_attach(struct dp_pdev *pdev) 372 { 373 return QDF_STATUS_SUCCESS; 374 } 375 dp_monitor_pdev_detach(struct dp_pdev * pdev)376 static inline QDF_STATUS dp_monitor_pdev_detach(struct dp_pdev *pdev) 377 { 378 return QDF_STATUS_SUCCESS; 379 } 380 dp_monitor_vdev_attach(struct dp_vdev * vdev)381 static inline QDF_STATUS dp_monitor_vdev_attach(struct dp_vdev *vdev) 382 { 383 return QDF_STATUS_E_FAILURE; 384 } 385 dp_monitor_vdev_detach(struct dp_vdev * vdev)386 static inline QDF_STATUS dp_monitor_vdev_detach(struct dp_vdev *vdev) 387 { 388 return QDF_STATUS_E_FAILURE; 389 } 390 dp_monitor_peer_attach(struct dp_soc * soc,struct dp_peer * peer)391 static inline QDF_STATUS dp_monitor_peer_attach(struct dp_soc *soc, 392 struct dp_peer *peer) 393 { 394 return QDF_STATUS_SUCCESS; 395 } 396 dp_monitor_peer_detach(struct dp_soc * soc,struct dp_peer * peer)397 static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc, 398 struct dp_peer *peer) 399 { 400 return QDF_STATUS_E_FAILURE; 401 } 402 403 static inline struct cdp_peer_rate_stats_ctx* dp_monitor_peer_get_peerstats_ctx(struct dp_soc * soc,struct dp_peer * peer)404 dp_monitor_peer_get_peerstats_ctx(struct dp_soc *soc, struct dp_peer *peer) 405 { 406 return NULL; 407 } 408 409 static inline dp_monitor_peer_reset_stats(struct dp_soc * soc,struct dp_peer * peer)410 void dp_monitor_peer_reset_stats(struct dp_soc *soc, struct dp_peer *peer) 411 { 412 } 413 414 static inline dp_monitor_peer_get_stats(struct dp_soc * soc,struct dp_peer * peer,void * arg,enum cdp_stat_update_type type)415 void dp_monitor_peer_get_stats(struct dp_soc *soc, struct dp_peer *peer, 416 void *arg, enum cdp_stat_update_type type) 417 { 418 } 419 420 static inline dp_monitor_invalid_peer_update_pdev_stats(struct dp_soc * soc,struct dp_pdev * pdev)421 void dp_monitor_invalid_peer_update_pdev_stats(struct dp_soc *soc, 422 struct dp_pdev *pdev) 423 { 424 } 425 426 static inline dp_monitor_peer_get_stats_param(struct dp_soc * soc,struct dp_peer * peer,enum cdp_peer_stats_type type,cdp_peer_stats_param_t * buf)427 QDF_STATUS dp_monitor_peer_get_stats_param(struct dp_soc *soc, 428 struct dp_peer *peer, 429 enum cdp_peer_stats_type type, 430 cdp_peer_stats_param_t *buf) 431 { 432 return QDF_STATUS_E_FAILURE; 433 } 434 dp_monitor_pdev_init(struct dp_pdev * pdev)435 static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev) 436 { 437 return QDF_STATUS_SUCCESS; 438 } 439 dp_monitor_pdev_deinit(struct dp_pdev * pdev)440 static inline QDF_STATUS dp_monitor_pdev_deinit(struct dp_pdev *pdev) 441 { 442 return QDF_STATUS_SUCCESS; 443 } 444 dp_monitor_soc_cfg_init(struct dp_soc * soc)445 static inline QDF_STATUS dp_monitor_soc_cfg_init(struct dp_soc *soc) 446 { 447 return QDF_STATUS_SUCCESS; 448 } 449 dp_monitor_config_debug_sniffer(struct dp_pdev * pdev,int val)450 static inline QDF_STATUS dp_monitor_config_debug_sniffer(struct dp_pdev *pdev, 451 int val) 452 { 453 return QDF_STATUS_E_FAILURE; 454 } 455 dp_monitor_flush_rings(struct dp_soc * soc)456 static inline void dp_monitor_flush_rings(struct dp_soc *soc) 457 { 458 } 459 dp_monitor_htt_srng_setup(struct dp_soc * soc,struct dp_pdev * pdev,int mac_id,int mac_for_pdev)460 static inline QDF_STATUS dp_monitor_htt_srng_setup(struct dp_soc *soc, 461 struct dp_pdev *pdev, 462 int mac_id, 463 int mac_for_pdev) 464 { 465 return QDF_STATUS_SUCCESS; 466 } 467 dp_monitor_service_mon_rings(struct dp_soc * soc,uint32_t quota)468 static inline void dp_monitor_service_mon_rings(struct dp_soc *soc, 469 uint32_t quota) 470 { 471 } 472 473 static inline dp_monitor_process(struct dp_soc * soc,struct dp_intr * int_ctx,uint32_t mac_id,uint32_t quota)474 uint32_t dp_monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx, 475 uint32_t mac_id, uint32_t quota) 476 { 477 return 0; 478 } 479 480 static inline dp_monitor_drop_packets_for_mac(struct dp_pdev * pdev,uint32_t mac_id,uint32_t quota)481 uint32_t dp_monitor_drop_packets_for_mac(struct dp_pdev *pdev, 482 uint32_t mac_id, uint32_t quota) 483 { 484 return 0; 485 } 486 dp_monitor_peer_tx_init(struct dp_pdev * pdev,struct dp_peer * peer)487 static inline void dp_monitor_peer_tx_init(struct dp_pdev *pdev, 488 struct dp_peer *peer) 489 { 490 } 491 dp_monitor_peer_tx_cleanup(struct dp_vdev * vdev,struct dp_peer * peer)492 static inline void dp_monitor_peer_tx_cleanup(struct dp_vdev *vdev, 493 struct dp_peer *peer) 494 { 495 } 496 497 static inline dp_monitor_peer_tid_peer_id_update(struct dp_soc * soc,struct dp_peer * peer,uint16_t peer_id)498 void dp_monitor_peer_tid_peer_id_update(struct dp_soc *soc, 499 struct dp_peer *peer, 500 uint16_t peer_id) 501 { 502 } 503 dp_monitor_tx_ppdu_stats_attach(struct dp_pdev * pdev)504 static inline void dp_monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev) 505 { 506 } 507 dp_monitor_tx_ppdu_stats_detach(struct dp_pdev * pdev)508 static inline void dp_monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev) 509 { 510 } 511 512 static inline dp_monitor_tx_capture_debugfs_init(struct dp_pdev * pdev)513 QDF_STATUS dp_monitor_tx_capture_debugfs_init(struct dp_pdev *pdev) 514 { 515 return QDF_STATUS_SUCCESS; 516 } 517 dp_monitor_peer_tx_capture_filter_check(struct dp_pdev * pdev,struct dp_peer * peer)518 static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev, 519 struct dp_peer *peer) 520 { 521 } 522 523 static inline dp_monitor_tx_add_to_comp_queue(struct dp_soc * soc,struct dp_tx_desc_s * desc,struct hal_tx_completion_status * ts,uint16_t peer_id)524 QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc, 525 struct dp_tx_desc_s *desc, 526 struct hal_tx_completion_status *ts, 527 uint16_t peer_id) 528 { 529 return QDF_STATUS_E_FAILURE; 530 } 531 532 static inline monitor_update_msdu_to_list(struct dp_soc * soc,struct dp_pdev * pdev,struct dp_peer * peer,struct hal_tx_completion_status * ts,qdf_nbuf_t netbuf)533 QDF_STATUS monitor_update_msdu_to_list(struct dp_soc *soc, 534 struct dp_pdev *pdev, 535 struct dp_peer *peer, 536 struct hal_tx_completion_status *ts, 537 qdf_nbuf_t netbuf) 538 { 539 return QDF_STATUS_E_FAILURE; 540 } 541 dp_monitor_ppdu_stats_ind_handler(struct htt_soc * soc,uint32_t * msg_word,qdf_nbuf_t htt_t2h_msg)542 static inline bool dp_monitor_ppdu_stats_ind_handler(struct htt_soc *soc, 543 uint32_t *msg_word, 544 qdf_nbuf_t htt_t2h_msg) 545 { 546 return true; 547 } 548 dp_monitor_htt_ppdu_stats_attach(struct dp_pdev * pdev)549 static inline QDF_STATUS dp_monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev) 550 { 551 return QDF_STATUS_SUCCESS; 552 } 553 dp_monitor_htt_ppdu_stats_detach(struct dp_pdev * pdev)554 static inline void dp_monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev) 555 { 556 } 557 dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev * pdev)558 static inline void dp_monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev) 559 { 560 } 561 dp_monitor_config_enh_tx_capture(struct dp_pdev * pdev,uint32_t val)562 static inline QDF_STATUS dp_monitor_config_enh_tx_capture(struct dp_pdev *pdev, 563 uint32_t val) 564 { 565 return QDF_STATUS_E_INVAL; 566 } 567 dp_monitor_tx_peer_filter(struct dp_pdev * pdev,struct dp_peer * peer,uint8_t is_tx_pkt_cap_enable,uint8_t * peer_mac)568 static inline QDF_STATUS dp_monitor_tx_peer_filter(struct dp_pdev *pdev, 569 struct dp_peer *peer, 570 uint8_t is_tx_pkt_cap_enable, 571 uint8_t *peer_mac) 572 { 573 return QDF_STATUS_E_INVAL; 574 } 575 dp_monitor_config_enh_rx_capture(struct dp_pdev * pdev,uint32_t val)576 static inline QDF_STATUS dp_monitor_config_enh_rx_capture(struct dp_pdev *pdev, 577 uint32_t val) 578 { 579 return QDF_STATUS_E_INVAL; 580 } 581 582 static inline dp_monitor_set_bpr_enable(struct dp_pdev * pdev,uint32_t val)583 QDF_STATUS dp_monitor_set_bpr_enable(struct dp_pdev *pdev, uint32_t val) 584 { 585 return QDF_STATUS_E_FAILURE; 586 } 587 588 static inline dp_monitor_set_filter_neigh_peers(struct dp_pdev * pdev,bool val)589 int dp_monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val) 590 { 591 return 0; 592 } 593 594 static inline dp_monitor_set_atf_stats_enable(struct dp_pdev * pdev,bool value)595 void dp_monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value) 596 { 597 } 598 599 static inline dp_monitor_set_bsscolor(struct dp_pdev * pdev,uint8_t bsscolor)600 void dp_monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) 601 { 602 } 603 604 static inline dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev * pdev_handle)605 bool dp_monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) 606 { 607 return false; 608 } 609 610 static inline dp_monitor_pdev_get_filter_non_data(struct cdp_pdev * pdev_handle)611 bool dp_monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) 612 { 613 return false; 614 } 615 616 static inline dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev * pdev_handle)617 bool dp_monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) 618 { 619 return false; 620 } 621 622 static inline dp_monitor_set_pktlog_wifi3(struct dp_pdev * pdev,uint32_t event,bool enable)623 int dp_monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, 624 bool enable) 625 { 626 return 0; 627 } 628 dp_monitor_pktlogmod_exit(struct dp_pdev * pdev)629 static inline void dp_monitor_pktlogmod_exit(struct dp_pdev *pdev) 630 { 631 } 632 633 static inline dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev * pdev)634 QDF_STATUS dp_monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev) 635 { 636 return QDF_STATUS_E_FAILURE; 637 } 638 639 static inline dp_monitor_neighbour_peers_detach(struct dp_pdev * pdev)640 void dp_monitor_neighbour_peers_detach(struct dp_pdev *pdev) 641 { 642 } 643 dp_monitor_filter_neighbour_peer(struct dp_pdev * pdev,uint8_t * rx_pkt_hdr)644 static inline QDF_STATUS dp_monitor_filter_neighbour_peer(struct dp_pdev *pdev, 645 uint8_t *rx_pkt_hdr) 646 { 647 return QDF_STATUS_E_FAILURE; 648 } 649 dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev * pdev)650 static inline void dp_monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev) 651 { 652 } 653 654 static inline dp_monitor_reap_timer_init(struct dp_soc * soc)655 void dp_monitor_reap_timer_init(struct dp_soc *soc) 656 { 657 } 658 659 static inline dp_monitor_reap_timer_deinit(struct dp_soc * soc)660 void dp_monitor_reap_timer_deinit(struct dp_soc *soc) 661 { 662 } 663 664 static inline dp_monitor_reap_timer_start(struct dp_soc * soc,enum cdp_mon_reap_source source)665 bool dp_monitor_reap_timer_start(struct dp_soc *soc, 666 enum cdp_mon_reap_source source) 667 { 668 return false; 669 } 670 671 static inline dp_monitor_reap_timer_stop(struct dp_soc * soc,enum cdp_mon_reap_source source)672 bool dp_monitor_reap_timer_stop(struct dp_soc *soc, 673 enum cdp_mon_reap_source source) 674 { 675 return false; 676 } 677 678 static inline void dp_monitor_reap_timer_suspend(struct dp_soc * soc)679 dp_monitor_reap_timer_suspend(struct dp_soc *soc) 680 { 681 } 682 683 static inline dp_monitor_vdev_timer_init(struct dp_soc * soc)684 void dp_monitor_vdev_timer_init(struct dp_soc *soc) 685 { 686 } 687 688 static inline dp_monitor_vdev_timer_deinit(struct dp_soc * soc)689 void dp_monitor_vdev_timer_deinit(struct dp_soc *soc) 690 { 691 } 692 693 static inline dp_monitor_vdev_timer_start(struct dp_soc * soc)694 void dp_monitor_vdev_timer_start(struct dp_soc *soc) 695 { 696 } 697 698 static inline dp_monitor_vdev_timer_stop(struct dp_soc * soc)699 bool dp_monitor_vdev_timer_stop(struct dp_soc *soc) 700 { 701 return false; 702 } 703 704 static inline struct qdf_mem_multi_page_t* dp_monitor_get_link_desc_pages(struct dp_soc * soc,uint32_t mac_id)705 dp_monitor_get_link_desc_pages(struct dp_soc *soc, uint32_t mac_id) 706 { 707 return NULL; 708 } 709 710 static inline struct dp_srng* dp_monitor_get_link_desc_ring(struct dp_soc * soc,uint32_t mac_id)711 dp_monitor_get_link_desc_ring(struct dp_soc *soc, uint32_t mac_id) 712 { 713 return NULL; 714 } 715 716 static inline uint32_t dp_monitor_get_num_link_desc_ring_entries(struct dp_soc * soc)717 dp_monitor_get_num_link_desc_ring_entries(struct dp_soc *soc) 718 { 719 return 0; 720 } 721 722 static inline uint32_t * dp_monitor_get_total_link_descs(struct dp_soc * soc,uint32_t mac_id)723 dp_monitor_get_total_link_descs(struct dp_soc *soc, uint32_t mac_id) 724 { 725 return NULL; 726 } 727 dp_monitor_drop_inv_peer_pkts(struct dp_vdev * vdev)728 static inline QDF_STATUS dp_monitor_drop_inv_peer_pkts(struct dp_vdev *vdev) 729 { 730 return QDF_STATUS_E_FAILURE; 731 } 732 dp_is_enable_reap_timer_non_pkt(struct dp_pdev * pdev)733 static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev) 734 { 735 return false; 736 } 737 dp_monitor_vdev_register_osif(struct dp_vdev * vdev,struct ol_txrx_ops * txrx_ops)738 static inline void dp_monitor_vdev_register_osif(struct dp_vdev *vdev, 739 struct ol_txrx_ops *txrx_ops) 740 { 741 } 742 dp_monitor_is_vdev_timer_running(struct dp_soc * soc)743 static inline bool dp_monitor_is_vdev_timer_running(struct dp_soc *soc) 744 { 745 return false; 746 } 747 748 static inline dp_monitor_pdev_set_mon_vdev(struct dp_vdev * vdev)749 void dp_monitor_pdev_set_mon_vdev(struct dp_vdev *vdev) 750 { 751 } 752 dp_monitor_vdev_delete(struct dp_soc * soc,struct dp_vdev * vdev)753 static inline void dp_monitor_vdev_delete(struct dp_soc *soc, 754 struct dp_vdev *vdev) 755 { 756 } 757 dp_peer_ppdu_delayed_ba_init(struct dp_peer * peer)758 static inline void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer) 759 { 760 } 761 dp_monitor_neighbour_peer_add_ast(struct dp_pdev * pdev,struct dp_peer * ta_peer,uint8_t * mac_addr,qdf_nbuf_t nbuf,uint32_t flags)762 static inline void dp_monitor_neighbour_peer_add_ast(struct dp_pdev *pdev, 763 struct dp_peer *ta_peer, 764 uint8_t *mac_addr, 765 qdf_nbuf_t nbuf, 766 uint32_t flags) 767 { 768 } 769 770 static inline void dp_monitor_set_chan_band(struct dp_pdev * pdev,enum reg_wifi_band chan_band)771 dp_monitor_set_chan_band(struct dp_pdev *pdev, enum reg_wifi_band chan_band) 772 { 773 } 774 775 static inline void dp_monitor_set_chan_freq(struct dp_pdev * pdev,qdf_freq_t chan_freq)776 dp_monitor_set_chan_freq(struct dp_pdev *pdev, qdf_freq_t chan_freq) 777 { 778 } 779 dp_monitor_set_chan_num(struct dp_pdev * pdev,int chan_num)780 static inline void dp_monitor_set_chan_num(struct dp_pdev *pdev, int chan_num) 781 { 782 } 783 dp_monitor_is_enable_mcopy_mode(struct dp_pdev * pdev)784 static inline bool dp_monitor_is_enable_mcopy_mode(struct dp_pdev *pdev) 785 { 786 return false; 787 } 788 789 static inline dp_monitor_neighbour_peer_list_remove(struct dp_pdev * pdev,struct dp_vdev * vdev,struct dp_neighbour_peer * peer)790 void dp_monitor_neighbour_peer_list_remove(struct dp_pdev *pdev, 791 struct dp_vdev *vdev, 792 struct dp_neighbour_peer *peer) 793 { 794 } 795 dp_monitor_is_chan_band_known(struct dp_pdev * pdev)796 static inline bool dp_monitor_is_chan_band_known(struct dp_pdev *pdev) 797 { 798 return false; 799 } 800 801 static inline enum reg_wifi_band dp_monitor_get_chan_band(struct dp_pdev * pdev)802 dp_monitor_get_chan_band(struct dp_pdev *pdev) 803 { 804 return 0; 805 } 806 807 static inline int dp_monitor_get_chan_num(struct dp_pdev * pdev)808 dp_monitor_get_chan_num(struct dp_pdev *pdev) 809 { 810 return 0; 811 } 812 813 static inline qdf_freq_t dp_monitor_get_chan_freq(struct dp_pdev * pdev)814 dp_monitor_get_chan_freq(struct dp_pdev *pdev) 815 { 816 return 0; 817 } 818 dp_monitor_get_mpdu_status(struct dp_pdev * pdev,struct dp_soc * soc,uint8_t * rx_tlv_hdr)819 static inline void dp_monitor_get_mpdu_status(struct dp_pdev *pdev, 820 struct dp_soc *soc, 821 uint8_t *rx_tlv_hdr) 822 { 823 } 824 dp_monitor_print_tx_stats(struct dp_pdev * pdev)825 static inline void dp_monitor_print_tx_stats(struct dp_pdev *pdev) 826 { 827 } 828 829 static inline dp_monitor_mcopy_check_deliver(struct dp_pdev * pdev,uint16_t peer_id,uint32_t ppdu_id,uint8_t first_msdu)830 QDF_STATUS dp_monitor_mcopy_check_deliver(struct dp_pdev *pdev, 831 uint16_t peer_id, uint32_t ppdu_id, 832 uint8_t first_msdu) 833 { 834 return QDF_STATUS_SUCCESS; 835 } 836 dp_monitor_is_enable_tx_sniffer(struct dp_pdev * pdev)837 static inline bool dp_monitor_is_enable_tx_sniffer(struct dp_pdev *pdev) 838 { 839 return false; 840 } 841 842 static inline struct dp_vdev* dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev * pdev)843 dp_monitor_get_monitor_vdev_from_pdev(struct dp_pdev *pdev) 844 { 845 return NULL; 846 } 847 dp_monitor_check_com_info_ppdu_id(struct dp_pdev * pdev,void * rx_desc)848 static inline QDF_STATUS dp_monitor_check_com_info_ppdu_id(struct dp_pdev *pdev, 849 void *rx_desc) 850 { 851 return QDF_STATUS_E_FAILURE; 852 } 853 854 static inline struct mon_rx_status* dp_monitor_get_rx_status(struct dp_pdev * pdev)855 dp_monitor_get_rx_status(struct dp_pdev *pdev) 856 { 857 return NULL; 858 } 859 860 static inline dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev * pdev,bool val)861 void dp_monitor_pdev_config_scan_spcl_vap(struct dp_pdev *pdev, bool val) 862 { 863 } 864 865 static inline dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev * pdev,bool val)866 void dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(struct dp_pdev *pdev, 867 bool val) 868 { 869 } 870 871 static inline QDF_STATUS dp_monitor_peer_tx_capture_get_stats(struct dp_soc * soc,struct dp_peer * peer,struct cdp_peer_tx_capture_stats * stats)872 dp_monitor_peer_tx_capture_get_stats(struct dp_soc *soc, struct dp_peer *peer, 873 struct cdp_peer_tx_capture_stats *stats) 874 { 875 return QDF_STATUS_E_FAILURE; 876 } 877 878 static inline QDF_STATUS dp_monitor_pdev_tx_capture_get_stats(struct dp_soc * soc,struct dp_pdev * pdev,struct cdp_pdev_tx_capture_stats * stats)879 dp_monitor_pdev_tx_capture_get_stats(struct dp_soc *soc, struct dp_pdev *pdev, 880 struct cdp_pdev_tx_capture_stats *stats) 881 { 882 return QDF_STATUS_E_FAILURE; 883 } 884 885 #ifdef DP_POWER_SAVE 886 static inline dp_monitor_pktlog_reap_pending_frames(struct dp_pdev * pdev)887 void dp_monitor_pktlog_reap_pending_frames(struct dp_pdev *pdev) 888 { 889 } 890 891 static inline dp_monitor_pktlog_start_reap_timer(struct dp_pdev * pdev)892 void dp_monitor_pktlog_start_reap_timer(struct dp_pdev *pdev) 893 { 894 } 895 #endif 896 dp_monitor_is_configured(struct dp_pdev * pdev)897 static inline bool dp_monitor_is_configured(struct dp_pdev *pdev) 898 { 899 return false; 900 } 901 902 static inline void dp_mon_rx_hdr_length_set(struct dp_soc * soc,uint32_t * msg_word,struct htt_rx_ring_tlv_filter * tlv_filter)903 dp_mon_rx_hdr_length_set(struct dp_soc *soc, uint32_t *msg_word, 904 struct htt_rx_ring_tlv_filter *tlv_filter) 905 { 906 } 907 dp_monitor_soc_init(struct dp_soc * soc)908 static inline void dp_monitor_soc_init(struct dp_soc *soc) 909 { 910 } 911 dp_monitor_soc_deinit(struct dp_soc * soc)912 static inline void dp_monitor_soc_deinit(struct dp_soc *soc) 913 { 914 } 915 916 static inline dp_monitor_config_undecoded_metadata_capture(struct dp_pdev * pdev,int val)917 QDF_STATUS dp_monitor_config_undecoded_metadata_capture(struct dp_pdev *pdev, 918 int val) 919 { 920 return QDF_STATUS_SUCCESS; 921 } 922 923 static inline QDF_STATUS dp_monitor_config_undecoded_metadata_phyrx_error_mask(struct dp_pdev * pdev,int mask1,int mask2)924 dp_monitor_config_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev, 925 int mask1, int mask2) 926 { 927 return QDF_STATUS_SUCCESS; 928 } 929 930 static inline QDF_STATUS dp_monitor_get_undecoded_metadata_phyrx_error_mask(struct dp_pdev * pdev,int * mask,int * mask_cont)931 dp_monitor_get_undecoded_metadata_phyrx_error_mask(struct dp_pdev *pdev, 932 int *mask, int *mask_cont) 933 { 934 return QDF_STATUS_SUCCESS; 935 } 936 dp_monitor_soc_htt_srng_setup(struct dp_soc * soc)937 static inline QDF_STATUS dp_monitor_soc_htt_srng_setup(struct dp_soc *soc) 938 { 939 return QDF_STATUS_E_FAILURE; 940 } 941 dp_is_monitor_mode_using_poll(struct dp_soc * soc)942 static inline bool dp_is_monitor_mode_using_poll(struct dp_soc *soc) 943 { 944 return false; 945 } 946 947 static inline dp_tx_mon_buf_refill(struct dp_intr * int_ctx)948 uint32_t dp_tx_mon_buf_refill(struct dp_intr *int_ctx) 949 { 950 return 0; 951 } 952 953 static inline uint32_t dp_tx_mon_process(struct dp_soc * soc,struct dp_intr * int_ctx,uint32_t mac_id,uint32_t quota)954 dp_tx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx, 955 uint32_t mac_id, uint32_t quota) 956 { 957 return 0; 958 } 959 960 static inline uint32_t dp_print_txmon_ring_stat_from_hal(struct dp_pdev * pdev)961 dp_print_txmon_ring_stat_from_hal(struct dp_pdev *pdev) 962 { 963 return 0; 964 } 965 966 static inline dp_rx_mon_buf_refill(struct dp_intr * int_ctx)967 uint32_t dp_rx_mon_buf_refill(struct dp_intr *int_ctx) 968 { 969 return 0; 970 } 971 dp_monitor_is_tx_cap_enabled(struct dp_peer * peer)972 static inline bool dp_monitor_is_tx_cap_enabled(struct dp_peer *peer) 973 { 974 return 0; 975 } 976 dp_monitor_is_rx_cap_enabled(struct dp_peer * peer)977 static inline bool dp_monitor_is_rx_cap_enabled(struct dp_peer *peer) 978 { 979 return 0; 980 } 981 982 static inline void dp_rx_mon_enable(struct dp_soc * soc,uint32_t * msg_word,struct htt_rx_ring_tlv_filter * tlv_filter)983 dp_rx_mon_enable(struct dp_soc *soc, uint32_t *msg_word, 984 struct htt_rx_ring_tlv_filter *tlv_filter) 985 { 986 } 987 988 static inline void dp_mon_rx_packet_length_set(struct dp_soc * soc,uint32_t * msg_word,struct htt_rx_ring_tlv_filter * tlv_filter)989 dp_mon_rx_packet_length_set(struct dp_soc *soc, uint32_t *msg_word, 990 struct htt_rx_ring_tlv_filter *tlv_filter) 991 { 992 } 993 994 static inline void dp_mon_rx_enable_mpdu_logging(struct dp_soc * soc,uint32_t * msg_word,struct htt_rx_ring_tlv_filter * tlv_filter)995 dp_mon_rx_enable_mpdu_logging(struct dp_soc *soc, uint32_t *msg_word, 996 struct htt_rx_ring_tlv_filter *tlv_filter) 997 { 998 } 999 1000 static inline void dp_mon_rx_wmask_subscribe(struct dp_soc * soc,uint32_t * msg_word,int pdev_id,struct htt_rx_ring_tlv_filter * tlv_filter)1001 dp_mon_rx_wmask_subscribe(struct dp_soc *soc, 1002 uint32_t *msg_word, int pdev_id, 1003 struct htt_rx_ring_tlv_filter *tlv_filter) 1004 { 1005 } 1006 1007 static inline void dp_mon_rx_mac_filter_set(struct dp_soc * soc,uint32_t * msg_word,struct htt_rx_ring_tlv_filter * tlv_filter)1008 dp_mon_rx_mac_filter_set(struct dp_soc *soc, uint32_t *msg_word, 1009 struct htt_rx_ring_tlv_filter *tlv_filter) 1010 { 1011 } 1012 1013 static inline void dp_mon_rx_enable_pkt_tlv_offset(struct dp_soc * soc,uint32_t * msg_word,struct htt_rx_ring_tlv_filter * tlv_filter)1014 dp_mon_rx_enable_pkt_tlv_offset(struct dp_soc *soc, uint32_t *msg_word, 1015 struct htt_rx_ring_tlv_filter *tlv_filter) 1016 { 1017 } 1018 1019 static inline void dp_mon_rx_enable_fpmo(struct dp_soc * soc,uint32_t * msg_word,struct htt_rx_ring_tlv_filter * tlv_filter)1020 dp_mon_rx_enable_fpmo(struct dp_soc *soc, uint32_t *msg_word, 1021 struct htt_rx_ring_tlv_filter *tlv_filter) 1022 { 1023 } 1024 1025 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 1026 static inline dp_monitor_peer_telemetry_stats(struct dp_peer * peer,struct cdp_peer_telemetry_stats * stats)1027 void dp_monitor_peer_telemetry_stats(struct dp_peer *peer, 1028 struct cdp_peer_telemetry_stats *stats) 1029 { 1030 } 1031 1032 static inline dp_monitor_peer_deter_stats(struct dp_peer * peer,struct cdp_peer_telemetry_stats * stats)1033 void dp_monitor_peer_deter_stats(struct dp_peer *peer, 1034 struct cdp_peer_telemetry_stats *stats) 1035 { 1036 } 1037 #endif /* WLAN_CONFIG_TELEMETRY_AGENT */ 1038 #endif /* !WIFI_MONITOR_SUPPORT */ 1039 1040 /** 1041 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to 1042 * dp soc handle 1043 * @psoc: CDP psoc handle 1044 * 1045 * Return: struct dp_soc pointer 1046 */ 1047 static inline cdp_soc_t_to_dp_soc(struct cdp_soc_t * psoc)1048 struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc) 1049 { 1050 return (struct dp_soc *)psoc; 1051 } 1052 1053 #define DP_MAX_TIMER_EXEC_TIME_TICKS \ 1054 (QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20) 1055 1056 /** 1057 * enum timer_yield_status - yield status code used in monitor mode timer. 1058 * @DP_TIMER_NO_YIELD: do not yield 1059 * @DP_TIMER_WORK_DONE: yield because work is done 1060 * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted 1061 * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted 1062 */ 1063 enum timer_yield_status { 1064 DP_TIMER_NO_YIELD, 1065 DP_TIMER_WORK_DONE, 1066 DP_TIMER_WORK_EXHAUST, 1067 DP_TIMER_TIME_EXHAUST, 1068 }; 1069 1070 #if DP_PRINT_ENABLE 1071 #include <qdf_types.h> /* qdf_vprint */ 1072 #include <cdp_txrx_handle.h> 1073 1074 enum { 1075 /* FATAL_ERR - print only irrecoverable error messages */ 1076 DP_PRINT_LEVEL_FATAL_ERR, 1077 1078 /* ERR - include non-fatal err messages */ 1079 DP_PRINT_LEVEL_ERR, 1080 1081 /* WARN - include warnings */ 1082 DP_PRINT_LEVEL_WARN, 1083 1084 /* INFO1 - include fundamental, infrequent events */ 1085 DP_PRINT_LEVEL_INFO1, 1086 1087 /* INFO2 - include non-fundamental but infrequent events */ 1088 DP_PRINT_LEVEL_INFO2, 1089 }; 1090 1091 #define dp_print(level, fmt, ...) do { \ 1092 if (level <= g_txrx_print_level) \ 1093 qdf_print(fmt, ## __VA_ARGS__); \ 1094 while (0) 1095 #define DP_PRINT(level, fmt, ...) do { \ 1096 dp_print(level, "DP: " fmt, ## __VA_ARGS__); \ 1097 while (0) 1098 #else 1099 #define DP_PRINT(level, fmt, ...) 1100 #endif /* DP_PRINT_ENABLE */ 1101 1102 #define DP_TRACE(LVL, fmt, args ...) \ 1103 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \ 1104 fmt, ## args) 1105 1106 #ifdef WLAN_SYSFS_DP_STATS 1107 void DP_PRINT_STATS(const char *fmt, ...); 1108 #else /* WLAN_SYSFS_DP_STATS */ 1109 #ifdef DP_PRINT_NO_CONSOLE 1110 /* Stat prints should not go to console or kernel logs.*/ 1111 #define DP_PRINT_STATS(fmt, args ...)\ 1112 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, \ 1113 fmt, ## args) 1114 #else 1115 #define DP_PRINT_STATS(fmt, args ...)\ 1116 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\ 1117 fmt, ## args) 1118 #endif 1119 #endif /* WLAN_SYSFS_DP_STATS */ 1120 1121 #define DP_STATS_INIT(_handle) \ 1122 qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats)) 1123 1124 #define DP_TXRX_PEER_STATS_INIT(_handle, size) \ 1125 qdf_mem_zero(&((_handle)->stats[0]), size) 1126 1127 #define DP_STATS_CLR(_handle) \ 1128 qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats)) 1129 1130 #define DP_TXRX_PEER_STATS_CLR(_handle, size) \ 1131 qdf_mem_zero(&((_handle)->stats[0]), size) 1132 1133 #ifndef DISABLE_DP_STATS 1134 #define DP_STATS_INC(_handle, _field, _delta) \ 1135 { \ 1136 if (likely(_handle)) \ 1137 _handle->stats._field += _delta; \ 1138 } 1139 1140 #define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link) \ 1141 { \ 1142 if (likely(_handle)) \ 1143 _handle->stats[_link]._field += _delta; \ 1144 } 1145 1146 #define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) \ 1147 { \ 1148 if (likely(_handle)) \ 1149 _handle->_field += _delta; \ 1150 } 1151 1152 #define DP_STATS_INCC(_handle, _field, _delta, _cond) \ 1153 { \ 1154 if (_cond && likely(_handle)) \ 1155 _handle->stats._field += _delta; \ 1156 } 1157 1158 #define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link) \ 1159 { \ 1160 if (_cond && likely(_handle)) \ 1161 _handle->stats[_link]._field += _delta; \ 1162 } 1163 1164 #define DP_STATS_DEC(_handle, _field, _delta) \ 1165 { \ 1166 if (likely(_handle)) \ 1167 _handle->stats._field -= _delta; \ 1168 } 1169 1170 #define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) \ 1171 { \ 1172 if (likely(_handle)) \ 1173 _handle->_field -= _delta; \ 1174 } 1175 1176 #define DP_STATS_UPD(_handle, _field, _delta) \ 1177 { \ 1178 if (likely(_handle)) \ 1179 _handle->stats._field = _delta; \ 1180 } 1181 1182 #define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link) \ 1183 { \ 1184 if (likely(_handle)) \ 1185 _handle->stats[_link]._field = _delta; \ 1186 } 1187 1188 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \ 1189 { \ 1190 DP_STATS_INC(_handle, _field.num, _count); \ 1191 DP_STATS_INC(_handle, _field.bytes, _bytes) \ 1192 } 1193 1194 #define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \ 1195 { \ 1196 DP_PEER_STATS_FLAT_INC(_handle, _field.num, _count); \ 1197 DP_PEER_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \ 1198 } 1199 1200 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \ 1201 { \ 1202 DP_STATS_INCC(_handle, _field.num, _count, _cond); \ 1203 DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \ 1204 } 1205 1206 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) \ 1207 { \ 1208 _handle_a->stats._field += _handle_b->stats._field; \ 1209 } 1210 1211 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \ 1212 { \ 1213 DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \ 1214 DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\ 1215 } 1216 1217 #define DP_STATS_AGGR_IDX(_handle_a, _handle_b, _arr, _field, _idx) \ 1218 { \ 1219 _handle_a->stats._arr._field += _handle_b->stats._arr[_idx]._field; \ 1220 } 1221 1222 #define DP_STATS_AGGR_PKT_IDX(_handle_a, _handle_b, _arr, _field, _idx)\ 1223 { \ 1224 DP_STATS_AGGR_IDX(_handle_a, _handle_b, _arr, _field.num, _idx); \ 1225 DP_STATS_AGGR_IDX(_handle_a, _handle_b, _arr, _field.bytes, _idx);\ 1226 } 1227 1228 #define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \ 1229 { \ 1230 _handle_a->stats._field = _handle_b->stats._field; \ 1231 } 1232 1233 #else 1234 #define DP_STATS_INC(_handle, _field, _delta) 1235 #define DP_PEER_LINK_STATS_INC(_handle, _field, _delta, _link) 1236 #define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) 1237 #define DP_STATS_INCC(_handle, _field, _delta, _cond) 1238 #define DP_PEER_LINK_STATS_INCC(_handle, _field, _delta, _cond, _link) 1239 #define DP_STATS_DEC(_handle, _field, _delta) 1240 #define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) 1241 #define DP_STATS_UPD(_handle, _field, _delta) 1242 #define DP_PEER_LINK_STATS_UPD(_handle, _field, _delta, _link) 1243 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) 1244 #define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) 1245 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) 1246 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) 1247 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) 1248 #define DP_STATS_AGGR_IDX(_handle_a, _handle_b, _arr, _field, _idx) 1249 #define DP_STATS_AGGR_PKT_IDX(_handle_a, _handle_b, _arr, _field, _idx) 1250 #endif 1251 1252 #define DP_PEER_PER_PKT_STATS_INC(_handle, _field, _delta, _link) \ 1253 { \ 1254 DP_PEER_LINK_STATS_INC(_handle, per_pkt_stats._field, _delta, _link); \ 1255 } 1256 1257 #define DP_PEER_PER_PKT_STATS_INCC(_handle, _field, _delta, _cond, _link) \ 1258 { \ 1259 DP_PEER_LINK_STATS_INCC(_handle, per_pkt_stats._field, _delta, _cond, _link); \ 1260 } 1261 1262 #define DP_PEER_PER_PKT_STATS_INC_PKT(_handle, _field, _count, _bytes, _link) \ 1263 { \ 1264 DP_PEER_PER_PKT_STATS_INC(_handle, _field.num, _count, _link); \ 1265 DP_PEER_PER_PKT_STATS_INC(_handle, _field.bytes, _bytes, _link) \ 1266 } 1267 1268 #define DP_PEER_PER_PKT_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond, _link) \ 1269 { \ 1270 DP_PEER_PER_PKT_STATS_INCC(_handle, _field.num, _count, _cond, _link); \ 1271 DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond, _link) \ 1272 } 1273 1274 #define DP_PEER_PER_PKT_STATS_UPD(_handle, _field, _delta, _link) \ 1275 { \ 1276 DP_PEER_LINK_STATS_UPD(_handle, per_pkt_stats._field, _delta, _link); \ 1277 } 1278 1279 #ifndef QCA_ENHANCED_STATS_SUPPORT 1280 #define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta, _link) \ 1281 { \ 1282 DP_PEER_LINK_STATS_INC(_handle, extd_stats._field, _delta, _link); \ 1283 } 1284 1285 #define DP_PEER_EXTD_STATS_INCC(_handle, _field, _delta, _cond, _link) \ 1286 { \ 1287 DP_PEER_LINK_STATS_INCC(_handle, extd_stats._field, _delta, _cond, _link); \ 1288 } 1289 1290 #define DP_PEER_EXTD_STATS_UPD(_handle, _field, _delta, _link) \ 1291 { \ 1292 DP_PEER_LINK_STATS_UPD(_handle, extd_stats._field, _delta, _link); \ 1293 } 1294 #endif 1295 1296 #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \ 1297 defined(QCA_ENHANCED_STATS_SUPPORT) 1298 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1299 { \ 1300 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1301 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ 1302 } 1303 1304 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1305 { \ 1306 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1307 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ 1308 } 1309 1310 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1311 { \ 1312 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1313 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \ 1314 } 1315 1316 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1317 { \ 1318 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1319 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \ 1320 } 1321 1322 #define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1323 { \ 1324 if (_cond || !(_handle->hw_txrx_stats_en)) \ 1325 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \ 1326 } 1327 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) 1328 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1329 { \ 1330 if (!(_handle->hw_txrx_stats_en)) \ 1331 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ 1332 } 1333 1334 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1335 { \ 1336 if (!(_handle->hw_txrx_stats_en)) \ 1337 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ 1338 } 1339 1340 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1341 { \ 1342 if (!(_handle->hw_txrx_stats_en)) \ 1343 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); \ 1344 } 1345 1346 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1347 { \ 1348 if (!(_handle->hw_txrx_stats_en)) \ 1349 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); \ 1350 } 1351 1352 #define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1353 { \ 1354 if (!(_handle->hw_txrx_stats_en)) \ 1355 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); \ 1356 } 1357 #else 1358 #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ 1359 DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); 1360 1361 #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ 1362 DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); 1363 1364 #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1365 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes, _link); 1366 1367 #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1368 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes, _link); 1369 1370 #define DP_PEER_UC_INCC_PKT(_handle, _count, _bytes, _cond, _link) \ 1371 DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.unicast, _count, _bytes, _link); 1372 #endif 1373 1374 #ifdef ENABLE_DP_HIST_STATS 1375 #define DP_HIST_INIT() \ 1376 uint32_t num_of_packets[MAX_PDEV_CNT] = {0}; 1377 1378 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) \ 1379 { \ 1380 ++num_of_packets[_pdev_id]; \ 1381 } 1382 1383 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 1384 do { \ 1385 if (_p_cntrs == 1) { \ 1386 DP_STATS_INC(_pdev, \ 1387 tx_comp_histogram.pkts_1, 1); \ 1388 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 1389 DP_STATS_INC(_pdev, \ 1390 tx_comp_histogram.pkts_2_20, 1); \ 1391 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 1392 DP_STATS_INC(_pdev, \ 1393 tx_comp_histogram.pkts_21_40, 1); \ 1394 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 1395 DP_STATS_INC(_pdev, \ 1396 tx_comp_histogram.pkts_41_60, 1); \ 1397 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 1398 DP_STATS_INC(_pdev, \ 1399 tx_comp_histogram.pkts_61_80, 1); \ 1400 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 1401 DP_STATS_INC(_pdev, \ 1402 tx_comp_histogram.pkts_81_100, 1); \ 1403 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 1404 DP_STATS_INC(_pdev, \ 1405 tx_comp_histogram.pkts_101_200, 1); \ 1406 } else if (_p_cntrs > 200) { \ 1407 DP_STATS_INC(_pdev, \ 1408 tx_comp_histogram.pkts_201_plus, 1); \ 1409 } \ 1410 } while (0) 1411 1412 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 1413 do { \ 1414 if (_p_cntrs == 1) { \ 1415 DP_STATS_INC(_pdev, \ 1416 rx_ind_histogram.pkts_1, 1); \ 1417 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 1418 DP_STATS_INC(_pdev, \ 1419 rx_ind_histogram.pkts_2_20, 1); \ 1420 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 1421 DP_STATS_INC(_pdev, \ 1422 rx_ind_histogram.pkts_21_40, 1); \ 1423 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 1424 DP_STATS_INC(_pdev, \ 1425 rx_ind_histogram.pkts_41_60, 1); \ 1426 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 1427 DP_STATS_INC(_pdev, \ 1428 rx_ind_histogram.pkts_61_80, 1); \ 1429 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 1430 DP_STATS_INC(_pdev, \ 1431 rx_ind_histogram.pkts_81_100, 1); \ 1432 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 1433 DP_STATS_INC(_pdev, \ 1434 rx_ind_histogram.pkts_101_200, 1); \ 1435 } else if (_p_cntrs > 200) { \ 1436 DP_STATS_INC(_pdev, \ 1437 rx_ind_histogram.pkts_201_plus, 1); \ 1438 } \ 1439 } while (0) 1440 1441 #define DP_TX_HIST_STATS_PER_PDEV() \ 1442 do { \ 1443 uint8_t hist_stats = 0; \ 1444 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 1445 hist_stats++) { \ 1446 DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 1447 num_of_packets[hist_stats]); \ 1448 } \ 1449 } while (0) 1450 1451 1452 #define DP_RX_HIST_STATS_PER_PDEV() \ 1453 do { \ 1454 uint8_t hist_stats = 0; \ 1455 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 1456 hist_stats++) { \ 1457 DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 1458 num_of_packets[hist_stats]); \ 1459 } \ 1460 } while (0) 1461 1462 #else 1463 #define DP_HIST_INIT() 1464 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) 1465 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 1466 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 1467 #define DP_RX_HIST_STATS_PER_PDEV() 1468 #define DP_TX_HIST_STATS_PER_PDEV() 1469 #endif /* DISABLE_DP_STATS */ 1470 1471 #define FRAME_MASK_IPV4_ARP 0x1 1472 #define FRAME_MASK_IPV4_DHCP 0x2 1473 #define FRAME_MASK_IPV4_EAPOL 0x4 1474 #define FRAME_MASK_IPV6_DHCP 0x8 1475 #define FRAME_MASK_DNS_QUERY 0x10 1476 #define FRAME_MASK_DNS_RESP 0x20 1477 dp_log2_ceil(unsigned int value)1478 static inline int dp_log2_ceil(unsigned int value) 1479 { 1480 unsigned int tmp = value; 1481 int log2 = -1; 1482 1483 if (qdf_unlikely(value == 0)) 1484 return 0; 1485 while (tmp) { 1486 log2++; 1487 tmp >>= 1; 1488 } 1489 if (1 << log2 != value) 1490 log2++; 1491 return log2; 1492 } 1493 1494 #ifdef QCA_SUPPORT_PEER_ISOLATION 1495 #define dp_get_peer_isolation(_peer) ((_peer)->isolation) 1496 dp_set_peer_isolation(struct dp_txrx_peer * txrx_peer,bool val)1497 static inline void dp_set_peer_isolation(struct dp_txrx_peer *txrx_peer, 1498 bool val) 1499 { 1500 txrx_peer->isolation = val; 1501 } 1502 1503 #else 1504 #define dp_get_peer_isolation(_peer) (0) 1505 dp_set_peer_isolation(struct dp_txrx_peer * peer,bool val)1506 static inline void dp_set_peer_isolation(struct dp_txrx_peer *peer, bool val) 1507 { 1508 } 1509 #endif /* QCA_SUPPORT_PEER_ISOLATION */ 1510 1511 bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev); 1512 1513 #ifdef QCA_SUPPORT_WDS_EXTENDED dp_wds_ext_peer_init(struct dp_txrx_peer * txrx_peer)1514 static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer) 1515 { 1516 txrx_peer->wds_ext.osif_peer = NULL; 1517 txrx_peer->wds_ext.init = 0; 1518 } 1519 #else dp_wds_ext_peer_init(struct dp_txrx_peer * txrx_peer)1520 static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer) 1521 { 1522 } 1523 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 1524 1525 #ifdef QCA_HOST2FW_RXBUF_RING 1526 static inline dp_get_rxdma_ring(struct dp_pdev * pdev,int lmac_id)1527 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id) 1528 { 1529 return &pdev->rx_mac_buf_ring[lmac_id]; 1530 } 1531 #else 1532 static inline dp_get_rxdma_ring(struct dp_pdev * pdev,int lmac_id)1533 struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id) 1534 { 1535 return &pdev->soc->rx_refill_buf_ring[lmac_id]; 1536 } 1537 #endif 1538 1539 /* 1540 * The lmac ID for a particular channel band is fixed. 1541 * 2.4GHz band uses lmac_id = 1 1542 * 5GHz/6GHz band uses lmac_id=0 1543 */ 1544 #define DP_INVALID_LMAC_ID (-1) 1545 #define DP_MON_INVALID_LMAC_ID (-1) 1546 #define DP_MAC0_LMAC_ID 0 1547 #define DP_MAC1_LMAC_ID 1 1548 1549 #ifdef FEATURE_TSO_STATS 1550 /** 1551 * dp_init_tso_stats() - Clear tso stats 1552 * @pdev: pdev handle 1553 * 1554 * Return: None 1555 */ 1556 static inline dp_init_tso_stats(struct dp_pdev * pdev)1557 void dp_init_tso_stats(struct dp_pdev *pdev) 1558 { 1559 if (pdev) { 1560 qdf_mem_zero(&((pdev)->stats.tso_stats), 1561 sizeof((pdev)->stats.tso_stats)); 1562 qdf_atomic_init(&pdev->tso_idx); 1563 } 1564 } 1565 1566 /** 1567 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram 1568 * @pdev: pdev handle 1569 * @_p_cntrs: number of tso segments for a tso packet 1570 * 1571 * Return: None 1572 */ 1573 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 1574 uint8_t _p_cntrs); 1575 1576 /** 1577 * dp_tso_segment_update() - Collect tso segment information 1578 * @pdev: pdev handle 1579 * @stats_idx: tso packet number 1580 * @idx: tso segment number 1581 * @seg: tso segment 1582 * 1583 * Return: None 1584 */ 1585 void dp_tso_segment_update(struct dp_pdev *pdev, 1586 uint32_t stats_idx, 1587 uint8_t idx, 1588 struct qdf_tso_seg_t seg); 1589 1590 /** 1591 * dp_tso_packet_update() - TSO Packet information 1592 * @pdev: pdev handle 1593 * @stats_idx: tso packet number 1594 * @msdu: nbuf handle 1595 * @num_segs: tso segments 1596 * 1597 * Return: None 1598 */ 1599 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 1600 qdf_nbuf_t msdu, uint16_t num_segs); 1601 1602 /** 1603 * dp_tso_segment_stats_update() - TSO Segment stats 1604 * @pdev: pdev handle 1605 * @stats_seg: tso segment list 1606 * @stats_idx: tso packet number 1607 * 1608 * Return: None 1609 */ 1610 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 1611 struct qdf_tso_seg_elem_t *stats_seg, 1612 uint32_t stats_idx); 1613 1614 /** 1615 * dp_print_tso_stats() - dump tso statistics 1616 * @soc:soc handle 1617 * @level: verbosity level 1618 * 1619 * Return: None 1620 */ 1621 void dp_print_tso_stats(struct dp_soc *soc, 1622 enum qdf_stats_verbosity_level level); 1623 1624 /** 1625 * dp_txrx_clear_tso_stats() - clear tso stats 1626 * @soc: soc handle 1627 * 1628 * Return: None 1629 */ 1630 void dp_txrx_clear_tso_stats(struct dp_soc *soc); 1631 #else 1632 static inline dp_init_tso_stats(struct dp_pdev * pdev)1633 void dp_init_tso_stats(struct dp_pdev *pdev) 1634 { 1635 } 1636 1637 static inline dp_stats_tso_segment_histogram_update(struct dp_pdev * pdev,uint8_t _p_cntrs)1638 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 1639 uint8_t _p_cntrs) 1640 { 1641 } 1642 1643 static inline dp_tso_segment_update(struct dp_pdev * pdev,uint32_t stats_idx,uint32_t idx,struct qdf_tso_seg_t seg)1644 void dp_tso_segment_update(struct dp_pdev *pdev, 1645 uint32_t stats_idx, 1646 uint32_t idx, 1647 struct qdf_tso_seg_t seg) 1648 { 1649 } 1650 1651 static inline dp_tso_packet_update(struct dp_pdev * pdev,uint32_t stats_idx,qdf_nbuf_t msdu,uint16_t num_segs)1652 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 1653 qdf_nbuf_t msdu, uint16_t num_segs) 1654 { 1655 } 1656 1657 static inline dp_tso_segment_stats_update(struct dp_pdev * pdev,struct qdf_tso_seg_elem_t * stats_seg,uint32_t stats_idx)1658 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 1659 struct qdf_tso_seg_elem_t *stats_seg, 1660 uint32_t stats_idx) 1661 { 1662 } 1663 1664 static inline dp_print_tso_stats(struct dp_soc * soc,enum qdf_stats_verbosity_level level)1665 void dp_print_tso_stats(struct dp_soc *soc, 1666 enum qdf_stats_verbosity_level level) 1667 { 1668 } 1669 1670 static inline dp_txrx_clear_tso_stats(struct dp_soc * soc)1671 void dp_txrx_clear_tso_stats(struct dp_soc *soc) 1672 { 1673 } 1674 #endif /* FEATURE_TSO_STATS */ 1675 1676 /** 1677 * dp_txrx_get_peer_per_pkt_stats_param() - Get peer per pkt stats param 1678 * @peer: DP peer handle 1679 * @type: Requested stats type 1680 * @buf: Buffer to hold the value 1681 * 1682 * Return: status success/failure 1683 */ 1684 QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer, 1685 enum cdp_peer_stats_type type, 1686 cdp_peer_stats_param_t *buf); 1687 1688 /** 1689 * dp_txrx_get_peer_extd_stats_param() - Get peer extd stats param 1690 * @peer: DP peer handle 1691 * @type: Requested stats type 1692 * @buf: Buffer to hold the value 1693 * 1694 * Return: status success/failure 1695 */ 1696 QDF_STATUS dp_txrx_get_peer_extd_stats_param(struct dp_peer *peer, 1697 enum cdp_peer_stats_type type, 1698 cdp_peer_stats_param_t *buf); 1699 1700 #define DP_HTT_T2H_HP_PIPE 5 1701 /** 1702 * dp_update_pdev_stats(): Update the pdev stats 1703 * @tgtobj: pdev handle 1704 * @srcobj: vdev stats structure 1705 * 1706 * Update the pdev stats from the specified vdev stats 1707 * 1708 * Return: None 1709 */ 1710 void dp_update_pdev_stats(struct dp_pdev *tgtobj, 1711 struct cdp_vdev_stats *srcobj); 1712 1713 /** 1714 * dp_update_vdev_ingress_stats(): Update the vdev ingress stats 1715 * @tgtobj: vdev handle 1716 * 1717 * Update the vdev ingress stats 1718 * 1719 * Return: None 1720 */ 1721 void dp_update_vdev_ingress_stats(struct dp_vdev *tgtobj); 1722 1723 /** 1724 * dp_update_vdev_rate_stats() - Update the vdev rate stats 1725 * @tgtobj: tgt buffer for cdp vdev stats 1726 * @srcobj: srcobj dp vdev stats 1727 * 1728 * Return: None 1729 */ 1730 void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj, 1731 struct dp_vdev_stats *srcobj); 1732 1733 /** 1734 * dp_update_pdev_ingress_stats(): Update the pdev ingress stats 1735 * @tgtobj: pdev handle 1736 * @srcobj: vdev stats structure 1737 * 1738 * Update the pdev ingress stats from the specified vdev stats 1739 * 1740 * Return: None 1741 */ 1742 void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj, 1743 struct dp_vdev *srcobj); 1744 1745 /** 1746 * dp_copy_vdev_stats_to_tgt_buf(): Update the cdp vdev ingress stats from 1747 * dp vdev ingress stats 1748 * @vdev_stats: cdp vdev stats structure 1749 * @stats: dp vdev stats structure 1750 * @xmit_type: xmit type of packet - MLD/Link 1751 * 1752 * Update the cdp vdev ingress stats from dp vdev ingress stats 1753 * 1754 * Return: None 1755 */ 1756 1757 void dp_copy_vdev_stats_to_tgt_buf(struct cdp_vdev_stats *vdev_stats, 1758 struct dp_vdev_stats *stats, 1759 enum dp_pkt_xmit_type xmit_type); 1760 1761 /** 1762 * dp_update_vdev_stats(): Update the vdev stats 1763 * @soc: soc handle 1764 * @srcobj: DP_PEER object 1765 * @arg: point to vdev stats structure 1766 * 1767 * Update the vdev stats from the specified peer stats 1768 * 1769 * Return: None 1770 */ 1771 void dp_update_vdev_stats(struct dp_soc *soc, 1772 struct dp_peer *srcobj, 1773 void *arg); 1774 1775 /** 1776 * dp_update_vdev_stats_on_peer_unmap() - Update the vdev stats on peer unmap 1777 * @vdev: DP_VDEV handle 1778 * @peer: DP_PEER handle 1779 * 1780 * Return: None 1781 */ 1782 void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev, 1783 struct dp_peer *peer); 1784 1785 #ifdef IPA_OFFLOAD 1786 #define DP_IPA_UPDATE_RX_STATS(__tgtobj, __srcobj) \ 1787 { \ 1788 DP_STATS_AGGR_PKT(__tgtobj, __srcobj, rx.rx_total); \ 1789 } 1790 1791 #define DP_IPA_UPDATE_PER_PKT_RX_STATS(__tgtobj, __srcobj) \ 1792 { \ 1793 (__tgtobj)->rx.rx_total.num += (__srcobj)->rx.rx_total.num; \ 1794 (__tgtobj)->rx.rx_total.bytes += (__srcobj)->rx.rx_total.bytes; \ 1795 } 1796 #else 1797 #define DP_IPA_UPDATE_PER_PKT_RX_STATS(tgtobj, srcobj) \ 1798 1799 #define DP_IPA_UPDATE_RX_STATS(tgtobj, srcobj) 1800 #endif 1801 1802 #define DP_UPDATE_STATS(_tgtobj, _srcobj) \ 1803 do { \ 1804 uint8_t i; \ 1805 uint8_t pream_type; \ 1806 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 1807 for (i = 0; i < MAX_MCS; i++) { \ 1808 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1809 tx.pkt_type[pream_type].mcs_count[i]); \ 1810 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1811 rx.pkt_type[pream_type].mcs_count[i]); \ 1812 } \ 1813 } \ 1814 \ 1815 for (i = 0; i < MAX_BW; i++) { \ 1816 DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \ 1817 DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \ 1818 } \ 1819 \ 1820 for (i = 0; i < SS_COUNT; i++) { \ 1821 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \ 1822 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \ 1823 } \ 1824 for (i = 0; i < WME_AC_MAX; i++) { \ 1825 DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \ 1826 DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \ 1827 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1828 tx.wme_ac_type_bytes[i]); \ 1829 DP_STATS_AGGR(_tgtobj, _srcobj, \ 1830 rx.wme_ac_type_bytes[i]); \ 1831 DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \ 1832 \ 1833 } \ 1834 \ 1835 for (i = 0; i < MAX_GI; i++) { \ 1836 DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \ 1837 DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \ 1838 } \ 1839 \ 1840 for (i = 0; i < MAX_RECEPTION_TYPES; i++) \ 1841 DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \ 1842 \ 1843 if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) { \ 1844 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \ 1845 DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \ 1846 } \ 1847 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \ 1848 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \ 1849 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \ 1850 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \ 1851 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \ 1852 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \ 1853 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \ 1854 DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \ 1855 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \ 1856 DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \ 1857 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \ 1858 DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \ 1859 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \ 1860 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \ 1861 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \ 1862 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \ 1863 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \ 1864 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \ 1865 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \ 1866 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \ 1867 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_queue_disable); \ 1868 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_no_match); \ 1869 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_threshold); \ 1870 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.drop_link_desc_na); \ 1871 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_drop); \ 1872 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.mcast_vdev_drop); \ 1873 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_rr); \ 1874 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \ 1875 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_total); \ 1876 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_success); \ 1877 \ 1878 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \ 1879 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \ 1880 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.fcserr); \ 1881 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.pn_err); \ 1882 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.oor_err); \ 1883 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.jump_2k_err); \ 1884 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.rxdma_wifi_parse_err); \ 1885 if (_srcobj->stats.rx.snr != 0) \ 1886 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.snr); \ 1887 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \ 1888 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \ 1889 DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \ 1890 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \ 1891 DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \ 1892 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \ 1893 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \ 1894 \ 1895 for (i = 0; i < CDP_MAX_RX_RINGS; i++) \ 1896 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \ 1897 \ 1898 for (i = 0; i < CDP_MAX_LMACS; i++) \ 1899 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rx_lmac[i]); \ 1900 \ 1901 _srcobj->stats.rx.unicast.num = \ 1902 _srcobj->stats.rx.to_stack.num - \ 1903 _srcobj->stats.rx.multicast.num; \ 1904 _srcobj->stats.rx.unicast.bytes = \ 1905 _srcobj->stats.rx.to_stack.bytes - \ 1906 _srcobj->stats.rx.multicast.bytes; \ 1907 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \ 1908 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \ 1909 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \ 1910 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \ 1911 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \ 1912 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \ 1913 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \ 1914 \ 1915 _tgtobj->stats.tx.last_ack_rssi = \ 1916 _srcobj->stats.tx.last_ack_rssi; \ 1917 DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \ 1918 DP_STATS_AGGR(_tgtobj, _srcobj, rx.peer_unauth_rx_pkt_drop); \ 1919 DP_STATS_AGGR(_tgtobj, _srcobj, rx.policy_check_drop); \ 1920 DP_IPA_UPDATE_RX_STATS(_tgtobj, _srcobj); \ 1921 } while (0) 1922 1923 #ifdef VDEV_PEER_PROTOCOL_COUNT 1924 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) \ 1925 { \ 1926 uint8_t j; \ 1927 for (j = 0; j < CDP_TRACE_MAX; j++) { \ 1928 _tgtobj->tx.protocol_trace_cnt[j].egress_cnt += \ 1929 _srcobj->tx.protocol_trace_cnt[j].egress_cnt; \ 1930 _tgtobj->tx.protocol_trace_cnt[j].ingress_cnt += \ 1931 _srcobj->tx.protocol_trace_cnt[j].ingress_cnt; \ 1932 _tgtobj->rx.protocol_trace_cnt[j].egress_cnt += \ 1933 _srcobj->rx.protocol_trace_cnt[j].egress_cnt; \ 1934 _tgtobj->rx.protocol_trace_cnt[j].ingress_cnt += \ 1935 _srcobj->rx.protocol_trace_cnt[j].ingress_cnt; \ 1936 } \ 1937 } 1938 #else 1939 #define DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj) 1940 #endif 1941 1942 #ifdef WLAN_FEATURE_11BE 1943 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) \ 1944 do { \ 1945 uint8_t i, mu_type; \ 1946 for (i = 0; i < MAX_MCS; i++) { \ 1947 _tgtobj->tx.su_be_ppdu_cnt.mcs_count[i] += \ 1948 _srcobj->tx.su_be_ppdu_cnt.mcs_count[i]; \ 1949 _tgtobj->rx.su_be_ppdu_cnt.mcs_count[i] += \ 1950 _srcobj->rx.su_be_ppdu_cnt.mcs_count[i]; \ 1951 } \ 1952 for (mu_type = 0; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 1953 for (i = 0; i < MAX_MCS; i++) { \ 1954 _tgtobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \ 1955 _srcobj->tx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \ 1956 _tgtobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i] += \ 1957 _srcobj->rx.mu_be_ppdu_cnt[mu_type].mcs_count[i]; \ 1958 } \ 1959 } \ 1960 for (i = 0; i < MAX_PUNCTURED_MODE; i++) { \ 1961 _tgtobj->tx.punc_bw[i] += _srcobj->tx.punc_bw[i]; \ 1962 _tgtobj->rx.punc_bw[i] += _srcobj->rx.punc_bw[i]; \ 1963 } \ 1964 } while (0) 1965 #else 1966 #define DP_UPDATE_11BE_STATS(_tgtobj, _srcobj) 1967 #endif 1968 1969 #define DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj) \ 1970 do { \ 1971 _tgtobj->tx.comp_pkt.num += _srcobj->tx.comp_pkt.num; \ 1972 _tgtobj->tx.comp_pkt.bytes += _srcobj->tx.comp_pkt.bytes; \ 1973 _tgtobj->tx.tx_failed += _srcobj->tx.tx_failed; \ 1974 _tgtobj->rx.to_stack.num += _srcobj->rx.to_stack.num; \ 1975 _tgtobj->rx.to_stack.bytes += _srcobj->rx.to_stack.bytes; \ 1976 } while (0) 1977 1978 #define DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj) \ 1979 do { \ 1980 uint8_t i; \ 1981 _tgtobj->tx.ucast.num += _srcobj->tx.ucast.num; \ 1982 _tgtobj->tx.ucast.bytes += _srcobj->tx.ucast.bytes; \ 1983 _tgtobj->tx.mcast.num += _srcobj->tx.mcast.num; \ 1984 _tgtobj->tx.mcast.bytes += _srcobj->tx.mcast.bytes; \ 1985 _tgtobj->tx.bcast.num += _srcobj->tx.bcast.num; \ 1986 _tgtobj->tx.bcast.bytes += _srcobj->tx.bcast.bytes; \ 1987 _tgtobj->tx.nawds_mcast.num += _srcobj->tx.nawds_mcast.num; \ 1988 _tgtobj->tx.nawds_mcast.bytes += \ 1989 _srcobj->tx.nawds_mcast.bytes; \ 1990 _tgtobj->tx.tx_success.num += _srcobj->tx.tx_success.num; \ 1991 _tgtobj->tx.tx_success.bytes += _srcobj->tx.tx_success.bytes; \ 1992 _tgtobj->tx.nawds_mcast_drop += _srcobj->tx.nawds_mcast_drop; \ 1993 _tgtobj->tx.ofdma += _srcobj->tx.ofdma; \ 1994 _tgtobj->tx.non_amsdu_cnt += _srcobj->tx.non_amsdu_cnt; \ 1995 _tgtobj->tx.amsdu_cnt += _srcobj->tx.amsdu_cnt; \ 1996 _tgtobj->tx.dropped.fw_rem.num += \ 1997 _srcobj->tx.dropped.fw_rem.num; \ 1998 _tgtobj->tx.dropped.fw_rem.bytes += \ 1999 _srcobj->tx.dropped.fw_rem.bytes; \ 2000 _tgtobj->tx.dropped.fw_rem_notx += \ 2001 _srcobj->tx.dropped.fw_rem_notx; \ 2002 _tgtobj->tx.dropped.fw_rem_tx += \ 2003 _srcobj->tx.dropped.fw_rem_tx; \ 2004 _tgtobj->tx.dropped.age_out += _srcobj->tx.dropped.age_out; \ 2005 _tgtobj->tx.dropped.fw_reason1 += \ 2006 _srcobj->tx.dropped.fw_reason1; \ 2007 _tgtobj->tx.dropped.fw_reason2 += \ 2008 _srcobj->tx.dropped.fw_reason2; \ 2009 _tgtobj->tx.dropped.fw_reason3 += \ 2010 _srcobj->tx.dropped.fw_reason3; \ 2011 _tgtobj->tx.dropped.fw_rem_queue_disable += \ 2012 _srcobj->tx.dropped.fw_rem_queue_disable; \ 2013 _tgtobj->tx.dropped.fw_rem_no_match += \ 2014 _srcobj->tx.dropped.fw_rem_no_match; \ 2015 _tgtobj->tx.dropped.drop_threshold += \ 2016 _srcobj->tx.dropped.drop_threshold; \ 2017 _tgtobj->tx.dropped.drop_link_desc_na += \ 2018 _srcobj->tx.dropped.drop_link_desc_na; \ 2019 _tgtobj->tx.dropped.invalid_drop += \ 2020 _srcobj->tx.dropped.invalid_drop; \ 2021 _tgtobj->tx.dropped.mcast_vdev_drop += \ 2022 _srcobj->tx.dropped.mcast_vdev_drop; \ 2023 _tgtobj->tx.dropped.invalid_rr += \ 2024 _srcobj->tx.dropped.invalid_rr; \ 2025 _tgtobj->tx.failed_retry_count += \ 2026 _srcobj->tx.failed_retry_count; \ 2027 _tgtobj->tx.retry_count += _srcobj->tx.retry_count; \ 2028 _tgtobj->tx.multiple_retry_count += \ 2029 _srcobj->tx.multiple_retry_count; \ 2030 _tgtobj->tx.tx_success_twt.num += \ 2031 _srcobj->tx.tx_success_twt.num; \ 2032 _tgtobj->tx.tx_success_twt.bytes += \ 2033 _srcobj->tx.tx_success_twt.bytes; \ 2034 _tgtobj->tx.last_tx_ts = _srcobj->tx.last_tx_ts; \ 2035 _tgtobj->tx.release_src_not_tqm += \ 2036 _srcobj->tx.release_src_not_tqm; \ 2037 for (i = 0; i < QDF_PROTO_SUBTYPE_MAX; i++) { \ 2038 _tgtobj->tx.no_ack_count[i] += \ 2039 _srcobj->tx.no_ack_count[i];\ 2040 } \ 2041 \ 2042 _tgtobj->rx.multicast.num += _srcobj->rx.multicast.num; \ 2043 _tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \ 2044 _tgtobj->rx.rx_success.num += _srcobj->rx.rx_success.num;\ 2045 _tgtobj->rx.rx_success.bytes += _srcobj->rx.rx_success.bytes;\ 2046 _tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \ 2047 _tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \ 2048 _tgtobj->rx.unicast.num += _srcobj->rx.unicast.num; \ 2049 _tgtobj->rx.unicast.bytes += _srcobj->rx.unicast.bytes; \ 2050 _tgtobj->rx.raw.num += _srcobj->rx.raw.num; \ 2051 _tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \ 2052 _tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \ 2053 _tgtobj->rx.mcast_3addr_drop += _srcobj->rx.mcast_3addr_drop; \ 2054 _tgtobj->rx.mec_drop.num += _srcobj->rx.mec_drop.num; \ 2055 _tgtobj->rx.mec_drop.bytes += _srcobj->rx.mec_drop.bytes; \ 2056 _tgtobj->rx.ppeds_drop.num += _srcobj->rx.ppeds_drop.num; \ 2057 _tgtobj->rx.ppeds_drop.bytes += _srcobj->rx.ppeds_drop.bytes; \ 2058 _tgtobj->rx.intra_bss.pkts.num += \ 2059 _srcobj->rx.intra_bss.pkts.num; \ 2060 _tgtobj->rx.intra_bss.pkts.bytes += \ 2061 _srcobj->rx.intra_bss.pkts.bytes; \ 2062 _tgtobj->rx.intra_bss.fail.num += \ 2063 _srcobj->rx.intra_bss.fail.num; \ 2064 _tgtobj->rx.intra_bss.fail.bytes += \ 2065 _srcobj->rx.intra_bss.fail.bytes; \ 2066 _tgtobj->rx.intra_bss.mdns_no_fwd += \ 2067 _srcobj->rx.intra_bss.mdns_no_fwd; \ 2068 _tgtobj->rx.err.mic_err += _srcobj->rx.err.mic_err; \ 2069 _tgtobj->rx.err.decrypt_err += _srcobj->rx.err.decrypt_err; \ 2070 _tgtobj->rx.err.fcserr += _srcobj->rx.err.fcserr; \ 2071 _tgtobj->rx.err.pn_err += _srcobj->rx.err.pn_err; \ 2072 _tgtobj->rx.err.oor_err += _srcobj->rx.err.oor_err; \ 2073 _tgtobj->rx.err.jump_2k_err += _srcobj->rx.err.jump_2k_err; \ 2074 _tgtobj->rx.err.rxdma_wifi_parse_err += \ 2075 _srcobj->rx.err.rxdma_wifi_parse_err; \ 2076 _tgtobj->rx.non_amsdu_cnt += _srcobj->rx.non_amsdu_cnt; \ 2077 _tgtobj->rx.amsdu_cnt += _srcobj->rx.amsdu_cnt; \ 2078 _tgtobj->rx.rx_retries += _srcobj->rx.rx_retries; \ 2079 _tgtobj->rx.multipass_rx_pkt_drop += \ 2080 _srcobj->rx.multipass_rx_pkt_drop; \ 2081 _tgtobj->rx.peer_unauth_rx_pkt_drop += \ 2082 _srcobj->rx.peer_unauth_rx_pkt_drop; \ 2083 _tgtobj->rx.policy_check_drop += \ 2084 _srcobj->rx.policy_check_drop; \ 2085 _tgtobj->rx.to_stack_twt.num += _srcobj->rx.to_stack_twt.num; \ 2086 _tgtobj->rx.to_stack_twt.bytes += \ 2087 _srcobj->rx.to_stack_twt.bytes; \ 2088 _tgtobj->rx.last_rx_ts = _srcobj->rx.last_rx_ts; \ 2089 for (i = 0; i < CDP_MAX_RX_RINGS; i++) { \ 2090 _tgtobj->rx.rcvd_reo[i].num += \ 2091 _srcobj->rx.rcvd_reo[i].num; \ 2092 _tgtobj->rx.rcvd_reo[i].bytes += \ 2093 _srcobj->rx.rcvd_reo[i].bytes; \ 2094 _tgtobj->rx.rcvd.num += \ 2095 _srcobj->rx.rcvd_reo[i].num; \ 2096 _tgtobj->rx.rcvd.bytes += \ 2097 _srcobj->rx.rcvd_reo[i].bytes; \ 2098 } \ 2099 for (i = 0; i < CDP_MAX_LMACS; i++) { \ 2100 _tgtobj->rx.rx_lmac[i].num += \ 2101 _srcobj->rx.rx_lmac[i].num; \ 2102 _tgtobj->rx.rx_lmac[i].bytes += \ 2103 _srcobj->rx.rx_lmac[i].bytes; \ 2104 } \ 2105 DP_IPA_UPDATE_PER_PKT_RX_STATS(_tgtobj, _srcobj); \ 2106 DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj); \ 2107 } while (0) 2108 2109 #define DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj) \ 2110 do { \ 2111 uint8_t i, pream_type, mu_type; \ 2112 _tgtobj->tx.stbc += _srcobj->tx.stbc; \ 2113 _tgtobj->tx.ldpc += _srcobj->tx.ldpc; \ 2114 _tgtobj->tx.retries += _srcobj->tx.retries; \ 2115 _tgtobj->tx.ampdu_cnt += _srcobj->tx.ampdu_cnt; \ 2116 _tgtobj->tx.non_ampdu_cnt += _srcobj->tx.non_ampdu_cnt; \ 2117 _tgtobj->tx.num_ppdu_cookie_valid += \ 2118 _srcobj->tx.num_ppdu_cookie_valid; \ 2119 _tgtobj->tx.tx_ppdus += _srcobj->tx.tx_ppdus; \ 2120 _tgtobj->tx.tx_mpdus_success += _srcobj->tx.tx_mpdus_success; \ 2121 _tgtobj->tx.tx_mpdus_tried += _srcobj->tx.tx_mpdus_tried; \ 2122 _tgtobj->tx.tx_rate = _srcobj->tx.tx_rate; \ 2123 _tgtobj->tx.last_tx_rate = _srcobj->tx.last_tx_rate; \ 2124 _tgtobj->tx.last_tx_rate_mcs = _srcobj->tx.last_tx_rate_mcs; \ 2125 _tgtobj->tx.mcast_last_tx_rate = \ 2126 _srcobj->tx.mcast_last_tx_rate; \ 2127 _tgtobj->tx.mcast_last_tx_rate_mcs = \ 2128 _srcobj->tx.mcast_last_tx_rate_mcs; \ 2129 _tgtobj->tx.rnd_avg_tx_rate = _srcobj->tx.rnd_avg_tx_rate; \ 2130 _tgtobj->tx.avg_tx_rate = _srcobj->tx.avg_tx_rate; \ 2131 _tgtobj->tx.tx_ratecode = _srcobj->tx.tx_ratecode; \ 2132 _tgtobj->tx.pream_punct_cnt += _srcobj->tx.pream_punct_cnt; \ 2133 _tgtobj->tx.ru_start = _srcobj->tx.ru_start; \ 2134 _tgtobj->tx.ru_tones = _srcobj->tx.ru_tones; \ 2135 _tgtobj->tx.last_ack_rssi = _srcobj->tx.last_ack_rssi; \ 2136 _tgtobj->tx.nss_info = _srcobj->tx.nss_info; \ 2137 _tgtobj->tx.mcs_info = _srcobj->tx.mcs_info; \ 2138 _tgtobj->tx.bw_info = _srcobj->tx.bw_info; \ 2139 _tgtobj->tx.gi_info = _srcobj->tx.gi_info; \ 2140 _tgtobj->tx.preamble_info = _srcobj->tx.preamble_info; \ 2141 _tgtobj->tx.retries_mpdu += _srcobj->tx.retries_mpdu; \ 2142 _tgtobj->tx.mpdu_success_with_retries += \ 2143 _srcobj->tx.mpdu_success_with_retries; \ 2144 _tgtobj->tx.rts_success = _srcobj->tx.rts_success; \ 2145 _tgtobj->tx.rts_failure = _srcobj->tx.rts_failure; \ 2146 _tgtobj->tx.bar_cnt = _srcobj->tx.bar_cnt; \ 2147 _tgtobj->tx.ndpa_cnt = _srcobj->tx.ndpa_cnt; \ 2148 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 2149 for (i = 0; i < MAX_MCS; i++) \ 2150 _tgtobj->tx.pkt_type[pream_type].mcs_count[i] += \ 2151 _srcobj->tx.pkt_type[pream_type].mcs_count[i]; \ 2152 } \ 2153 for (i = 0; i < WME_AC_MAX; i++) { \ 2154 _tgtobj->tx.wme_ac_type[i] += _srcobj->tx.wme_ac_type[i]; \ 2155 _tgtobj->tx.wme_ac_type_bytes[i] += \ 2156 _srcobj->tx.wme_ac_type_bytes[i]; \ 2157 _tgtobj->tx.excess_retries_per_ac[i] += \ 2158 _srcobj->tx.excess_retries_per_ac[i]; \ 2159 } \ 2160 for (i = 0; i < MAX_GI; i++) { \ 2161 _tgtobj->tx.sgi_count[i] += _srcobj->tx.sgi_count[i]; \ 2162 } \ 2163 for (i = 0; i < SS_COUNT; i++) { \ 2164 _tgtobj->tx.nss[i] += _srcobj->tx.nss[i]; \ 2165 } \ 2166 for (i = 0; i < MAX_BW; i++) { \ 2167 _tgtobj->tx.bw[i] += _srcobj->tx.bw[i]; \ 2168 } \ 2169 for (i = 0; i < MAX_RU_LOCATIONS; i++) { \ 2170 _tgtobj->tx.ru_loc[i].num_msdu += \ 2171 _srcobj->tx.ru_loc[i].num_msdu; \ 2172 _tgtobj->tx.ru_loc[i].num_mpdu += \ 2173 _srcobj->tx.ru_loc[i].num_mpdu; \ 2174 _tgtobj->tx.ru_loc[i].mpdu_tried += \ 2175 _srcobj->tx.ru_loc[i].mpdu_tried; \ 2176 } \ 2177 for (i = 0; i < MAX_TRANSMIT_TYPES; i++) { \ 2178 _tgtobj->tx.transmit_type[i].num_msdu += \ 2179 _srcobj->tx.transmit_type[i].num_msdu; \ 2180 _tgtobj->tx.transmit_type[i].num_mpdu += \ 2181 _srcobj->tx.transmit_type[i].num_mpdu; \ 2182 _tgtobj->tx.transmit_type[i].mpdu_tried += \ 2183 _srcobj->tx.transmit_type[i].mpdu_tried; \ 2184 } \ 2185 for (i = 0; i < MAX_MU_GROUP_ID; i++) { \ 2186 _tgtobj->tx.mu_group_id[i] = _srcobj->tx.mu_group_id[i]; \ 2187 } \ 2188 _tgtobj->tx.tx_ucast_total.num += \ 2189 _srcobj->tx.tx_ucast_total.num;\ 2190 _tgtobj->tx.tx_ucast_total.bytes += \ 2191 _srcobj->tx.tx_ucast_total.bytes;\ 2192 _tgtobj->tx.tx_ucast_success.num += \ 2193 _srcobj->tx.tx_ucast_success.num; \ 2194 _tgtobj->tx.tx_ucast_success.bytes += \ 2195 _srcobj->tx.tx_ucast_success.bytes; \ 2196 \ 2197 for (i = 0; i < CDP_RSSI_CHAIN_LEN; i++) \ 2198 _tgtobj->tx.rssi_chain[i] = _srcobj->tx.rssi_chain[i]; \ 2199 _tgtobj->rx.mpdu_cnt_fcs_ok += _srcobj->rx.mpdu_cnt_fcs_ok; \ 2200 _tgtobj->rx.mpdu_cnt_fcs_err += _srcobj->rx.mpdu_cnt_fcs_err; \ 2201 _tgtobj->rx.non_ampdu_cnt += _srcobj->rx.non_ampdu_cnt; \ 2202 _tgtobj->rx.ampdu_cnt += _srcobj->rx.ampdu_cnt; \ 2203 _tgtobj->rx.rx_mpdus += _srcobj->rx.rx_mpdus; \ 2204 _tgtobj->rx.rx_ppdus += _srcobj->rx.rx_ppdus; \ 2205 _tgtobj->rx.rx_rate = _srcobj->rx.rx_rate; \ 2206 _tgtobj->rx.last_rx_rate = _srcobj->rx.last_rx_rate; \ 2207 _tgtobj->rx.rnd_avg_rx_rate = _srcobj->rx.rnd_avg_rx_rate; \ 2208 _tgtobj->rx.avg_rx_rate = _srcobj->rx.avg_rx_rate; \ 2209 _tgtobj->rx.rx_ratecode = _srcobj->rx.rx_ratecode; \ 2210 _tgtobj->rx.avg_snr = _srcobj->rx.avg_snr; \ 2211 _tgtobj->rx.rx_snr_measured_time = \ 2212 _srcobj->rx.rx_snr_measured_time; \ 2213 _tgtobj->rx.snr = _srcobj->rx.snr; \ 2214 _tgtobj->rx.last_snr = _srcobj->rx.last_snr; \ 2215 _tgtobj->rx.nss_info = _srcobj->rx.nss_info; \ 2216 _tgtobj->rx.mcs_info = _srcobj->rx.mcs_info; \ 2217 _tgtobj->rx.bw_info = _srcobj->rx.bw_info; \ 2218 _tgtobj->rx.gi_info = _srcobj->rx.gi_info; \ 2219 _tgtobj->rx.preamble_info = _srcobj->rx.preamble_info; \ 2220 _tgtobj->rx.mpdu_retry_cnt += _srcobj->rx.mpdu_retry_cnt; \ 2221 _tgtobj->rx.bar_cnt = _srcobj->rx.bar_cnt; \ 2222 _tgtobj->rx.ndpa_cnt = _srcobj->rx.ndpa_cnt; \ 2223 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 2224 for (i = 0; i < MAX_MCS; i++) { \ 2225 _tgtobj->rx.pkt_type[pream_type].mcs_count[i] += \ 2226 _srcobj->rx.pkt_type[pream_type].mcs_count[i]; \ 2227 } \ 2228 } \ 2229 for (i = 0; i < WME_AC_MAX; i++) { \ 2230 _tgtobj->rx.wme_ac_type[i] += _srcobj->rx.wme_ac_type[i]; \ 2231 _tgtobj->rx.wme_ac_type_bytes[i] += \ 2232 _srcobj->rx.wme_ac_type_bytes[i]; \ 2233 } \ 2234 for (i = 0; i < MAX_MCS; i++) { \ 2235 _tgtobj->rx.su_ax_ppdu_cnt.mcs_count[i] += \ 2236 _srcobj->rx.su_ax_ppdu_cnt.mcs_count[i]; \ 2237 _tgtobj->rx.rx_mpdu_cnt[i] += _srcobj->rx.rx_mpdu_cnt[i]; \ 2238 } \ 2239 for (mu_type = 0 ; mu_type < TXRX_TYPE_MU_MAX; mu_type++) { \ 2240 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok += \ 2241 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_ok; \ 2242 _tgtobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err += \ 2243 _srcobj->rx.rx_mu[mu_type].mpdu_cnt_fcs_err; \ 2244 for (i = 0; i < SS_COUNT; i++) \ 2245 _tgtobj->rx.rx_mu[mu_type].ppdu_nss[i] += \ 2246 _srcobj->rx.rx_mu[mu_type].ppdu_nss[i]; \ 2247 for (i = 0; i < MAX_MCS; i++) \ 2248 _tgtobj->rx.rx_mu[mu_type].ppdu.mcs_count[i] += \ 2249 _srcobj->rx.rx_mu[mu_type].ppdu.mcs_count[i]; \ 2250 } \ 2251 for (i = 0; i < MAX_RECEPTION_TYPES; i++) { \ 2252 _tgtobj->rx.reception_type[i] += \ 2253 _srcobj->rx.reception_type[i]; \ 2254 _tgtobj->rx.ppdu_cnt[i] += _srcobj->rx.ppdu_cnt[i]; \ 2255 } \ 2256 for (i = 0; i < MAX_GI; i++) { \ 2257 _tgtobj->rx.sgi_count[i] += _srcobj->rx.sgi_count[i]; \ 2258 } \ 2259 for (i = 0; i < SS_COUNT; i++) { \ 2260 _tgtobj->rx.nss[i] += _srcobj->rx.nss[i]; \ 2261 _tgtobj->rx.ppdu_nss[i] += _srcobj->rx.ppdu_nss[i]; \ 2262 } \ 2263 for (i = 0; i < MAX_BW; i++) { \ 2264 _tgtobj->rx.bw[i] += _srcobj->rx.bw[i]; \ 2265 } \ 2266 DP_UPDATE_11BE_STATS(_tgtobj, _srcobj); \ 2267 } while (0) 2268 2269 #define DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj) \ 2270 do { \ 2271 DP_UPDATE_BASIC_STATS(_tgtobj, _srcobj); \ 2272 DP_UPDATE_PER_PKT_STATS(_tgtobj, _srcobj); \ 2273 DP_UPDATE_EXTD_STATS(_tgtobj, _srcobj); \ 2274 } while (0) 2275 2276 #define DP_UPDATE_RX_INGRESS_STATS(_tgtobj, _srcobj) \ 2277 do { \ 2278 _tgtobj->rx_i.reo_rcvd_pkt.num += \ 2279 _srcobj->rx_i.reo_rcvd_pkt.num; \ 2280 _tgtobj->rx_i.reo_rcvd_pkt.bytes += \ 2281 _srcobj->rx_i.reo_rcvd_pkt.bytes; \ 2282 _tgtobj->rx_i.null_q_desc_pkt.num += \ 2283 _srcobj->rx_i.null_q_desc_pkt.num; \ 2284 _tgtobj->rx_i.null_q_desc_pkt.bytes += \ 2285 _srcobj->rx_i.null_q_desc_pkt.bytes; \ 2286 _tgtobj->rx_i.routed_eapol_pkt.num += \ 2287 _srcobj->rx_i.routed_eapol_pkt.num; \ 2288 _tgtobj->rx_i.routed_eapol_pkt.bytes += \ 2289 _srcobj->rx_i.routed_eapol_pkt.bytes; \ 2290 } while (0) 2291 2292 #define DP_UPDATE_LINK_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type) \ 2293 do { \ 2294 uint8_t i = 0; \ 2295 uint8_t idx = 0; \ 2296 enum dp_pkt_xmit_type temp_xmit_type = _xmit_type; \ 2297 if (temp_xmit_type == DP_XMIT_MLD) { \ 2298 idx = DP_VDEV_XMIT_TYPE; \ 2299 temp_xmit_type = DP_VDEV_XMIT_TYPE; \ 2300 } else if (temp_xmit_type == DP_XMIT_TOTAL) { \ 2301 temp_xmit_type = DP_VDEV_XMIT_TYPE; \ 2302 } \ 2303 for (; idx <= temp_xmit_type; idx++) { \ 2304 _tgtobj->tx_i.rcvd.num += _srcobj->tx_i[idx].rcvd.num; \ 2305 _tgtobj->tx_i.rcvd.bytes += \ 2306 _srcobj->tx_i[idx].rcvd.bytes; \ 2307 _tgtobj->tx_i.rcvd_in_fast_xmit_flow += \ 2308 _srcobj->tx_i[idx].rcvd_in_fast_xmit_flow; \ 2309 for (i = 0; i < CDP_MAX_TX_DATA_RINGS; i++) { \ 2310 _tgtobj->tx_i.rcvd_per_core[i] += \ 2311 _srcobj->tx_i[idx].rcvd_per_core[i]; \ 2312 } \ 2313 _tgtobj->tx_i.processed.num += \ 2314 _srcobj->tx_i[idx].processed.num; \ 2315 _tgtobj->tx_i.processed.bytes += \ 2316 _srcobj->tx_i[idx].processed.bytes; \ 2317 _tgtobj->tx_i.reinject_pkts.num += \ 2318 _srcobj->tx_i[idx].reinject_pkts.num; \ 2319 _tgtobj->tx_i.reinject_pkts.bytes += \ 2320 _srcobj->tx_i[idx].reinject_pkts.bytes; \ 2321 _tgtobj->tx_i.inspect_pkts.num += \ 2322 _srcobj->tx_i[idx].inspect_pkts.num; \ 2323 _tgtobj->tx_i.inspect_pkts.bytes += \ 2324 _srcobj->tx_i[idx].inspect_pkts.bytes; \ 2325 _tgtobj->tx_i.nawds_mcast.num += \ 2326 _srcobj->tx_i[idx].nawds_mcast.num; \ 2327 _tgtobj->tx_i.nawds_mcast.bytes += \ 2328 _srcobj->tx_i[idx].nawds_mcast.bytes; \ 2329 _tgtobj->tx_i.bcast.num += \ 2330 _srcobj->tx_i[idx].bcast.num; \ 2331 _tgtobj->tx_i.bcast.bytes += \ 2332 _srcobj->tx_i[idx].bcast.bytes; \ 2333 _tgtobj->tx_i.raw.raw_pkt.num += \ 2334 _srcobj->tx_i[idx].raw.raw_pkt.num; \ 2335 _tgtobj->tx_i.raw.raw_pkt.bytes += \ 2336 _srcobj->tx_i[idx].raw.raw_pkt.bytes; \ 2337 _tgtobj->tx_i.raw.dma_map_error += \ 2338 _srcobj->tx_i[idx].raw.dma_map_error; \ 2339 _tgtobj->tx_i.raw.invalid_raw_pkt_datatype += \ 2340 _srcobj->tx_i[idx].raw.invalid_raw_pkt_datatype; \ 2341 _tgtobj->tx_i.raw.num_frags_overflow_err += \ 2342 _srcobj->tx_i[idx].raw.num_frags_overflow_err; \ 2343 _tgtobj->tx_i.sg.sg_pkt.num += \ 2344 _srcobj->tx_i[idx].sg.sg_pkt.num; \ 2345 _tgtobj->tx_i.sg.sg_pkt.bytes += \ 2346 _srcobj->tx_i[idx].sg.sg_pkt.bytes; \ 2347 _tgtobj->tx_i.sg.non_sg_pkts.num += \ 2348 _srcobj->tx_i[idx].sg.non_sg_pkts.num; \ 2349 _tgtobj->tx_i.sg.non_sg_pkts.bytes += \ 2350 _srcobj->tx_i[idx].sg.non_sg_pkts.bytes; \ 2351 _tgtobj->tx_i.sg.dropped_host.num += \ 2352 _srcobj->tx_i[idx].sg.dropped_host.num; \ 2353 _tgtobj->tx_i.sg.dropped_host.bytes += \ 2354 _srcobj->tx_i[idx].sg.dropped_host.bytes; \ 2355 _tgtobj->tx_i.sg.dropped_target += \ 2356 _srcobj->tx_i[idx].sg.dropped_target; \ 2357 _tgtobj->tx_i.sg.dma_map_error += \ 2358 _srcobj->tx_i[idx].sg.dma_map_error; \ 2359 _tgtobj->tx_i.mcast_en.mcast_pkt.num += \ 2360 _srcobj->tx_i[idx].mcast_en.mcast_pkt.num; \ 2361 _tgtobj->tx_i.mcast_en.mcast_pkt.bytes += \ 2362 _srcobj->tx_i[idx].mcast_en.mcast_pkt.bytes; \ 2363 _tgtobj->tx_i.mcast_en.dropped_map_error += \ 2364 _srcobj->tx_i[idx].mcast_en.dropped_map_error; \ 2365 _tgtobj->tx_i.mcast_en.dropped_self_mac += \ 2366 _srcobj->tx_i[idx].mcast_en.dropped_self_mac; \ 2367 _tgtobj->tx_i.mcast_en.dropped_send_fail += \ 2368 _srcobj->tx_i[idx].mcast_en.dropped_send_fail; \ 2369 _tgtobj->tx_i.mcast_en.ucast += \ 2370 _srcobj->tx_i[idx].mcast_en.ucast; \ 2371 _tgtobj->tx_i.mcast_en.fail_seg_alloc += \ 2372 _srcobj->tx_i[idx].mcast_en.fail_seg_alloc; \ 2373 _tgtobj->tx_i.mcast_en.clone_fail += \ 2374 _srcobj->tx_i[idx].mcast_en.clone_fail; \ 2375 _tgtobj->tx_i.igmp_mcast_en.igmp_rcvd += \ 2376 _srcobj->tx_i[idx].igmp_mcast_en.igmp_rcvd; \ 2377 _tgtobj->tx_i.igmp_mcast_en.igmp_ucast_converted += \ 2378 _srcobj->tx_i[idx].igmp_mcast_en.igmp_ucast_converted; \ 2379 _tgtobj->tx_i.dropped.desc_na.num += \ 2380 _srcobj->tx_i[idx].dropped.desc_na.num; \ 2381 _tgtobj->tx_i.dropped.desc_na.bytes += \ 2382 _srcobj->tx_i[idx].dropped.desc_na.bytes; \ 2383 _tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.num += \ 2384 _srcobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.num; \ 2385 _tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes += \ 2386 _srcobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.bytes; \ 2387 _tgtobj->tx_i.dropped.desc_na_exc_outstand.num += \ 2388 _srcobj->tx_i[idx].dropped.desc_na_exc_outstand.num; \ 2389 _tgtobj->tx_i.dropped.desc_na_exc_outstand.bytes += \ 2390 _srcobj->tx_i[idx].dropped.desc_na_exc_outstand.bytes; \ 2391 _tgtobj->tx_i.dropped.exc_desc_na.num += \ 2392 _srcobj->tx_i[idx].dropped.exc_desc_na.num; \ 2393 _tgtobj->tx_i.dropped.exc_desc_na.bytes += \ 2394 _srcobj->tx_i[idx].dropped.exc_desc_na.bytes; \ 2395 _tgtobj->tx_i.dropped.ring_full += \ 2396 _srcobj->tx_i[idx].dropped.ring_full; \ 2397 _tgtobj->tx_i.dropped.enqueue_fail += \ 2398 _srcobj->tx_i[idx].dropped.enqueue_fail; \ 2399 _tgtobj->tx_i.dropped.dma_error += \ 2400 _srcobj->tx_i[idx].dropped.dma_error; \ 2401 _tgtobj->tx_i.dropped.res_full += \ 2402 _srcobj->tx_i[idx].dropped.res_full; \ 2403 _tgtobj->tx_i.dropped.headroom_insufficient += \ 2404 _srcobj->tx_i[idx].dropped.headroom_insufficient; \ 2405 _tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check += \ 2406 _srcobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check; \ 2407 _tgtobj->tx_i.dropped.drop_ingress += \ 2408 _srcobj->tx_i[idx].dropped.drop_ingress; \ 2409 _tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path += \ 2410 _srcobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path; \ 2411 _tgtobj->tx_i.dropped.tx_mcast_drop += \ 2412 _srcobj->tx_i[idx].dropped.tx_mcast_drop; \ 2413 _tgtobj->tx_i.dropped.fw2wbm_tx_drop += \ 2414 _srcobj->tx_i[idx].dropped.fw2wbm_tx_drop; \ 2415 _tgtobj->tx_i.dropped.dropped_pkt.bytes += \ 2416 _srcobj->tx_i[idx].dropped.dropped_pkt.bytes; \ 2417 _tgtobj->tx_i.mesh.exception_fw += \ 2418 _srcobj->tx_i[idx].mesh.exception_fw; \ 2419 _tgtobj->tx_i.mesh.completion_fw += \ 2420 _srcobj->tx_i[idx].mesh.completion_fw; \ 2421 _tgtobj->tx_i.cce_classified += \ 2422 _srcobj->tx_i[idx].cce_classified; \ 2423 _tgtobj->tx_i.cce_classified_raw += \ 2424 _srcobj->tx_i[idx].cce_classified_raw; \ 2425 _tgtobj->tx_i.sniffer_rcvd.num += \ 2426 _srcobj->tx_i[idx].sniffer_rcvd.num; \ 2427 _tgtobj->tx_i.sniffer_rcvd.bytes += \ 2428 _srcobj->tx_i[idx].sniffer_rcvd.bytes; \ 2429 } \ 2430 _tgtobj->tx_i.dropped.dropped_pkt.num = \ 2431 _tgtobj->tx_i.dropped.dma_error + \ 2432 _tgtobj->tx_i.dropped.ring_full + \ 2433 _tgtobj->tx_i.dropped.enqueue_fail + \ 2434 _tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check + \ 2435 _tgtobj->tx_i.dropped.desc_na.num + \ 2436 _tgtobj->tx_i.dropped.res_full + \ 2437 _tgtobj->tx_i.dropped.drop_ingress + \ 2438 _tgtobj->tx_i.dropped.headroom_insufficient + \ 2439 _tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path + \ 2440 _tgtobj->tx_i.dropped.tx_mcast_drop + \ 2441 _tgtobj->tx_i.dropped.fw2wbm_tx_drop; \ 2442 DP_UPDATE_RX_INGRESS_STATS(_tgtobj, _srcobj); \ 2443 } while (0) 2444 2445 #define DP_UPDATE_MLD_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type) \ 2446 do { \ 2447 uint8_t i = 0; \ 2448 uint8_t idx = 0; \ 2449 enum dp_pkt_xmit_type temp_xmit_type = _xmit_type; \ 2450 if (temp_xmit_type == DP_XMIT_MLD) { \ 2451 idx = DP_VDEV_XMIT_TYPE; \ 2452 temp_xmit_type = DP_VDEV_XMIT_TYPE; \ 2453 } else if (temp_xmit_type == DP_XMIT_TOTAL) { \ 2454 temp_xmit_type = DP_VDEV_XMIT_TYPE; \ 2455 } \ 2456 for (; idx <= temp_xmit_type; idx++) { \ 2457 _tgtobj->tx_i[idx].rcvd.num += _srcobj->tx_i[idx].rcvd.num; \ 2458 _tgtobj->tx_i[idx].rcvd.bytes += \ 2459 _srcobj->tx_i[idx].rcvd.bytes; \ 2460 _tgtobj->tx_i[idx].rcvd_in_fast_xmit_flow += \ 2461 _srcobj->tx_i[idx].rcvd_in_fast_xmit_flow; \ 2462 for (i = 0; i < CDP_MAX_TX_DATA_RINGS; i++) { \ 2463 _tgtobj->tx_i[idx].rcvd_per_core[i] += \ 2464 _srcobj->tx_i[idx].rcvd_per_core[i]; \ 2465 } \ 2466 _tgtobj->tx_i[idx].processed.num += \ 2467 _srcobj->tx_i[idx].processed.num; \ 2468 _tgtobj->tx_i[idx].processed.bytes += \ 2469 _srcobj->tx_i[idx].processed.bytes; \ 2470 _tgtobj->tx_i[idx].reinject_pkts.num += \ 2471 _srcobj->tx_i[idx].reinject_pkts.num; \ 2472 _tgtobj->tx_i[idx].reinject_pkts.bytes += \ 2473 _srcobj->tx_i[idx].reinject_pkts.bytes; \ 2474 _tgtobj->tx_i[idx].inspect_pkts.num += \ 2475 _srcobj->tx_i[idx].inspect_pkts.num; \ 2476 _tgtobj->tx_i[idx].inspect_pkts.bytes += \ 2477 _srcobj->tx_i[idx].inspect_pkts.bytes; \ 2478 _tgtobj->tx_i[idx].nawds_mcast.num += \ 2479 _srcobj->tx_i[idx].nawds_mcast.num; \ 2480 _tgtobj->tx_i[idx].nawds_mcast.bytes += \ 2481 _srcobj->tx_i[idx].nawds_mcast.bytes; \ 2482 _tgtobj->tx_i[idx].bcast.num += \ 2483 _srcobj->tx_i[idx].bcast.num; \ 2484 _tgtobj->tx_i[idx].bcast.bytes += \ 2485 _srcobj->tx_i[idx].bcast.bytes; \ 2486 _tgtobj->tx_i[idx].raw.raw_pkt.num += \ 2487 _srcobj->tx_i[idx].raw.raw_pkt.num; \ 2488 _tgtobj->tx_i[idx].raw.raw_pkt.bytes += \ 2489 _srcobj->tx_i[idx].raw.raw_pkt.bytes; \ 2490 _tgtobj->tx_i[idx].raw.dma_map_error += \ 2491 _srcobj->tx_i[idx].raw.dma_map_error; \ 2492 _tgtobj->tx_i[idx].raw.invalid_raw_pkt_datatype += \ 2493 _srcobj->tx_i[idx].raw.invalid_raw_pkt_datatype; \ 2494 _tgtobj->tx_i[idx].raw.num_frags_overflow_err += \ 2495 _srcobj->tx_i[idx].raw.num_frags_overflow_err; \ 2496 _tgtobj->tx_i[idx].sg.sg_pkt.num += \ 2497 _srcobj->tx_i[idx].sg.sg_pkt.num; \ 2498 _tgtobj->tx_i[idx].sg.sg_pkt.bytes += \ 2499 _srcobj->tx_i[idx].sg.sg_pkt.bytes; \ 2500 _tgtobj->tx_i[idx].sg.non_sg_pkts.num += \ 2501 _srcobj->tx_i[idx].sg.non_sg_pkts.num; \ 2502 _tgtobj->tx_i[idx].sg.non_sg_pkts.bytes += \ 2503 _srcobj->tx_i[idx].sg.non_sg_pkts.bytes; \ 2504 _tgtobj->tx_i[idx].sg.dropped_host.num += \ 2505 _srcobj->tx_i[idx].sg.dropped_host.num; \ 2506 _tgtobj->tx_i[idx].sg.dropped_host.bytes += \ 2507 _srcobj->tx_i[idx].sg.dropped_host.bytes; \ 2508 _tgtobj->tx_i[idx].sg.dropped_target += \ 2509 _srcobj->tx_i[idx].sg.dropped_target; \ 2510 _tgtobj->tx_i[idx].sg.dma_map_error += \ 2511 _srcobj->tx_i[idx].sg.dma_map_error; \ 2512 _tgtobj->tx_i[idx].mcast_en.mcast_pkt.num += \ 2513 _srcobj->tx_i[idx].mcast_en.mcast_pkt.num; \ 2514 _tgtobj->tx_i[idx].mcast_en.mcast_pkt.bytes += \ 2515 _srcobj->tx_i[idx].mcast_en.mcast_pkt.bytes; \ 2516 _tgtobj->tx_i[idx].mcast_en.dropped_map_error += \ 2517 _srcobj->tx_i[idx].mcast_en.dropped_map_error; \ 2518 _tgtobj->tx_i[idx].mcast_en.dropped_self_mac += \ 2519 _srcobj->tx_i[idx].mcast_en.dropped_self_mac; \ 2520 _tgtobj->tx_i[idx].mcast_en.dropped_send_fail += \ 2521 _srcobj->tx_i[idx].mcast_en.dropped_send_fail; \ 2522 _tgtobj->tx_i[idx].mcast_en.ucast += \ 2523 _srcobj->tx_i[idx].mcast_en.ucast; \ 2524 _tgtobj->tx_i[idx].mcast_en.fail_seg_alloc += \ 2525 _srcobj->tx_i[idx].mcast_en.fail_seg_alloc; \ 2526 _tgtobj->tx_i[idx].mcast_en.clone_fail += \ 2527 _srcobj->tx_i[idx].mcast_en.clone_fail; \ 2528 _tgtobj->tx_i[idx].igmp_mcast_en.igmp_rcvd += \ 2529 _srcobj->tx_i[idx].igmp_mcast_en.igmp_rcvd; \ 2530 _tgtobj->tx_i[idx].igmp_mcast_en.igmp_ucast_converted += \ 2531 _srcobj->tx_i[idx].igmp_mcast_en.igmp_ucast_converted; \ 2532 _tgtobj->tx_i[idx].dropped.desc_na.num += \ 2533 _srcobj->tx_i[idx].dropped.desc_na.num; \ 2534 _tgtobj->tx_i[idx].dropped.desc_na.bytes += \ 2535 _srcobj->tx_i[idx].dropped.desc_na.bytes; \ 2536 _tgtobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.num += \ 2537 _srcobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.num; \ 2538 _tgtobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.bytes += \ 2539 _srcobj->tx_i[idx].dropped.desc_na_exc_alloc_fail.bytes; \ 2540 _tgtobj->tx_i[idx].dropped.desc_na_exc_outstand.num += \ 2541 _srcobj->tx_i[idx].dropped.desc_na_exc_outstand.num; \ 2542 _tgtobj->tx_i[idx].dropped.desc_na_exc_outstand.bytes += \ 2543 _srcobj->tx_i[idx].dropped.desc_na_exc_outstand.bytes; \ 2544 _tgtobj->tx_i[idx].dropped.exc_desc_na.num += \ 2545 _srcobj->tx_i[idx].dropped.exc_desc_na.num; \ 2546 _tgtobj->tx_i[idx].dropped.exc_desc_na.bytes += \ 2547 _srcobj->tx_i[idx].dropped.exc_desc_na.bytes; \ 2548 _tgtobj->tx_i[idx].dropped.ring_full += \ 2549 _srcobj->tx_i[idx].dropped.ring_full; \ 2550 _tgtobj->tx_i[idx].dropped.enqueue_fail += \ 2551 _srcobj->tx_i[idx].dropped.enqueue_fail; \ 2552 _tgtobj->tx_i[idx].dropped.dma_error += \ 2553 _srcobj->tx_i[idx].dropped.dma_error; \ 2554 _tgtobj->tx_i[idx].dropped.res_full += \ 2555 _srcobj->tx_i[idx].dropped.res_full; \ 2556 _tgtobj->tx_i[idx].dropped.headroom_insufficient += \ 2557 _srcobj->tx_i[idx].dropped.headroom_insufficient; \ 2558 _tgtobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check += \ 2559 _srcobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check; \ 2560 _tgtobj->tx_i[idx].dropped.drop_ingress += \ 2561 _srcobj->tx_i[idx].dropped.drop_ingress; \ 2562 _tgtobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path += \ 2563 _srcobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path; \ 2564 _tgtobj->tx_i[idx].dropped.tx_mcast_drop += \ 2565 _srcobj->tx_i[idx].dropped.tx_mcast_drop; \ 2566 _tgtobj->tx_i[idx].dropped.fw2wbm_tx_drop += \ 2567 _srcobj->tx_i[idx].dropped.fw2wbm_tx_drop; \ 2568 _tgtobj->tx_i[idx].dropped.dropped_pkt.bytes += \ 2569 _srcobj->tx_i[idx].dropped.dropped_pkt.bytes; \ 2570 _tgtobj->tx_i[idx].mesh.exception_fw += \ 2571 _srcobj->tx_i[idx].mesh.exception_fw; \ 2572 _tgtobj->tx_i[idx].mesh.completion_fw += \ 2573 _srcobj->tx_i[idx].mesh.completion_fw; \ 2574 _tgtobj->tx_i[idx].cce_classified += \ 2575 _srcobj->tx_i[idx].cce_classified; \ 2576 _tgtobj->tx_i[idx].cce_classified_raw += \ 2577 _srcobj->tx_i[idx].cce_classified_raw; \ 2578 _tgtobj->tx_i[idx].sniffer_rcvd.num += \ 2579 _srcobj->tx_i[idx].sniffer_rcvd.num; \ 2580 _tgtobj->tx_i[idx].sniffer_rcvd.bytes += \ 2581 _srcobj->tx_i[idx].sniffer_rcvd.bytes; \ 2582 _tgtobj->tx_i[idx].dropped.dropped_pkt.num = \ 2583 _tgtobj->tx_i[idx].dropped.dma_error + \ 2584 _tgtobj->tx_i[idx].dropped.ring_full + \ 2585 _tgtobj->tx_i[idx].dropped.enqueue_fail + \ 2586 _tgtobj->tx_i[idx].dropped.fail_per_pkt_vdev_id_check + \ 2587 _tgtobj->tx_i[idx].dropped.desc_na.num + \ 2588 _tgtobj->tx_i[idx].dropped.res_full + \ 2589 _tgtobj->tx_i[idx].dropped.drop_ingress + \ 2590 _tgtobj->tx_i[idx].dropped.headroom_insufficient + \ 2591 _tgtobj->tx_i[idx].dropped.invalid_peer_id_in_exc_path + \ 2592 _tgtobj->tx_i[idx].dropped.tx_mcast_drop + \ 2593 _tgtobj->tx_i[idx].dropped.fw2wbm_tx_drop; \ 2594 } \ 2595 DP_UPDATE_RX_INGRESS_STATS(_tgtobj, _srcobj); \ 2596 } while (0) 2597 2598 #define DP_UPDATE_TO_MLD_VDEV_STATS(_tgtobj, _srcobj, _xmit_type) \ 2599 do { \ 2600 DP_UPDATE_MLD_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type); \ 2601 DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj); \ 2602 } while (0) 2603 2604 #define DP_UPDATE_TO_LINK_VDEV_STATS(_tgtobj, _srcobj, _xmit_type) \ 2605 do { \ 2606 DP_UPDATE_LINK_VDEV_INGRESS_STATS(_tgtobj, _srcobj, _xmit_type); \ 2607 DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj); \ 2608 } while (0) 2609 /** 2610 * dp_peer_find_attach() - Allocates memory for peer objects 2611 * @soc: SoC handle 2612 * 2613 * Return: QDF_STATUS 2614 */ 2615 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc); 2616 2617 /** 2618 * dp_peer_find_detach() - Frees memory for peer objects 2619 * @soc: SoC handle 2620 * 2621 * Return: none 2622 */ 2623 void dp_peer_find_detach(struct dp_soc *soc); 2624 2625 /** 2626 * dp_peer_find_hash_add() - add peer to peer_hash_table 2627 * @soc: soc handle 2628 * @peer: peer handle 2629 * 2630 * Return: none 2631 */ 2632 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer); 2633 2634 /** 2635 * dp_peer_find_hash_remove() - remove peer from peer_hash_table 2636 * @soc: soc handle 2637 * @peer: peer handle 2638 * 2639 * Return: none 2640 */ 2641 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer); 2642 2643 /* unused?? */ 2644 void dp_peer_find_hash_erase(struct dp_soc *soc); 2645 2646 /** 2647 * dp_peer_vdev_list_add() - add peer into vdev's peer list 2648 * @soc: soc handle 2649 * @vdev: vdev handle 2650 * @peer: peer handle 2651 * 2652 * Return: none 2653 */ 2654 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev, 2655 struct dp_peer *peer); 2656 2657 /** 2658 * dp_peer_vdev_list_remove() - remove peer from vdev's peer list 2659 * @soc: SoC handle 2660 * @vdev: VDEV handle 2661 * @peer: peer handle 2662 * 2663 * Return: none 2664 */ 2665 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev, 2666 struct dp_peer *peer); 2667 2668 /** 2669 * dp_peer_find_id_to_obj_add() - Add peer into peer_id table 2670 * @soc: SoC handle 2671 * @peer: peer handle 2672 * @peer_id: peer_id 2673 * 2674 * Return: None 2675 */ 2676 void dp_peer_find_id_to_obj_add(struct dp_soc *soc, 2677 struct dp_peer *peer, 2678 uint16_t peer_id); 2679 2680 /** 2681 * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table 2682 * @soc: SoC handle 2683 * @peer: peer handle 2684 * @txrx_peer: txrx peer handle 2685 * 2686 * Return: None 2687 */ 2688 void dp_txrx_peer_attach_add(struct dp_soc *soc, 2689 struct dp_peer *peer, 2690 struct dp_txrx_peer *txrx_peer); 2691 2692 /** 2693 * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table 2694 * @soc: SoC handle 2695 * @peer_id: peer_id 2696 * 2697 * Return: None 2698 */ 2699 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc, 2700 uint16_t peer_id); 2701 2702 /** 2703 * dp_vdev_unref_delete() - check and process vdev delete 2704 * @soc: DP specific soc pointer 2705 * @vdev: DP specific vdev pointer 2706 * @mod_id: module id 2707 * 2708 */ 2709 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev, 2710 enum dp_mod_id mod_id); 2711 2712 /** 2713 * dp_peer_ppdu_delayed_ba_cleanup() - free ppdu allocated in peer 2714 * @peer: Datapath peer 2715 * 2716 * Return: void 2717 */ 2718 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer); 2719 2720 /** 2721 * dp_peer_rx_init() - Initialize receive TID state 2722 * @pdev: Datapath pdev 2723 * @peer: Datapath peer 2724 * 2725 */ 2726 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer); 2727 2728 /** 2729 * dp_peer_rx_init_wrapper() - Initialize receive TID state, based on peer type 2730 * @pdev: Datapath pdev 2731 * @peer: Datapath peer 2732 * @setup_info: setup info received for setting up the peer 2733 * 2734 * Return: None 2735 */ 2736 void dp_peer_rx_init_wrapper(struct dp_pdev *pdev, struct dp_peer *peer, 2737 struct cdp_peer_setup_info *setup_info); 2738 2739 /** 2740 * dp_peer_cleanup() - Cleanup peer information 2741 * @vdev: Datapath vdev 2742 * @peer: Datapath peer 2743 * 2744 */ 2745 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); 2746 2747 #ifdef DP_PEER_EXTENDED_API 2748 /** 2749 * dp_register_peer() - Register peer into physical device 2750 * @soc_hdl: data path soc handle 2751 * @pdev_id: device instance id 2752 * @sta_desc: peer description 2753 * 2754 * Register peer into physical device 2755 * 2756 * Return: QDF_STATUS_SUCCESS registration success 2757 * QDF_STATUS_E_FAULT peer not found 2758 */ 2759 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2760 struct ol_txrx_desc_type *sta_desc); 2761 2762 /** 2763 * dp_clear_peer() - remove peer from physical device 2764 * @soc_hdl: data path soc handle 2765 * @pdev_id: device instance id 2766 * @peer_addr: peer mac address 2767 * 2768 * remove peer from physical device 2769 * 2770 * Return: QDF_STATUS_SUCCESS registration success 2771 * QDF_STATUS_E_FAULT peer not found 2772 */ 2773 QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2774 struct qdf_mac_addr peer_addr); 2775 2776 /** 2777 * dp_find_peer_exist_on_vdev - find if peer exists on the given vdev 2778 * @soc_hdl: datapath soc handle 2779 * @vdev_id: vdev instance id 2780 * @peer_addr: peer mac address 2781 * 2782 * Return: true or false 2783 */ 2784 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2785 uint8_t *peer_addr); 2786 2787 /** 2788 * dp_find_peer_exist_on_other_vdev - find if peer exists 2789 * on other than the given vdev 2790 * @soc_hdl: datapath soc handle 2791 * @vdev_id: vdev instance id 2792 * @peer_addr: peer mac address 2793 * @max_bssid: max number of bssids 2794 * 2795 * Return: true or false 2796 */ 2797 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl, 2798 uint8_t vdev_id, uint8_t *peer_addr, 2799 uint16_t max_bssid); 2800 2801 /** 2802 * dp_peer_state_update() - update peer local state 2803 * @soc: datapath soc handle 2804 * @peer_mac: peer mac address 2805 * @state: new peer local state 2806 * 2807 * update peer local state 2808 * 2809 * Return: QDF_STATUS_SUCCESS registration success 2810 */ 2811 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac, 2812 enum ol_txrx_peer_state state); 2813 2814 /** 2815 * dp_get_vdevid() - Get virtual interface id which peer registered 2816 * @soc_hdl: datapath soc handle 2817 * @peer_mac: peer mac address 2818 * @vdev_id: virtual interface id which peer registered 2819 * 2820 * Get virtual interface id which peer registered 2821 * 2822 * Return: QDF_STATUS_SUCCESS registration success 2823 */ 2824 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2825 uint8_t *vdev_id); 2826 2827 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle, 2828 struct qdf_mac_addr peer_addr); 2829 2830 /** 2831 * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs 2832 * @peer: peer instance 2833 * 2834 * Get virtual interface instance which peer belongs 2835 * 2836 * Return: virtual interface instance pointer 2837 * NULL in case cannot find 2838 */ 2839 struct cdp_vdev *dp_get_vdev_for_peer(void *peer); 2840 2841 /** 2842 * dp_peer_get_peer_mac_addr() - Get peer mac address 2843 * @peer: peer instance 2844 * 2845 * Get peer mac address 2846 * 2847 * Return: peer mac address pointer 2848 * NULL in case cannot find 2849 */ 2850 uint8_t *dp_peer_get_peer_mac_addr(void *peer); 2851 2852 /** 2853 * dp_get_peer_state() - Get local peer state 2854 * @soc: datapath soc handle 2855 * @vdev_id: vdev id 2856 * @peer_mac: peer mac addr 2857 * @slowpath: call from slowpath or not 2858 * 2859 * Get local peer state 2860 * 2861 * Return: peer status 2862 */ 2863 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id, 2864 uint8_t *peer_mac, bool slowpath); 2865 2866 /** 2867 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device 2868 * @pdev: data path device instance 2869 * 2870 * local peer id pool alloc for physical device 2871 * 2872 * Return: none 2873 */ 2874 void dp_local_peer_id_pool_init(struct dp_pdev *pdev); 2875 2876 /** 2877 * dp_local_peer_id_alloc() - allocate local peer id 2878 * @pdev: data path device instance 2879 * @peer: new peer instance 2880 * 2881 * allocate local peer id 2882 * 2883 * Return: none 2884 */ 2885 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer); 2886 2887 /** 2888 * dp_local_peer_id_free() - remove local peer id 2889 * @pdev: data path device instance 2890 * @peer: peer instance should be removed 2891 * 2892 * remove local peer id 2893 * 2894 * Return: none 2895 */ 2896 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer); 2897 2898 /** 2899 * dp_set_peer_as_tdls_peer() - set tdls peer flag to peer 2900 * @soc_hdl: datapath soc handle 2901 * @vdev_id: vdev_id 2902 * @peer_mac: peer mac addr 2903 * @val: tdls peer flag 2904 * 2905 * Return: none 2906 */ 2907 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2908 uint8_t *peer_mac, bool val); 2909 #else 2910 static inline dp_get_vdevid(struct cdp_soc_t * soc_hdl,uint8_t * peer_mac,uint8_t * vdev_id)2911 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 2912 uint8_t *vdev_id) 2913 { 2914 return QDF_STATUS_E_NOSUPPORT; 2915 } 2916 dp_local_peer_id_pool_init(struct dp_pdev * pdev)2917 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev) 2918 { 2919 } 2920 2921 static inline dp_local_peer_id_alloc(struct dp_pdev * pdev,struct dp_peer * peer)2922 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) 2923 { 2924 } 2925 2926 static inline dp_local_peer_id_free(struct dp_pdev * pdev,struct dp_peer * peer)2927 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) 2928 { 2929 } 2930 2931 static inline dp_set_peer_as_tdls_peer(struct cdp_soc_t * soc_hdl,uint8_t vdev_id,uint8_t * peer_mac,bool val)2932 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 2933 uint8_t *peer_mac, bool val) 2934 { 2935 } 2936 #endif 2937 2938 /** 2939 * dp_find_peer_exist - find peer if already exists 2940 * @soc_hdl: datapath soc handle 2941 * @pdev_id: physical device instance id 2942 * @peer_addr: peer mac address 2943 * 2944 * Return: true or false 2945 */ 2946 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 2947 uint8_t *peer_addr); 2948 2949 #ifdef DP_UMAC_HW_RESET_SUPPORT 2950 /** 2951 * dp_pause_reo_send_cmd() - Pause Reo send commands. 2952 * @soc: dp soc 2953 * 2954 * Return: none 2955 */ 2956 void dp_pause_reo_send_cmd(struct dp_soc *soc); 2957 2958 /** 2959 * dp_resume_reo_send_cmd() - Resume Reo send commands. 2960 * @soc: dp soc 2961 * 2962 * Return: none 2963 */ 2964 void dp_resume_reo_send_cmd(struct dp_soc *soc); 2965 2966 /** 2967 * dp_cleanup_reo_cmd_module - Clean up the reo cmd module 2968 * @soc: DP SoC handle 2969 * 2970 * Return: none 2971 */ 2972 void dp_cleanup_reo_cmd_module(struct dp_soc *soc); 2973 2974 /** 2975 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist 2976 * @soc: DP SOC handle 2977 * 2978 * Return: none 2979 */ 2980 void dp_reo_desc_freelist_destroy(struct dp_soc *soc); 2981 2982 /** 2983 * dp_reset_rx_reo_tid_queue() - Reset the reo tid queues 2984 * @soc: dp soc 2985 * @hw_qdesc_vaddr: starting address of the tid queues 2986 * @size: size of the memory pointed to by hw_qdesc_vaddr 2987 * 2988 * Return: none 2989 */ 2990 void dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr, 2991 uint32_t size); 2992 2993 dp_umac_reset_trigger_pre_reset_notify_cb(struct dp_soc * soc)2994 static inline void dp_umac_reset_trigger_pre_reset_notify_cb(struct dp_soc *soc) 2995 { 2996 notify_pre_reset_fw_callback callback = soc->notify_fw_callback; 2997 2998 if (callback) 2999 callback(soc); 3000 } 3001 3002 /** 3003 * dp_reset_global_tx_desc_cleanup_flag() - Reset cleanup needed flag 3004 * @soc: dp soc handle 3005 * 3006 * Return: None 3007 */ 3008 void dp_reset_global_tx_desc_cleanup_flag(struct dp_soc *soc); 3009 3010 /** 3011 * dp_get_global_tx_desc_cleanup_flag() - Get cleanup needed flag 3012 * @soc: dp soc handle 3013 * 3014 * Return: cleanup needed/ not needed 3015 */ 3016 bool dp_get_global_tx_desc_cleanup_flag(struct dp_soc *soc); 3017 3018 3019 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 3020 /** 3021 * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session 3022 * @soc: dp soc handle 3023 * 3024 * Return: void 3025 */ 3026 void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc); 3027 3028 /** 3029 * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session 3030 * @soc: dp soc handle 3031 * @umac_reset_ctx: Umac reset context 3032 * @rx_event: Rx event received 3033 * @is_target_recovery: Flag to indicate if it is triggered for target recovery 3034 * 3035 * Return: status 3036 */ 3037 QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc, 3038 struct dp_soc_umac_reset_ctx *umac_reset_ctx, 3039 enum umac_reset_rx_event rx_event, 3040 bool is_target_recovery); 3041 3042 /** 3043 * dp_umac_reset_handle_action_cb() - Function to call action callback 3044 * @soc: dp soc handle 3045 * @umac_reset_ctx: Umac reset context 3046 * @action: Action to call the callback for 3047 * 3048 * Return: QDF_STATUS status 3049 */ 3050 QDF_STATUS dp_umac_reset_handle_action_cb(struct dp_soc *soc, 3051 struct dp_soc_umac_reset_ctx *umac_reset_ctx, 3052 enum umac_reset_action action); 3053 3054 /** 3055 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command 3056 * @umac_reset_ctx: UMAC reset context 3057 * @tx_cmd: Tx command to be posted 3058 * 3059 * Return: QDF status of operation 3060 */ 3061 QDF_STATUS 3062 dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx, 3063 enum umac_reset_tx_cmd tx_cmd); 3064 3065 /** 3066 * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator 3067 * @soc: dp soc handle 3068 * 3069 * Return: true if the soc is initiator or false otherwise 3070 */ 3071 bool dp_umac_reset_initiator_check(struct dp_soc *soc); 3072 3073 /** 3074 * dp_umac_reset_target_recovery_check() - Check if this is for target recovery 3075 * @soc: dp soc handle 3076 * 3077 * Return: true if the session is for target recovery or false otherwise 3078 */ 3079 bool dp_umac_reset_target_recovery_check(struct dp_soc *soc); 3080 3081 /** 3082 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored 3083 * @soc: dp soc handle 3084 * 3085 * Return: true if the soc is ignored or false otherwise 3086 */ 3087 bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc); 3088 3089 /** 3090 * dp_mlo_umac_reset_stats_print() - API to print MLO umac reset stats 3091 * @soc: dp soc handle 3092 * 3093 * Return: QDF_STATUS 3094 */ 3095 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc); 3096 #else 3097 static inline dp_mlo_umac_reset_stats_print(struct dp_soc * soc)3098 QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc) 3099 { 3100 return QDF_STATUS_SUCCESS; 3101 } 3102 #endif 3103 #else dp_umac_reset_trigger_pre_reset_notify_cb(struct dp_soc * soc)3104 static inline void dp_umac_reset_trigger_pre_reset_notify_cb(struct dp_soc *soc) 3105 { 3106 } 3107 #endif 3108 3109 #if defined(DP_UMAC_HW_RESET_SUPPORT) && defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 3110 /** 3111 * dp_umac_reset_notify_asserted_soc() - API to notify the asserted SOC 3112 * @soc: dp soc 3113 * 3114 * Return: QDF_STATUS 3115 */ 3116 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc); 3117 3118 /** 3119 * dp_get_umac_reset_in_progress_state() - API to check umac reset in progress 3120 * state 3121 * @psoc: dp soc handle 3122 * 3123 * Return: umac reset state 3124 */ 3125 enum cdp_umac_reset_state 3126 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc); 3127 #else 3128 static inline dp_umac_reset_notify_asserted_soc(struct dp_soc * soc)3129 QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc) 3130 { 3131 return QDF_STATUS_SUCCESS; 3132 } 3133 3134 static inline enum cdp_umac_reset_state dp_get_umac_reset_in_progress_state(struct cdp_soc_t * psoc)3135 dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc) 3136 { 3137 return CDP_UMAC_RESET_NOT_IN_PROGRESS; 3138 } 3139 #endif 3140 3141 #ifndef WLAN_SOFTUMAC_SUPPORT 3142 QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type, 3143 struct hal_reo_cmd_params *params, 3144 void (*callback_fn), void *data); 3145 3146 /** 3147 * dp_reo_cmdlist_destroy() - Free REO commands in the queue 3148 * @soc: DP SoC handle 3149 * 3150 * Return: none 3151 */ 3152 void dp_reo_cmdlist_destroy(struct dp_soc *soc); 3153 3154 /** 3155 * dp_reo_status_ring_handler() - Handler for REO Status ring 3156 * @int_ctx: pointer to DP interrupt context 3157 * @soc: DP Soc handle 3158 * 3159 * Return: Number of descriptors reaped 3160 */ 3161 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, 3162 struct dp_soc *soc); 3163 #endif 3164 3165 /** 3166 * dp_aggregate_vdev_stats() - Consolidate stats at VDEV level 3167 * @vdev: DP VDEV handle 3168 * @vdev_stats: aggregate statistics 3169 * @xmit_type: xmit type of packet - MLD/Link 3170 * return: void 3171 */ 3172 void dp_aggregate_vdev_stats(struct dp_vdev *vdev, 3173 struct cdp_vdev_stats *vdev_stats, 3174 enum dp_pkt_xmit_type xmit_type); 3175 3176 /** 3177 * dp_txrx_get_vdev_stats() - Update buffer with cdp_vdev_stats 3178 * @soc_hdl: CDP SoC handle 3179 * @vdev_id: vdev Id 3180 * @buf: buffer for vdev stats 3181 * @is_aggregate: are aggregate stats being collected 3182 * 3183 * Return: QDF_STATUS 3184 */ 3185 QDF_STATUS 3186 dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 3187 void *buf, bool is_aggregate); 3188 3189 /** 3190 * dp_rx_bar_stats_cb() - BAR received stats callback 3191 * @soc: SOC handle 3192 * @cb_ctxt: Call back context 3193 * @reo_status: Reo status 3194 * 3195 * Return: void 3196 */ 3197 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, 3198 union hal_reo_status *reo_status); 3199 3200 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id, 3201 qdf_nbuf_t nbuf, 3202 uint8_t newmac[][QDF_MAC_ADDR_SIZE], 3203 uint8_t new_mac_cnt, uint8_t tid, 3204 bool is_igmp, bool is_dms_pkt); 3205 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 3206 3207 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 3208 3209 /** 3210 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW 3211 * @pdev: DP PDEV handle 3212 * @stats_type_upload_mask: stats type requested by user 3213 * @config_param_0: extra configuration parameters 3214 * @config_param_1: extra configuration parameters 3215 * @config_param_2: extra configuration parameters 3216 * @config_param_3: extra configuration parameters 3217 * @cookie: 3218 * @cookie_msb: 3219 * @mac_id: mac number 3220 * 3221 * Return: QDF STATUS 3222 */ 3223 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, 3224 uint32_t stats_type_upload_mask, uint32_t config_param_0, 3225 uint32_t config_param_1, uint32_t config_param_2, 3226 uint32_t config_param_3, int cookie, int cookie_msb, 3227 uint8_t mac_id); 3228 3229 /** 3230 * dp_htt_stats_print_tag() - function to select the tag type and 3231 * print the corresponding tag structure 3232 * @pdev: pdev pointer 3233 * @tag_type: tag type that is to be printed 3234 * @tag_buf: pointer to the tag structure 3235 * 3236 * Return: void 3237 */ 3238 void dp_htt_stats_print_tag(struct dp_pdev *pdev, 3239 uint8_t tag_type, uint32_t *tag_buf); 3240 3241 /** 3242 * dp_htt_stats_copy_tag() - function to select the tag type and 3243 * copy the corresponding tag structure 3244 * @pdev: DP_PDEV handle 3245 * @tag_type: tag type that is to be printed 3246 * @tag_buf: pointer to the tag structure 3247 * 3248 * Return: void 3249 */ 3250 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf); 3251 3252 /** 3253 * dp_h2t_3tuple_config_send(): function to construct 3 tuple configuration 3254 * HTT message to pass to FW 3255 * @pdev: DP PDEV handle 3256 * @tuple_mask: tuple configuration to report 3 tuple hash value in either 3257 * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV. 3258 * 3259 * tuple_mask[1:0]: 3260 * 00 - Do not report 3 tuple hash value 3261 * 10 - Report 3 tuple hash value in toeplitz_2_or_4 3262 * 01 - Report 3 tuple hash value in flow_id_toeplitz 3263 * 11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz 3264 * @mac_id: MAC ID 3265 * 3266 * Return: QDF STATUS 3267 */ 3268 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask, 3269 uint8_t mac_id); 3270 3271 #ifdef IPA_OFFLOAD 3272 /** 3273 * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo 3274 * @soc: soc handle 3275 * @cb_ctxt: combination of peer_id and tid 3276 * @reo_status: reo status 3277 * 3278 * Return: void 3279 */ 3280 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt, 3281 union hal_reo_status *reo_status); 3282 3283 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer, 3284 dp_rxtid_stats_cmd_cb dp_stats_cmd_cb); 3285 #ifdef IPA_OPT_WIFI_DP 3286 void dp_ipa_wdi_opt_dpath_notify_flt_rlsd(int flt0_rslt, 3287 int flt1_rslt); 3288 void dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(int flt0_rslt, int flt1_rslt); 3289 void dp_ipa_wdi_opt_dpath_notify_flt_rsvd(bool is_success); 3290 #endif 3291 #ifdef QCA_ENHANCED_STATS_SUPPORT 3292 /** 3293 * dp_peer_aggregate_tid_stats - aggregate rx tid stats 3294 * @peer: Data Path peer 3295 * 3296 * Return: void 3297 */ 3298 void dp_peer_aggregate_tid_stats(struct dp_peer *peer); 3299 #endif 3300 #else dp_peer_aggregate_tid_stats(struct dp_peer * peer)3301 static inline void dp_peer_aggregate_tid_stats(struct dp_peer *peer) 3302 { 3303 } 3304 #endif 3305 3306 /** 3307 * dp_set_key_sec_type_wifi3() - set security mode of key 3308 * @soc: Datapath soc handle 3309 * @vdev_id: id of atapath vdev 3310 * @peer_mac: Datapath peer mac address 3311 * @sec_type: security type 3312 * @is_unicast: key type 3313 * 3314 */ 3315 QDF_STATUS 3316 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, 3317 uint8_t *peer_mac, enum cdp_sec_type sec_type, 3318 bool is_unicast); 3319 3320 /** 3321 * dp_get_pdev_for_mac_id() - Return pdev for mac_id 3322 * @soc: handle to DP soc 3323 * @mac_id: MAC id 3324 * 3325 * Return: Return pdev corresponding to MAC 3326 */ 3327 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id); 3328 3329 QDF_STATUS 3330 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id, 3331 uint8_t *peer_mac, 3332 bool is_unicast, uint32_t *key); 3333 3334 /** 3335 * dp_check_pdev_exists() - Validate pdev before use 3336 * @soc: dp soc handle 3337 * @data: pdev handle 3338 * 3339 * Return: 0 - success/invalid - failure 3340 */ 3341 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data); 3342 3343 /** 3344 * dp_update_delay_stats() - Update delay statistics in structure 3345 * and fill min, max and avg delay 3346 * @tstats: tid tx stats 3347 * @rstats: tid rx stats 3348 * @delay: delay in ms 3349 * @tid: tid value 3350 * @mode: type of tx delay mode 3351 * @ring_id: ring number 3352 * @delay_in_us: flag to indicate whether the delay is in ms or us 3353 * 3354 * Return: none 3355 */ 3356 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats, 3357 struct cdp_tid_rx_stats *rstats, uint32_t delay, 3358 uint8_t tid, uint8_t mode, uint8_t ring_id, 3359 bool delay_in_us); 3360 3361 /** 3362 * dp_print_ring_stats(): Print tail and head pointer 3363 * @pdev: DP_PDEV handle 3364 * 3365 * Return: void 3366 */ 3367 void dp_print_ring_stats(struct dp_pdev *pdev); 3368 3369 /** 3370 * dp_print_ring_stat_from_hal(): Print tail and head pointer through hal 3371 * @soc: soc handle 3372 * @srng: srng handle 3373 * @ring_type: ring type 3374 * 3375 * Return: void 3376 */ 3377 void 3378 dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng, 3379 enum hal_ring_type ring_type); 3380 3381 /** 3382 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters 3383 * @pdev: DP pdev handle 3384 * 3385 * Return: void 3386 */ 3387 void dp_print_pdev_cfg_params(struct dp_pdev *pdev); 3388 3389 /** 3390 * dp_print_soc_cfg_params()- Dump soc wlan config parameters 3391 * @soc: Soc handle 3392 * 3393 * Return: void 3394 */ 3395 void dp_print_soc_cfg_params(struct dp_soc *soc); 3396 3397 /** 3398 * dp_srng_get_str_from_hal_ring_type() - Return string name for a ring 3399 * @ring_type: Ring 3400 * 3401 * Return: char const pointer 3402 */ 3403 const 3404 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type); 3405 3406 /** 3407 * dp_txrx_path_stats() - Function to display dump stats 3408 * @soc: soc handle 3409 * 3410 * Return: none 3411 */ 3412 void dp_txrx_path_stats(struct dp_soc *soc); 3413 3414 /** 3415 * dp_print_per_ring_stats(): Packet count per ring 3416 * @soc: soc handle 3417 * 3418 * Return: None 3419 */ 3420 void dp_print_per_ring_stats(struct dp_soc *soc); 3421 3422 /** 3423 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level 3424 * @pdev: DP PDEV handle 3425 * 3426 * Return: void 3427 */ 3428 void dp_aggregate_pdev_stats(struct dp_pdev *pdev); 3429 3430 /** 3431 * dp_print_rx_rates(): Print Rx rate stats 3432 * @vdev: DP_VDEV handle 3433 * 3434 * Return:void 3435 */ 3436 void dp_print_rx_rates(struct dp_vdev *vdev); 3437 3438 /** 3439 * dp_print_tx_rates(): Print tx rates 3440 * @vdev: DP_VDEV handle 3441 * 3442 * Return:void 3443 */ 3444 void dp_print_tx_rates(struct dp_vdev *vdev); 3445 3446 /** 3447 * dp_print_peer_stats():print peer stats 3448 * @peer: DP_PEER handle 3449 * @peer_stats: buffer holding peer stats 3450 * 3451 * return void 3452 */ 3453 void dp_print_peer_stats(struct dp_peer *peer, 3454 struct cdp_peer_stats *peer_stats); 3455 3456 /** 3457 * dp_print_pdev_tx_stats(): Print Pdev level TX stats 3458 * @pdev: DP_PDEV Handle 3459 * 3460 * Return:void 3461 */ 3462 void 3463 dp_print_pdev_tx_stats(struct dp_pdev *pdev); 3464 3465 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MCAST_MLO) 3466 /** 3467 * dp_print_vdev_mlo_mcast_tx_stats(): Print vdev level mlo mcast tx stats 3468 * @vdev: DP_VDEV Handle 3469 * 3470 * Return:void 3471 */ 3472 void 3473 dp_print_vdev_mlo_mcast_tx_stats(struct dp_vdev *vdev); 3474 #else 3475 /** 3476 * dp_print_vdev_mlo_mcast_tx_stats(): Print vdev level mlo mcast tx stats 3477 * @vdev: DP_VDEV Handle 3478 * 3479 * Return:void 3480 */ 3481 static inline dp_print_vdev_mlo_mcast_tx_stats(struct dp_vdev * vdev)3482 void dp_print_vdev_mlo_mcast_tx_stats(struct dp_vdev *vdev) 3483 { 3484 } 3485 #endif 3486 3487 /** 3488 * dp_print_pdev_rx_stats(): Print Pdev level RX stats 3489 * @pdev: DP_PDEV Handle 3490 * 3491 * Return: void 3492 */ 3493 void 3494 dp_print_pdev_rx_stats(struct dp_pdev *pdev); 3495 3496 /** 3497 * dp_print_soc_tx_stats(): Print SOC level stats 3498 * @soc: DP_SOC Handle 3499 * 3500 * Return: void 3501 */ 3502 void dp_print_soc_tx_stats(struct dp_soc *soc); 3503 3504 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX 3505 /** 3506 * dp_print_global_desc_count(): Print global desc in use 3507 * 3508 * Return: void 3509 */ 3510 void dp_print_global_desc_count(void); 3511 #else 3512 /** 3513 * dp_print_global_desc_count(): Print global desc in use 3514 * 3515 * Return: void 3516 */ 3517 static inline dp_print_global_desc_count(void)3518 void dp_print_global_desc_count(void) 3519 { 3520 } 3521 #endif 3522 3523 /** 3524 * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc 3525 * @soc: dp_soc handle 3526 * 3527 * Return: None 3528 */ 3529 void dp_print_soc_interrupt_stats(struct dp_soc *soc); 3530 3531 /** 3532 * dp_print_tx_ppeds_stats() - Print Tx in use stats for the soc in DS 3533 * @soc: dp_soc handle 3534 * 3535 * Return: None 3536 */ 3537 3538 void dp_print_tx_ppeds_stats(struct dp_soc *soc); 3539 3540 /* REO destination ring's watermark mask */ 3541 #define DP_SRNG_WM_MASK_REO_DST BIT(REO_DST) 3542 /* TX completion ring's watermark mask */ 3543 #define DP_SRNG_WM_MASK_TX_COMP BIT(WBM2SW_RELEASE) 3544 /* All srng's watermark mask */ 3545 #define DP_SRNG_WM_MASK_ALL 0xFFFFFFFF 3546 3547 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING 3548 /** 3549 * dp_dump_srng_high_wm_stats() - Print the ring usage high watermark stats 3550 * for all SRNGs 3551 * @soc: DP soc handle 3552 * @srng_mask: SRNGs mask for dumping usage watermark stats 3553 * 3554 * Return: None 3555 */ 3556 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask); 3557 #else 3558 static inline dp_dump_srng_high_wm_stats(struct dp_soc * soc,uint64_t srng_mask)3559 void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask) 3560 { 3561 } 3562 #endif 3563 3564 /** 3565 * dp_print_soc_rx_stats() - Print SOC level Rx stats 3566 * @soc: DP_SOC Handle 3567 * 3568 * Return: void 3569 */ 3570 void dp_print_soc_rx_stats(struct dp_soc *soc); 3571 3572 /** 3573 * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac 3574 * 3575 * @mac_id: MAC id 3576 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 3577 * 3578 * Single pdev using both MACs will operate on both MAC rings, 3579 * which is the case for MCL. 3580 * For WIN each PDEV will operate one ring, so index is zero. 3581 * 3582 */ dp_get_mac_id_for_pdev(uint32_t mac_id,uint32_t pdev_id)3583 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id) 3584 { 3585 if (mac_id && pdev_id) { 3586 qdf_print("Both mac_id and pdev_id cannot be non zero"); 3587 QDF_BUG(0); 3588 return 0; 3589 } 3590 return (mac_id + pdev_id); 3591 } 3592 3593 /** 3594 * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id 3595 * @soc: soc pointer 3596 * @mac_id: MAC id 3597 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 3598 * 3599 * For MCL, Single pdev using both MACs will operate on both MAC rings. 3600 * 3601 * For WIN, each PDEV will operate one ring. 3602 * 3603 */ 3604 static inline int dp_get_lmac_id_for_pdev_id(struct dp_soc * soc,uint32_t mac_id,uint32_t pdev_id)3605 dp_get_lmac_id_for_pdev_id 3606 (struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id) 3607 { 3608 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3609 if (mac_id && pdev_id) { 3610 qdf_print("Both mac_id and pdev_id cannot be non zero"); 3611 QDF_BUG(0); 3612 return 0; 3613 } 3614 return (mac_id + pdev_id); 3615 } 3616 3617 return soc->pdev_list[pdev_id]->lmac_id; 3618 } 3619 3620 /** 3621 * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id 3622 * @soc: soc pointer 3623 * @lmac_id: LMAC id 3624 * 3625 * For MCL, Single pdev exists 3626 * 3627 * For WIN, each PDEV will operate one ring. 3628 * 3629 */ 3630 static inline struct dp_pdev * dp_get_pdev_for_lmac_id(struct dp_soc * soc,uint32_t lmac_id)3631 dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id) 3632 { 3633 uint8_t i = 0; 3634 3635 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 3636 i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id); 3637 return ((i < MAX_PDEV_CNT) ? soc->pdev_list[i] : NULL); 3638 } 3639 3640 /* Typically for MCL as there only 1 PDEV*/ 3641 return soc->pdev_list[0]; 3642 } 3643 3644 /** 3645 * dp_calculate_target_pdev_id_from_host_pdev_id() - Return target pdev 3646 * corresponding to host pdev id 3647 * @soc: soc pointer 3648 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 3649 * 3650 * Return: target pdev_id for host pdev id. For WIN, this is derived through 3651 * a two step process: 3652 * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change 3653 * during mode switch) 3654 * 2. Get target pdev_id (set up during WMI ready) from lmac_id 3655 * 3656 * For MCL, return the offset-1 translated mac_id 3657 */ 3658 static inline int dp_calculate_target_pdev_id_from_host_pdev_id(struct dp_soc * soc,uint32_t mac_for_pdev)3659 dp_calculate_target_pdev_id_from_host_pdev_id 3660 (struct dp_soc *soc, uint32_t mac_for_pdev) 3661 { 3662 struct dp_pdev *pdev; 3663 3664 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3665 return DP_SW2HW_MACID(mac_for_pdev); 3666 3667 pdev = soc->pdev_list[mac_for_pdev]; 3668 3669 /*non-MCL case, get original target_pdev mapping*/ 3670 return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id); 3671 } 3672 3673 /** 3674 * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding 3675 * to host pdev id 3676 * @soc: soc pointer 3677 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 3678 * 3679 * Return: target pdev_id for host pdev id. 3680 * For WIN, return the value stored in pdev object. 3681 * For MCL, return the offset-1 translated mac_id. 3682 */ 3683 static inline int dp_get_target_pdev_id_for_host_pdev_id(struct dp_soc * soc,uint32_t mac_for_pdev)3684 dp_get_target_pdev_id_for_host_pdev_id 3685 (struct dp_soc *soc, uint32_t mac_for_pdev) 3686 { 3687 struct dp_pdev *pdev; 3688 3689 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3690 return DP_SW2HW_MACID(mac_for_pdev); 3691 3692 pdev = soc->pdev_list[mac_for_pdev]; 3693 3694 return pdev->target_pdev_id; 3695 } 3696 3697 /** 3698 * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding 3699 * to target pdev id 3700 * @soc: soc pointer 3701 * @pdev_id: pdev_id corresponding to target pdev 3702 * 3703 * Return: host pdev_id for target pdev id. For WIN, this is derived through 3704 * a two step process: 3705 * 1. Get lmac_id corresponding to target pdev_id 3706 * 2. Get host pdev_id (set up during WMI ready) from lmac_id 3707 * 3708 * For MCL, return the 0-offset pdev_id 3709 */ 3710 static inline int dp_get_host_pdev_id_for_target_pdev_id(struct dp_soc * soc,uint32_t pdev_id)3711 dp_get_host_pdev_id_for_target_pdev_id 3712 (struct dp_soc *soc, uint32_t pdev_id) 3713 { 3714 struct dp_pdev *pdev; 3715 int lmac_id; 3716 3717 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3718 return DP_HW2SW_MACID(pdev_id); 3719 3720 /*non-MCL case, get original target_lmac mapping from target pdev*/ 3721 lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, 3722 DP_HW2SW_MACID(pdev_id)); 3723 3724 /*Get host pdev from lmac*/ 3725 pdev = dp_get_pdev_for_lmac_id(soc, lmac_id); 3726 3727 return pdev ? pdev->pdev_id : INVALID_PDEV_ID; 3728 } 3729 3730 /** 3731 * dp_get_mac_id_for_mac() - Return mac corresponding WIN and MCL mac_ids 3732 * 3733 * @soc: handle to DP soc 3734 * @mac_id: MAC id 3735 * 3736 * Single pdev using both MACs will operate on both MAC rings, 3737 * which is the case for MCL. 3738 * For WIN each PDEV will operate one ring, so index is zero. 3739 * 3740 */ dp_get_mac_id_for_mac(struct dp_soc * soc,uint32_t mac_id)3741 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id) 3742 { 3743 /* 3744 * Single pdev using both MACs will operate on both MAC rings, 3745 * which is the case for MCL. 3746 */ 3747 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 3748 return mac_id; 3749 3750 /* For WIN each PDEV will operate one ring, so index is zero. */ 3751 return 0; 3752 } 3753 3754 /** 3755 * dp_is_subtype_data() - check if the frame subtype is data 3756 * 3757 * @frame_ctrl: Frame control field 3758 * 3759 * check the frame control field and verify if the packet 3760 * is a data packet. 3761 * 3762 * Return: true or false 3763 */ dp_is_subtype_data(uint16_t frame_ctrl)3764 static inline bool dp_is_subtype_data(uint16_t frame_ctrl) 3765 { 3766 if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) == 3767 QDF_IEEE80211_FC0_TYPE_DATA) && 3768 (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 3769 QDF_IEEE80211_FC0_SUBTYPE_DATA) || 3770 ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 3771 QDF_IEEE80211_FC0_SUBTYPE_QOS))) { 3772 return true; 3773 } 3774 3775 return false; 3776 } 3777 3778 #ifdef WDI_EVENT_ENABLE 3779 /** 3780 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW 3781 * @pdev: DP PDEV handle 3782 * @stats_type_upload_mask: stats type requested by user 3783 * @mac_id: Mac id number 3784 * 3785 * return: QDF STATUS 3786 */ 3787 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 3788 uint32_t stats_type_upload_mask, 3789 uint8_t mac_id); 3790 3791 /** 3792 * dp_wdi_event_unsub() - WDI event unsubscribe 3793 * @soc: soc handle 3794 * @pdev_id: id of pdev 3795 * @event_cb_sub_handle: subscribed event handle 3796 * @event: Event to be unsubscribe 3797 * 3798 * Return: 0 for success. nonzero for failure. 3799 */ 3800 int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 3801 wdi_event_subscribe *event_cb_sub_handle, 3802 uint32_t event); 3803 3804 /** 3805 * dp_wdi_event_sub() - Subscribe WDI event 3806 * @soc: soc handle 3807 * @pdev_id: id of pdev 3808 * @event_cb_sub_handle: subscribe event handle 3809 * @event: Event to be subscribe 3810 * 3811 * Return: 0 for success. nonzero for failure. 3812 */ 3813 int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 3814 wdi_event_subscribe *event_cb_sub_handle, 3815 uint32_t event); 3816 3817 /** 3818 * dp_wdi_event_handler() - Event handler for WDI event 3819 * @event: wdi event number 3820 * @soc: soc handle 3821 * @data: pointer to data 3822 * @peer_id: peer id number 3823 * @status: HTT rx status 3824 * @pdev_id: id of pdev 3825 * 3826 * It will be called to register WDI event 3827 * 3828 * Return: None 3829 */ 3830 void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc, 3831 void *data, u_int16_t peer_id, 3832 int status, u_int8_t pdev_id); 3833 3834 /** 3835 * dp_wdi_event_attach() - Attach wdi event 3836 * @txrx_pdev: DP pdev handle 3837 * 3838 * Return: 0 for success. nonzero for failure. 3839 */ 3840 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev); 3841 3842 /** 3843 * dp_wdi_event_detach() - Detach WDI event 3844 * @txrx_pdev: DP pdev handle 3845 * 3846 * Return: 0 for success. nonzero for failure. 3847 */ 3848 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev); 3849 3850 static inline void dp_hif_update_pipe_callback(struct dp_soc * dp_soc,void * cb_context,QDF_STATUS (* callback)(void *,qdf_nbuf_t,uint8_t),uint8_t pipe_id)3851 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, 3852 void *cb_context, 3853 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 3854 uint8_t pipe_id) 3855 { 3856 struct hif_msg_callbacks hif_pipe_callbacks = { 0 }; 3857 3858 /* TODO: Temporary change to bypass HTC connection for this new 3859 * HIF pipe, which will be used for packet log and other high- 3860 * priority HTT messages. Proper HTC connection to be added 3861 * later once required FW changes are available 3862 */ 3863 hif_pipe_callbacks.rxCompletionHandler = callback; 3864 hif_pipe_callbacks.Context = cb_context; 3865 hif_update_pipe_callback(dp_soc->hif_handle, 3866 DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks); 3867 } 3868 #else dp_wdi_event_unsub(struct cdp_soc_t * soc,uint8_t pdev_id,wdi_event_subscribe * event_cb_sub_handle,uint32_t event)3869 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 3870 wdi_event_subscribe *event_cb_sub_handle, 3871 uint32_t event) 3872 { 3873 return 0; 3874 } 3875 dp_wdi_event_sub(struct cdp_soc_t * soc,uint8_t pdev_id,wdi_event_subscribe * event_cb_sub_handle,uint32_t event)3876 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 3877 wdi_event_subscribe *event_cb_sub_handle, 3878 uint32_t event) 3879 { 3880 return 0; 3881 } 3882 3883 static inline dp_wdi_event_handler(enum WDI_EVENT event,struct dp_soc * soc,void * data,u_int16_t peer_id,int status,u_int8_t pdev_id)3884 void dp_wdi_event_handler(enum WDI_EVENT event, 3885 struct dp_soc *soc, 3886 void *data, u_int16_t peer_id, 3887 int status, u_int8_t pdev_id) 3888 { 3889 } 3890 dp_wdi_event_attach(struct dp_pdev * txrx_pdev)3891 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev) 3892 { 3893 return 0; 3894 } 3895 dp_wdi_event_detach(struct dp_pdev * txrx_pdev)3896 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev) 3897 { 3898 return 0; 3899 } 3900 dp_h2t_cfg_stats_msg_send(struct dp_pdev * pdev,uint32_t stats_type_upload_mask,uint8_t mac_id)3901 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 3902 uint32_t stats_type_upload_mask, uint8_t mac_id) 3903 { 3904 return 0; 3905 } 3906 3907 static inline void dp_hif_update_pipe_callback(struct dp_soc * dp_soc,void * cb_context,QDF_STATUS (* callback)(void *,qdf_nbuf_t,uint8_t),uint8_t pipe_id)3908 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, 3909 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 3910 uint8_t pipe_id) 3911 { 3912 } 3913 #endif 3914 3915 #ifdef VDEV_PEER_PROTOCOL_COUNT 3916 /** 3917 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 3918 * @vdev: VDEV DP object 3919 * @nbuf: data packet 3920 * @txrx_peer: DP TXRX Peer object 3921 * @is_egress: whether egress or ingress 3922 * @is_rx: whether rx or tx 3923 * 3924 * This function updates the per-peer protocol counters 3925 * Return: void 3926 */ 3927 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, 3928 qdf_nbuf_t nbuf, 3929 struct dp_txrx_peer *txrx_peer, 3930 bool is_egress, 3931 bool is_rx); 3932 3933 /** 3934 * dp_peer_stats_update_protocol_cnt() - update per-peer protocol counters 3935 * @soc: SOC DP object 3936 * @vdev_id: vdev_id 3937 * @nbuf: data packet 3938 * @is_egress: whether egress or ingress 3939 * @is_rx: whether rx or tx 3940 * 3941 * This function updates the per-peer protocol counters 3942 * 3943 * Return: void 3944 */ 3945 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc, 3946 int8_t vdev_id, 3947 qdf_nbuf_t nbuf, 3948 bool is_egress, 3949 bool is_rx); 3950 3951 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 3952 qdf_nbuf_t nbuf); 3953 3954 #else 3955 #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, txrx_peer, \ 3956 is_egress, is_rx) 3957 3958 static inline dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev * vdev_hdl,qdf_nbuf_t nbuf)3959 void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl, 3960 qdf_nbuf_t nbuf) 3961 { 3962 } 3963 3964 #endif 3965 3966 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 3967 /** 3968 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info 3969 * @soc_hdl: Handle to struct cdp_soc 3970 * 3971 * Return: none 3972 */ 3973 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl); 3974 3975 /** 3976 * dp_tx_dump_flow_pool_info_compact() - dump flow pool info 3977 * @soc: DP soc context 3978 * 3979 * Return: none 3980 */ 3981 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc); 3982 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, 3983 bool force); 3984 #else dp_tx_dump_flow_pool_info_compact(struct dp_soc * soc)3985 static inline void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc) 3986 { 3987 } 3988 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 3989 3990 #ifdef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS 3991 static inline int dp_hal_srng_access_start(hal_soc_handle_t soc,hal_ring_handle_t hal_ring_hdl)3992 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3993 { 3994 return hal_srng_access_start_unlocked(soc, hal_ring_hdl); 3995 } 3996 3997 static inline void dp_hal_srng_access_end(hal_soc_handle_t soc,hal_ring_handle_t hal_ring_hdl)3998 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 3999 { 4000 hal_srng_access_end_unlocked(soc, hal_ring_hdl); 4001 } 4002 4003 #else 4004 static inline int dp_hal_srng_access_start(hal_soc_handle_t soc,hal_ring_handle_t hal_ring_hdl)4005 dp_hal_srng_access_start(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 4006 { 4007 return hal_srng_access_start(soc, hal_ring_hdl); 4008 } 4009 4010 static inline void dp_hal_srng_access_end(hal_soc_handle_t soc,hal_ring_handle_t hal_ring_hdl)4011 dp_hal_srng_access_end(hal_soc_handle_t soc, hal_ring_handle_t hal_ring_hdl) 4012 { 4013 hal_srng_access_end(soc, hal_ring_hdl); 4014 } 4015 #endif 4016 4017 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 4018 /** 4019 * dp_srng_access_start() - Wrapper function to log access start of a hal ring 4020 * @int_ctx: pointer to DP interrupt context. This should not be NULL 4021 * @dp_soc: DP Soc handle 4022 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be 4023 * serviced 4024 * 4025 * Return: 0 on success; error on failure 4026 */ 4027 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 4028 hal_ring_handle_t hal_ring_hdl); 4029 4030 /** 4031 * dp_srng_access_end() - Wrapper function to log access end of a hal ring 4032 * @int_ctx: pointer to DP interrupt context. This should not be NULL 4033 * @dp_soc: DP Soc handle 4034 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be 4035 * serviced 4036 * 4037 * Return: void 4038 */ 4039 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 4040 hal_ring_handle_t hal_ring_hdl); 4041 4042 #else dp_srng_access_start(struct dp_intr * int_ctx,struct dp_soc * dp_soc,hal_ring_handle_t hal_ring_hdl)4043 static inline int dp_srng_access_start(struct dp_intr *int_ctx, 4044 struct dp_soc *dp_soc, 4045 hal_ring_handle_t hal_ring_hdl) 4046 { 4047 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 4048 4049 return dp_hal_srng_access_start(hal_soc, hal_ring_hdl); 4050 } 4051 dp_srng_access_end(struct dp_intr * int_ctx,struct dp_soc * dp_soc,hal_ring_handle_t hal_ring_hdl)4052 static inline void dp_srng_access_end(struct dp_intr *int_ctx, 4053 struct dp_soc *dp_soc, 4054 hal_ring_handle_t hal_ring_hdl) 4055 { 4056 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 4057 4058 return dp_hal_srng_access_end(hal_soc, hal_ring_hdl); 4059 } 4060 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 4061 4062 #ifdef QCA_CACHED_RING_DESC 4063 /** 4064 * dp_srng_dst_get_next() - Wrapper function to get next ring desc 4065 * @dp_soc: DP Soc handle 4066 * @hal_ring_hdl: opaque pointer to the HAL Destination Ring 4067 * 4068 * Return: HAL ring descriptor 4069 */ dp_srng_dst_get_next(struct dp_soc * dp_soc,hal_ring_handle_t hal_ring_hdl)4070 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 4071 hal_ring_handle_t hal_ring_hdl) 4072 { 4073 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 4074 4075 return hal_srng_dst_get_next_cached(hal_soc, hal_ring_hdl); 4076 } 4077 4078 /** 4079 * dp_srng_dst_inv_cached_descs() - Wrapper function to invalidate cached 4080 * descriptors 4081 * @dp_soc: DP Soc handle 4082 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 4083 * @num_entries: Entry count 4084 * 4085 * Return: HAL ring descriptor 4086 */ dp_srng_dst_inv_cached_descs(struct dp_soc * dp_soc,hal_ring_handle_t hal_ring_hdl,uint32_t num_entries)4087 static inline void *dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 4088 hal_ring_handle_t hal_ring_hdl, 4089 uint32_t num_entries) 4090 { 4091 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 4092 4093 return hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, 4094 num_entries); 4095 } 4096 #else dp_srng_dst_get_next(struct dp_soc * dp_soc,hal_ring_handle_t hal_ring_hdl)4097 static inline void *dp_srng_dst_get_next(struct dp_soc *dp_soc, 4098 hal_ring_handle_t hal_ring_hdl) 4099 { 4100 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 4101 4102 return hal_srng_dst_get_next(hal_soc, hal_ring_hdl); 4103 } 4104 dp_srng_dst_inv_cached_descs(struct dp_soc * dp_soc,hal_ring_handle_t hal_ring_hdl,uint32_t num_entries)4105 static inline void *dp_srng_dst_inv_cached_descs(struct dp_soc *dp_soc, 4106 hal_ring_handle_t hal_ring_hdl, 4107 uint32_t num_entries) 4108 { 4109 return NULL; 4110 } 4111 #endif /* QCA_CACHED_RING_DESC */ 4112 4113 #if defined(QCA_CACHED_RING_DESC) && \ 4114 (defined(QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH) || \ 4115 defined(QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH)) 4116 /** 4117 * dp_srng_dst_prefetch() - Wrapper function to prefetch descs from dest ring 4118 * @hal_soc: HAL SOC handle 4119 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 4120 * @num_entries: Entry count 4121 * 4122 * Return: None 4123 */ dp_srng_dst_prefetch(hal_soc_handle_t hal_soc,hal_ring_handle_t hal_ring_hdl,uint32_t num_entries)4124 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 4125 hal_ring_handle_t hal_ring_hdl, 4126 uint32_t num_entries) 4127 { 4128 return hal_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_entries); 4129 } 4130 4131 /** 4132 * dp_srng_dst_prefetch_32_byte_desc() - Wrapper function to prefetch 4133 * 32 byte descriptor starting at 4134 * 64 byte offset 4135 * @hal_soc: HAL SOC handle 4136 * @hal_ring_hdl: opaque pointer to the HAL Rx Destination ring 4137 * @num_entries: Entry count 4138 * 4139 * Return: None 4140 */ 4141 static inline dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc,hal_ring_handle_t hal_ring_hdl,uint32_t num_entries)4142 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 4143 hal_ring_handle_t hal_ring_hdl, 4144 uint32_t num_entries) 4145 { 4146 return hal_srng_dst_prefetch_32_byte_desc(hal_soc, hal_ring_hdl, 4147 num_entries); 4148 } 4149 #else dp_srng_dst_prefetch(hal_soc_handle_t hal_soc,hal_ring_handle_t hal_ring_hdl,uint32_t num_entries)4150 static inline void *dp_srng_dst_prefetch(hal_soc_handle_t hal_soc, 4151 hal_ring_handle_t hal_ring_hdl, 4152 uint32_t num_entries) 4153 { 4154 return NULL; 4155 } 4156 4157 static inline dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc,hal_ring_handle_t hal_ring_hdl,uint32_t num_entries)4158 void *dp_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc, 4159 hal_ring_handle_t hal_ring_hdl, 4160 uint32_t num_entries) 4161 { 4162 return NULL; 4163 } 4164 #endif 4165 4166 #ifdef QCA_ENH_V3_STATS_SUPPORT 4167 /** 4168 * dp_pdev_print_delay_stats(): Print pdev level delay stats 4169 * @pdev: DP_PDEV handle 4170 * 4171 * Return:void 4172 */ 4173 void dp_pdev_print_delay_stats(struct dp_pdev *pdev); 4174 4175 /** 4176 * dp_pdev_print_tid_stats(): Print pdev level tid stats 4177 * @pdev: DP_PDEV handle 4178 * 4179 * Return:void 4180 */ 4181 void dp_pdev_print_tid_stats(struct dp_pdev *pdev); 4182 4183 /** 4184 * dp_pdev_print_rx_error_stats(): Print pdev level rx error stats 4185 * @pdev: DP_PDEV handle 4186 * 4187 * Return:void 4188 */ 4189 void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev); 4190 #endif /* QCA_ENH_V3_STATS_SUPPORT */ 4191 4192 /** 4193 * dp_pdev_get_tid_stats(): Get accumulated pdev level tid_stats 4194 * @soc_hdl: soc handle 4195 * @pdev_id: id of dp_pdev handle 4196 * @tid_stats: Pointer for cdp_tid_stats_intf 4197 * 4198 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_INVAL 4199 */ 4200 QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 4201 struct cdp_tid_stats_intf *tid_stats); 4202 4203 /** 4204 * dp_soc_set_txrx_ring_map() 4205 * @soc: DP handler for soc 4206 * 4207 * Return: Void 4208 */ 4209 void dp_soc_set_txrx_ring_map(struct dp_soc *soc); 4210 4211 /** 4212 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev 4213 * @vdev: DP vdev handle 4214 * 4215 * Return: struct cdp_vdev pointer 4216 */ 4217 static inline dp_vdev_to_cdp_vdev(struct dp_vdev * vdev)4218 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev) 4219 { 4220 return (struct cdp_vdev *)vdev; 4221 } 4222 4223 /** 4224 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev 4225 * @pdev: DP pdev handle 4226 * 4227 * Return: struct cdp_pdev pointer 4228 */ 4229 static inline dp_pdev_to_cdp_pdev(struct dp_pdev * pdev)4230 struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev) 4231 { 4232 return (struct cdp_pdev *)pdev; 4233 } 4234 4235 /** 4236 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc 4237 * @psoc: DP psoc handle 4238 * 4239 * Return: struct cdp_soc pointer 4240 */ 4241 static inline dp_soc_to_cdp_soc(struct dp_soc * psoc)4242 struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc) 4243 { 4244 return (struct cdp_soc *)psoc; 4245 } 4246 4247 /** 4248 * dp_soc_to_cdp_soc_t() - typecast dp psoc to ol txrx soc handle 4249 * @psoc: DP psoc handle 4250 * 4251 * Return: struct cdp_soc_t pointer 4252 */ 4253 static inline dp_soc_to_cdp_soc_t(struct dp_soc * psoc)4254 struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc) 4255 { 4256 return (struct cdp_soc_t *)psoc; 4257 } 4258 4259 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) 4260 /** 4261 * dp_rx_flow_get_fse_stats() - Retrieve a flow's statistics 4262 * @pdev: pdev handle 4263 * @rx_flow_info: flow information in the Rx FST 4264 * @stats: stats to update 4265 * 4266 * Return: Success when flow statistcs is updated, error on failure 4267 */ 4268 QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev, 4269 struct cdp_rx_flow_info *rx_flow_info, 4270 struct cdp_flow_stats *stats); 4271 4272 /** 4273 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table 4274 * @pdev: pdev handle 4275 * @rx_flow_info: DP flow parameters 4276 * 4277 * Return: Success when flow is deleted, error on failure 4278 */ 4279 QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev, 4280 struct cdp_rx_flow_info *rx_flow_info); 4281 4282 /** 4283 * dp_rx_flow_add_entry() - Add a flow entry to flow search table 4284 * @pdev: DP pdev instance 4285 * @rx_flow_info: DP flow parameters 4286 * 4287 * Return: Success when flow is added, no-memory or already exists on error 4288 */ 4289 QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev, 4290 struct cdp_rx_flow_info *rx_flow_info); 4291 4292 /** 4293 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters 4294 * @soc: SoC handle 4295 * @pdev: Pdev handle 4296 * 4297 * Return: Handle to flow search table entry 4298 */ 4299 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev); 4300 4301 /** 4302 * dp_rx_fst_detach() - De-initialize Rx FST 4303 * @soc: SoC handle 4304 * @pdev: Pdev handle 4305 * 4306 * Return: None 4307 */ 4308 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev); 4309 4310 /** 4311 * dp_mon_rx_update_rx_flow_tag_stats() - Update a mon flow's statistics 4312 * @pdev: pdev handle 4313 * @flow_id: flow index (truncated hash) in the Rx FST 4314 * 4315 * Return: Success when flow statistcs is updated, error on failure 4316 */ 4317 QDF_STATUS 4318 dp_mon_rx_update_rx_flow_tag_stats(struct dp_pdev *pdev, uint32_t flow_id); 4319 #endif 4320 4321 #ifdef WLAN_SUPPORT_RX_FLOW_TAG 4322 /** 4323 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach 4324 * @soc: SoC handle 4325 * @pdev: Pdev handle 4326 * 4327 * Return: Success when fst parameters are programmed in FW, error otherwise 4328 */ 4329 QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc, 4330 struct dp_pdev *pdev); 4331 #endif 4332 4333 /** 4334 * dp_rx_fst_attach_wrapper() - wrapper API for dp_rx_fst_attach 4335 * @soc: SoC handle 4336 * @pdev: Pdev handle 4337 * 4338 * Return: Handle to flow search table entry 4339 */ 4340 extern QDF_STATUS 4341 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev); 4342 4343 /** 4344 * dp_rx_fst_detach_wrapper() - wrapper API for dp_rx_fst_detach 4345 * @soc: SoC handle 4346 * @pdev: Pdev handle 4347 * 4348 * Return: None 4349 */ 4350 extern void 4351 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev); 4352 4353 /** 4354 * dp_vdev_get_ref() - API to take a reference for VDEV object 4355 * 4356 * @soc : core DP soc context 4357 * @vdev : DP vdev 4358 * @mod_id : module id 4359 * 4360 * Return: QDF_STATUS_SUCCESS if reference held successfully 4361 * else QDF_STATUS_E_INVAL 4362 */ 4363 static inline dp_vdev_get_ref(struct dp_soc * soc,struct dp_vdev * vdev,enum dp_mod_id mod_id)4364 QDF_STATUS dp_vdev_get_ref(struct dp_soc *soc, struct dp_vdev *vdev, 4365 enum dp_mod_id mod_id) 4366 { 4367 if (!qdf_atomic_inc_not_zero(&vdev->ref_cnt)) 4368 return QDF_STATUS_E_INVAL; 4369 4370 qdf_atomic_inc(&vdev->mod_refs[mod_id]); 4371 4372 return QDF_STATUS_SUCCESS; 4373 } 4374 4375 /** 4376 * dp_vdev_get_ref_by_id() - Returns vdev object given the vdev id 4377 * @soc: core DP soc context 4378 * @vdev_id: vdev id from vdev object can be retrieved 4379 * @mod_id: module id which is requesting the reference 4380 * 4381 * Return: struct dp_vdev*: Pointer to DP vdev object 4382 */ 4383 static inline struct dp_vdev * dp_vdev_get_ref_by_id(struct dp_soc * soc,uint8_t vdev_id,enum dp_mod_id mod_id)4384 dp_vdev_get_ref_by_id(struct dp_soc *soc, uint8_t vdev_id, 4385 enum dp_mod_id mod_id) 4386 { 4387 struct dp_vdev *vdev = NULL; 4388 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) 4389 return NULL; 4390 4391 qdf_spin_lock_bh(&soc->vdev_map_lock); 4392 vdev = soc->vdev_id_map[vdev_id]; 4393 4394 if (!vdev || dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS) { 4395 qdf_spin_unlock_bh(&soc->vdev_map_lock); 4396 return NULL; 4397 } 4398 qdf_spin_unlock_bh(&soc->vdev_map_lock); 4399 4400 return vdev; 4401 } 4402 4403 /** 4404 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id 4405 * @soc: core DP soc context 4406 * @pdev_id: pdev id from pdev object can be retrieved 4407 * 4408 * Return: struct dp_pdev*: Pointer to DP pdev object 4409 */ 4410 static inline struct dp_pdev * dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc * soc,uint8_t pdev_id)4411 dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc, 4412 uint8_t pdev_id) 4413 { 4414 if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT)) 4415 return NULL; 4416 4417 return soc->pdev_list[pdev_id]; 4418 } 4419 4420 /** 4421 * dp_get_peer_mac_list(): function to get peer mac list of vdev 4422 * @soc: Datapath soc handle 4423 * @vdev_id: vdev id 4424 * @newmac: Table of the clients mac 4425 * @mac_cnt: No. of MACs required 4426 * @limit: Limit the number of clients 4427 * 4428 * Return: no of clients 4429 */ 4430 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id, 4431 u_int8_t newmac[][QDF_MAC_ADDR_SIZE], 4432 u_int16_t mac_cnt, bool limit); 4433 4434 /** 4435 * dp_update_num_mac_rings_for_dbs() - Update No of MAC rings based on 4436 * DBS check 4437 * @soc: DP SoC context 4438 * @max_mac_rings: Pointer to variable for No of MAC rings 4439 * 4440 * Return: None 4441 */ 4442 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc, 4443 int *max_mac_rings); 4444 4445 4446 #if defined(WLAN_SUPPORT_RX_FISA) 4447 /** 4448 * dp_rx_fst_update_cmem_params() - Update CMEM FST params 4449 * @soc: DP SoC context 4450 * @num_entries: Number of flow search entries 4451 * @cmem_ba_lo: CMEM base address low 4452 * @cmem_ba_hi: CMEM base address high 4453 * 4454 * Return: None 4455 */ 4456 void dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries, 4457 uint32_t cmem_ba_lo, uint32_t cmem_ba_hi); 4458 4459 /** 4460 * dp_fisa_config() - FISA config handler 4461 * @cdp_soc: CDP SoC handle 4462 * @pdev_id: PDEV ID 4463 * @config_id: FISA config ID 4464 * @cfg: FISA config msg data 4465 */ 4466 QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id, 4467 enum cdp_fisa_config_id config_id, 4468 union cdp_fisa_config *cfg); 4469 #else 4470 static inline void dp_rx_fst_update_cmem_params(struct dp_soc * soc,uint16_t num_entries,uint32_t cmem_ba_lo,uint32_t cmem_ba_hi)4471 dp_rx_fst_update_cmem_params(struct dp_soc *soc, uint16_t num_entries, 4472 uint32_t cmem_ba_lo, uint32_t cmem_ba_hi) 4473 { 4474 } 4475 #endif /* WLAN_SUPPORT_RX_FISA */ 4476 4477 #ifdef MAX_ALLOC_PAGE_SIZE 4478 /** 4479 * dp_set_max_page_size() - Set the max page size for hw link desc. 4480 * @pages: link desc page handle 4481 * @max_alloc_size: max_alloc_size 4482 * 4483 * For MCL the page size is set to OS defined value and for WIN 4484 * the page size is set to the max_alloc_size cfg ini 4485 * param. 4486 * This is to ensure that WIN gets contiguous memory allocations 4487 * as per requirement. 4488 * 4489 * Return: None 4490 */ 4491 static inline dp_set_max_page_size(struct qdf_mem_multi_page_t * pages,uint32_t max_alloc_size)4492 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages, 4493 uint32_t max_alloc_size) 4494 { 4495 pages->page_size = qdf_page_size; 4496 } 4497 4498 #else 4499 static inline dp_set_max_page_size(struct qdf_mem_multi_page_t * pages,uint32_t max_alloc_size)4500 void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages, 4501 uint32_t max_alloc_size) 4502 { 4503 pages->page_size = max_alloc_size; 4504 } 4505 #endif /* MAX_ALLOC_PAGE_SIZE */ 4506 4507 /** 4508 * dp_get_next_index() - get the next entry to record an entry 4509 * in the history. 4510 * @curr_idx: Current index where the last entry is written. 4511 * @max_entries: Max number of entries in the history 4512 * 4513 * This function assumes that the max number os entries is a power of 2. 4514 * 4515 * Return: The index where the next entry is to be written. 4516 */ 4517 dp_get_next_index(qdf_atomic_t * curr_idx,uint32_t max_entries)4518 static inline uint32_t dp_get_next_index(qdf_atomic_t *curr_idx, 4519 uint32_t max_entries) 4520 { 4521 uint32_t idx = qdf_atomic_inc_return(curr_idx); 4522 4523 return idx & (max_entries - 1); 4524 } 4525 4526 /** 4527 * dp_history_get_next_index() - get the next entry to record an entry 4528 * in the history. 4529 * @curr_idx: Current index where the last entry is written. 4530 * @max_entries: Max number of entries in the history 4531 * 4532 * This function assumes that the max number os entries is a power of 2. 4533 * 4534 * Return: The index where the next entry is to be written. 4535 */ dp_history_get_next_index(qdf_atomic_t * curr_idx,uint32_t max_entries)4536 static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx, 4537 uint32_t max_entries) 4538 { 4539 return dp_get_next_index(curr_idx, max_entries); 4540 } 4541 4542 /** 4543 * dp_rx_skip_tlvs() - Skip TLVs len + L3 padding, save in nbuf->cb 4544 * @soc: Datapath soc handle 4545 * @nbuf: nbuf cb to be updated 4546 * @l3_padding: L3 padding 4547 * 4548 * Return: None 4549 */ 4550 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding); 4551 4552 #ifndef FEATURE_WDS 4553 static inline void dp_hmwds_ast_add_notify(struct dp_peer * peer,uint8_t * mac_addr,enum cdp_txrx_ast_entry_type type,QDF_STATUS err,bool is_peer_map)4554 dp_hmwds_ast_add_notify(struct dp_peer *peer, 4555 uint8_t *mac_addr, 4556 enum cdp_txrx_ast_entry_type type, 4557 QDF_STATUS err, 4558 bool is_peer_map) 4559 { 4560 } 4561 #endif 4562 4563 #ifdef HTT_STATS_DEBUGFS_SUPPORT 4564 /** 4565 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 4566 * debugfs for HTT stats 4567 * @pdev: dp pdev handle 4568 * 4569 * Return: QDF_STATUS 4570 */ 4571 QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev); 4572 4573 /** 4574 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 4575 * HTT stats 4576 * @pdev: dp pdev handle 4577 * 4578 * Return: none 4579 */ 4580 void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev); 4581 #else 4582 4583 /** 4584 * dp_pdev_htt_stats_dbgfs_init() - Function to allocate memory and initialize 4585 * debugfs for HTT stats 4586 * @pdev: dp pdev handle 4587 * 4588 * Return: QDF_STATUS 4589 */ 4590 static inline QDF_STATUS dp_pdev_htt_stats_dbgfs_init(struct dp_pdev * pdev)4591 dp_pdev_htt_stats_dbgfs_init(struct dp_pdev *pdev) 4592 { 4593 return QDF_STATUS_SUCCESS; 4594 } 4595 4596 /** 4597 * dp_pdev_htt_stats_dbgfs_deinit() - Function to remove debugfs entry for 4598 * HTT stats 4599 * @pdev: dp pdev handle 4600 * 4601 * Return: none 4602 */ 4603 static inline void dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev * pdev)4604 dp_pdev_htt_stats_dbgfs_deinit(struct dp_pdev *pdev) 4605 { 4606 } 4607 #endif /* HTT_STATS_DEBUGFS_SUPPORT */ 4608 4609 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR 4610 /** 4611 * dp_soc_swlm_attach() - attach the software latency manager resources 4612 * @soc: Datapath global soc handle 4613 * 4614 * Return: QDF_STATUS 4615 */ dp_soc_swlm_attach(struct dp_soc * soc)4616 static inline QDF_STATUS dp_soc_swlm_attach(struct dp_soc *soc) 4617 { 4618 return QDF_STATUS_SUCCESS; 4619 } 4620 4621 /** 4622 * dp_soc_swlm_detach() - detach the software latency manager resources 4623 * @soc: Datapath global soc handle 4624 * 4625 * Return: QDF_STATUS 4626 */ dp_soc_swlm_detach(struct dp_soc * soc)4627 static inline QDF_STATUS dp_soc_swlm_detach(struct dp_soc *soc) 4628 { 4629 return QDF_STATUS_SUCCESS; 4630 } 4631 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */ 4632 4633 #ifndef WLAN_DP_PROFILE_SUPPORT wlan_dp_soc_cfg_sync_profile(struct cdp_soc_t * cdp_soc)4634 static inline void wlan_dp_soc_cfg_sync_profile(struct cdp_soc_t *cdp_soc) {} 4635 wlan_dp_pdev_cfg_sync_profile(struct cdp_soc_t * cdp_soc,uint8_t pdev_id)4636 static inline void wlan_dp_pdev_cfg_sync_profile(struct cdp_soc_t *cdp_soc, 4637 uint8_t pdev_id) {} 4638 #endif 4639 4640 /** 4641 * dp_get_peer_id(): function to get peer id by mac 4642 * @soc: Datapath soc handle 4643 * @vdev_id: vdev id 4644 * @mac: Peer mac address 4645 * 4646 * Return: valid peer id on success 4647 * HTT_INVALID_PEER on failure 4648 */ 4649 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac); 4650 4651 #ifdef QCA_SUPPORT_WDS_EXTENDED 4652 /** 4653 * dp_wds_ext_set_peer_rx(): function to set peer rx handler 4654 * @soc: Datapath soc handle 4655 * @vdev_id: vdev id 4656 * @mac: Peer mac address 4657 * @rx: rx function pointer 4658 * @osif_peer: OSIF peer handle 4659 * 4660 * Return: QDF_STATUS_SUCCESS on success 4661 * QDF_STATUS_E_INVAL if peer is not found 4662 * QDF_STATUS_E_ALREADY if rx is already set/unset 4663 */ 4664 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc, 4665 uint8_t vdev_id, 4666 uint8_t *mac, 4667 ol_txrx_rx_fp rx, 4668 ol_osif_peer_handle osif_peer); 4669 4670 /** 4671 * dp_wds_ext_get_peer_osif_handle(): function to get peer osif handle 4672 * @soc: Datapath soc handle 4673 * @vdev_id: vdev id 4674 * @mac: Peer mac address 4675 * @osif_peer: OSIF peer handle 4676 * 4677 * Return: QDF_STATUS_SUCCESS on success 4678 * QDF_STATUS_E_INVAL if peer is not found 4679 */ 4680 QDF_STATUS dp_wds_ext_get_peer_osif_handle( 4681 ol_txrx_soc_handle soc, 4682 uint8_t vdev_id, 4683 uint8_t *mac, 4684 ol_osif_peer_handle *osif_peer); 4685 4686 /** 4687 * dp_wds_ext_set_peer_bit(): function to set wds-ext peer bit 4688 * @soc: Datapath soc handle 4689 * @mac: Peer mac address 4690 * 4691 * Return: QDF_STATUS_SUCCESS on success 4692 * QDF_STATUS_E_INVAL if peer is not found 4693 */ 4694 QDF_STATUS dp_wds_ext_set_peer_bit(ol_txrx_soc_handle soc, uint8_t *mac); 4695 4696 #endif /* QCA_SUPPORT_WDS_EXTENDED */ 4697 4698 #ifdef DP_MEM_PRE_ALLOC 4699 4700 /** 4701 * dp_context_alloc_mem() - allocate memory for DP context 4702 * @soc: datapath soc handle 4703 * @ctxt_type: DP context type 4704 * @ctxt_size: DP context size 4705 * 4706 * Return: DP context address 4707 */ 4708 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4709 size_t ctxt_size); 4710 4711 /** 4712 * dp_context_free_mem() - Free memory of DP context 4713 * @soc: datapath soc handle 4714 * @ctxt_type: DP context type 4715 * @vaddr: Address of context memory 4716 * 4717 * Return: None 4718 */ 4719 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4720 void *vaddr); 4721 4722 /** 4723 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages 4724 * @soc: datapath soc handle 4725 * @desc_type: memory request source type 4726 * @pages: multi page information storage 4727 * @element_size: each element size 4728 * @element_num: total number of elements should be allocated 4729 * @memctxt: memory context 4730 * @cacheable: coherent memory or cacheable memory 4731 * 4732 * This function is a wrapper for memory allocation over multiple 4733 * pages, if dp prealloc method is registered, then will try prealloc 4734 * firstly. if prealloc failed, fall back to regular way over 4735 * qdf_mem_multi_pages_alloc(). 4736 * 4737 * Return: None 4738 */ 4739 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 4740 enum qdf_dp_desc_type desc_type, 4741 struct qdf_mem_multi_page_t *pages, 4742 size_t element_size, 4743 uint32_t element_num, 4744 qdf_dma_context_t memctxt, 4745 bool cacheable); 4746 4747 /** 4748 * dp_desc_multi_pages_mem_free() - free multiple pages memory 4749 * @soc: datapath soc handle 4750 * @desc_type: memory request source type 4751 * @pages: multi page information storage 4752 * @memctxt: memory context 4753 * @cacheable: coherent memory or cacheable memory 4754 * 4755 * This function is a wrapper for multiple pages memory free, 4756 * if memory is got from prealloc pool, put it back to pool. 4757 * otherwise free by qdf_mem_multi_pages_free(). 4758 * 4759 * Return: None 4760 */ 4761 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 4762 enum qdf_dp_desc_type desc_type, 4763 struct qdf_mem_multi_page_t *pages, 4764 qdf_dma_context_t memctxt, 4765 bool cacheable); 4766 4767 #else 4768 static inline dp_context_alloc_mem(struct dp_soc * soc,enum dp_ctxt_type ctxt_type,size_t ctxt_size)4769 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4770 size_t ctxt_size) 4771 { 4772 return qdf_mem_malloc(ctxt_size); 4773 } 4774 4775 static inline dp_context_free_mem(struct dp_soc * soc,enum dp_ctxt_type ctxt_type,void * vaddr)4776 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, 4777 void *vaddr) 4778 { 4779 qdf_mem_free(vaddr); 4780 } 4781 4782 static inline dp_desc_multi_pages_mem_alloc(struct dp_soc * soc,enum qdf_dp_desc_type desc_type,struct qdf_mem_multi_page_t * pages,size_t element_size,uint32_t element_num,qdf_dma_context_t memctxt,bool cacheable)4783 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, 4784 enum qdf_dp_desc_type desc_type, 4785 struct qdf_mem_multi_page_t *pages, 4786 size_t element_size, 4787 uint32_t element_num, 4788 qdf_dma_context_t memctxt, 4789 bool cacheable) 4790 { 4791 qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size, 4792 element_num, memctxt, cacheable); 4793 } 4794 4795 static inline dp_desc_multi_pages_mem_free(struct dp_soc * soc,enum qdf_dp_desc_type desc_type,struct qdf_mem_multi_page_t * pages,qdf_dma_context_t memctxt,bool cacheable)4796 void dp_desc_multi_pages_mem_free(struct dp_soc *soc, 4797 enum qdf_dp_desc_type desc_type, 4798 struct qdf_mem_multi_page_t *pages, 4799 qdf_dma_context_t memctxt, 4800 bool cacheable) 4801 { 4802 qdf_mem_multi_pages_free(soc->osdev, pages, 4803 memctxt, cacheable); 4804 } 4805 #endif 4806 4807 /** 4808 * struct dp_frag_history_opaque_atomic - Opaque struct for adding a fragmented 4809 * history. 4810 * @index: atomic index 4811 * @num_entries_per_slot: Number of entries per slot 4812 * @allocated: is allocated or not 4813 * @entry: pointers to array of records 4814 */ 4815 struct dp_frag_history_opaque_atomic { 4816 qdf_atomic_t index; 4817 uint16_t num_entries_per_slot; 4818 uint16_t allocated; 4819 void *entry[]; 4820 }; 4821 4822 static inline QDF_STATUS dp_soc_frag_history_attach(struct dp_soc * soc,void * history_hdl,uint32_t max_slots,uint32_t max_entries_per_slot,uint32_t entry_size,bool attempt_prealloc,enum dp_ctxt_type ctxt_type)4823 dp_soc_frag_history_attach(struct dp_soc *soc, void *history_hdl, 4824 uint32_t max_slots, uint32_t max_entries_per_slot, 4825 uint32_t entry_size, 4826 bool attempt_prealloc, enum dp_ctxt_type ctxt_type) 4827 { 4828 struct dp_frag_history_opaque_atomic *history = 4829 (struct dp_frag_history_opaque_atomic *)history_hdl; 4830 size_t alloc_size = max_entries_per_slot * entry_size; 4831 int i; 4832 4833 for (i = 0; i < max_slots; i++) { 4834 if (attempt_prealloc) 4835 history->entry[i] = dp_context_alloc_mem(soc, ctxt_type, 4836 alloc_size); 4837 else 4838 history->entry[i] = qdf_mem_malloc(alloc_size); 4839 4840 if (!history->entry[i]) 4841 goto exit; 4842 } 4843 4844 qdf_atomic_init(&history->index); 4845 history->allocated = 1; 4846 history->num_entries_per_slot = max_entries_per_slot; 4847 4848 return QDF_STATUS_SUCCESS; 4849 exit: 4850 for (i = i - 1; i >= 0; i--) { 4851 if (attempt_prealloc) 4852 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 4853 else 4854 qdf_mem_free(history->entry[i]); 4855 } 4856 4857 return QDF_STATUS_E_NOMEM; 4858 } 4859 4860 static inline dp_soc_frag_history_detach(struct dp_soc * soc,void * history_hdl,uint32_t max_slots,bool attempt_prealloc,enum dp_ctxt_type ctxt_type)4861 void dp_soc_frag_history_detach(struct dp_soc *soc, 4862 void *history_hdl, uint32_t max_slots, 4863 bool attempt_prealloc, 4864 enum dp_ctxt_type ctxt_type) 4865 { 4866 struct dp_frag_history_opaque_atomic *history = 4867 (struct dp_frag_history_opaque_atomic *)history_hdl; 4868 int i; 4869 4870 for (i = 0; i < max_slots; i++) { 4871 if (attempt_prealloc) 4872 dp_context_free_mem(soc, ctxt_type, history->entry[i]); 4873 else 4874 qdf_mem_free(history->entry[i]); 4875 } 4876 4877 history->allocated = 0; 4878 } 4879 4880 /** 4881 * dp_get_frag_hist_next_atomic_idx() - get the next entry index to record an 4882 * entry in a fragmented history with 4883 * index being atomic. 4884 * @curr_idx: address of the current index where the last entry was written 4885 * @next_idx: pointer to update the next index 4886 * @slot: pointer to update the history slot to be selected 4887 * @slot_shift: BITwise shift mask for slot (in index) 4888 * @max_entries_per_slot: Max number of entries in a slot of history 4889 * @max_entries: Total number of entries in the history (sum of all slots) 4890 * 4891 * This function assumes that the "max_entries_per_slot" and "max_entries" 4892 * are a power-of-2. 4893 * 4894 * Return: None 4895 */ 4896 static inline void dp_get_frag_hist_next_atomic_idx(qdf_atomic_t * curr_idx,uint32_t * next_idx,uint16_t * slot,uint32_t slot_shift,uint32_t max_entries_per_slot,uint32_t max_entries)4897 dp_get_frag_hist_next_atomic_idx(qdf_atomic_t *curr_idx, uint32_t *next_idx, 4898 uint16_t *slot, uint32_t slot_shift, 4899 uint32_t max_entries_per_slot, 4900 uint32_t max_entries) 4901 { 4902 uint32_t idx; 4903 4904 idx = qdf_do_div_rem(qdf_atomic_inc_return(curr_idx), max_entries); 4905 4906 *slot = idx >> slot_shift; 4907 *next_idx = idx & (max_entries_per_slot - 1); 4908 } 4909 4910 #ifdef FEATURE_RUNTIME_PM 4911 /** 4912 * dp_runtime_get() - Get dp runtime refcount 4913 * @soc: Datapath soc handle 4914 * 4915 * Get dp runtime refcount by increment of an atomic variable, which can block 4916 * dp runtime resume to wait to flush pending tx by runtime suspend. 4917 * 4918 * Return: Current refcount 4919 */ dp_runtime_get(struct dp_soc * soc)4920 static inline int32_t dp_runtime_get(struct dp_soc *soc) 4921 { 4922 return qdf_atomic_inc_return(&soc->dp_runtime_refcount); 4923 } 4924 4925 /** 4926 * dp_runtime_put() - Return dp runtime refcount 4927 * @soc: Datapath soc handle 4928 * 4929 * Return dp runtime refcount by decrement of an atomic variable, allow dp 4930 * runtime resume finish. 4931 * 4932 * Return: Current refcount 4933 */ dp_runtime_put(struct dp_soc * soc)4934 static inline int32_t dp_runtime_put(struct dp_soc *soc) 4935 { 4936 return qdf_atomic_dec_return(&soc->dp_runtime_refcount); 4937 } 4938 4939 /** 4940 * dp_runtime_get_refcount() - Get dp runtime refcount 4941 * @soc: Datapath soc handle 4942 * 4943 * Get dp runtime refcount by returning an atomic variable 4944 * 4945 * Return: Current refcount 4946 */ dp_runtime_get_refcount(struct dp_soc * soc)4947 static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc) 4948 { 4949 return qdf_atomic_read(&soc->dp_runtime_refcount); 4950 } 4951 4952 /** 4953 * dp_runtime_init() - Init DP related runtime PM clients and runtime refcount 4954 * @soc: Datapath soc handle 4955 * 4956 * Return: QDF_STATUS 4957 */ dp_runtime_init(struct dp_soc * soc)4958 static inline void dp_runtime_init(struct dp_soc *soc) 4959 { 4960 hif_rtpm_register(HIF_RTPM_ID_DP, NULL); 4961 hif_rtpm_register(HIF_RTPM_ID_DP_RING_STATS, NULL); 4962 qdf_atomic_init(&soc->dp_runtime_refcount); 4963 } 4964 4965 /** 4966 * dp_runtime_deinit() - Deinit DP related runtime PM clients 4967 * 4968 * Return: None 4969 */ dp_runtime_deinit(void)4970 static inline void dp_runtime_deinit(void) 4971 { 4972 hif_rtpm_deregister(HIF_RTPM_ID_DP); 4973 hif_rtpm_deregister(HIF_RTPM_ID_DP_RING_STATS); 4974 } 4975 4976 /** 4977 * dp_runtime_pm_mark_last_busy() - Mark last busy when rx path in use 4978 * @soc: Datapath soc handle 4979 * 4980 * Return: None 4981 */ dp_runtime_pm_mark_last_busy(struct dp_soc * soc)4982 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 4983 { 4984 soc->rx_last_busy = qdf_get_log_timestamp_usecs(); 4985 4986 hif_rtpm_mark_last_busy(HIF_RTPM_ID_DP); 4987 } 4988 #else dp_runtime_get(struct dp_soc * soc)4989 static inline int32_t dp_runtime_get(struct dp_soc *soc) 4990 { 4991 return 0; 4992 } 4993 dp_runtime_put(struct dp_soc * soc)4994 static inline int32_t dp_runtime_put(struct dp_soc *soc) 4995 { 4996 return 0; 4997 } 4998 dp_runtime_init(struct dp_soc * soc)4999 static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc) 5000 { 5001 return QDF_STATUS_SUCCESS; 5002 } 5003 dp_runtime_deinit(void)5004 static inline void dp_runtime_deinit(void) 5005 { 5006 } 5007 dp_runtime_pm_mark_last_busy(struct dp_soc * soc)5008 static inline void dp_runtime_pm_mark_last_busy(struct dp_soc *soc) 5009 { 5010 } 5011 #endif 5012 dp_soc_get_con_mode(struct dp_soc * soc)5013 static inline enum QDF_GLOBAL_MODE dp_soc_get_con_mode(struct dp_soc *soc) 5014 { 5015 if (soc->cdp_soc.ol_ops->get_con_mode) 5016 return soc->cdp_soc.ol_ops->get_con_mode(); 5017 5018 return QDF_GLOBAL_MAX_MODE; 5019 } 5020 5021 /** 5022 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats 5023 * processing 5024 * @pdev: Datapath PDEV handle 5025 * 5026 */ 5027 void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev); 5028 5029 /** 5030 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats 5031 * processing 5032 * @pdev: Datapath PDEV handle 5033 * 5034 * Return: QDF_STATUS_SUCCESS: Success 5035 * QDF_STATUS_E_NOMEM: Error 5036 */ 5037 5038 QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev); 5039 5040 /** 5041 * dp_peer_flush_frags() - Flush all fragments for a particular 5042 * peer 5043 * @soc_hdl: data path soc handle 5044 * @vdev_id: vdev id 5045 * @peer_mac: peer mac address 5046 * 5047 * Return: None 5048 */ 5049 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 5050 uint8_t *peer_mac); 5051 5052 /** 5053 * dp_soc_reset_mon_intr_mask() - reset mon intr mask 5054 * @soc: pointer to dp_soc handle 5055 * 5056 * Return: 5057 */ 5058 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc); 5059 5060 /** 5061 * dp_txrx_get_soc_stats() - will return cdp_soc_stats 5062 * @soc_hdl: soc handle 5063 * @soc_stats: buffer to hold the values 5064 * 5065 * Return: QDF_STATUS_SUCCESS: Success 5066 * QDF_STATUS_E_FAILURE: Error 5067 */ 5068 QDF_STATUS dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl, 5069 struct cdp_soc_stats *soc_stats); 5070 5071 /** 5072 * dp_txrx_get_peer_delay_stats() - to get peer delay stats per TIDs 5073 * @soc_hdl: soc handle 5074 * @vdev_id: id of vdev handle 5075 * @peer_mac: mac of DP_PEER handle 5076 * @delay_stats: pointer to delay stats array 5077 * 5078 * Return: QDF_STATUS_SUCCESS: Success 5079 * QDF_STATUS_E_FAILURE: Error 5080 */ 5081 QDF_STATUS 5082 dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 5083 uint8_t *peer_mac, 5084 struct cdp_delay_tid_stats *delay_stats); 5085 5086 /** 5087 * dp_txrx_get_peer_jitter_stats() - to get peer jitter stats per TIDs 5088 * @soc_hdl: soc handle 5089 * @pdev_id: id of pdev handle 5090 * @vdev_id: id of vdev handle 5091 * @peer_mac: mac of DP_PEER handle 5092 * @tid_stats: pointer to jitter stats array 5093 * 5094 * Return: QDF_STATUS_SUCCESS: Success 5095 * QDF_STATUS_E_FAILURE: Error 5096 */ 5097 QDF_STATUS 5098 dp_txrx_get_peer_jitter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5099 uint8_t vdev_id, uint8_t *peer_mac, 5100 struct cdp_peer_tid_stats *tid_stats); 5101 5102 /** 5103 * dp_peer_get_tx_capture_stats() - to get peer Tx Capture stats 5104 * @soc_hdl: soc handle 5105 * @vdev_id: id of vdev handle 5106 * @peer_mac: mac of DP_PEER handle 5107 * @stats: pointer to peer tx capture stats 5108 * 5109 * Return: QDF_STATUS_SUCCESS: Success 5110 * QDF_STATUS_E_FAILURE: Error 5111 */ 5112 QDF_STATUS 5113 dp_peer_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, 5114 uint8_t vdev_id, uint8_t *peer_mac, 5115 struct cdp_peer_tx_capture_stats *stats); 5116 5117 /** 5118 * dp_pdev_get_tx_capture_stats() - to get pdev Tx Capture stats 5119 * @soc_hdl: soc handle 5120 * @pdev_id: id of pdev handle 5121 * @stats: pointer to pdev tx capture stats 5122 * 5123 * Return: QDF_STATUS_SUCCESS: Success 5124 * QDF_STATUS_E_FAILURE: Error 5125 */ 5126 QDF_STATUS 5127 dp_pdev_get_tx_capture_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5128 struct cdp_pdev_tx_capture_stats *stats); 5129 5130 #ifdef HW_TX_DELAY_STATS_ENABLE 5131 /** 5132 * dp_is_vdev_tx_delay_stats_enabled(): Check if tx delay stats 5133 * is enabled for vdev 5134 * @vdev: dp vdev 5135 * 5136 * Return: true if tx delay stats is enabled for vdev else false 5137 */ dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev * vdev)5138 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 5139 { 5140 return vdev->hw_tx_delay_stats_enabled; 5141 } 5142 5143 /** 5144 * dp_pdev_print_tx_delay_stats(): Print vdev tx delay stats 5145 * for pdev 5146 * @soc: dp soc 5147 * 5148 * Return: None 5149 */ 5150 void dp_pdev_print_tx_delay_stats(struct dp_soc *soc); 5151 5152 /** 5153 * dp_pdev_clear_tx_delay_stats() - clear tx delay stats 5154 * @soc: soc handle 5155 * 5156 * Return: None 5157 */ 5158 void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc); 5159 #else dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev * vdev)5160 static inline uint8_t dp_is_vdev_tx_delay_stats_enabled(struct dp_vdev *vdev) 5161 { 5162 return 0; 5163 } 5164 dp_pdev_print_tx_delay_stats(struct dp_soc * soc)5165 static inline void dp_pdev_print_tx_delay_stats(struct dp_soc *soc) 5166 { 5167 } 5168 dp_pdev_clear_tx_delay_stats(struct dp_soc * soc)5169 static inline void dp_pdev_clear_tx_delay_stats(struct dp_soc *soc) 5170 { 5171 } 5172 #endif 5173 5174 static inline void dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config * lro_hash)5175 dp_get_rx_hash_key_bytes(struct cdp_lro_hash_config *lro_hash) 5176 { 5177 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv4, 5178 (sizeof(lro_hash->toeplitz_hash_ipv4[0]) * 5179 LRO_IPV4_SEED_ARR_SZ)); 5180 qdf_get_random_bytes(lro_hash->toeplitz_hash_ipv6, 5181 (sizeof(lro_hash->toeplitz_hash_ipv6[0]) * 5182 LRO_IPV6_SEED_ARR_SZ)); 5183 } 5184 5185 #ifdef WLAN_CONFIG_TELEMETRY_AGENT 5186 /** 5187 * dp_get_pdev_telemetry_stats- API to get pdev telemetry stats 5188 * @soc_hdl: soc handle 5189 * @pdev_id: id of pdev handle 5190 * @stats: pointer to pdev telemetry stats 5191 * 5192 * Return: QDF_STATUS_SUCCESS: Success 5193 * QDF_STATUS_E_FAILURE: Error 5194 */ 5195 QDF_STATUS 5196 dp_get_pdev_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5197 struct cdp_pdev_telemetry_stats *stats); 5198 5199 /** 5200 * dp_get_peer_telemetry_stats() - API to get peer telemetry stats 5201 * @soc_hdl: soc handle 5202 * @addr: peer mac 5203 * @stats: pointer to peer telemetry stats 5204 * 5205 * Return: QDF_STATUS_SUCCESS: Success 5206 * QDF_STATUS_E_FAILURE: Error 5207 */ 5208 QDF_STATUS 5209 dp_get_peer_telemetry_stats(struct cdp_soc_t *soc_hdl, uint8_t *addr, 5210 struct cdp_peer_telemetry_stats *stats); 5211 5212 /** 5213 * dp_get_peer_deter_stats() - API to get peer deterministic stats 5214 * @soc_hdl: soc handle 5215 * @vdev_id: id of vdev handle 5216 * @addr: peer mac 5217 * @stats: pointer to peer deterministic stats 5218 * 5219 * Return: QDF_STATUS_SUCCESS: Success 5220 * QDF_STATUS_E_FAILURE: Error 5221 */ 5222 QDF_STATUS 5223 dp_get_peer_deter_stats(struct cdp_soc_t *soc_hdl, 5224 uint8_t vdev_id, 5225 uint8_t *addr, 5226 struct cdp_peer_deter_stats *stats); 5227 5228 /** 5229 * dp_get_pdev_deter_stats() - API to get pdev deterministic stats 5230 * @soc_hdl: soc handle 5231 * @pdev_id: id of pdev handle 5232 * @stats: pointer to pdev deterministic stats 5233 * 5234 * Return: QDF_STATUS_SUCCESS: Success 5235 * QDF_STATUS_E_FAILURE: Error 5236 */ 5237 QDF_STATUS 5238 dp_get_pdev_deter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5239 struct cdp_pdev_deter_stats *stats); 5240 5241 /** 5242 * dp_update_pdev_chan_util_stats() - API to update channel utilization stats 5243 * @soc_hdl: soc handle 5244 * @pdev_id: id of pdev handle 5245 * @ch_util: Pointer to channel util stats 5246 * 5247 * Return: QDF_STATUS_SUCCESS: Success 5248 * QDF_STATUS_E_FAILURE: Error 5249 */ 5250 QDF_STATUS 5251 dp_update_pdev_chan_util_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5252 struct cdp_pdev_chan_util_stats *ch_util); 5253 #endif /* WLAN_CONFIG_TELEMETRY_AGENT */ 5254 5255 #ifdef CONNECTIVITY_PKTLOG 5256 /** 5257 * dp_tx_send_pktlog() - send tx packet log 5258 * @soc: soc handle 5259 * @pdev: pdev handle 5260 * @tx_desc: TX software descriptor 5261 * @nbuf: nbuf 5262 * @status: status of tx packet 5263 * 5264 * This function is used to send tx packet for logging 5265 * 5266 * Return: None 5267 * 5268 */ 5269 static inline dp_tx_send_pktlog(struct dp_soc * soc,struct dp_pdev * pdev,struct dp_tx_desc_s * tx_desc,qdf_nbuf_t nbuf,enum qdf_dp_tx_rx_status status)5270 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5271 struct dp_tx_desc_s *tx_desc, 5272 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5273 { 5274 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_tx_packetdump_cb; 5275 5276 if (qdf_unlikely(packetdump_cb) && 5277 dp_tx_frm_std == tx_desc->frm_type) { 5278 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 5279 tx_desc->vdev_id, nbuf, status, QDF_TX_DATA_PKT); 5280 } 5281 } 5282 5283 /** 5284 * dp_rx_send_pktlog() - send rx packet log 5285 * @soc: soc handle 5286 * @pdev: pdev handle 5287 * @nbuf: nbuf 5288 * @status: status of rx packet 5289 * 5290 * This function is used to send rx packet for logging 5291 * 5292 * Return: None 5293 * 5294 */ 5295 static inline dp_rx_send_pktlog(struct dp_soc * soc,struct dp_pdev * pdev,qdf_nbuf_t nbuf,enum qdf_dp_tx_rx_status status)5296 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5297 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5298 { 5299 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb; 5300 5301 if (qdf_unlikely(packetdump_cb)) { 5302 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 5303 QDF_NBUF_CB_RX_VDEV_ID(nbuf), 5304 nbuf, status, QDF_RX_DATA_PKT); 5305 } 5306 } 5307 5308 /** 5309 * dp_rx_err_send_pktlog() - send rx error packet log 5310 * @soc: soc handle 5311 * @pdev: pdev handle 5312 * @mpdu_desc_info: MPDU descriptor info 5313 * @nbuf: nbuf 5314 * @status: status of rx packet 5315 * @set_pktlen: weither to set packet length 5316 * 5317 * This API should only be called when we have not removed 5318 * Rx TLV from head, and head is pointing to rx_tlv 5319 * 5320 * This function is used to send rx packet from error path 5321 * for logging for which rx packet tlv is not removed. 5322 * 5323 * Return: None 5324 * 5325 */ 5326 static inline dp_rx_err_send_pktlog(struct dp_soc * soc,struct dp_pdev * pdev,struct hal_rx_mpdu_desc_info * mpdu_desc_info,qdf_nbuf_t nbuf,enum qdf_dp_tx_rx_status status,bool set_pktlen)5327 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5328 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 5329 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status, 5330 bool set_pktlen) 5331 { 5332 ol_txrx_pktdump_cb packetdump_cb = pdev->dp_rx_packetdump_cb; 5333 qdf_size_t skip_size; 5334 uint16_t msdu_len, nbuf_len; 5335 uint8_t *rx_tlv_hdr; 5336 struct hal_rx_msdu_metadata msdu_metadata; 5337 uint16_t buf_size; 5338 5339 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx); 5340 5341 if (qdf_unlikely(packetdump_cb)) { 5342 rx_tlv_hdr = qdf_nbuf_data(nbuf); 5343 nbuf_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, 5344 rx_tlv_hdr); 5345 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, 5346 &msdu_metadata); 5347 5348 if (mpdu_desc_info->bar_frame || 5349 (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_FRAGMENT)) 5350 skip_size = soc->rx_pkt_tlv_size; 5351 else 5352 skip_size = soc->rx_pkt_tlv_size + 5353 msdu_metadata.l3_hdr_pad; 5354 5355 if (set_pktlen) { 5356 msdu_len = nbuf_len + skip_size; 5357 qdf_nbuf_set_pktlen(nbuf, qdf_min(msdu_len, buf_size)); 5358 } 5359 5360 qdf_nbuf_pull_head(nbuf, skip_size); 5361 packetdump_cb((ol_txrx_soc_handle)soc, pdev->pdev_id, 5362 QDF_NBUF_CB_RX_VDEV_ID(nbuf), 5363 nbuf, status, QDF_RX_DATA_PKT); 5364 qdf_nbuf_push_head(nbuf, skip_size); 5365 } 5366 } 5367 5368 #else 5369 static inline dp_tx_send_pktlog(struct dp_soc * soc,struct dp_pdev * pdev,struct dp_tx_desc_s * tx_desc,qdf_nbuf_t nbuf,enum qdf_dp_tx_rx_status status)5370 void dp_tx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5371 struct dp_tx_desc_s *tx_desc, 5372 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5373 { 5374 } 5375 5376 static inline dp_rx_send_pktlog(struct dp_soc * soc,struct dp_pdev * pdev,qdf_nbuf_t nbuf,enum qdf_dp_tx_rx_status status)5377 void dp_rx_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5378 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status) 5379 { 5380 } 5381 5382 static inline dp_rx_err_send_pktlog(struct dp_soc * soc,struct dp_pdev * pdev,struct hal_rx_mpdu_desc_info * mpdu_desc_info,qdf_nbuf_t nbuf,enum qdf_dp_tx_rx_status status,bool set_pktlen)5383 void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev, 5384 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 5385 qdf_nbuf_t nbuf, enum qdf_dp_tx_rx_status status, 5386 bool set_pktlen) 5387 { 5388 } 5389 #endif 5390 5391 /** 5392 * dp_pdev_update_fast_rx_flag() - Update Fast rx flag for a PDEV 5393 * @soc : Data path soc handle 5394 * @pdev : PDEV handle 5395 * 5396 * Return: None 5397 */ 5398 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev); 5399 5400 #ifdef FEATURE_DIRECT_LINK 5401 /** 5402 * dp_setup_direct_link_refill_ring(): Setup direct link refill ring for pdev 5403 * @soc_hdl: DP SOC handle 5404 * @pdev_id: pdev id 5405 * 5406 * Return: Handle to SRNG 5407 */ 5408 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5409 uint8_t pdev_id); 5410 5411 /** 5412 * dp_destroy_direct_link_refill_ring(): Destroy direct link refill ring for 5413 * pdev 5414 * @soc_hdl: DP SOC handle 5415 * @pdev_id: pdev id 5416 * 5417 * Return: None 5418 */ 5419 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5420 uint8_t pdev_id); 5421 #else 5422 static inline dp_setup_direct_link_refill_ring(struct cdp_soc_t * soc_hdl,uint8_t pdev_id)5423 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5424 uint8_t pdev_id) 5425 { 5426 return NULL; 5427 } 5428 5429 static inline dp_destroy_direct_link_refill_ring(struct cdp_soc_t * soc_hdl,uint8_t pdev_id)5430 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 5431 uint8_t pdev_id) 5432 { 5433 } 5434 #endif 5435 5436 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY 5437 static inline dp_cfg_event_record(struct dp_soc * soc,enum dp_cfg_event_type event,union dp_cfg_event_desc * cfg_event_desc)5438 void dp_cfg_event_record(struct dp_soc *soc, 5439 enum dp_cfg_event_type event, 5440 union dp_cfg_event_desc *cfg_event_desc) 5441 { 5442 struct dp_cfg_event_history *cfg_event_history = 5443 &soc->cfg_event_history; 5444 struct dp_cfg_event *entry; 5445 uint32_t idx; 5446 uint16_t slot; 5447 5448 dp_get_frag_hist_next_atomic_idx(&cfg_event_history->index, &idx, 5449 &slot, 5450 DP_CFG_EVT_HIST_SLOT_SHIFT, 5451 DP_CFG_EVT_HIST_PER_SLOT_MAX, 5452 DP_CFG_EVT_HISTORY_SIZE); 5453 5454 entry = &cfg_event_history->entry[slot][idx]; 5455 5456 entry->timestamp = qdf_get_log_timestamp(); 5457 entry->type = event; 5458 qdf_mem_copy(&entry->event_desc, cfg_event_desc, 5459 sizeof(entry->event_desc)); 5460 } 5461 5462 static inline void dp_cfg_event_record_vdev_evt(struct dp_soc * soc,enum dp_cfg_event_type event,struct dp_vdev * vdev)5463 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5464 struct dp_vdev *vdev) 5465 { 5466 union dp_cfg_event_desc cfg_evt_desc = {0}; 5467 struct dp_vdev_attach_detach_desc *vdev_evt = 5468 &cfg_evt_desc.vdev_evt; 5469 5470 if (qdf_unlikely(event != DP_CFG_EVENT_VDEV_ATTACH && 5471 event != DP_CFG_EVENT_VDEV_UNREF_DEL && 5472 event != DP_CFG_EVENT_VDEV_DETACH)) { 5473 qdf_assert_always(0); 5474 return; 5475 } 5476 5477 vdev_evt->vdev = vdev; 5478 vdev_evt->vdev_id = vdev->vdev_id; 5479 vdev_evt->ref_count = qdf_atomic_read(&vdev->ref_cnt); 5480 vdev_evt->mac_addr = vdev->mac_addr; 5481 5482 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5483 } 5484 5485 static inline void dp_cfg_event_record_peer_evt(struct dp_soc * soc,enum dp_cfg_event_type event,struct dp_peer * peer,struct dp_vdev * vdev,uint8_t is_reuse)5486 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5487 struct dp_peer *peer, struct dp_vdev *vdev, 5488 uint8_t is_reuse) 5489 { 5490 union dp_cfg_event_desc cfg_evt_desc = {0}; 5491 struct dp_peer_cmn_ops_desc *peer_evt = &cfg_evt_desc.peer_cmn_evt; 5492 5493 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_CREATE && 5494 event != DP_CFG_EVENT_PEER_DELETE && 5495 event != DP_CFG_EVENT_PEER_UNREF_DEL)) { 5496 qdf_assert_always(0); 5497 return; 5498 } 5499 5500 peer_evt->peer = peer; 5501 peer_evt->vdev = vdev; 5502 peer_evt->vdev_id = vdev->vdev_id; 5503 peer_evt->is_reuse = is_reuse; 5504 peer_evt->peer_ref_count = qdf_atomic_read(&peer->ref_cnt); 5505 peer_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt); 5506 peer_evt->mac_addr = peer->mac_addr; 5507 peer_evt->vdev_mac_addr = vdev->mac_addr; 5508 5509 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5510 } 5511 5512 static inline void dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc * soc,enum dp_cfg_event_type event,struct dp_peer * mld_peer,struct dp_peer * link_peer,uint8_t idx,uint8_t result)5513 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc, 5514 enum dp_cfg_event_type event, 5515 struct dp_peer *mld_peer, 5516 struct dp_peer *link_peer, 5517 uint8_t idx, uint8_t result) 5518 { 5519 union dp_cfg_event_desc cfg_evt_desc = {0}; 5520 struct dp_mlo_add_del_link_desc *mlo_link_delink_evt = 5521 &cfg_evt_desc.mlo_link_delink_evt; 5522 5523 if (qdf_unlikely(event != DP_CFG_EVENT_MLO_ADD_LINK && 5524 event != DP_CFG_EVENT_MLO_DEL_LINK)) { 5525 qdf_assert_always(0); 5526 return; 5527 } 5528 5529 mlo_link_delink_evt->link_peer = link_peer; 5530 mlo_link_delink_evt->mld_peer = mld_peer; 5531 mlo_link_delink_evt->link_mac_addr = link_peer->mac_addr; 5532 mlo_link_delink_evt->mld_mac_addr = mld_peer->mac_addr; 5533 mlo_link_delink_evt->num_links = mld_peer->num_links; 5534 mlo_link_delink_evt->action_result = result; 5535 mlo_link_delink_evt->idx = idx; 5536 5537 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5538 } 5539 5540 static inline void dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc * soc,struct dp_peer * mld_peer,struct dp_vdev * prev_vdev,struct dp_vdev * new_vdev)5541 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc, 5542 struct dp_peer *mld_peer, 5543 struct dp_vdev *prev_vdev, 5544 struct dp_vdev *new_vdev) 5545 { 5546 union dp_cfg_event_desc cfg_evt_desc = {0}; 5547 struct dp_mlo_setup_vdev_update_desc *vdev_update_evt = 5548 &cfg_evt_desc.mlo_setup_vdev_update; 5549 5550 vdev_update_evt->mld_peer = mld_peer; 5551 vdev_update_evt->prev_vdev = prev_vdev; 5552 vdev_update_evt->new_vdev = new_vdev; 5553 5554 dp_cfg_event_record(soc, DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE, 5555 &cfg_evt_desc); 5556 } 5557 5558 static inline void dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc * soc,enum dp_cfg_event_type event,struct dp_peer * peer,uint8_t * mac_addr,uint8_t is_ml_peer,uint16_t peer_id,uint16_t ml_peer_id,uint16_t hw_peer_id,uint8_t vdev_id)5559 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc, 5560 enum dp_cfg_event_type event, 5561 struct dp_peer *peer, 5562 uint8_t *mac_addr, 5563 uint8_t is_ml_peer, 5564 uint16_t peer_id, uint16_t ml_peer_id, 5565 uint16_t hw_peer_id, uint8_t vdev_id) 5566 { 5567 union dp_cfg_event_desc cfg_evt_desc = {0}; 5568 struct dp_rx_peer_map_unmap_desc *peer_map_unmap_evt = 5569 &cfg_evt_desc.peer_map_unmap_evt; 5570 5571 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_MAP && 5572 event != DP_CFG_EVENT_PEER_UNMAP && 5573 event != DP_CFG_EVENT_MLO_PEER_MAP && 5574 event != DP_CFG_EVENT_MLO_PEER_UNMAP)) { 5575 qdf_assert_always(0); 5576 return; 5577 } 5578 5579 peer_map_unmap_evt->peer_id = peer_id; 5580 peer_map_unmap_evt->ml_peer_id = ml_peer_id; 5581 peer_map_unmap_evt->hw_peer_id = hw_peer_id; 5582 peer_map_unmap_evt->vdev_id = vdev_id; 5583 /* Peer may be NULL at times, but its not an issue. */ 5584 peer_map_unmap_evt->peer = peer; 5585 peer_map_unmap_evt->is_ml_peer = is_ml_peer; 5586 qdf_mem_copy(&peer_map_unmap_evt->mac_addr.raw, mac_addr, 5587 QDF_MAC_ADDR_SIZE); 5588 5589 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5590 } 5591 5592 static inline void dp_cfg_event_record_peer_setup_evt(struct dp_soc * soc,enum dp_cfg_event_type event,struct dp_peer * peer,struct dp_vdev * vdev,uint8_t vdev_id,struct cdp_peer_setup_info * peer_setup_info)5593 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc, 5594 enum dp_cfg_event_type event, 5595 struct dp_peer *peer, 5596 struct dp_vdev *vdev, 5597 uint8_t vdev_id, 5598 struct cdp_peer_setup_info *peer_setup_info) 5599 { 5600 union dp_cfg_event_desc cfg_evt_desc = {0}; 5601 struct dp_peer_setup_desc *peer_setup_evt = 5602 &cfg_evt_desc.peer_setup_evt; 5603 5604 if (qdf_unlikely(event != DP_CFG_EVENT_PEER_SETUP && 5605 event != DP_CFG_EVENT_MLO_SETUP)) { 5606 qdf_assert_always(0); 5607 return; 5608 } 5609 5610 peer_setup_evt->peer = peer; 5611 peer_setup_evt->vdev = vdev; 5612 if (vdev) 5613 peer_setup_evt->vdev_ref_count = qdf_atomic_read(&vdev->ref_cnt); 5614 peer_setup_evt->mac_addr = peer->mac_addr; 5615 peer_setup_evt->vdev_id = vdev_id; 5616 if (peer_setup_info) { 5617 peer_setup_evt->is_first_link = peer_setup_info->is_first_link; 5618 peer_setup_evt->is_primary_link = peer_setup_info->is_primary_link; 5619 qdf_mem_copy(peer_setup_evt->mld_mac_addr.raw, 5620 peer_setup_info->mld_peer_mac, 5621 QDF_MAC_ADDR_SIZE); 5622 } 5623 5624 dp_cfg_event_record(soc, event, &cfg_evt_desc); 5625 } 5626 #else 5627 5628 static inline void dp_cfg_event_record_vdev_evt(struct dp_soc * soc,enum dp_cfg_event_type event,struct dp_vdev * vdev)5629 dp_cfg_event_record_vdev_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5630 struct dp_vdev *vdev) 5631 { 5632 } 5633 5634 static inline void dp_cfg_event_record_peer_evt(struct dp_soc * soc,enum dp_cfg_event_type event,struct dp_peer * peer,struct dp_vdev * vdev,uint8_t is_reuse)5635 dp_cfg_event_record_peer_evt(struct dp_soc *soc, enum dp_cfg_event_type event, 5636 struct dp_peer *peer, struct dp_vdev *vdev, 5637 uint8_t is_reuse) 5638 { 5639 } 5640 5641 static inline void dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc * soc,enum dp_cfg_event_type event,struct dp_peer * mld_peer,struct dp_peer * link_peer,uint8_t idx,uint8_t result)5642 dp_cfg_event_record_mlo_link_delink_evt(struct dp_soc *soc, 5643 enum dp_cfg_event_type event, 5644 struct dp_peer *mld_peer, 5645 struct dp_peer *link_peer, 5646 uint8_t idx, uint8_t result) 5647 { 5648 } 5649 5650 static inline void dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc * soc,struct dp_peer * mld_peer,struct dp_vdev * prev_vdev,struct dp_vdev * new_vdev)5651 dp_cfg_event_record_mlo_setup_vdev_update_evt(struct dp_soc *soc, 5652 struct dp_peer *mld_peer, 5653 struct dp_vdev *prev_vdev, 5654 struct dp_vdev *new_vdev) 5655 { 5656 } 5657 5658 static inline void dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc * soc,enum dp_cfg_event_type event,struct dp_peer * peer,uint8_t * mac_addr,uint8_t is_ml_peer,uint16_t peer_id,uint16_t ml_peer_id,uint16_t hw_peer_id,uint8_t vdev_id)5659 dp_cfg_event_record_peer_map_unmap_evt(struct dp_soc *soc, 5660 enum dp_cfg_event_type event, 5661 struct dp_peer *peer, 5662 uint8_t *mac_addr, 5663 uint8_t is_ml_peer, 5664 uint16_t peer_id, uint16_t ml_peer_id, 5665 uint16_t hw_peer_id, uint8_t vdev_id) 5666 { 5667 } 5668 5669 static inline void dp_cfg_event_record_peer_setup_evt(struct dp_soc * soc,enum dp_cfg_event_type event,struct dp_peer * peer,struct dp_vdev * vdev,uint8_t vdev_id,struct cdp_peer_setup_info * peer_setup_info)5670 dp_cfg_event_record_peer_setup_evt(struct dp_soc *soc, 5671 enum dp_cfg_event_type event, 5672 struct dp_peer *peer, 5673 struct dp_vdev *vdev, 5674 uint8_t vdev_id, 5675 struct cdp_peer_setup_info *peer_setup_info) 5676 { 5677 } 5678 #endif 5679 5680 #ifndef WLAN_SOFTUMAC_SUPPORT 5681 /** 5682 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts 5683 * @txrx_soc: DP SOC handle 5684 * 5685 * Return: none 5686 */ 5687 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc); 5688 #endif 5689 5690 /** 5691 * dp_get_peer_stats()- Get peer stats 5692 * @peer: Datapath peer 5693 * @peer_stats: buffer for peer stats 5694 * 5695 * Return: none 5696 */ 5697 void dp_get_peer_stats(struct dp_peer *peer, 5698 struct cdp_peer_stats *peer_stats); 5699 5700 /** 5701 * dp_get_per_link_peer_stats()- Get per link peer stats 5702 * @peer: Datapath peer 5703 * @peer_stats: buffer for peer stats 5704 * @peer_type: Peer type 5705 * @num_link: Number of ML links 5706 * 5707 * Return: status success/failure 5708 */ 5709 QDF_STATUS dp_get_per_link_peer_stats(struct dp_peer *peer, 5710 struct cdp_peer_stats *peer_stats, 5711 enum cdp_peer_type peer_type, 5712 uint8_t num_link); 5713 /** 5714 * dp_get_peer_hw_link_id() - get peer hardware link id 5715 * @soc: soc handle 5716 * @pdev: data path pdev 5717 * 5718 * Return: link_id 5719 */ 5720 static inline int dp_get_peer_hw_link_id(struct dp_soc * soc,struct dp_pdev * pdev)5721 dp_get_peer_hw_link_id(struct dp_soc *soc, 5722 struct dp_pdev *pdev) 5723 { 5724 if (wlan_cfg_is_peer_link_stats_enabled(soc->wlan_cfg_ctx)) 5725 return ((soc->arch_ops.get_hw_link_id(pdev)) + 1); 5726 5727 return 0; 5728 } 5729 5730 #ifdef QCA_MULTIPASS_SUPPORT 5731 /** 5732 * dp_tx_remove_vlan_tag() - Remove 4 bytes of vlan tag 5733 * @vdev: DP vdev handle 5734 * @nbuf: network buffer 5735 * 5736 * Return: void 5737 */ 5738 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf); 5739 #endif 5740 5741 /** 5742 * dp_print_per_link_stats() - Print per link peer stats. 5743 * @soc_hdl: soc handle. 5744 * @vdev_id: vdev_id. 5745 * 5746 * Return: None. 5747 */ 5748 void dp_print_per_link_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id); 5749 5750 /** 5751 * dp_get_ring_stats_from_hal(): get hal level ring pointer values 5752 * @soc: DP_SOC handle 5753 * @srng: DP_SRNG handle 5754 * @ring_type: srng src/dst ring 5755 * @_tailp: pointer to tail of ring 5756 * @_headp: pointer to head of ring 5757 * @_hw_headp: pointer to head of ring in HW 5758 * @_hw_tailp: pointer to tail of ring in HW 5759 * 5760 * Return: void 5761 */ 5762 static inline void dp_get_ring_stats_from_hal(struct dp_soc * soc,struct dp_srng * srng,enum hal_ring_type ring_type,uint32_t * _tailp,uint32_t * _headp,int32_t * _hw_headp,int32_t * _hw_tailp)5763 dp_get_ring_stats_from_hal(struct dp_soc *soc, struct dp_srng *srng, 5764 enum hal_ring_type ring_type, 5765 uint32_t *_tailp, uint32_t *_headp, 5766 int32_t *_hw_headp, int32_t *_hw_tailp) 5767 { 5768 uint32_t tailp; 5769 uint32_t headp; 5770 int32_t hw_headp = -1; 5771 int32_t hw_tailp = -1; 5772 struct hal_soc *hal_soc; 5773 5774 if (soc && srng && srng->hal_srng) { 5775 hal_soc = (struct hal_soc *)soc->hal_soc; 5776 hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp); 5777 *_headp = headp; 5778 *_tailp = tailp; 5779 5780 hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_headp, 5781 &hw_tailp, ring_type); 5782 *_hw_headp = hw_headp; 5783 *_hw_tailp = hw_tailp; 5784 } 5785 } 5786 5787 /** 5788 * dp_update_vdev_be_basic_stats() - Update vdev basic stats 5789 * @txrx_peer: DP txrx_peer handle 5790 * @tgtobj: Pointer to buffer for be vdev stats 5791 * 5792 * Return: None 5793 */ 5794 void dp_update_vdev_be_basic_stats(struct dp_txrx_peer *txrx_peer, 5795 struct dp_vdev_stats *tgtobj); 5796 5797 /** 5798 * dp_update_vdev_basic_stats() - Update vdev basic stats 5799 * @txrx_peer: DP txrx_peer handle 5800 * @tgtobj: Pointer to buffer for vdev stats 5801 * 5802 * Return: None 5803 */ 5804 void dp_update_vdev_basic_stats(struct dp_txrx_peer *txrx_peer, 5805 struct cdp_vdev_stats *tgtobj); 5806 5807 /** 5808 * dp_get_vdev_stats_for_unmap_peer_legacy() - Update vdev basic stats 5809 * @vdev: vdev associated with the peer 5810 * @peer: unmapped peer 5811 * 5812 * Return: None 5813 */ 5814 void dp_get_vdev_stats_for_unmap_peer_legacy(struct dp_vdev *vdev, 5815 struct dp_peer *peer); 5816 5817 #ifdef WLAN_FEATURE_TX_LATENCY_STATS 5818 /** 5819 * dp_h2t_tx_latency_stats_cfg_msg_send(): send HTT message for tx latency 5820 * stats config to FW 5821 * @dp_soc: DP SOC handle 5822 * @vdev_id: vdev id 5823 * @enable: indicates enablement of the feature 5824 * @period: statistical period for transmit latency in terms of ms 5825 * @granularity: granularity for tx latency distribution 5826 * 5827 * return: QDF STATUS 5828 */ 5829 QDF_STATUS 5830 dp_h2t_tx_latency_stats_cfg_msg_send(struct dp_soc *dp_soc, uint16_t vdev_id, 5831 bool enable, uint32_t period, 5832 uint32_t granularity); 5833 5834 /** 5835 * dp_tx_latency_stats_update_cca() - update transmit latency statistics for 5836 * CCA 5837 * @soc: dp soc handle 5838 * @peer_id: peer id 5839 * @granularity: granularity of distribution 5840 * @distribution: distribution of transmit latency statistics 5841 * @avg: average of CCA latency(in microseconds) within a cycle 5842 * 5843 * Return: None 5844 */ 5845 void 5846 dp_tx_latency_stats_update_cca(struct dp_soc *soc, uint16_t peer_id, 5847 uint32_t granularity, uint32_t *distribution, 5848 uint32_t avg); 5849 5850 /** 5851 * dp_tx_latency_stats_report() - report transmit latency statistics for each 5852 * vdev of specified pdev 5853 * @soc: dp soc handle 5854 * @pdev: dp pdev Handle 5855 * 5856 * Return: None 5857 */ 5858 void dp_tx_latency_stats_report(struct dp_soc *soc, struct dp_pdev *pdev); 5859 #endif 5860 #ifdef WLAN_FEATURE_SSR_DRIVER_DUMP 5861 /** 5862 * dp_ssr_dump_srng_register() - Register DP ring with SSR dump. 5863 * @region_name: ring name to register. 5864 * @srng: dp srng handler. 5865 * @num: Ring number 5866 * 5867 * num = -1. If there is only single ring 5868 * num = ring number. If there are multiple rings pass ring number. 5869 * e.g. in case of REO pass reo number (0..n). 5870 * 5871 * Return: None. 5872 */ 5873 void 5874 dp_ssr_dump_srng_register(char *region_name, struct dp_srng *srng, int num); 5875 5876 /** 5877 * dp_ssr_dump_srng_unregister() - Unegister DP ring with SSR dump. 5878 * @region_name: ring name to unregister. 5879 * @num: Ring number 5880 * 5881 * num = -1. If there is only single ring 5882 * num = ring number. If there are multiple rings pass ring number. 5883 * e.g. in case of REO pass reo number (0..n). 5884 * 5885 * Return: None. 5886 */ 5887 void dp_ssr_dump_srng_unregister(char *region_name, int num); 5888 5889 /** 5890 * dp_ssr_dump_pdev_register() - Register DP Pdev with SSR dump. 5891 * @pdev: Pdev handle to register. 5892 * @pdev_id: Pdev ID. 5893 * 5894 * Return: None. 5895 */ 5896 void dp_ssr_dump_pdev_register(struct dp_pdev *pdev, uint8_t pdev_id); 5897 5898 /** 5899 * dp_ssr_dump_pdev_unregister() - Unregister DP Pdev with SSR dump. 5900 * @pdev_id: Pdev ID. 5901 * 5902 * Return: None. 5903 */ 5904 void dp_ssr_dump_pdev_unregister(uint8_t pdev_id); 5905 #else 5906 static inline dp_ssr_dump_srng_register(char * region_name,struct dp_srng * srng,int num)5907 void dp_ssr_dump_srng_register(char *region_name, struct dp_srng *srng, int num) 5908 { 5909 } 5910 5911 static inline dp_ssr_dump_srng_unregister(char * region_name,int num)5912 void dp_ssr_dump_srng_unregister(char *region_name, int num) 5913 { 5914 } 5915 5916 static inline dp_ssr_dump_pdev_register(struct dp_pdev * pdev,uint8_t pdev_id)5917 void dp_ssr_dump_pdev_register(struct dp_pdev *pdev, uint8_t pdev_id) 5918 { 5919 } 5920 5921 static inline dp_ssr_dump_pdev_unregister(uint8_t pdev_id)5922 void dp_ssr_dump_pdev_unregister(uint8_t pdev_id) 5923 { 5924 } 5925 #endif 5926 5927 /** 5928 * dp_get_peer_vdev_roaming_in_progress() - Check if peer's vdev is in roaming 5929 * state. 5930 * @peer: DP peer handle 5931 * 5932 * Return: true if the peer's vdev is in roaming state 5933 * else false. 5934 */ 5935 bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer); 5936 5937 #endif /* #ifndef _DP_INTERNAL_H_ */ 5938