1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <wlan_ipa_obj_mgmt_api.h> 21 #include <qdf_types.h> 22 #include <qdf_lock.h> 23 #include <qdf_net_types.h> 24 #include <qdf_lro.h> 25 #include <qdf_module.h> 26 #include <hal_hw_headers.h> 27 #include <hal_api.h> 28 #include <hif.h> 29 #include <htt.h> 30 #include <wdi_event.h> 31 #include <queue.h> 32 #include "dp_types.h" 33 #include "dp_rings.h" 34 #include "dp_internal.h" 35 #include "dp_tx.h" 36 #include "dp_tx_desc.h" 37 #include "dp_rx.h" 38 #ifdef DP_RATETABLE_SUPPORT 39 #include "dp_ratetable.h" 40 #endif 41 #include <cdp_txrx_handle.h> 42 #include <wlan_cfg.h> 43 #include <wlan_utility.h> 44 #include "cdp_txrx_cmn_struct.h" 45 #include "cdp_txrx_stats_struct.h" 46 #include "cdp_txrx_cmn_reg.h" 47 #include <qdf_util.h> 48 #include "dp_peer.h" 49 #include "htt_stats.h" 50 #include "dp_htt.h" 51 #ifdef WLAN_SUPPORT_RX_FISA 52 #include <wlan_dp_fisa_rx.h> 53 #endif 54 #include "htt_ppdu_stats.h" 55 #include "qdf_mem.h" /* qdf_mem_malloc,free */ 56 #include "cfg_ucfg_api.h" 57 #include <wlan_module_ids.h> 58 #ifdef QCA_MULTIPASS_SUPPORT 59 
#include <enet.h>
#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else

/**
 * cdp_dump_flow_pool_info() - no-op stand-in when flow control v2 is disabled
 * @soc: CDP soc handle (unused)
 *
 * Return: none
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef WLAN_SUPPORT_MSCS
#include "dp_mscs.h"
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
#include "dp_mesh_latency.h"
#endif
#ifdef WLAN_SUPPORT_SCS
#include "dp_scs.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <wlan_dp_swlm.h>
#endif
#ifdef WLAN_DP_PROFILE_SUPPORT
#include <wlan_dp_main.h>
#endif
#ifdef CONFIG_SAWF_DEF_QUEUES
#include "dp_sawf.h"
#endif
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
#include "dp_rx_tag.h"
#endif
#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
#include <target_if_dp.h>
#endif
#include "qdf_ssr_driver_dump.h"

#ifdef WLAN_SUPPORT_DPDK
#include <dp_dpdk.h>
#endif

#ifdef QCA_DP_ENABLE_TX_COMP_RING4
#define TXCOMP_RING4_NUM 3
#else
#define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
#endif

#if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
#define SET_PEER_REF_CNT_ONE(_peer) \
	qdf_atomic_set(&(_peer)->ref_cnt, 1)
#else
#define SET_PEER_REF_CNT_ONE(_peer)
#endif

#ifdef WLAN_SYSFS_DP_STATS
/* sysfs event wait time for firmware stat request unit milliseconds */
#define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
#endif

#ifdef QCA_DP_TX_FW_METADATA_V2
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#endif
#define MLD_MODE_INVALID 0xFF

QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);

QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);

void dp_configure_arch_ops(struct dp_soc *soc);
qdf_size_t dp_get_soc_context_size(uint16_t device_id);

/*
 * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
 * If the buffer size is exceeding this size limit,
 * dp_txrx_get_peer_stats is to be used instead.
 */
QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
			(sizeof(cdp_peer_stats_param_t) <= 16));

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
 * also should be updated accordingly
 */
QDF_COMPILE_TIME_ASSERT(num_intr_grps,
			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);

/*
 * HIF_EVENT_HIST_MAX should always be power of 2
 */
QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
 * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
 */
QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
			WLAN_CFG_INT_NUM_CONTEXTS);

static void dp_soc_unset_qref_debug_list(struct dp_soc *soc);
static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);

static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
static void dp_pdev_srng_free(struct dp_pdev *pdev);
static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);

static inline
QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
				struct cdp_pdev_attach_params *params);

static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);

static QDF_STATUS
dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
		   HTC_HANDLE htc_handle,
		   qdf_device_t qdf_osdev,
		   uint8_t pdev_id);

static QDF_STATUS
dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);

static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);

static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
				       uint8_t pdev_id,
				       int force);
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	      struct cdp_soc_attach_params *params);
static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
					      uint8_t vdev_id,
					      uint8_t *peer_mac_addr,
					      enum cdp_peer_type peer_type);
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
				       uint8_t vdev_id,
				       uint8_t *peer_mac, uint32_t bitmap,
				       enum cdp_peer_type peer_type);
static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
				bool unmap_only,
				bool mlo_peers_only);
#ifdef ENABLE_VERBOSE_DEBUG
bool is_dp_verbose_debug_enabled;
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			   bool enable);
static inline void
dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
static inline void
dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
static QDF_STATUS dp_umac_reset_action_trigger_recovery(struct dp_soc *soc);
static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc);
static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc);
static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc);
#endif

#define MON_VDEV_TIMER_INIT 0x1
#define MON_VDEV_TIMER_RUNNING 0x2

#define DP_MCS_LENGTH (6*MAX_MCS)

#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL 0xb
#define RX_RING_MASK_VAL 0x7
#else
#define TX_RING_MASK_VAL 0xF
#define RX_RING_MASK_VAL 0xF
#endif

#define STR_MAXLEN 64

#define RNG_ERR "SRNG setup failed for"

/**
 * enum dp_stats_type - Select the type of statistics
 * @STATS_FW: Firmware-based statistic
 * @STATS_HOST: Host-based statistic
 * @STATS_TYPE_MAX: maximum enumeration
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: statistic is not available
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID = -1,
};

/*
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
#ifndef WLAN_SOFTUMAC_SUPPORT
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM,
TXRX_HOST_STATS_INVALID}, 312 {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID}, 313 {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID}, 314 {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID}, 315 {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID}, 316 {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID}, 317 {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID}, 318 {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID}, 319 {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID}, 320 {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID}, 321 {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID}, 322 {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID}, 323 {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID}, 324 /* Last ENUM for HTT FW STATS */ 325 {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID}, 326 {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS}, 327 {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS}, 328 {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS}, 329 {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS}, 330 {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS}, 331 {TXRX_FW_STATS_INVALID, TXRX_AST_STATS}, 332 {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS}, 333 {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS}, 334 {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS}, 335 {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS}, 336 {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS}, 337 {TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS}, 338 {TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS}, 339 {TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS}, 340 {TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS}, 341 {TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP}, 342 {TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP}, 343 {TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS}, 344 {HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}, 345 {HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}, 346 {TXRX_FW_STATS_INVALID, TXRX_PEER_STATS}, 347 }; 348 #else 349 const int 
dp_stats_mapping_table[][STATS_TYPE_MAX] = { 350 {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID}, 351 {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID}, 352 {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID}, 353 {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID}, 354 {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID}, 355 {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID}, 356 {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID}, 357 {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID}, 358 {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID}, 359 {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID}, 360 {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID}, 361 {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID}, 362 {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID}, 363 {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID}, 364 {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID}, 365 {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID}, 366 {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID}, 367 {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID}, 368 {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID}, 369 /* Last ENUM for HTT FW STATS */ 370 {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID}, 371 {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS}, 372 {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS}, 373 {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS}, 374 {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS}, 375 {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS}, 376 {TXRX_FW_STATS_INVALID, TXRX_AST_STATS}, 377 {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS}, 378 {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS}, 379 {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID}, 380 {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS}, 381 {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS}, 382 {TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS}, 383 {TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS}, 384 {TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS}, 385 
{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS}, 386 {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID}, 387 {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID}, 388 {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID}, 389 {HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}, 390 {HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID} 391 }; 392 #endif 393 394 /* MCL specific functions */ 395 #if defined(DP_CON_MON) 396 397 #ifdef IPA_OFFLOAD 398 /** 399 * dp_get_num_rx_contexts() - get number of RX contexts 400 * @soc_hdl: cdp opaque soc handle 401 * 402 * Return: number of RX contexts 403 */ 404 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl) 405 { 406 int num_rx_contexts; 407 uint32_t reo_ring_map; 408 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 409 410 reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx); 411 412 switch (soc->arch_id) { 413 case CDP_ARCH_TYPE_BE: 414 /* 2 REO rings are used for IPA */ 415 reo_ring_map &= ~(BIT(3) | BIT(7)); 416 417 break; 418 case CDP_ARCH_TYPE_LI: 419 /* 1 REO ring is used for IPA */ 420 reo_ring_map &= ~BIT(3); 421 break; 422 default: 423 dp_err("unknown arch_id 0x%x", soc->arch_id); 424 QDF_BUG(0); 425 } 426 /* 427 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled 428 * in future 429 */ 430 num_rx_contexts = qdf_get_hweight32(reo_ring_map); 431 432 return num_rx_contexts; 433 } 434 #else 435 #ifdef WLAN_SOFTUMAC_SUPPORT 436 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl) 437 { 438 uint32_t rx_rings_config; 439 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 440 441 rx_rings_config = wlan_cfg_get_rx_rings_mapping(soc->wlan_cfg_ctx); 442 /* 443 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled 444 * in future 445 */ 446 return qdf_get_hweight32(rx_rings_config); 447 } 448 #else 449 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl) 450 { 451 int num_rx_contexts; 452 uint32_t reo_config; 453 struct dp_soc *soc = (struct dp_soc 
*)soc_hdl; 454 455 reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx); 456 /* 457 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled 458 * in future 459 */ 460 num_rx_contexts = qdf_get_hweight32(reo_config); 461 462 return num_rx_contexts; 463 } 464 #endif /* WLAN_SOFTUMAC_SUPPORT */ 465 #endif 466 467 #endif 468 469 #ifdef FEATURE_MEC 470 void dp_peer_mec_flush_entries(struct dp_soc *soc) 471 { 472 unsigned int index; 473 struct dp_mec_entry *mecentry, *mecentry_next; 474 475 TAILQ_HEAD(, dp_mec_entry) free_list; 476 TAILQ_INIT(&free_list); 477 478 if (!soc->mec_hash.mask) 479 return; 480 481 if (!soc->mec_hash.bins) 482 return; 483 484 if (!qdf_atomic_read(&soc->mec_cnt)) 485 return; 486 487 qdf_spin_lock_bh(&soc->mec_lock); 488 for (index = 0; index <= soc->mec_hash.mask; index++) { 489 if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) { 490 TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index], 491 hash_list_elem, mecentry_next) { 492 dp_peer_mec_detach_entry(soc, mecentry, &free_list); 493 } 494 } 495 } 496 qdf_spin_unlock_bh(&soc->mec_lock); 497 498 dp_peer_mec_free_list(soc, &free_list); 499 } 500 501 /** 502 * dp_print_mec_stats() - Dump MEC entries in table 503 * @soc: Datapath soc handle 504 * 505 * Return: none 506 */ 507 static void dp_print_mec_stats(struct dp_soc *soc) 508 { 509 int i; 510 uint32_t index; 511 struct dp_mec_entry *mecentry = NULL, *mec_list; 512 uint32_t num_entries = 0; 513 514 DP_PRINT_STATS("MEC Stats:"); 515 DP_PRINT_STATS(" Entries Added = %d", soc->stats.mec.added); 516 DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.mec.deleted); 517 518 if (!qdf_atomic_read(&soc->mec_cnt)) 519 return; 520 521 mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY); 522 if (!mec_list) { 523 dp_peer_warn("%pK: failed to allocate mec_list", soc); 524 return; 525 } 526 527 DP_PRINT_STATS("MEC Table:"); 528 for (index = 0; index <= soc->mec_hash.mask; index++) { 529 qdf_spin_lock_bh(&soc->mec_lock); 530 
if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) { 531 qdf_spin_unlock_bh(&soc->mec_lock); 532 continue; 533 } 534 535 TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], 536 hash_list_elem) { 537 qdf_mem_copy(&mec_list[num_entries], mecentry, 538 sizeof(*mecentry)); 539 num_entries++; 540 } 541 qdf_spin_unlock_bh(&soc->mec_lock); 542 } 543 544 if (!num_entries) { 545 qdf_mem_free(mec_list); 546 return; 547 } 548 549 for (i = 0; i < num_entries; i++) { 550 DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT 551 " is_active = %d pdev_id = %d vdev_id = %d", 552 i, 553 QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw), 554 mec_list[i].is_active, 555 mec_list[i].pdev_id, 556 mec_list[i].vdev_id); 557 } 558 qdf_mem_free(mec_list); 559 } 560 #else 561 static void dp_print_mec_stats(struct dp_soc *soc) 562 { 563 } 564 #endif 565 566 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl, 567 uint8_t vdev_id, 568 uint8_t *peer_mac, 569 uint8_t *mac_addr, 570 enum cdp_txrx_ast_entry_type type, 571 uint32_t flags) 572 { 573 int ret = -1; 574 QDF_STATUS status = QDF_STATUS_SUCCESS; 575 struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl, 576 peer_mac, 0, vdev_id, 577 DP_MOD_ID_CDP); 578 579 if (!peer) { 580 dp_peer_debug("Peer is NULL!"); 581 return ret; 582 } 583 584 status = dp_peer_add_ast((struct dp_soc *)soc_hdl, 585 peer, 586 mac_addr, 587 type, 588 flags); 589 if ((status == QDF_STATUS_SUCCESS) || 590 (status == QDF_STATUS_E_ALREADY) || 591 (status == QDF_STATUS_E_AGAIN)) 592 ret = 0; 593 594 dp_hmwds_ast_add_notify(peer, mac_addr, 595 type, status, false); 596 597 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 598 599 return ret; 600 } 601 602 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl, 603 uint8_t vdev_id, 604 uint8_t *peer_mac, 605 uint8_t *wds_macaddr, 606 uint32_t flags) 607 { 608 int status = -1; 609 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 610 struct dp_ast_entry *ast_entry = NULL; 611 struct dp_peer *peer; 612 613 if 
(soc->ast_offload_support) 614 return status; 615 616 peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl, 617 peer_mac, 0, vdev_id, 618 DP_MOD_ID_CDP); 619 620 if (!peer) { 621 dp_peer_debug("Peer is NULL!"); 622 return status; 623 } 624 625 qdf_spin_lock_bh(&soc->ast_lock); 626 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr, 627 peer->vdev->pdev->pdev_id); 628 629 if (ast_entry) { 630 status = dp_peer_update_ast(soc, 631 peer, 632 ast_entry, flags); 633 } 634 qdf_spin_unlock_bh(&soc->ast_lock); 635 636 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 637 638 return status; 639 } 640 641 /** 642 * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer 643 * @soc: Datapath SOC handle 644 * @peer: DP peer 645 * @arg: callback argument 646 * 647 * Return: None 648 */ 649 static void 650 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg) 651 { 652 struct dp_ast_entry *ast_entry = NULL; 653 struct dp_ast_entry *tmp_ast_entry; 654 655 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) { 656 if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) || 657 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) 658 dp_peer_del_ast(soc, ast_entry); 659 } 660 } 661 662 /** 663 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry 664 * @soc_hdl: Datapath SOC handle 665 * @wds_macaddr: WDS entry MAC Address 666 * @peer_mac_addr: WDS entry MAC Address 667 * @vdev_id: id of vdev handle 668 * 669 * Return: QDF_STATUS 670 */ 671 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl, 672 uint8_t *wds_macaddr, 673 uint8_t *peer_mac_addr, 674 uint8_t vdev_id) 675 { 676 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 677 struct dp_ast_entry *ast_entry = NULL; 678 struct dp_peer *peer; 679 struct dp_pdev *pdev; 680 struct dp_vdev *vdev; 681 682 if (soc->ast_offload_support) 683 return QDF_STATUS_E_FAILURE; 684 685 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP); 686 687 if (!vdev) 688 return 
QDF_STATUS_E_FAILURE; 689 690 pdev = vdev->pdev; 691 692 if (peer_mac_addr) { 693 peer = dp_peer_find_hash_find(soc, peer_mac_addr, 694 0, vdev->vdev_id, 695 DP_MOD_ID_CDP); 696 if (!peer) { 697 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 698 return QDF_STATUS_E_FAILURE; 699 } 700 701 qdf_spin_lock_bh(&soc->ast_lock); 702 dp_peer_reset_ast_entries(soc, peer, NULL); 703 qdf_spin_unlock_bh(&soc->ast_lock); 704 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 705 } else if (wds_macaddr) { 706 qdf_spin_lock_bh(&soc->ast_lock); 707 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr, 708 pdev->pdev_id); 709 710 if (ast_entry) { 711 if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) || 712 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) 713 dp_peer_del_ast(soc, ast_entry); 714 } 715 qdf_spin_unlock_bh(&soc->ast_lock); 716 } 717 718 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 719 return QDF_STATUS_SUCCESS; 720 } 721 722 /** 723 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry 724 * @soc_hdl: Datapath SOC handle 725 * @vdev_id: id of vdev object 726 * 727 * Return: QDF_STATUS 728 */ 729 static QDF_STATUS 730 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl, 731 uint8_t vdev_id) 732 { 733 struct dp_soc *soc = (struct dp_soc *) soc_hdl; 734 735 if (soc->ast_offload_support) 736 return QDF_STATUS_SUCCESS; 737 738 qdf_spin_lock_bh(&soc->ast_lock); 739 740 dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL, 741 DP_MOD_ID_CDP); 742 qdf_spin_unlock_bh(&soc->ast_lock); 743 744 return QDF_STATUS_SUCCESS; 745 } 746 747 /** 748 * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer 749 * @soc: Datapath SOC 750 * @peer: Datapath peer 751 * @arg: arg to callback 752 * 753 * Return: None 754 */ 755 static void 756 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg) 757 { 758 struct dp_ast_entry *ase = NULL; 759 struct dp_ast_entry *temp_ase; 760 761 DP_PEER_ITERATE_ASE_LIST(peer, 
ase, temp_ase) { 762 if ((ase->type == 763 CDP_TXRX_AST_TYPE_STATIC) || 764 (ase->type == 765 CDP_TXRX_AST_TYPE_SELF) || 766 (ase->type == 767 CDP_TXRX_AST_TYPE_STA_BSS)) 768 continue; 769 dp_peer_del_ast(soc, ase); 770 } 771 } 772 773 /** 774 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry 775 * @soc_hdl: Datapath SOC handle 776 * 777 * Return: None 778 */ 779 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl) 780 { 781 struct dp_soc *soc = (struct dp_soc *) soc_hdl; 782 783 qdf_spin_lock_bh(&soc->ast_lock); 784 785 dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL, 786 DP_MOD_ID_CDP); 787 788 qdf_spin_unlock_bh(&soc->ast_lock); 789 dp_peer_mec_flush_entries(soc); 790 } 791 792 #if defined(IPA_WDS_EASYMESH_FEATURE) && defined(FEATURE_AST) 793 /** 794 * dp_peer_send_wds_disconnect() - Send Disconnect event to IPA for each peer 795 * @soc: Datapath SOC 796 * @peer: Datapath peer 797 * 798 * Return: None 799 */ 800 static void 801 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer) 802 { 803 struct dp_ast_entry *ase = NULL; 804 struct dp_ast_entry *temp_ase; 805 806 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) { 807 if (ase->type == CDP_TXRX_AST_TYPE_WDS) { 808 soc->cdp_soc.ol_ops->peer_send_wds_disconnect(soc->ctrl_psoc, 809 ase->mac_addr.raw, 810 ase->vdev_id); 811 } 812 } 813 } 814 #elif defined(FEATURE_AST) 815 static void 816 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer) 817 { 818 } 819 #endif 820 821 /** 822 * dp_peer_check_ast_offload() - check ast offload support is enable or not 823 * @soc: soc handle 824 * 825 * Return: false in case of IPA and true/false in IPQ case 826 * 827 */ 828 #if defined(IPA_OFFLOAD) && defined(QCA_WIFI_QCN9224) 829 static inline bool dp_peer_check_ast_offload(struct dp_soc *soc) 830 { 831 return false; 832 } 833 #else 834 static inline bool dp_peer_check_ast_offload(struct dp_soc *soc) 835 { 836 if (soc->ast_offload_support) 837 return true; 
838 839 return false; 840 } 841 #endif 842 843 /** 844 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table 845 * and return ast entry information 846 * of first ast entry found in the 847 * table with given mac address 848 * @soc_hdl: data path soc handle 849 * @ast_mac_addr: AST entry mac address 850 * @ast_entry_info: ast entry information 851 * 852 * Return: true if ast entry found with ast_mac_addr 853 * false if ast entry not found 854 */ 855 static bool dp_peer_get_ast_info_by_soc_wifi3 856 (struct cdp_soc_t *soc_hdl, 857 uint8_t *ast_mac_addr, 858 struct cdp_ast_entry_info *ast_entry_info) 859 { 860 struct dp_ast_entry *ast_entry = NULL; 861 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 862 struct dp_peer *peer = NULL; 863 864 if (dp_peer_check_ast_offload(soc)) 865 return false; 866 867 qdf_spin_lock_bh(&soc->ast_lock); 868 869 ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr); 870 if ((!ast_entry) || 871 (ast_entry->delete_in_progress && !ast_entry->callback)) { 872 qdf_spin_unlock_bh(&soc->ast_lock); 873 return false; 874 } 875 876 peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id, 877 DP_MOD_ID_AST); 878 if (!peer) { 879 qdf_spin_unlock_bh(&soc->ast_lock); 880 return false; 881 } 882 883 ast_entry_info->type = ast_entry->type; 884 ast_entry_info->pdev_id = ast_entry->pdev_id; 885 ast_entry_info->vdev_id = ast_entry->vdev_id; 886 ast_entry_info->peer_id = ast_entry->peer_id; 887 qdf_mem_copy(&ast_entry_info->peer_mac_addr[0], 888 &peer->mac_addr.raw[0], 889 QDF_MAC_ADDR_SIZE); 890 dp_peer_unref_delete(peer, DP_MOD_ID_AST); 891 qdf_spin_unlock_bh(&soc->ast_lock); 892 return true; 893 } 894 895 /** 896 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table 897 * and return ast entry information 898 * if mac address and pdev_id matches 899 * @soc_hdl: data path soc handle 900 * @ast_mac_addr: AST entry mac address 901 * @pdev_id: pdev_id 902 * @ast_entry_info: ast entry information 903 * 904 * Return: true if 
ast entry found with ast_mac_addr 905 * false if ast entry not found 906 */ 907 static bool dp_peer_get_ast_info_by_pdevid_wifi3 908 (struct cdp_soc_t *soc_hdl, 909 uint8_t *ast_mac_addr, 910 uint8_t pdev_id, 911 struct cdp_ast_entry_info *ast_entry_info) 912 { 913 struct dp_ast_entry *ast_entry; 914 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 915 struct dp_peer *peer = NULL; 916 917 if (soc->ast_offload_support) 918 return false; 919 920 qdf_spin_lock_bh(&soc->ast_lock); 921 922 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, 923 pdev_id); 924 925 if ((!ast_entry) || 926 (ast_entry->delete_in_progress && !ast_entry->callback)) { 927 qdf_spin_unlock_bh(&soc->ast_lock); 928 return false; 929 } 930 931 peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id, 932 DP_MOD_ID_AST); 933 if (!peer) { 934 qdf_spin_unlock_bh(&soc->ast_lock); 935 return false; 936 } 937 938 ast_entry_info->type = ast_entry->type; 939 ast_entry_info->pdev_id = ast_entry->pdev_id; 940 ast_entry_info->vdev_id = ast_entry->vdev_id; 941 ast_entry_info->peer_id = ast_entry->peer_id; 942 qdf_mem_copy(&ast_entry_info->peer_mac_addr[0], 943 &peer->mac_addr.raw[0], 944 QDF_MAC_ADDR_SIZE); 945 dp_peer_unref_delete(peer, DP_MOD_ID_AST); 946 qdf_spin_unlock_bh(&soc->ast_lock); 947 return true; 948 } 949 950 /** 951 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table 952 * with given mac address 953 * @soc_handle: data path soc handle 954 * @mac_addr: AST entry mac address 955 * @callback: callback function to called on ast delete response from FW 956 * @cookie: argument to be passed to callback 957 * 958 * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete 959 * is sent 960 * QDF_STATUS_E_INVAL false if ast entry not found 961 */ 962 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle, 963 uint8_t *mac_addr, 964 txrx_ast_free_cb callback, 965 void *cookie) 966 967 { 968 struct dp_soc *soc = (struct dp_soc 
*)soc_handle; 969 struct dp_ast_entry *ast_entry = NULL; 970 txrx_ast_free_cb cb = NULL; 971 void *arg = NULL; 972 973 if (soc->ast_offload_support) 974 return -QDF_STATUS_E_INVAL; 975 976 qdf_spin_lock_bh(&soc->ast_lock); 977 ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr); 978 if (!ast_entry) { 979 qdf_spin_unlock_bh(&soc->ast_lock); 980 return -QDF_STATUS_E_INVAL; 981 } 982 983 if (ast_entry->callback) { 984 cb = ast_entry->callback; 985 arg = ast_entry->cookie; 986 } 987 988 ast_entry->callback = callback; 989 ast_entry->cookie = cookie; 990 991 /* 992 * if delete_in_progress is set AST delete is sent to target 993 * and host is waiting for response should not send delete 994 * again 995 */ 996 if (!ast_entry->delete_in_progress) 997 dp_peer_del_ast(soc, ast_entry); 998 999 qdf_spin_unlock_bh(&soc->ast_lock); 1000 if (cb) { 1001 cb(soc->ctrl_psoc, 1002 dp_soc_to_cdp_soc(soc), 1003 arg, 1004 CDP_TXRX_AST_DELETE_IN_PROGRESS); 1005 } 1006 return QDF_STATUS_SUCCESS; 1007 } 1008 1009 /** 1010 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash 1011 * table if mac address and pdev_id matches 1012 * @soc_handle: data path soc handle 1013 * @mac_addr: AST entry mac address 1014 * @pdev_id: pdev id 1015 * @callback: callback function to called on ast delete response from FW 1016 * @cookie: argument to be passed to callback 1017 * 1018 * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete 1019 * is sent 1020 * QDF_STATUS_E_INVAL false if ast entry not found 1021 */ 1022 1023 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle, 1024 uint8_t *mac_addr, 1025 uint8_t pdev_id, 1026 txrx_ast_free_cb callback, 1027 void *cookie) 1028 1029 { 1030 struct dp_soc *soc = (struct dp_soc *)soc_handle; 1031 struct dp_ast_entry *ast_entry; 1032 txrx_ast_free_cb cb = NULL; 1033 void *arg = NULL; 1034 1035 if (soc->ast_offload_support) 1036 return -QDF_STATUS_E_INVAL; 1037 1038 qdf_spin_lock_bh(&soc->ast_lock); 
1039 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id); 1040 1041 if (!ast_entry) { 1042 qdf_spin_unlock_bh(&soc->ast_lock); 1043 return -QDF_STATUS_E_INVAL; 1044 } 1045 1046 if (ast_entry->callback) { 1047 cb = ast_entry->callback; 1048 arg = ast_entry->cookie; 1049 } 1050 1051 ast_entry->callback = callback; 1052 ast_entry->cookie = cookie; 1053 1054 /* 1055 * if delete_in_progress is set AST delete is sent to target 1056 * and host is waiting for response should not sent delete 1057 * again 1058 */ 1059 if (!ast_entry->delete_in_progress) 1060 dp_peer_del_ast(soc, ast_entry); 1061 1062 qdf_spin_unlock_bh(&soc->ast_lock); 1063 1064 if (cb) { 1065 cb(soc->ctrl_psoc, 1066 dp_soc_to_cdp_soc(soc), 1067 arg, 1068 CDP_TXRX_AST_DELETE_IN_PROGRESS); 1069 } 1070 return QDF_STATUS_SUCCESS; 1071 } 1072 1073 /** 1074 * dp_peer_HMWDS_ast_entry_del() - delete the ast entry from soc AST hash 1075 * table if HMWDS rem-addr command is issued 1076 * 1077 * @soc_handle: data path soc handle 1078 * @vdev_id: vdev id 1079 * @wds_macaddr: AST entry mac address to delete 1080 * @type: cdp_txrx_ast_entry_type to send to FW 1081 * @delete_in_fw: flag to indicate AST entry deletion in FW 1082 * 1083 * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete 1084 * is sent 1085 * QDF_STATUS_E_INVAL false if ast entry not found 1086 */ 1087 static QDF_STATUS dp_peer_HMWDS_ast_entry_del(struct cdp_soc_t *soc_handle, 1088 uint8_t vdev_id, 1089 uint8_t *wds_macaddr, 1090 uint8_t type, 1091 uint8_t delete_in_fw) 1092 { 1093 struct dp_soc *soc = (struct dp_soc *)soc_handle; 1094 1095 if (soc->ast_offload_support) { 1096 dp_del_wds_entry_wrapper(soc, vdev_id, wds_macaddr, type, 1097 delete_in_fw); 1098 return QDF_STATUS_SUCCESS; 1099 } 1100 1101 return -QDF_STATUS_E_INVAL; 1102 } 1103 1104 #ifdef FEATURE_AST 1105 /** 1106 * dp_print_mlo_ast_stats() - Print AST stats for MLO peers 1107 * 1108 * @soc: core DP soc context 1109 * 1110 * Return: void 1111 */ 1112 
static void dp_print_mlo_ast_stats(struct dp_soc *soc)
{
	/* Arch-specific hook; not all targets provide MLO AST stats */
	if (soc->arch_ops.print_mlo_ast_stats)
		soc->arch_ops.print_mlo_ast_stats(soc);
}

/* Peer-iterator callback: print every AST entry attached to @peer */
void
dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ase, *tmp_ase;
	uint32_t num_entries = 0;
	/* Names indexed by ase->type; keep in sync with
	 * enum cdp_txrx_ast_entry_type.
	 */
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
			"DA", "HMWDS_SEC", "MLD"};

	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
		DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
			       " peer_mac_addr = "QDF_MAC_ADDR_FMT
			       " peer_id = %u"
			       " type = %s"
			       " next_hop = %d"
			       " is_active = %d"
			       " ast_idx = %d"
			       " ast_hash = %d"
			       " delete_in_progress = %d"
			       " pdev_id = %d"
			       " vdev_id = %d",
			       ++num_entries,
			       QDF_MAC_ADDR_REF(ase->mac_addr.raw),
			       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			       ase->peer_id,
			       type[ase->type],
			       ase->next_hop,
			       ase->is_active,
			       ase->ast_idx,
			       ase->ast_hash_value,
			       ase->delete_in_progress,
			       ase->pdev_id,
			       ase->vdev_id);
	}
}

/* Dump SOC-level AST counters followed by the full AST table */
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
	DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS(" Entries MAP ERR = %d", soc->stats.ast.map_err);
	DP_PRINT_STATS(" Entries Mismatch ERR = %d",
		       soc->stats.ast.ast_mismatch);

	DP_PRINT_STATS("AST Table:");

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
			    DP_MOD_ID_GENERIC_STATS);

	qdf_spin_unlock_bh(&soc->ast_lock);

	/* MLO AST entries are printed via the arch hook, outside ast_lock */
	dp_print_mlo_ast_stats(soc);
}
#else
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
	return;
}
#endif

/**
 * dp_print_peer_info() - Dump peer info
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @arg: argument to iter function
 *
 * Return: void
 */
static void
dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_txrx_peer *txrx_peer = NULL;

	/* Peer may not have a txrx_peer yet (e.g. during setup) */
	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer)
		return;

	DP_PRINT_STATS(" peer id = %d"
		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
		       " nawds_enabled = %d"
		       " bss_peer = %d"
		       " wds_enabled = %d"
		       " tx_cap_enabled = %d"
		       " rx_cap_enabled = %d",
		       peer->peer_id,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		       txrx_peer->nawds_enabled,
		       txrx_peer->bss_peer,
		       txrx_peer->wds_enabled,
		       dp_monitor_is_tx_cap_enabled(peer),
		       dp_monitor_is_rx_cap_enabled(peer));
}

/**
 * dp_print_peer_table() - Dump all Peer stats
 * @vdev: Datapath Vdev handle
 *
 * Return: void
 */
static void dp_print_peer_table(struct dp_vdev *vdev)
{
	DP_PRINT_STATS("Dumping Peer Table Stats:");
	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
			     DP_MOD_ID_GENERIC_STATS);
}

#ifdef DP_MEM_PRE_ALLOC

/* Get a context buffer of @ctxt_size for @ctxt_type: prefer the platform
 * pre-allocated pool, fall back to dynamic allocation when the pool op is
 * absent or returns nothing.
 */
void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			   size_t ctxt_size)
{
	void *ctxt_mem;

	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
		dp_warn("dp_prealloc_get_context null!");
		goto dynamic_alloc;
	}

	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type,
								ctxt_size);

	if (ctxt_mem)
		goto end;

dynamic_alloc:
	dp_info("switch to dynamic-alloc for type %d, size %zu",
		ctxt_type, ctxt_size);
	ctxt_mem = qdf_mem_malloc(ctxt_size);
end:
	return ctxt_mem;
}

/* Return a context buffer: offer it back to the pre-alloc pool first and
 * free it dynamically only if the pool does not own it.
 */
void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			 void *vaddr)
{
	QDF_STATUS status;

	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
								ctxt_type,
								vaddr);
	} else {
		dp_warn("dp_prealloc_put_context null!");
		status = QDF_STATUS_E_NOSUPPORT;
	}

	/* Pool refused the buffer: it was dynamically allocated */
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_info("Context type %d not pre-allocated", ctxt_type);
		qdf_mem_free(vaddr);
	}
}

/* Allocate DMA-coherent, aligned SRNG memory, preferring the pre-alloc
 * pool; srng->is_mem_prealloc records the path taken so the free side
 * can mirror it.
 */
static inline
void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
					   struct dp_srng *srng,
					   uint32_t ring_type)
{
	void *mem;

	/* Must not already hold pre-allocated memory */
	qdf_assert(!srng->is_mem_prealloc);

	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
		dp_warn("dp_prealloc_get_consistent is null!");
		goto qdf;
	}

	mem =
		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
		(&srng->alloc_size,
		 &srng->base_vaddr_unaligned,
		 &srng->base_paddr_unaligned,
		 &srng->base_paddr_aligned,
		 DP_RING_BASE_ALIGN, ring_type);

	if (mem) {
		srng->is_mem_prealloc = true;
		goto end;
	}
qdf:
	/* Fallback: plain DMA-coherent aligned allocation */
	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
					       &srng->base_vaddr_unaligned,
					       &srng->base_paddr_unaligned,
					       &srng->base_paddr_aligned,
					       DP_RING_BASE_ALIGN);
end:
	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
		srng, ring_type, srng->alloc_size, srng->num_entries);
	return mem;
}

/* Release SRNG memory through the same path it was allocated from */
static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
					       struct dp_srng *srng)
{
	if (srng->is_mem_prealloc) {
		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
			dp_warn("dp_prealloc_put_consistent is null!");
			QDF_BUG(0);
			return;
		}
		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
			(srng->alloc_size,
			 srng->base_vaddr_unaligned,
			 srng->base_paddr_unaligned);

	} else {
		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
					srng->alloc_size,
					srng->base_vaddr_unaligned,
					srng->base_paddr_unaligned, 0);
	}
}

/* Allocate multi-page descriptor memory, preferring the pre-alloc pool;
 * pages->is_mem_prealloc tells the free path which pool owns the pages.
 */
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum qdf_dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint32_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable)
{
	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
		dp_warn("dp_get_multi_pages is null!");
		goto qdf;
	}

	pages->num_pages = 0;
	pages->is_mem_prealloc = 0;
	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
						element_size,
						element_num,
						pages,
						cacheable);
	if (pages->num_pages)
		goto end;

qdf:
	/* Fallback: dynamic multi-page allocation */
	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
				  element_num, memctxt, cacheable);
end:
	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
		desc_type, (int)element_size, element_num, cacheable);
}

/* Free multi-page descriptor memory through the pool it came from */
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum qdf_dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable)
{
	if (pages->is_mem_prealloc) {
		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
			dp_warn("dp_put_multi_pages is null!");
			QDF_BUG(0);
			return;
		}

		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
		/* Clear the descriptor so stale state cannot mislead a
		 * later free.
		 */
		qdf_mem_zero(pages, sizeof(*pages));
	} else {
		qdf_mem_multi_pages_free(soc->osdev, pages,
					 memctxt, cacheable);
	}
}

#else

/* Pre-allocation disabled: plain DMA-coherent aligned allocation, zeroed */
static inline
void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
					   struct dp_srng *srng,
					   uint32_t ring_type)

{
	void *mem;

	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
					       &srng->base_vaddr_unaligned,
					       &srng->base_paddr_unaligned,
					       &srng->base_paddr_aligned,
					       DP_RING_BASE_ALIGN);
	if (mem)
		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);

	return mem;
}

static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
					       struct dp_srng *srng)
{
	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				srng->alloc_size,
				srng->base_vaddr_unaligned,
				srng->base_paddr_unaligned, 0);
}

#endif /* DP_MEM_PRE_ALLOC */

#ifdef QCA_SUPPORT_WDS_EXTENDED
bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
{
	return vdev->wds_ext_enabled;
}
#else
bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
{
	return false;
}
#endif

/* Fast RX is allowed only when no feature needing the slow path
 * (protocol tagging, NAWDS, multipass, mesh) is enabled on the pdev
 * or any of its vdevs.
 */
void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	uint8_t rx_fast_flag = true;

	/* Check if protocol tagging enable */
	if
(pdev->is_rx_protocol_tagging_enabled) {
		rx_fast_flag = false;
		goto update_flag;
	}

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		/* Check if any VDEV has NAWDS enabled */
		if (vdev->nawds_enabled) {
			rx_fast_flag = false;
			break;
		}

		/* Check if any VDEV has multipass enabled */
		if (vdev->multipass_en) {
			rx_fast_flag = false;
			break;
		}

		/* Check if any VDEV has mesh enabled */
		if (vdev->mesh_vdev) {
			rx_fast_flag = false;
			break;
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

update_flag:
	dp_init_info("Updated Rx fast flag to %u", rx_fast_flag);
	pdev->rx_fast_flag = rx_fast_flag;
}

/* Select interrupt mode: poll when NAPI is off or monitor mode uses
 * polling; MSI when the platform provides an MSI assignment; otherwise
 * the default integrated mode.
 */
void dp_soc_set_interrupt_mode(struct dp_soc *soc)
{
	uint32_t msi_base_data, msi_vector_start;
	int msi_vector_count, ret;

	soc->intr_mode = DP_INTR_INTEGRATED;

	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    (dp_is_monitor_mode_using_poll(soc) &&
	     soc->cdp_soc.ol_ops->get_con_mode &&
	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
		soc->intr_mode = DP_INTR_POLL;
	} else {
		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
						  &msi_vector_count,
						  &msi_base_data,
						  &msi_vector_start);
		/* No platform MSI assignment: stay in integrated mode */
		if (ret)
			return;

		soc->intr_mode = DP_INTR_MSI;
	}
}

/* Map (ring_type, ring_num) to the interrupt group that services it;
 * optionally also return the near-full interrupt group. Rings that raise
 * no host interrupt return -QDF_STATUS_E_NOENT.
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;
	uint8_t wbm2_sw_rx_rel_ring_id;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num == wbm2_sw_rx_rel_ring_id) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else if (ring_num == WBM2_SW_PPE_REL_RING_ID) {
			grp_mask = &cfg_ctx->int_ppeds_wbm_release_ring_mask[0];
			ring_num = 0;
		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;

			/*
			 * Using ring 4 as 4th tx completion ring since ring 3
			 * is Rx error ring
			 */
			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
				ring_num = TXCOMP_RING4_NUM;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case TX_MONITOR_DST:
		/* dp_tx_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TX_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
	break;

	case REO2PPE:
		grp_mask = &soc->wlan_cfg_ctx->int_reo2ppe_ring_mask[0];
	break;

	case PPE2TCL:
		grp_mask = &soc->wlan_cfg_ctx->int_ppe2tcl_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}

#if defined(IPA_OFFLOAD) && defined(IPA_WDI3_VLAN_SUPPORT)
/* When IPA owns the alternate REO dest ring (VLAN over WDI3), the host
 * must not attach an MSI to that ring; clear its MSI parameters.
 */
static void
dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
			   int ring_num)
{
	if (wlan_ipa_is_vlan_enabled()) {
		if ((ring_type == REO_DST) &&
		    (ring_num == IPA_ALT_REO_DEST_RING_IDX)) {
			ring_params->msi_addr = 0;
			ring_params->msi_data = 0;
			ring_params->flags &= ~HAL_SRNG_MSI_INTR;
		}
	}
}
#else
static inline void
dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
			   int ring_num)
{
}
#endif

void dp_srng_msi_setup(struct dp_soc *soc, struct dp_srng *srng,
		       struct hal_srng_params *ring_params,
		       int
 ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;
	int vector;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* No platform MSI assignment: leave ring params untouched */
	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	if (ret < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	if (reg_msi_grp_num < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	if (dp_is_msi_group_number_invalid(soc, reg_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	/* Compose the 64-bit MSI address from the platform's low/high parts */
	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	/* IPA may own this ring; clears the MSI params in that case */
	dp_ipa_vlan_srng_msi_setup(ring_params, ring_type, ring_num);

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

	vector = msi_irq_start + (reg_msi_grp_num % msi_data_count);

	/*
	 * During umac reset ppeds interrupts free is not called.
	 * Avoid registering interrupts again.
	 *
	 */
	if (dp_check_umac_reset_in_progress(soc))
		goto configure_msi2;

	if (soc->arch_ops.dp_register_ppeds_interrupts)
		if (soc->arch_ops.dp_register_ppeds_interrupts(soc, srng,
							       vector,
							       ring_type,
							       ring_num))
			return;

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}

#ifdef WLAN_DP_PER_RING_TYPE_CONFIG
/**
 * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
 * threshold values from the wlan_srng_cfg table for each ring type
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries to fill
 *
 * Fill the ring params with the interrupt threshold
 * configuration parameters available in the per ring type wlan_srng_cfg
 * table.
 *
 * Return: None
 */
void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE &&
		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	} else {
		/* All other rings take per-ring-type thresholds from the
		 * wlan_srng_cfg table.
		 */
		ring_params->intr_timer_thres_us =
			soc->wlan_srng_cfg[ring_type].timer_threshold;
		ring_params->intr_batch_cntr_thres_entries =
			soc->wlan_srng_cfg[ring_type].batch_count_threshold;
	}
	ring_params->low_threshold =
		soc->wlan_srng_cfg[ring_type].low_threshold;
	if (ring_params->low_threshold)
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;

	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
}
#else
/* Default variant: thresholds derived from SOC-level cfg, not the
 * per-ring-type wlan_srng_cfg table.
 */
void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	uint8_t wbm2_sw_rx_rel_ring_id;
	bool rx_refill_lt_disable;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	if (ring_type == REO_DST || ring_type == REO2PPE) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE &&
		   (ring_num < wbm2_sw_rx_rel_ring_id ||
		    ring_num == WBM2SW_TXCOMP_RING4_NUM ||
		    ring_num == WBM2_SW_PPE_REL_RING_ID)) {
		/* Tx completion rings */
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else if (ring_type == RXDMA_BUF) {
		rx_refill_lt_disable =
			wlan_cfg_get_dp_soc_rxdma_refill_lt_disable
							(soc->wlan_cfg_ctx);
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);

		/* Refill ring uses a low-threshold interrupt (1/8th of the
		 * ring) unless disabled in cfg.
		 */
		if (!rx_refill_lt_disable) {
			ring_params->low_threshold = num_entries >> 3;
			ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
			ring_params->intr_batch_cntr_thres_entries = 0;
		}
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* These rings donot require interrupt to host. Make them zero */
	switch (ring_type) {
	case REO_REINJECT:
	case REO_CMD:
	case TCL_DATA:
	case TCL_CMD_CREDIT:
	case TCL_STATUS:
	case WBM_IDLE_LINK:
	case SW2WBM_RELEASE:
	case SW2RXDMA_NEW:
		ring_params->intr_timer_thres_us = 0;
		ring_params->intr_batch_cntr_thres_entries = 0;
		break;
	case PPE2TCL:
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_ppe2tcl(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_ppe2tcl(soc->wlan_cfg_ctx);
		break;
	case RXDMA_MONITOR_DST:
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_mon_dest(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_mon_dest(soc->wlan_cfg_ctx);
		break;
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings.
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS ||
	     (ring_type == TX_MONITOR_BUF))) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}

	/* During initialisation monitor rings are only filled with
	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
	 * a value less than that. Low threshold value is reconfigured again
	 * to 1/8th of the ring size when monitor vap is created.
	 */
	if (ring_type == RXDMA_MONITOR_BUF)
		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;

	/* In case of PCI chipsets, we dont have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep batch threshold as 4 so that interrupt is received for
	 * every 4 packets in MONITOR_STATUS ring
	 */
	if ((ring_type == RXDMA_MONITOR_STATUS) &&
	    (soc->intr_mode == DP_INTR_MSI))
		ring_params->intr_batch_cntr_thres_entries = 4;
}
#endif

/* QCN9160 services rxdma errors through the monitor path; all other
 * targets use the regular rxdma error processing.
 */
static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
				     struct dp_intr *int_ctx,
				     int mac_for_pdev,
				     int total_budget)
{
	uint32_t target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	if (target_type == TARGET_TYPE_QCN9160)
		return dp_monitor_process(soc, int_ctx,
					  mac_for_pdev, total_budget);
	else
		return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
					    total_budget);
}

/**
 * dp_process_lmac_rings() - Process LMAC rings
 * @int_ctx: interrupt context
 * @total_budget: budget of work which can be done
 *
 * Return: work done
 */
int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
{
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	uint32_t remaining_quota = total_budget;
	struct dp_pdev *pdev = NULL;
	uint32_t work_done = 0;
	int budget = total_budget;
	int ring = 0;
	bool rx_refill_lt_disable;

	rx_refill_lt_disable =
		wlan_cfg_get_dp_soc_rxdma_refill_lt_disable(soc->wlan_cfg_ctx);

	/* Process LMAC interrupts */
	for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;
		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_monitor_process(soc, int_ctx,
mac_for_pdev,
						       remaining_quota);
			if (work_done)
				intr_stats->num_rx_mon_ring_masks++;
			/* Charge work done against the shared budget */
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_tx_mon_process(soc, int_ctx,
						      mac_for_pdev,
						      remaining_quota);
			if (work_done)
				intr_stats->num_tx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		if (int_ctx->rxdma2host_ring_mask &
		    (1 << mac_for_pdev)) {
			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
							      mac_for_pdev,
							      remaining_quota);
			if (work_done)
				intr_stats->num_rxdma2host_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
			struct dp_srng *rx_refill_buf_ring;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
			/* Pick the refill ring: per-pdev lmac ring vs the
			 * ring indexed by the pdev's lmac_id.
			 */
			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[mac_for_pdev];
			else
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[pdev->lmac_id];

			intr_stats->num_host2rxdma_ring_masks++;

			/* Low-threshold replenish unless disabled in cfg */
			if (!rx_refill_lt_disable)
				dp_rx_buffers_lt_replenish_simple
							(soc, mac_for_pdev,
							 rx_refill_buf_ring,
							 rx_desc_pool,
							 false);
		}
	}

	if (int_ctx->host2rxdma_mon_ring_mask)
		dp_rx_mon_buf_refill(int_ctx);

	if (int_ctx->host2txmon_ring_mask)
		dp_tx_mon_buf_refill(int_ctx);

budget_done:
	return total_budget - budget;
}

/* Thin wrapper dispatching SRNG servicing to the arch-specific handler */
uint32_t dp_service_srngs_wrapper(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_soc *soc = int_ctx->soc;

	return soc->arch_ops.dp_service_srngs(dp_ctx, dp_budget, cpu);
}

#ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/**
 * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() -
 * Calculate interrupt map for legacy interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map
 * @num_irq_r: Number of interrupts assigned for this context
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
	int j;
	int num_irq = 0;
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
	/* For each ring selected by this context's masks, derive its
	 * virtual IRQ id from the corresponding base id.
	 */
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
		if (tx_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw0_release - j);
		if (rx_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw1_intr - j);
		if
(rx_mon_mask & (1 << j)) 2068 irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j); 2069 if (rx_err_ring_mask & (1 << j)) 2070 irq_id_map[num_irq++] = (reo2sw0_intr - j); 2071 if (rx_wbm_rel_ring_mask & (1 << j)) 2072 irq_id_map[num_irq++] = (wbm2sw5_release - j); 2073 if (reo_status_ring_mask & (1 << j)) 2074 irq_id_map[num_irq++] = (reo_status - j); 2075 if (rxdma2host_ring_mask & (1 << j)) 2076 irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j); 2077 if (host2rxdma_ring_mask & (1 << j)) 2078 irq_id_map[num_irq++] = (sw2rxdma_0 - j); 2079 if (host2rxdma_mon_ring_mask & (1 << j)) 2080 irq_id_map[num_irq++] = (sw2rxmon_src_ring - j); 2081 if (host2txmon_ring_mask & (1 << j)) 2082 irq_id_map[num_irq++] = sw2txmon_src_ring; 2083 if (txmon2host_mon_ring_mask & (1 << j)) 2084 irq_id_map[num_irq++] = (txmon2sw_p0_dest0 - j); 2085 } 2086 *num_irq_r = num_irq; 2087 } 2088 #else 2089 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc, 2090 int intr_ctx_num, 2091 int *irq_id_map, 2092 int *num_irq_r) 2093 { 2094 } 2095 #endif 2096 2097 static void 2098 dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc, int intr_ctx_num, 2099 int *irq_id_map, int *num_irq_r) 2100 { 2101 int j; 2102 int num_irq = 0; 2103 2104 int tx_mask = 2105 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); 2106 int rx_mask = 2107 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); 2108 int rx_mon_mask = 2109 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); 2110 int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask( 2111 soc->wlan_cfg_ctx, intr_ctx_num); 2112 int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask( 2113 soc->wlan_cfg_ctx, intr_ctx_num); 2114 int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask( 2115 soc->wlan_cfg_ctx, intr_ctx_num); 2116 int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask( 2117 soc->wlan_cfg_ctx, intr_ctx_num); 2118 int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask( 2119 
soc->wlan_cfg_ctx, intr_ctx_num); 2120 int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask( 2121 soc->wlan_cfg_ctx, intr_ctx_num); 2122 int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask( 2123 soc->wlan_cfg_ctx, intr_ctx_num); 2124 int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask( 2125 soc->wlan_cfg_ctx, intr_ctx_num); 2126 2127 soc->intr_mode = DP_INTR_INTEGRATED; 2128 2129 for (j = 0; j < HIF_MAX_GRP_IRQ; j++) { 2130 if (tx_mask & (1 << j)) { 2131 irq_id_map[num_irq++] = 2132 (wbm2host_tx_completions_ring1 - j); 2133 } 2134 2135 if (rx_mask & (1 << j)) { 2136 irq_id_map[num_irq++] = 2137 (reo2host_destination_ring1 - j); 2138 } 2139 2140 if (rxdma2host_ring_mask & (1 << j)) { 2141 irq_id_map[num_irq++] = 2142 rxdma2host_destination_ring_mac1 - j; 2143 } 2144 2145 if (host2rxdma_ring_mask & (1 << j)) { 2146 irq_id_map[num_irq++] = 2147 host2rxdma_host_buf_ring_mac1 - j; 2148 } 2149 2150 if (host2rxdma_mon_ring_mask & (1 << j)) { 2151 irq_id_map[num_irq++] = 2152 host2rxdma_monitor_ring1 - j; 2153 } 2154 2155 if (rx_mon_mask & (1 << j)) { 2156 irq_id_map[num_irq++] = 2157 ppdu_end_interrupts_mac1 - j; 2158 irq_id_map[num_irq++] = 2159 rxdma2host_monitor_status_ring_mac1 - j; 2160 irq_id_map[num_irq++] = 2161 rxdma2host_monitor_destination_mac1 - j; 2162 } 2163 2164 if (rx_wbm_rel_ring_mask & (1 << j)) 2165 irq_id_map[num_irq++] = wbm2host_rx_release; 2166 2167 if (rx_err_ring_mask & (1 << j)) 2168 irq_id_map[num_irq++] = reo2host_exception; 2169 2170 if (reo_status_ring_mask & (1 << j)) 2171 irq_id_map[num_irq++] = reo2host_status; 2172 2173 if (host2txmon_ring_mask & (1 << j)) 2174 irq_id_map[num_irq++] = host2tx_monitor_ring1; 2175 2176 if (txmon2host_mon_ring_mask & (1 << j)) { 2177 irq_id_map[num_irq++] = 2178 (txmon2host_monitor_destination_mac1 - j); 2179 } 2180 } 2181 *num_irq_r = num_irq; 2182 } 2183 2184 static void 2185 dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc, int intr_ctx_num, 2186 int *irq_id_map, int 
				   *num_irq_r,
				   int msi_vector_count, int msi_vector_start)
{
	/* Per-context ring masks: each non-zero mask means this interrupt
	 * context services that ring class, so it needs the shared MSI.
	 */
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_near_full_grp_1_mask =
		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int rx_near_full_grp_2_mask =
		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int tx_ring_near_full_mask =
		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
						    intr_ctx_num);

	int host2txmon_ring_mask =
		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
						  intr_ctx_num);
	/* Contexts share the MSI vector pool round-robin */
	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	/* One MSI per interrupt context: map it only if this context
	 * services at least one ring class.
	 */
	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
	    tx_ring_near_full_mask | host2txmon_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}

/**
 * dp_soc_interrupt_map_calculate() - build the IRQ id map for a context
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids for this context
 * @num_irq: output count of entries filled in @irq_id_map
 *
 * Dispatches to the legacy-INTx, integrated or MSI map calculation
 * depending on what the platform driver reports.
 *
 * Return: None
 */
void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
				    int *irq_id_map, int *num_irq)
{
	int msi_vector_count, ret;
	uint32_t msi_base_data, msi_vector_start;

	if (pld_get_enable_intx(soc->osdev->dev)) {
		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
				intr_ctx_num, irq_id_map, num_irq);
	}

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_vector_count,
					  &msi_base_data,
					  &msi_vector_start);
	/* No user MSI assignment => fall back to integrated mapping */
	if (ret)
		return dp_soc_interrupt_map_calculate_integrated(soc,
				intr_ctx_num, irq_id_map, num_irq);

	else
		dp_soc_interrupt_map_calculate_msi(soc,
				intr_ctx_num, irq_id_map, num_irq,
				msi_vector_count, msi_vector_start);
}

/**
 * dp_srng_free() - free the memory backing an SRNG
 * @soc: DP SOC handle
 * @srng: SRNG descriptor
 *
 * Frees either the DMA-coherent or the cached (heap) backing memory,
 * depending on how the ring was allocated, and clears the bookkeeping
 * fields so a subsequent alloc starts clean.
 *
 * Return: None
 */
void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
{
	if (srng->alloc_size && srng->base_vaddr_unaligned) {
		if (!srng->cached) {
			dp_srng_mem_free_consistent(soc, srng);
		} else {
			qdf_mem_free(srng->base_vaddr_unaligned);
		}
		srng->alloc_size = 0;
		srng->base_vaddr_unaligned = NULL;
	}
	srng->hal_srng = NULL;
}

qdf_export_symbol(dp_srng_free);

/**
 * dp_srng_init() - initialize an already-allocated SRNG
 * @soc: DP SOC handle
 * @srng: SRNG descriptor
 * @ring_type: ring type (e.g. RXDMA_BUF)
 * @ring_num: ring number within the type
 * @mac_id: MAC (lmac) id the ring belongs to
 *
 * Thin dispatch to the architecture-specific SRNG init op.
 *
 * Return: QDF_STATUS from the arch op
 */
QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng, int ring_type,
			int ring_num, int mac_id)
{
	return soc->arch_ops.txrx_srng_init(soc, srng, ring_type,
					    ring_num, mac_id);
}

qdf_export_symbol(dp_srng_init);

/**
 * dp_srng_alloc() - allocate backing memory for an SRNG
 * @soc: DP SOC handle
 * @srng: SRNG descriptor to populate
 * @ring_type: ring type, used to look up HAL entry size and max entries
 * @num_entries: requested number of entries (clamped to the HAL maximum)
 * @cached: true to use cacheable heap memory instead of DMA-coherent
 *
 * Return: QDF_STATUS_SUCCESS on success (or if already allocated),
 *	   QDF_STATUS_E_NOMEM on allocation failure
 */
QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
			 int ring_type, uint32_t num_entries,
			 bool cached)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

	/* Double-alloc is treated as success, not an error */
	if (srng->base_vaddr_unaligned) {
		dp_init_err("%pK: Ring type: %d, is already allocated",
			    soc, ring_type);
		return QDF_STATUS_SUCCESS;
	}

	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
	srng->hal_srng = NULL;
	srng->alloc_size = num_entries * entry_size;
	srng->num_entries = num_entries;
	srng->cached = cached;

	if (!cached) {
		srng->base_vaddr_aligned =
			dp_srng_aligned_mem_alloc_consistent(soc,
							     srng,
							     ring_type);
	} else {
		/* qdf_aligned_malloc also fills in the unaligned/paddr
		 * fields and may round alloc_size up for alignment.
		 */
		srng->base_vaddr_aligned = qdf_aligned_malloc(
					&srng->alloc_size,
					&srng->base_vaddr_unaligned,
					&srng->base_paddr_unaligned,
					&srng->base_paddr_aligned,
					DP_RING_BASE_ALIGN);
	}

	if (!srng->base_vaddr_aligned)
		return QDF_STATUS_E_NOMEM;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_srng_alloc);

/**
 * dp_srng_deinit() - de-initialize an SRNG (keeps backing memory)
 * @soc: DP SOC handle
 * @srng: SRNG descriptor
 * @ring_type: ring type
 * @ring_num: ring number within the type
 *
 * Skips PPE-DS interrupt teardown while a UMAC reset is in progress;
 * hal_srng_cleanup() is told about the reset so it can preserve state.
 *
 * Return: None
 */
void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
		    int ring_type, int ring_num)
{
	if (!srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d not setup",
			    soc, ring_type, ring_num);
		return;
	}

	if (dp_check_umac_reset_in_progress(soc))
		goto srng_cleanup;

	if (soc->arch_ops.dp_free_ppeds_interrupts)
		soc->arch_ops.dp_free_ppeds_interrupts(soc, srng, ring_type,
						       ring_num);

srng_cleanup:
	hal_srng_cleanup(soc->hal_soc, srng->hal_srng,
			 dp_check_umac_reset_in_progress(soc));
	srng->hal_srng = NULL;
}

qdf_export_symbol(dp_srng_deinit);

/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * dp_srng_access_start() - start SRNG access, recording an event history
 *			    entry when called from an interrupt context
 * @int_ctx: DP interrupt context (NULL when called outside interrupts)
 * @dp_soc: DP SOC handle
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: result of dp_hal_srng_access_start()
 */
int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
	uint32_t hp, tp;
	uint8_t ring_id;

	if (!int_ctx)
		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);

	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
	ring_id = hal_srng_ring_id_get(hal_ring_hdl);

	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);

	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
}

/**
 * dp_srng_access_end() - end SRNG access, recording an event history
 *			  entry when called from an interrupt context
 * @int_ctx: DP interrupt context (NULL when called outside interrupts)
 * @dp_soc: DP SOC handle
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None (tail-calls dp_hal_srng_access_end())
 */
void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
	uint32_t hp, tp;
	uint8_t ring_id;

	if (!int_ctx)
		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);

	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
	ring_id = hal_srng_ring_id_get(hal_ring_hdl);

	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);

	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
}

/* Record poll-timer entry in the HIF event history */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
}

/* Record poll-timer exit in the HIF event history */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
}
#else

static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
}

static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
}

#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/**
 * dp_should_timer_irq_yield() - decide whether the poll timer should yield
 * @soc: DP SOC handle
 * @work_done: work completed since the last yield check
 * @start_time: log timestamp (ticks) when processing started
 *
 * Return: DP_TIMER_WORK_DONE when no work remains,
 *	   DP_TIMER_TIME_EXHAUST when the execution budget elapsed,
 *	   DP_TIMER_NO_YIELD otherwise
 */
enum timer_yield_status
dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
			  uint64_t start_time)
{
	uint64_t cur_time = qdf_get_log_timestamp();

	if (!work_done)
		return DP_TIMER_WORK_DONE;

	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
		return DP_TIMER_TIME_EXHAUST;

	return DP_TIMER_NO_YIELD;
}

qdf_export_symbol(dp_should_timer_irq_yield);

/**
 * dp_interrupt_timer() - DP poll-timer handler
 * @arg: DP SOC handle (cast from void *)
 *
 * In full poll mode (poll enabled, no NSS offload) services every
 * interrupt context and re-arms the timer. Otherwise it services only
 * the monitor-mode rings for the lmac the monitor channel is on,
 * yielding when the budget or time slice is exhausted.
 *
 * Return: None
 */
void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done  = 0, total_work_done = 0;
	int budget = 0xffff, i;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	/* defaults past the last valid context; overwritten when the
	 * monitor lmac is known
	 */
	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	enum reg_wifi_band mon_band;
	int cpu = dp_srng_get_cpu();

	/*
	 * this logic makes all data path interfacing rings (UMAC/LMAC)
	 * and Monitor rings polling mode when NSS offload is disabled
	 */
	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (qdf_atomic_read(&soc->cmn_init_done)) {
			for (i = 0; i < wlan_cfg_get_num_contexts(
						soc->wlan_cfg_ctx); i++)
				soc->arch_ops.dp_service_srngs(&soc->intr_ctx[i], 0xffff,
							       cpu);

			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		}
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	if (dp_monitor_is_chan_band_known(pdev)) {
		mon_band = dp_monitor_get_chan_band(pdev);
		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
			dp_srng_record_timer_entry(soc, dp_intr_id);
		}
	}

	start_time = qdf_get_log_timestamp();
	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			/* Process the monitor lmac; drop traffic for the
			 * other macs so their rings do not back up.
			 */
			if (lmac_iter == lmac_id)
				work_done = dp_monitor_process(soc,
						&soc->intr_ctx[dp_intr_id],
						lmac_iter, remaining_quota);
			else
				work_done =
					dp_monitor_drop_packets_for_mac(pdev,
							     lmac_iter,
							     remaining_quota);
			if (work_done) {
				budget -=  work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	/* Re-arm quickly when there is still work pending */
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	if (lmac_id != DP_MON_INVALID_LMAC_ID)
		dp_srng_record_timer_exit(soc, dp_intr_id);
}

/**
 * dp_soc_interrupt_detach_wrapper() - wrapper function for interrupt detach
 * @txrx_soc: DP SOC handle
 *
 * Return: None
 */
static void dp_soc_interrupt_detach_wrapper(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	return soc->arch_ops.dp_soc_interrupt_detach(txrx_soc);
}

#if defined(DP_INTR_POLL_BOTH)
/**
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	/* Poll when NAPI is off, or when monitor mode explicitly asks
	 * for polling; otherwise use real interrupts.
	 */
	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    (dp_is_monitor_mode_using_poll(soc) &&
	     soc->cdp_soc.ol_ops->get_con_mode &&
	     soc->cdp_soc.ol_ops->get_con_mode() ==
	     QDF_GLOBAL_MONITOR_MODE)) {
		dp_info("Poll mode");
		return soc->arch_ops.dp_soc_attach_poll(txrx_soc);
	} else {
		dp_info("Interrupt mode");
		return soc->arch_ops.dp_soc_interrupt_attach(txrx_soc);
	}
}
#else
#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
/* Poll-only build: always attach in poll mode */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	return soc->arch_ops.dp_soc_attach_poll(txrx_soc);
}
#else
/* Interrupt build: honor the ini-configured poll mode */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
		return soc->arch_ops.dp_soc_attach_poll(txrx_soc);
	else
		return soc->arch_ops.dp_soc_interrupt_attach(txrx_soc);
}
#endif
#endif

/**
 * dp_link_desc_ring_replenish() - fill the WBM idle link descriptor list
 * @soc: DP SOC handle
 * @mac_id: lmac id for monitor rings, or WLAN_INVALID_PDEV_ID for the
 *	    common (soc-level) idle link ring
 *
 * Zeroes the link descriptor pages, then publishes one entry per
 * descriptor either through the idle link SRNG (when present) or by
 * writing the idle-list scatter buffers directly and handing them to HW.
 *
 * Return: None
 */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
{
	uint32_t cookie = 0;
	uint32_t page_idx = 0;
	struct qdf_mem_multi_page_t *pages;
	struct qdf_mem_dma_page_t *dma_pages;
	uint32_t offset = 0;
	uint32_t count = 0;
	uint32_t desc_id = 0;
	void *desc_srng;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	uint32_t *total_link_descs_addr;
	uint32_t total_link_descs;
	uint32_t scatter_buf_num;
	uint32_t num_entries_per_buf = 0;
	uint32_t rem_entries;
	uint32_t num_descs_per_page;
	uint32_t num_scatter_bufs = 0;
	uint8_t *scatter_buf_ptr;
	void *desc;

	num_scatter_bufs = soc->num_scatter_bufs;

	if (mac_id == WLAN_INVALID_PDEV_ID) {
		pages = &soc->link_desc_pages;
		total_link_descs = soc->total_link_descs;
		desc_srng = soc->wbm_idle_link_ring.hal_srng;
	} else {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return;
		}
		total_link_descs_addr =
				dp_monitor_get_total_link_descs(soc, mac_id);
		total_link_descs = *total_link_descs_addr;
		desc_srng = dp_monitor_get_link_desc_ring(soc, mac_id);
	}

	dma_pages = pages->dma_pages;
	do {
		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
			     pages->page_size);
		page_idx++;
	} while (page_idx < pages->num_pages);

	if (desc_srng) {
		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
		page_idx = 0;
		count = 0;
		offset = 0;

		qdf_assert(pages->num_element_per_page != 0);
		while ((desc = hal_srng_src_get_next(soc->hal_soc,
						     desc_srng)) &&
			(count < total_link_descs)) {
			page_idx = count / pages->num_element_per_page;
			/* desc_id wraps per page so the cookie stays unique
			 * within a page
			 */
			if (desc_id == pages->num_element_per_page)
				desc_id = 0;

			offset = count % pages->num_element_per_page;
			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
						  soc->link_desc_id_start);

			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
					       dma_pages[page_idx].page_p_addr
					       + (offset * link_desc_size),
					       soc->idle_link_bm_id);
			count++;
			desc_id++;
		}
		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
	} else {
		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
					soc->hal_soc,
					soc->wbm_idle_scatter_buf_size);

		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;
		page_idx = 0; count = 0;
		offset = 0;
		num_descs_per_page = pages->num_element_per_page;

		qdf_assert(num_descs_per_page != 0);
		while (count < total_link_descs) {
			page_idx = count / num_descs_per_page;
			offset = count % num_descs_per_page;
			if (desc_id == pages->num_element_per_page)
				desc_id = 0;

			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
						  soc->link_desc_id_start);
			hal_set_link_desc_addr(soc->hal_soc,
					       (void *)scatter_buf_ptr,
					       cookie,
					       dma_pages[page_idx].page_p_addr +
					       (offset * link_desc_size),
					       soc->idle_link_bm_id);
			rem_entries--;
			if (rem_entries) {
				scatter_buf_ptr += link_desc_size;
			} else {
				/* Current scatter buffer full: move to the
				 * next one, or stop if none remain.
				 */
				rem_entries = num_entries_per_buf;
				scatter_buf_num++;
				if (scatter_buf_num >= num_scatter_bufs) {
					scatter_buf_num--;
					break;
				}

				scatter_buf_ptr = (uint8_t *)
					(soc->wbm_idle_scatter_buf_base_vaddr[
					 scatter_buf_num]);
			}
			count++;
			desc_id++;
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr -
			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num])), total_link_descs);
	}
}

qdf_export_symbol(dp_link_desc_ring_replenish);

/**
 * dp_soc_ppeds_stop() - Stop PPE DS processing
 * @soc_handle: DP SOC handle
 *
 * Return: none
 */
static void dp_soc_ppeds_stop(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	if (soc->arch_ops.txrx_soc_ppeds_stop)
		soc->arch_ops.txrx_soc_ppeds_stop(soc);
}

#ifdef ENABLE_VERBOSE_DEBUG
/**
 * dp_enable_verbose_debug() - enable DP/HAL verbose tracing from cfg
 * @soc: DP SOC handle
 *
 * Return: None
 */
void dp_enable_verbose_debug(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
		is_dp_verbose_debug_enabled = true;

	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
		hal_set_verbose_debug(true);
	else
		hal_set_verbose_debug(false);
}
#else
void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
#endif

/**
 * dp_lro_hash_setup() - send the LRO/RX-hash configuration to FW
 * @soc: DP SOC handle
 * @pdev: physical device handle
 *
 * Builds a cdp_lro_hash_config (TCP flag mask plus the Toeplitz keys
 * supplied by the arch ops) and pushes it to firmware through the
 * ol_ops->lro_hash_config callback.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct cdp_lro_hash_config lro_hash;
	QDF_STATUS status;

	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_err("LRO, GRO and RX hash disabled");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}

	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);

	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);

	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
		QDF_BUG(0);
		dp_err("lro_hash_config not configured");
		return QDF_STATUS_E_FAILURE;
	}

	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
						      pdev->pdev_id,
						      &lro_hash);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failed to send lro_hash_config to FW %u", status);
		return status;
	}

	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

	dp_info("toeplitz_hash_ipv4:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   lro_hash.toeplitz_hash_ipv4,
			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			    LRO_IPV4_SEED_ARR_SZ));

	dp_info("toeplitz_hash_ipv6:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   lro_hash.toeplitz_hash_ipv6,
			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			    LRO_IPV6_SEED_ARR_SZ));

	return status;
}

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/**
 * dp_reap_timer_init() - initialize the reap timer
 * @soc: data path SoC handle
 *
 * Return: void
 */
static void dp_reap_timer_init(struct dp_soc *soc)
{
	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	dp_monitor_reap_timer_init(soc);
	dp_monitor_vdev_timer_init(soc);
}

/**
 * dp_reap_timer_deinit() - de-initialize the reap timer
 * @soc: data path SoC handle
 *
 * Return: void
 */
static void dp_reap_timer_deinit(struct dp_soc *soc)
{
	dp_monitor_reap_timer_deinit(soc);
}
#else
/* WIN use case */
static void dp_reap_timer_init(struct dp_soc *soc)
{
	/* Configure LMAC rings in Polled mode */
	if (soc->lmac_polled_mode) {
		/*
		 * Timer to reap lmac rings.
		 */
		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
			       dp_service_lmac_rings, (void *)soc,
			       QDF_TIMER_TYPE_WAKE_APPS);
		soc->lmac_timer_init = 1;
		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
	}
}

static void dp_reap_timer_deinit(struct dp_soc *soc)
{
	if (soc->lmac_timer_init) {
		qdf_timer_stop(&soc->lmac_reap_timer);
		qdf_timer_free(&soc->lmac_reap_timer);
		soc->lmac_timer_init = 0;
	}
}
#endif

#ifdef QCA_HOST2FW_RXBUF_RING
/**
 * dp_rxdma_ring_alloc() - allocate the RXDMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_FAILURE if any ring allocation fails
 */
static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	int max_mac_rings;
	int i;
	int ring_size;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);

	for (i = 0; i < max_mac_rings; i++) {
		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
				  RXDMA_BUF, ring_size, 0)) {
			dp_init_err("%pK: failed rx mac ring setup", soc);
			return QDF_STATUS_E_FAILURE;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rxdma_ring_setup() - configure the RXDMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_FAILURE if any ring init fails
 */
static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	int max_mac_rings;
	int i;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);

	for (i = 0; i < max_mac_rings; i++) {
		dp_verbose_debug("pdev_id %d
mac_id %d", pdev->pdev_id, i);
		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
				 RXDMA_BUF, 1, i)) {
			dp_init_err("%pK: failed rx mac ring setup", soc);
			return QDF_STATUS_E_FAILURE;
		}
		dp_ssr_dump_srng_register("rx_mac_buf_ring",
					  &pdev->rx_mac_buf_ring[i], i);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: void
 */
static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int i;

	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
		dp_ssr_dump_srng_unregister("rx_mac_buf_ring", i);
		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
	}

	dp_reap_timer_deinit(soc);
}

/**
 * dp_rxdma_ring_free() - Free the RXDMA rings
 * @pdev: Physical device handle
 *
 * Return: void
 */
static void dp_rxdma_ring_free(struct dp_pdev *pdev)
{
	int i;

	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
}

#else
/* No host2FW rx buffer ring: the RXDMA helpers become no-ops */
static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	dp_reap_timer_deinit(soc);
}

static void dp_rxdma_ring_free(struct dp_pdev *pdev)
{
}
#endif

#ifdef IPA_OFFLOAD
/**
 * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int entries;

	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		soc_cfg_ctx = soc->wlan_cfg_ctx;
		entries =
			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);

		/* Setup second Rx refill buffer ring */
		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
				  entries, 0)) {
			dp_init_err("%pK: dp_srng_alloc failed second"
				    "rx refill ring", soc);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}

#ifdef IPA_WDI3_VLAN_SUPPORT
/* Allocate the third Rx refill ring used for IPA VLAN traffic */
static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int entries;

	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
	    wlan_ipa_is_vlan_enabled()) {
		soc_cfg_ctx = soc->wlan_cfg_ctx;
		entries =
			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);

		/* Setup second Rx refill buffer ring */
		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
				  entries, 0)) {
			dp_init_err("%pK: alloc failed for 3rd rx refill ring",
				    soc);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}

/* Initialize the third (VLAN) Rx refill ring */
static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
	    wlan_ipa_is_vlan_enabled()) {
		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
				 IPA_RX_ALT_REFILL_BUF_RING_IDX,
				 pdev->pdev_id)) {
			dp_init_err("%pK: init failed for 3rd rx refill ring",
				    soc);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}

/* De-initialize the third (VLAN) Rx refill ring */
static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
	    wlan_ipa_is_vlan_enabled())
		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 0);
}

/* Free the third (VLAN) Rx refill ring */
static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
	    wlan_ipa_is_vlan_enabled())
		dp_srng_free(soc, &pdev->rx_refill_buf_ring3);
}
#else
static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}
#endif

/**
 * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: void
 */
static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
}

/**
 * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					  struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
			dp_init_err("%pK: dp_srng_init failed second"
				    "rx refill ring", soc);
			return QDF_STATUS_E_FAILURE;
		}
	}

	/* Unwind the second ring if the VLAN (alt) ring fails */
	if (dp_init_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
		dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: void
 */
static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
}
#else
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					  struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
}

static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}
#endif

#ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY

/**
 * dp_soc_cfg_history_attach() - Allocate and attach datapath config events
 *				 history
 * @soc: DP soc handle
 *
 * Return: None
 */
static void dp_soc_cfg_history_attach(struct dp_soc *soc)
{
	dp_soc_frag_history_attach(soc, &soc->cfg_event_history,
				   DP_CFG_EVT_HIST_MAX_SLOTS,
DP_CFG_EVT_HIST_PER_SLOT_MAX, 3209 sizeof(struct dp_cfg_event), 3210 true, DP_CFG_EVENT_HIST_TYPE); 3211 } 3212 3213 /** 3214 * dp_soc_cfg_history_detach() - Detach and free DP config events history 3215 * @soc: DP soc handle 3216 * 3217 * Return: none 3218 */ 3219 static void dp_soc_cfg_history_detach(struct dp_soc *soc) 3220 { 3221 dp_soc_frag_history_detach(soc, &soc->cfg_event_history, 3222 DP_CFG_EVT_HIST_MAX_SLOTS, 3223 true, DP_CFG_EVENT_HIST_TYPE); 3224 } 3225 3226 #else 3227 static void dp_soc_cfg_history_attach(struct dp_soc *soc) 3228 { 3229 } 3230 3231 static void dp_soc_cfg_history_detach(struct dp_soc *soc) 3232 { 3233 } 3234 #endif 3235 3236 #ifdef DP_TX_HW_DESC_HISTORY 3237 /** 3238 * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history 3239 * 3240 * @soc: DP soc handle 3241 * 3242 * Return: None 3243 */ 3244 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc) 3245 { 3246 dp_soc_frag_history_attach(soc, &soc->tx_hw_desc_history, 3247 DP_TX_HW_DESC_HIST_MAX_SLOTS, 3248 DP_TX_HW_DESC_HIST_PER_SLOT_MAX, 3249 sizeof(struct dp_tx_hw_desc_evt), 3250 true, DP_TX_HW_DESC_HIST_TYPE); 3251 } 3252 3253 static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc) 3254 { 3255 dp_soc_frag_history_detach(soc, &soc->tx_hw_desc_history, 3256 DP_TX_HW_DESC_HIST_MAX_SLOTS, 3257 true, DP_TX_HW_DESC_HIST_TYPE); 3258 } 3259 3260 #else /* DP_TX_HW_DESC_HISTORY */ 3261 static inline void 3262 dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc) 3263 { 3264 } 3265 3266 static inline void 3267 dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc) 3268 { 3269 } 3270 #endif /* DP_TX_HW_DESC_HISTORY */ 3271 3272 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 3273 #ifndef RX_DEFRAG_DO_NOT_REINJECT 3274 /** 3275 * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring 3276 * history. 
 * @soc: DP soc handle
 *
 * Allocation failure is tolerated: the record path checks the pointer
 * before use, so no error is returned here.
 *
 * Return: None
 */
static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
	soc->rx_reinject_ring_history =
		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
				     sizeof(struct dp_rx_reinject_history));
	if (soc->rx_reinject_ring_history)
		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
}
#else /* RX_DEFRAG_DO_NOT_REINJECT */
static inline void
dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
}
#endif /* RX_DEFRAG_DO_NOT_REINJECT */

/**
 * dp_soc_rx_history_attach() - Attach the ring history record buffers
 * @soc: DP soc structure
 *
 * This function allocates the memory for recording the rx ring, rx error
 * ring and the reinject ring entries. There is no error returned in case
 * of allocation failure since the record function checks if the history is
 * initialized or not. We do not want to fail the driver load in case of
 * failure to allocate memory for debug history.
 *
 * Return: None
 */
static void dp_soc_rx_history_attach(struct dp_soc *soc)
{
	int i;
	uint32_t rx_ring_hist_size;
	uint32_t rx_refill_ring_hist_size;

	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);

	/* One history buffer per REO destination ring */
	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
		soc->rx_ring_history[i] = dp_context_alloc_mem(
				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
		if (soc->rx_ring_history[i])
			qdf_atomic_init(&soc->rx_ring_history[i]->index);
	}

	soc->rx_err_ring_history = dp_context_alloc_mem(
			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
	if (soc->rx_err_ring_history)
		qdf_atomic_init(&soc->rx_err_ring_history->index);

	dp_soc_rx_reinject_ring_history_attach(soc);

	/* One refill-ring history buffer per pdev */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
						soc,
						DP_RX_REFILL_RING_HIST_TYPE,
						rx_refill_ring_hist_size);

		if (soc->rx_refill_ring_history[i])
			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
	}
}

/**
 * dp_soc_rx_history_detach() - Free all Rx ring history record buffers
 * @soc: DP soc structure
 *
 * Counterpart of dp_soc_rx_history_attach(); safe to call even when some
 * (or all) of the history buffers failed to allocate.
 *
 * Return: None
 */
static void dp_soc_rx_history_detach(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
				    soc->rx_ring_history[i]);

	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
			    soc->rx_err_ring_history);

	/*
	 * No need for a featurized detach since qdf_mem_free takes
	 * care of NULL pointer.
	 */
	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
			    soc->rx_reinject_ring_history);

	for (i = 0; i < MAX_PDEV_CNT; i++)
		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
				    soc->rx_refill_ring_history[i]);
}

#else
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
#endif

#ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
/**
 * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
 *					buffer record history.
 * @soc: DP soc handle
 *
 * This function allocates memory to track the event for a monitor
 * status buffer, before its parsed and freed.
 *
 * Return: None
 */
static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
{
	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
				DP_MON_STATUS_BUF_HIST_TYPE,
				sizeof(struct dp_mon_status_ring_history));
	if (!soc->mon_status_ring_history) {
		/* Non-fatal: history recording is a debug aid only */
		dp_err("Failed to alloc memory for mon status ring history");
		return;
	}
}

/**
 * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
 *					record history.
 * @soc: DP soc handle
 *
 * Return: None
 */
static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
{
	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
			    soc->mon_status_ring_history);
}
#else
static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
{
}

static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
{
}
#endif

#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
/**
 * dp_soc_tx_history_attach() - Attach the ring history record buffers
 * @soc: DP soc structure
 *
 * This function allocates the memory for recording the tx tcl ring and
 * the tx comp ring entries. There is no error returned in case
 * of allocation failure since the record function checks if the history is
 * initialized or not. We do not want to fail the driver load in case of
 * failure to allocate memory for debug history.
 *
 * Return: None
 */
static void dp_soc_tx_history_attach(struct dp_soc *soc)
{
	/* Both histories share the same event record size */
	dp_soc_frag_history_attach(soc, &soc->tx_tcl_history,
				   DP_TX_TCL_HIST_MAX_SLOTS,
				   DP_TX_TCL_HIST_PER_SLOT_MAX,
				   sizeof(struct dp_tx_desc_event),
				   true, DP_TX_TCL_HIST_TYPE);
	dp_soc_frag_history_attach(soc, &soc->tx_comp_history,
				   DP_TX_COMP_HIST_MAX_SLOTS,
				   DP_TX_COMP_HIST_PER_SLOT_MAX,
				   sizeof(struct dp_tx_desc_event),
				   true, DP_TX_COMP_HIST_TYPE);
}

/**
 * dp_soc_tx_history_detach() - Detach the ring history record buffers
 * @soc: DP soc structure
 *
 * This function frees the memory for recording the tx tcl ring and
 * the tx comp ring entries.
 *
 * Return: None
 */
static void dp_soc_tx_history_detach(struct dp_soc *soc)
{
	dp_soc_frag_history_detach(soc, &soc->tx_tcl_history,
				   DP_TX_TCL_HIST_MAX_SLOTS,
				   true, DP_TX_TCL_HIST_TYPE);
	dp_soc_frag_history_detach(soc, &soc->tx_comp_history,
				   DP_TX_COMP_HIST_MAX_SLOTS,
				   true, DP_TX_COMP_HIST_TYPE);
}

#else
static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */

#ifdef DP_RX_MSDU_DONE_FAIL_HISTORY
/**
 * dp_soc_msdu_done_fail_history_attach() - Allocate MSDU-done failure history
 * @soc: DP soc handle
 *
 * Allocation failure is tolerated; recording is skipped when the buffer
 * is NULL.
 *
 * Return: None
 */
static void dp_soc_msdu_done_fail_history_attach(struct dp_soc *soc)
{
	soc->msdu_done_fail_hist =
		qdf_mem_malloc(sizeof(struct dp_msdu_done_fail_history));
	if (soc->msdu_done_fail_hist)
		qdf_atomic_init(&soc->msdu_done_fail_hist->index);
}

/**
 * dp_soc_msdu_done_fail_history_detach() - Free MSDU-done failure history
 * @soc: DP soc handle
 *
 * Return: None
 */
static void dp_soc_msdu_done_fail_history_detach(struct dp_soc *soc)
{
	if (soc->msdu_done_fail_hist)
		qdf_mem_free(soc->msdu_done_fail_hist);
}
#else
static inline void dp_soc_msdu_done_fail_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_msdu_done_fail_history_detach(struct dp_soc *soc)
{
}
#endif

#ifdef DP_RX_PEEK_MSDU_DONE_WAR
/**
 * dp_soc_msdu_done_fail_desc_list_attach() - Init MSDU-done failure desc list
 * @soc: DP soc handle
 *
 * Seeds the list index at DP_MSDU_DONE_FAIL_DESCS_MAX - 1 so the first
 * recorded entry wraps to slot 0.
 *
 * Return: None
 */
static void dp_soc_msdu_done_fail_desc_list_attach(struct dp_soc *soc)
{
	qdf_atomic_init(&soc->msdu_done_fail_desc_list.index);
	qdf_atomic_set(&soc->msdu_done_fail_desc_list.index,
		       DP_MSDU_DONE_FAIL_DESCS_MAX - 1);
}
#else
static void dp_soc_msdu_done_fail_desc_list_attach(struct dp_soc *soc)
{
}
#endif

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
QDF_STATUS
dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_rx_fst *rx_fst = NULL;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	/* for Lithium the below API is not registered
	 * hence fst attach happens for each pdev
	 */
	if (!soc->arch_ops.dp_get_rx_fst)
		return dp_rx_fst_attach(soc, pdev);

	rx_fst = soc->arch_ops.dp_get_rx_fst();

	/* for BE the FST attach is called only once per
	 * ML context. if rx_fst is already registered
	 * increase the ref count and return.
	 */
	if (rx_fst) {
		soc->rx_fst = rx_fst;
		pdev->rx_fst = rx_fst;
		soc->arch_ops.dp_rx_fst_ref();
	} else {
		ret = dp_rx_fst_attach(soc, pdev);
		/* E_NOSUPPORT is not fatal; still publish the FST */
		if ((ret != QDF_STATUS_SUCCESS) &&
		    (ret != QDF_STATUS_E_NOSUPPORT))
			return ret;

		soc->arch_ops.dp_set_rx_fst(soc->rx_fst);
		soc->arch_ops.dp_rx_fst_ref();
	}
	return ret;
}

void
dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_rx_fst *rx_fst = NULL;

	/* for Lithium the below API is not registered
	 * hence fst detach happens for each pdev
	 */
	if (!soc->arch_ops.dp_get_rx_fst) {
		dp_rx_fst_detach(soc, pdev);
		return;
	}

	rx_fst = soc->arch_ops.dp_get_rx_fst();

	/* for BE the FST detach is called only when last
	 * ref count reaches 1.
	 */
	if (rx_fst) {
		/* dp_rx_fst_deref() returns the pre-decrement count;
		 * the last holder (count 1) performs the real detach
		 */
		if (soc->arch_ops.dp_rx_fst_deref() == 1)
			dp_rx_fst_detach(soc, pdev);
	}
	pdev->rx_fst = NULL;
}
#else
QDF_STATUS
dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

void
dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif

/**
 * dp_pdev_attach_wifi3() - attach txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @params: Params for PDEV attach
 *
 * Allocates the pdev context and all per-pdev rings/pools; on any failure
 * the goto ladder below unwinds exactly what was set up so far.
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
				struct cdp_pdev_attach_params *params)
{
	qdf_size_t pdev_context_size;
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = NULL;
	uint8_t pdev_id = params->pdev_id;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	QDF_STATUS ret;

	pdev_context_size =
		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
	if (pdev_context_size)
		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE,
					    pdev_context_size);

	if (!pdev) {
		dp_init_err("%pK: DP PDEV memory allocation failed",
			    soc);
		goto fail0;
	}
	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
			  WLAN_MD_DP_PDEV, "dp_pdev");

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);

	if (!pdev->wlan_cfg_ctx) {
		dp_init_err("%pK: pdev cfg_attach failed", soc);
		goto fail1;
	}

	pdev->soc = soc;
	pdev->pdev_id = pdev_id;
	soc->pdev_list[pdev_id] = pdev;
	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
	soc->pdev_count++;

	dp_ssr_dump_pdev_register(pdev, pdev_id);

	/*sync DP pdev cfg items with profile support after cfg_pdev_attach*/
	wlan_dp_pdev_cfg_sync_profile((struct cdp_soc_t *)soc, pdev_id);

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));

	/* Allocate memory for pdev srng rings */
	if (dp_pdev_srng_alloc(pdev)) {
		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
		goto fail2;
	}

	/* Setup second Rx refill buffer ring */
	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
			    soc);
		goto fail3;
	}

	/* Allocate memory for pdev rxdma rings */
	if (dp_rxdma_ring_alloc(soc, pdev)) {
		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
		goto fail4;
	}

	/* Rx specific init */
	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
		goto fail4;
	}

	if (dp_monitor_pdev_attach(pdev)) {
		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
		goto fail5;
	}

	soc->arch_ops.txrx_pdev_attach(pdev, params);

	/* Setup third Rx refill buffer ring */
	if (dp_setup_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
		dp_init_err("%pK: dp_srng_alloc failed rxrefill3 ring",
			    soc);
		goto fail6;
	}

	ret = dp_rx_fst_attach_wrapper(soc, pdev);
	if ((ret != QDF_STATUS_SUCCESS) && (ret != QDF_STATUS_E_NOSUPPORT)) {
		dp_init_err("%pK: RX FST attach failed: pdev %d err %d",
			    soc, pdev_id, ret);
		goto fail7;
	}

	return QDF_STATUS_SUCCESS;

fail7:
	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
fail6:
	dp_monitor_pdev_detach(pdev);
fail5:
	dp_rx_pdev_desc_pool_free(pdev);
fail4:
	dp_rxdma_ring_free(pdev);
	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
fail3:
	dp_pdev_srng_free(pdev);
fail2:
	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
fail1:
	soc->pdev_list[pdev_id] = NULL;
	/*
	 * NOTE(review): soc->pdev_count was incremented above but is not
	 * decremented on this failure path, while dp_pdev_detach() does
	 * decrement it -- confirm whether soc->pdev_count-- belongs here.
	 */
	qdf_mem_free(pdev);
fail0:
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
 * @pdev: Datapath PDEV handle
 *
 * This is the last chance to flush all pending dp vdevs/peers,
 * some peer/vdev leak case like Non-SSR + peer unmap missing
 * will be covered here.
 *
 * Return: None
 */
static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
	uint32_t i = 0;
	uint32_t num_vdevs = 0;
	struct dp_vdev *vdev = NULL;

	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
		return;

	/* Collect matching vdevs under the lock, flush them after
	 * dropping it (flush may sleep/take other locks)
	 */
	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
		      inactive_list_elem) {
		if (vdev->pdev != pdev)
			continue;

		vdev_arr[num_vdevs] = vdev;
		num_vdevs++;
		/* take reference to free */
		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
	}
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	for (i = 0; i < num_vdevs; i++) {
		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0, 0);
		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
	}
}

#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/**
 * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
 * for enable/disable of HW vdev stats
 * @soc: Datapath soc handle
 * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
 * @enable: flag to represent enable/disable of hw vdev stats
 *
 * Return: none
 */
static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
						   uint8_t pdev_id,
						   bool enable)
{
	/* Check SOC level config for HW offload vdev stats support */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		dp_debug("%pK: HW vdev offload stats is disabled", soc);
		return;
	}

	/* Send HTT command to FW for enable of stats */
	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
}

/**
 * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
 * @soc: Datapath soc handle
 * @pdev_id: pdev_id (0,1,2)
 * @vdev_id_bitmask: bitmask with vdev_id(s) for which stats are to be
 * cleared on HW
 *
 * Return: none
 */
static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
	/* Check SOC level config for HW offload vdev stats support */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		dp_debug("%pK: HW vdev offload stats is disabled", soc);
		return;
	}

	/* Send HTT command to FW for reset of stats */
	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
					 vdev_id_bitmask);
}
#else
static void
dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
				       bool enable)
{
}

static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
}
#endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */

/**
 * dp_pdev_deinit() - Deinit txrx pdev
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Tears down per-pdev state in the reverse order of init; guarded by
 * pdev->pdev_deinit so a repeat call is a no-op.
 *
 * Return: None
 */
static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	qdf_nbuf_t curr_nbuf, next_nbuf;

	if (pdev->pdev_deinit)
		return;

	dp_tx_me_exit(pdev);
	dp_rx_pdev_buffers_free(pdev);
	dp_rx_pdev_desc_pool_deinit(pdev);
	dp_pdev_bkp_stats_detach(pdev);
	qdf_event_destroy(&pdev->fw_peer_stats_event);
	qdf_event_destroy(&pdev->fw_stats_event);
	qdf_event_destroy(&pdev->fw_obss_stats_event);
	if (pdev->sojourn_buf)
		qdf_nbuf_free(pdev->sojourn_buf);

	dp_pdev_flush_pending_vdevs(pdev);
	dp_tx_desc_flush(pdev, NULL, true);

	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);

	dp_monitor_pdev_deinit(pdev);

	dp_pdev_srng_deinit(pdev);

	dp_ipa_uc_detach(pdev->soc, pdev);
	dp_deinit_ipa_rx_alt_refill_buf_ring(pdev->soc, pdev);
	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
	dp_rxdma_ring_cleanup(pdev->soc, pdev);

	/* Drain the invalid-peer MSDU list */
	curr_nbuf = pdev->invalid_peer_head_msdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}
	pdev->invalid_peer_head_msdu = NULL;
	pdev->invalid_peer_tail_msdu = NULL;

	dp_wdi_event_detach(pdev);
	pdev->pdev_deinit = 1;
}

/**
 * dp_pdev_deinit_wifi3() - Deinit txrx pdev
 * @psoc: Datapath psoc handle
 * @pdev_id: Id of datapath PDEV handle
 * @force: Force deinit
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
		     int force)
{
	struct dp_pdev *txrx_pdev;

	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
						       pdev_id);

	if (!txrx_pdev)
		return QDF_STATUS_E_FAILURE;

	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
 * @txrx_pdev: Datapath PDEV handle
 *
 * Return: None
 */
static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;

	dp_monitor_tx_capture_debugfs_init(pdev);

	/* debugfs init failure is logged but not fatal */
	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
	}
}

/**
 * dp_pdev_post_attach_wifi3() - attach txrx pdev post
 * @soc: Datapath soc handle
 * @pdev_id: pdev id of pdev
 *
 * Return: QDF_STATUS
 */
static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
				     uint8_t pdev_id)
{
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);

	if (!pdev) {
		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
			    (struct dp_soc *)soc, pdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	dp_pdev_post_attach((struct cdp_pdev *)pdev);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_pdev_detach() - Complete rest of pdev detach
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Frees everything dp_pdev_attach_wifi3() allocated, in reverse order,
 * and removes the pdev from the soc's pdev list.
 *
 * Return: None
 */
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;

	dp_rx_fst_detach_wrapper(soc, pdev);
	dp_pdev_htt_stats_dbgfs_deinit(pdev);
	dp_rx_pdev_desc_pool_free(pdev);
	dp_monitor_pdev_detach(pdev);
	dp_rxdma_ring_free(pdev);
	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
	dp_pdev_srng_free(pdev);

	soc->pdev_count--;
	soc->pdev_list[pdev->pdev_id] = NULL;

	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
			     WLAN_MD_DP_PDEV, "dp_pdev");
	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
}

/**
 * dp_pdev_detach_wifi3() - detach txrx pdev
 * @psoc: Datapath soc handle
 * @pdev_id: pdev id of pdev
 * @force: Force detach
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
				       int force)
{
	struct dp_pdev *pdev;
	struct dp_soc *soc = (struct dp_soc *)psoc;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
						  pdev_id);

	if (!pdev) {
		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
			    (struct dp_soc *)psoc, pdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	dp_ssr_dump_pdev_unregister(pdev_id);

	soc->arch_ops.txrx_pdev_detach(pdev);

	dp_pdev_detach((struct cdp_pdev *)pdev, force);
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_soc_print_inactive_objects() - dump leaked refs on inactive peers/vdevs.
 * If either inactive list is non-empty, prints every non-zero per-module
 * refcount and then asserts via QDF_BUG(0) -- this path indicates a leak.
 */
void dp_soc_print_inactive_objects(struct dp_soc *soc)
{
	struct dp_peer *peer = NULL;
	struct dp_peer *tmp_peer = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_vdev *tmp_vdev = NULL;
	int i = 0;
	uint32_t count;

	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
	    TAILQ_EMPTY(&soc->inactive_vdev_list))
		return;

	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
			   inactive_list_elem, tmp_peer) {
		for (i = 0; i < DP_MOD_ID_MAX; i++) {
			count = qdf_atomic_read(&peer->mod_refs[i]);
			if (count)
				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
					       peer, i, count);
		}
	}

	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
			   inactive_list_elem, tmp_vdev) {
		for (i = 0; i < DP_MOD_ID_MAX; i++) {
			count = qdf_atomic_read(&vdev->mod_refs[i]);
			if (count)
				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
					       vdev, i, count);
		}
	}
	QDF_BUG(0);
}

/**
 * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * Return: None
 */
static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	soc->arch_ops.txrx_soc_deinit(soc);
}

/**
 * dp_soc_detach() - Detach rest of txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Return: None
 */
static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	soc->arch_ops.txrx_soc_detach(soc);

	/* Unregister SSR dump regions before tearing down their memory */
	qdf_ssr_driver_dump_unregister_region("wlan_cfg_ctx");
	qdf_ssr_driver_dump_unregister_region("dp_soc");
	qdf_ssr_driver_dump_unregister_region("tcl_wbm_map_array");
	qdf_nbuf_ssr_unregister_region();

	dp_runtime_deinit();

	dp_soc_unset_qref_debug_list(soc);
	dp_sysfs_deinitialize_stats(soc);
	dp_soc_swlm_detach(soc);
	dp_soc_tx_desc_sw_pools_free(soc);
	dp_soc_srng_free(soc);
	dp_hw_link_desc_ring_free(soc);
	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
	/* Debug history buffers go last among soc-owned allocations */
	dp_soc_tx_hw_desc_history_detach(soc);
	dp_soc_tx_history_detach(soc);
	dp_soc_mon_status_ring_history_detach(soc);
	dp_soc_rx_history_detach(soc);
	dp_soc_cfg_history_detach(soc);
	dp_soc_msdu_done_fail_history_detach(soc);

	if (!dp_monitor_modularized_enable()) {
		dp_mon_soc_detach_wrapper(soc);
	}

	qdf_mem_free(soc->cdp_soc.ops);
	qdf_mem_common_free(soc);
}

/**
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Return: None
 */
static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
{
	dp_soc_detach(txrx_soc);
}

#ifdef QCA_HOST2FW_RXBUF_RING
#ifdef IPA_WDI3_VLAN_SUPPORT
/*
 * dp_rxdma_setup_refill_ring3() - map the third Rx refill ring (IPA WDI3
 * VLAN) to the target via HTT, only if the ring was actually allocated.
 */
static inline
void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
				 struct dp_pdev *pdev,
				 uint8_t idx)
{
	if (pdev->rx_refill_buf_ring3.hal_srng)
		htt_srng_setup(soc->htt_handle, idx,
			       pdev->rx_refill_buf_ring3.hal_srng,
			       RXDMA_BUF);
}
#else
static inline
void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
				 struct dp_pdev *pdev,
				 uint8_t idx)
{ }
#endif

#ifdef WIFI_MONITOR_SUPPORT
/* dp_lpc_tx_config() - configure local packet capture on the Tx path */
static inline QDF_STATUS dp_lpc_tx_config(struct dp_pdev *pdev)
{
	return dp_local_pkt_capture_tx_config(pdev);
}
#else
static inline QDF_STATUS dp_lpc_tx_config(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_rxdma_ring_config() - configure the RX DMA rings
 * @soc: data path SoC handle
 *
 * This function is used to configure the MAC rings.
 * On MCL host provides buffers in Host2FW ring
 * FW refills (copies) buffers to the ring and updates
 * ring_idx in register
 *
 * Return: zero on success, non-zero on failure
 */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

			/* Primary refill ring for this pdev's lmac */
			htt_srng_setup(soc->htt_handle, i,
				       soc->rx_refill_buf_ring[lmac_id]
				       .hal_srng,
				       RXDMA_BUF);

			/* Second (IPA) refill ring, if allocated */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, i,
					       pdev->rx_refill_buf_ring2
					       .hal_srng,
					       RXDMA_BUF);

			dp_rxdma_setup_refill_ring3(soc, pdev, i);

			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
			/* NOTE(review): return value of dp_lpc_tx_config()
			 * is ignored here -- confirm failure is non-fatal.
			 */
			dp_lpc_tx_config(pdev);
			dp_info("pdev_id %d max_mac_rings %d",
				pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);
				/*
				 * Obtain lmac id from pdev to access the LMAC
				 * ring in soc context
				 */
				lmac_id =
				dp_get_lmac_id_for_pdev_id(soc,
							   mac_id,
							   pdev->pdev_id);
				dp_info("mac_id %d", mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					       pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					       RXDMA_BUF);

				if (!soc->rxdma2sw_rings_not_supported)
					dp_htt_setup_rxdma_err_dst_ring(soc,
						mac_for_pdev, lmac_id);

				/* Configure monitor mode rings */
				status = dp_monitor_htt_srng_setup(soc, pdev,
								   lmac_id,
								   mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}

			}
		}
	}

	dp_reap_timer_init(soc);
	return status;
}
#else
/* This is only for WIN */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;
	int lmac_id;

	/* Configure monitor mode rings */
	dp_monitor_soc_htt_srng_setup(soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		mac_for_pdev = i;
		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rx_refill_buf_ring[lmac_id].
				       hal_srng, RXDMA_BUF);

		/* Configure monitor mode rings */
		dp_monitor_htt_srng_setup(soc, pdev,
					  lmac_id,
					  mac_for_pdev);
		if (!soc->rxdma2sw_rings_not_supported)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
				       RXDMA_DST);
	}

	dp_reap_timer_init(soc);
	return status;
}
#endif

/**
 * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
 *
 * This function is used to configure the FSE HW block in RX OLE on a
 * per pdev basis. Here, we will be programming parameters related to
 * the Flow Search Table.
 *
 * @soc: data path SoC handle
 *
 * Return: zero on success, non-zero on failure
 */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
static QDF_STATUS
dp_rx_target_fst_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		/* Flow search is not enabled if NSS offload is enabled */
		if (pdev &&
		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
			if (status != QDF_STATUS_SUCCESS)
				break;
		}
	}
	return status;
}
#else
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */

#ifdef WLAN_SUPPORT_PPEDS
/**
 * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLe and RxDMA for PPE
 * @soc: DP Tx/Rx handle
 *
 * Return: QDF_STATUS
 */
static
QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
{
	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
	QDF_STATUS status;

	/*
	 * Program RxDMA to override the reo destination indication
	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
	 * thereby driving the packet to REO2PPE ring.
	 * If the MSDU is spanning more than 1 buffer, then this
	 * override is not done.
	 */
	htt_cfg.override = 1;
	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
	htt_cfg.multi_buffer_msdu_override_en = 0;

	/*
	 * Override use_ppe to 0 in RxOLE for the following
	 * cases.
	 */
	htt_cfg.intra_bss_override = 1;
	htt_cfg.decap_raw_override = 1;
	htt_cfg.decap_nwifi_override = 1;
	htt_cfg.ip_frag_override = 1;

	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
	if (status != QDF_STATUS_SUCCESS)
		dp_err("RxOLE and RxDMA PPE config failed %d", status);

	return status;
}

#else
static inline
QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

#endif /* WLAN_SUPPORT_PPEDS */

#ifdef DP_UMAC_HW_RESET_SUPPORT
/*
 * dp_register_umac_reset_handlers() - register a callback for each UMAC
 * reset action the target can request.
 */
static void dp_register_umac_reset_handlers(struct dp_soc *soc)
{
	dp_umac_reset_register_rx_action_callback(soc,
					dp_umac_reset_action_trigger_recovery,
					UMAC_RESET_ACTION_DO_TRIGGER_RECOVERY);

	dp_umac_reset_register_rx_action_callback(soc,
		dp_umac_reset_handle_pre_reset, UMAC_RESET_ACTION_DO_PRE_RESET);

	dp_umac_reset_register_rx_action_callback(soc,
					dp_umac_reset_handle_post_reset,
					UMAC_RESET_ACTION_DO_POST_RESET_START);

	dp_umac_reset_register_rx_action_callback(soc,
				dp_umac_reset_handle_post_reset_complete,
				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);

}
#else
static void dp_register_umac_reset_handlers(struct dp_soc *soc)
{
}
#endif
/**
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @cdp_soc: Opaque Datapath SOC handle
 *
 * Return: zero on success, non-zero on failure
 */
static QDF_STATUS
dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct hal_reo_params reo_params;

	htt_soc_attach_target(soc->htt_handle);

	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
		return status;
	}

	status = dp_rxdma_ring_config(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup messages to target");
		return status;
	}

	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt ring config message to target");
		return status;
	}

	/* E_NOSUPPORT means the target has no UMAC reset -- not an error */
	status = dp_soc_umac_reset_init(cdp_soc);
	if (status != QDF_STATUS_SUCCESS &&
	    status != QDF_STATUS_E_NOSUPPORT) {
		dp_err("Failed to initialize UMAC reset");
		return status;
	}

	dp_register_umac_reset_handlers(soc);

	status = dp_rx_target_fst_config(soc);
	if (status != QDF_STATUS_SUCCESS &&
	    status != QDF_STATUS_E_NOSUPPORT) {
		dp_err("Failed to send htt fst setup config message to target");
		return status;
	}

	DP_STATS_INIT(soc);

	dp_runtime_init(soc);

	/* Enable HW vdev offload stats if feature is supported */
	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	wlan_cfg_soc_update_tgt_params(soc->wlan_cfg_ctx,
				       soc->ctrl_psoc);
	/* Setup HW REO */
	qdf_mem_zero(&reo_params, sizeof(reo_params));

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/*
		 * Reo ring remap is not required if both radios
		 * are offloaded to NSS
		 */

		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
						   &reo_params.remap1,
						   &reo_params.remap2))
			reo_params.rx_hash_enabled = true;
		else
			reo_params.rx_hash_enabled = false;
	}

	/*
	 * set the fragment destination ring
	 */
	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);

	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;

	reo_params.reo_qref = &soc->reo_qref;
	hal_reo_setup(soc->hal_soc, &reo_params, 1);

	hal_reo_set_err_dst_remap(soc->hal_soc);

	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
 * @soc: SoC handle
 * @vdev: vdev handle
 * @vdev_id: vdev_id
 *
 * Return: None
 */
static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
				   struct dp_vdev *vdev,
				   uint8_t vdev_id)
{
	/*
	 * NOTE(review): '<=' permits vdev_id == MAX_VDEV_CNT; if
	 * vdev_id_map[] has exactly MAX_VDEV_CNT entries that index would
	 * be out of bounds below -- confirm whether '<' was intended.
	 */
	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);

	qdf_spin_lock_bh(&soc->vdev_map_lock);

	/* Hold a CONFIG-module reference for the lifetime of the mapping */
	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
			QDF_STATUS_SUCCESS) {
		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
			     soc, vdev, vdev_id);
		qdf_spin_unlock_bh(&soc->vdev_map_lock);
		return;
	}

	if (!soc->vdev_id_map[vdev_id])
		soc->vdev_id_map[vdev_id] = vdev;
	else
		QDF_ASSERT(0);

	qdf_spin_unlock_bh(&soc->vdev_map_lock);
}

/**
 * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
 * @soc: SoC handle
 * @vdev: vdev handle
 *
 * Return: None
 */
static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
				      struct dp_vdev *vdev)
{
	qdf_spin_lock_bh(&soc->vdev_map_lock);
	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);

	soc->vdev_id_map[vdev->vdev_id] = NULL;
	/* Drop the CONFIG-module reference taken in dp_vdev_id_map_tbl_add */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
	qdf_spin_unlock_bh(&soc->vdev_map_lock);
}

/**
 * dp_vdev_pdev_list_add() - add vdev into pdev's list
 * @soc: soc handle
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Return: none
 */
static void dp_vdev_pdev_list_add(struct dp_soc *soc,
				  struct dp_pdev *pdev,
				  struct dp_vdev *vdev)
{
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	if (dp_vdev_get_ref(soc,
/**
 * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
 * @soc: SoC handle
 * @pdev: pdev handle
 * @vdev: VDEV handle
 *
 * Drops the DP_MOD_ID_CONFIG reference taken by dp_vdev_pdev_list_add()
 * when the vdev is found; asserts if the vdev is not on the list.
 *
 * Return: none
 */
static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
				     struct dp_pdev *pdev,
				     struct dp_vdev *vdev)
{
	uint8_t found = 0;
	struct dp_vdev *tmpvdev = NULL;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
		if (tmpvdev == vdev) {
			found = 1;
			break;
		}
	}

	if (found) {
		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
	} else {
		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
			      soc, vdev, pdev, &pdev->vdev_list);
		QDF_ASSERT(0);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/**
 * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol
 * @vdev: Datapath VDEV handle
 *
 * Return: None
 */
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
	vdev->osif_rx_eapol = NULL;
}

/**
 * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
 * @vdev: DP vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: None
 */
static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
}
#else
/* Stubs when EAPOL-over-control-port is compiled out */
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
}

static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_vdev_save_mld_addr() - copy the MLD MAC address, if provided, from
 * vdev_info into the vdev
 * @vdev: DP vdev handle
 * @vdev_info: parameters used for vdev creation
 *
 * Return: None
 */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{
	if (vdev_info->mld_mac_addr)
		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);
}

#ifdef WLAN_MLO_MULTI_CHIP
/**
 * dp_vdev_update_bridge_vdev_param() - mark the vdev as a bridge vdev when
 * the creating caller flagged it as a bridge VAP
 * @vdev: DP vdev handle
 * @vdev_info: parameters used for vdev creation
 *
 * Return: None
 */
static inline void
dp_vdev_update_bridge_vdev_param(struct dp_vdev *vdev,
				 struct cdp_vdev_info *vdev_info)
{
	if (vdev_info->is_bridge_vap)
		vdev->is_bridge_vdev = 1;

	dp_info("is_bridge_link = %d vdev id = %d chip id = %d",
		vdev->is_bridge_vdev, vdev->vdev_id,
		dp_get_chip_id(vdev->pdev->soc));
}
#else
static inline void
dp_vdev_update_bridge_vdev_param(struct dp_vdev *vdev,
				 struct cdp_vdev_info *vdev_info)
{
}
#endif /* WLAN_MLO_MULTI_CHIP */

#else
/* Stubs for non-11be builds: no MLD address, no bridge vdevs */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{

}

static inline void
dp_vdev_update_bridge_vdev_param(struct dp_vdev *vdev,
				 struct cdp_vdev_info *vdev_info)
{
}
#endif

#ifdef DP_TRAFFIC_END_INDICATION
/**
 * dp_tx_vdev_traffic_end_indication_attach() - Initialize data end indication
 *                                              related members in VDEV
 * @vdev: DP vdev handle
 *
 * Return: None
 */
static inline void
dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
{
	qdf_nbuf_queue_init(&vdev->end_ind_pkt_q);
}

/**
 * dp_tx_vdev_traffic_end_indication_detach() - De-init data end indication
 *                                              related members in VDEV
 * @vdev: DP vdev handle
 *
 * Frees every nbuf still queued on the end-indication queue.
 *
 * Return: None
 */
static inline void
dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
{
	qdf_nbuf_t nbuf;

	while ((nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q)) != NULL)
		qdf_nbuf_free(nbuf);
}
#else
/* Stubs when DP_TRAFFIC_END_INDICATION is compiled out */
static inline void
dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
{}

static inline void
dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
{}
#endif

#ifdef WLAN_DP_VDEV_NO_SELF_PEER
static inline bool dp_vdev_self_peer_required(struct dp_soc *soc,
					      struct dp_vdev *vdev)
{
	return false;
}
#else
/* A self peer is created only for STA vdevs on these targets */
static inline bool dp_vdev_self_peer_required(struct dp_soc *soc,
					      struct dp_vdev *vdev)
{
	if (wlan_op_mode_sta == vdev->opmode)
		return true;

	return false;
}
#endif

/**
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @cdp_soc: CDP SoC context
 * @pdev_id: PDEV ID for vdev creation
 * @vdev_info: parameters used for vdev creation
 *
 * Allocates the arch-specific vdev context, initializes callbacks,
 * refcounts and per-vdev state, inserts the vdev into the vdev_id map and
 * the pdev list, and creates a self peer when required. Monitor-mode vdevs
 * take an early-return path after monitor attach.
 *
 * Return: status
 */
static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t pdev_id,
				       struct cdp_vdev_info *vdev_info)
{
	int i = 0;
	qdf_size_t vdev_context_size;
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_vdev *vdev;
	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
	uint8_t vdev_id = vdev_info->vdev_id;
	enum wlan_op_mode op_mode = vdev_info->op_mode;
	enum wlan_op_subtype subtype = vdev_info->subtype;
	enum QDF_OPMODE qdf_opmode = vdev_info->qdf_opmode;
	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;

	vdev_context_size =
		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
	vdev = qdf_mem_malloc(vdev_context_size);

	if (!pdev) {
		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
			    cdp_soc, pdev_id);
		/* qdf_mem_free(NULL) is a no-op, so this is safe either way */
		qdf_mem_free(vdev);
		goto fail0;
	}

	if (!vdev) {
		dp_init_err("%pK: DP VDEV memory allocation failed",
			    cdp_soc);
		goto fail0;
	}

	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
			  WLAN_MD_DP_VDEV, "dp_vdev");

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->vdev_stats_id = vdev_stats_id;
	vdev->opmode = op_mode;
	vdev->subtype = subtype;
	vdev->qdf_opmode = qdf_opmode;
	vdev->osdev = soc->osdev;

	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;
	vdev->sec_type = cdp_sec_type_none;
	vdev->multipass_en = false;
	vdev->wrap_vdev = false;
	dp_vdev_init_rx_eapol(vdev);
	qdf_atomic_init(&vdev->ref_cnt);
	for (i = 0; i < DP_MOD_ID_MAX; i++)
		qdf_atomic_init(&vdev->mod_refs[i]);

	/* Take one reference for create*/
	qdf_atomic_inc(&vdev->ref_cnt);
	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
	vdev->num_peers = 0;
#ifdef notyet
	vdev->filters_num = 0;
#endif
	vdev->lmac_id = pdev->lmac_id;

	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);

	dp_vdev_update_bridge_vdev_param(vdev, vdev_info);
	dp_vdev_save_mld_addr(vdev, vdev_info);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	qdf_spinlock_create(&vdev->peer_list_lock);
	TAILQ_INIT(&vdev->peer_list);
	dp_peer_multipass_list_init(vdev);
	if ((soc->intr_mode == DP_INTR_POLL) &&
	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if ((pdev->vdev_count == 0) ||
		    (wlan_op_mode_monitor == vdev->opmode))
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
		   soc->intr_mode == DP_INTR_MSI &&
		   wlan_op_mode_monitor == vdev->opmode &&
		   !wlan_cfg_get_local_pkt_capture(soc->wlan_cfg_ctx)) {
		/* Timer to reap status ring in mission mode */
		dp_monitor_vdev_timer_start(soc);
	}

	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);

	if (wlan_op_mode_monitor == vdev->opmode) {
		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
			dp_monitor_pdev_set_mon_vdev(vdev);
			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
		}
		return QDF_STATUS_E_FAILURE;
	}

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;
	vdev->igmp_mcast_enhanc_en = 0;
	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
	vdev->prev_tx_enq_tstamp = 0;
	vdev->prev_rx_deliver_tstamp = 0;
	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
	dp_tx_vdev_traffic_end_indication_attach(vdev);

	dp_vdev_pdev_list_add(soc, pdev, vdev);
	pdev->vdev_count++;

	if (wlan_op_mode_sta != vdev->opmode &&
	    wlan_op_mode_ndi != vdev->opmode)
		vdev->ap_bridge_enabled = true;
	else
		vdev->ap_bridge_enabled = false;
	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
		     cdp_soc, vdev->ap_bridge_enabled);

	dp_tx_vdev_attach(vdev);

	dp_monitor_vdev_attach(vdev);
	if (!pdev->is_lro_hash_configured) {
		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
			pdev->is_lro_hash_configured = true;
		else
			dp_err("LRO hash setup failure!");
	}

	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_ATTACH, vdev);
	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT") vdev_id %d", vdev,
		QDF_MAC_ADDR_REF(vdev->mac_addr.raw), vdev->vdev_id);
	DP_STATS_INIT(vdev);

	/*
	 * NOTE(review): on this failure path the vdev has already been
	 * added to the id map and pdev list with references taken;
	 * fail0 returns without undoing them - confirm whether
	 * arch_ops.txrx_vdev_attach failure is expected to leak here.
	 */
	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
		goto fail0;

	if (dp_vdev_self_peer_required(soc, vdev))
		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);

	dp_pdev_update_fast_rx_flag(soc, pdev);

	return QDF_STATUS_SUCCESS;

fail0:
	return QDF_STATUS_E_FAILURE;
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_vdev_fetch_tx_handler() - Fetch Tx handlers
 * @vdev: struct dp_vdev *
 * @soc: struct dp_soc *
 * @ctx: struct ol_txrx_hardtart_ctxt *
 *
 * Selects tx/tx_fast/tx_exception entry points based on mesh mode and the
 * per-packet vdev-id-check config (AP mode only).
 */
static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
					    struct dp_soc *soc,
					    struct ol_txrx_hardtart_ctxt *ctx)
{
	/* Enable vdev_id check only for ap, if flag is enabled */
	if (vdev->mesh_vdev)
		ctx->tx = dp_tx_send_mesh;
	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
		 (vdev->opmode == wlan_op_mode_ap)) {
		ctx->tx = dp_tx_send_vdev_id_check;
		ctx->tx_fast = dp_tx_send_vdev_id_check;
	} else {
		ctx->tx = dp_tx_send;
		ctx->tx_fast = soc->arch_ops.dp_tx_send_fast;
	}

	/* Avoid check in regular exception Path */
	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
	    (vdev->opmode == wlan_op_mode_ap))
		ctx->tx_exception = dp_tx_send_exception_vdev_id_check;
	else
		ctx->tx_exception = dp_tx_send_exception;
}

/**
 * dp_vdev_register_tx_handler() - Register Tx handler
 * @vdev: struct dp_vdev *
 * @soc: struct dp_soc *
 * @txrx_ops: struct ol_txrx_ops *
 */
static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
					       struct dp_soc *soc,
					       struct ol_txrx_ops *txrx_ops)
{
	struct ol_txrx_hardtart_ctxt ctx = {0};

	dp_vdev_fetch_tx_handler(vdev, soc, &ctx);

	txrx_ops->tx.tx = ctx.tx;
	txrx_ops->tx.tx_fast = ctx.tx_fast;
	txrx_ops->tx.tx_exception = ctx.tx_exception;

	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
		vdev->opmode, vdev->vdev_id);
}
#else /* QCA_HOST_MODE_WIFI_DISABLED */
static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
					       struct dp_soc *soc,
					       struct ol_txrx_ops *txrx_ops)
{
}

static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
					    struct dp_soc *soc,
					    struct ol_txrx_hardtart_ctxt *ctx)
{
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Copies the OS-interface rx/tx callbacks from @txrx_ops into the vdev and
 * installs the selected tx handlers. The vdev reference taken for lookup is
 * released before returning.
 *
 * Return: DP VDEV handle on success, NULL on failure
 */
static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t vdev_id,
					 ol_osif_vdev_handle osif_vdev,
					 struct ol_txrx_ops *txrx_ops)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev->osif_vdev = osif_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
	vdev->osif_get_key = txrx_ops->get_key;
	dp_monitor_vdev_register_osif(vdev, txrx_ops);
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
	vdev->tx_comp = txrx_ops->tx.tx_comp;
	vdev->stats_cb = txrx_ops->rx.stats_rx;
	vdev->tx_classify_critical_pkt_cb =
		txrx_ops->tx.tx_classify_critical_pkt_cb;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;
	vdev->get_tsf_time = txrx_ops->get_tsf_time;

	dp_vdev_register_rx_eapol(vdev, txrx_ops);

	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);

	dp_init_info("%pK: DP Vdev Register success", soc);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
vdev->tx_classify_critical_pkt_cb = 4976 txrx_ops->tx.tx_classify_critical_pkt_cb; 4977 #ifdef notyet 4978 #if ATH_SUPPORT_WAPI 4979 vdev->osif_check_wai = txrx_ops->rx.wai_check; 4980 #endif 4981 #endif 4982 #ifdef UMAC_SUPPORT_PROXY_ARP 4983 vdev->osif_proxy_arp = txrx_ops->proxy_arp; 4984 #endif 4985 vdev->me_convert = txrx_ops->me_convert; 4986 vdev->get_tsf_time = txrx_ops->get_tsf_time; 4987 4988 dp_vdev_register_rx_eapol(vdev, txrx_ops); 4989 4990 dp_vdev_register_tx_handler(vdev, soc, txrx_ops); 4991 4992 dp_init_info("%pK: DP Vdev Register success", soc); 4993 4994 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 4995 return QDF_STATUS_SUCCESS; 4996 } 4997 4998 #ifdef WLAN_FEATURE_11BE_MLO 4999 void dp_peer_delete(struct dp_soc *soc, 5000 struct dp_peer *peer, 5001 void *arg) 5002 { 5003 if (!peer->valid) 5004 return; 5005 5006 dp_peer_delete_wifi3((struct cdp_soc_t *)soc, 5007 peer->vdev->vdev_id, 5008 peer->mac_addr.raw, 0, 5009 peer->peer_type); 5010 } 5011 #else 5012 void dp_peer_delete(struct dp_soc *soc, 5013 struct dp_peer *peer, 5014 void *arg) 5015 { 5016 if (!peer->valid) 5017 return; 5018 5019 dp_peer_delete_wifi3((struct cdp_soc_t *)soc, 5020 peer->vdev->vdev_id, 5021 peer->mac_addr.raw, 0, 5022 CDP_LINK_PEER_TYPE); 5023 } 5024 #endif 5025 5026 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 5027 static uint8_t 5028 dp_mlo_get_num_link_peer(struct dp_soc *soc, struct dp_peer *peer) 5029 { 5030 if (soc->cdp_soc.ol_ops->peer_get_num_mlo_links) 5031 return soc->cdp_soc.ol_ops->peer_get_num_mlo_links( 5032 soc->ctrl_psoc, 5033 peer->vdev->vdev_id, 5034 peer->mac_addr.raw, 5035 IS_MLO_DP_MLD_PEER(peer)); 5036 5037 return 0; 5038 } 5039 5040 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg) 5041 { 5042 if (!peer->valid) 5043 return; 5044 5045 /* skip deleting the SLO peers */ 5046 if (dp_mlo_get_num_link_peer(soc, peer) == 1) 5047 return; 5048 5049 if (IS_MLO_DP_LINK_PEER(peer)) 5050 
/**
 * dp_mlo_link_peer_flush() - flush all the link peers
 * @soc: Datapath soc handle
 * @peer: DP peer handle to be checked
 *
 * For an MLD peer, deletes and unmaps every associated link peer on its
 * own soc. No-op for non-MLD peers.
 *
 * Return: None
 */
static void dp_mlo_link_peer_flush(struct dp_soc *soc, struct dp_peer *peer)
{
	int cnt = 0;
	struct dp_peer *link_peer = NULL;
	struct dp_mld_link_peers link_peers_info = {NULL};

	if (!IS_MLO_DP_MLD_PEER(peer))
		return;

	/* get link peers with reference */
	dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
					    DP_MOD_ID_CDP);
	for (cnt = 0; cnt < link_peers_info.num_links; cnt++) {
		link_peer = link_peers_info.link_peers[cnt];
		if (!link_peer)
			continue;

		/* delete all the link peers */
		dp_mlo_peer_delete(link_peer->vdev->pdev->soc, link_peer, NULL);
		/* unmap all the link peers */
		dp_rx_peer_unmap_handler(link_peer->vdev->pdev->soc,
					 link_peer->peer_id,
					 link_peer->vdev->vdev_id,
					 link_peer->mac_addr.raw, 0,
					 DP_PEER_WDS_COUNT_INVALID);
	}
	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
}
#else
/* Stubs for builds without multi-chip MLO support */
static uint8_t
dp_mlo_get_num_link_peer(struct dp_soc *soc, struct dp_peer *peer)
{
	return 0;
}

void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
}

static void dp_mlo_link_peer_flush(struct dp_soc *soc, struct dp_peer *peer)
{
}
#endif
/**
 * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
 * @vdev_handle: Datapath VDEV handle
 * @unmap_only: Flag to indicate "only unmap"
 * @mlo_peers_only: true if only MLO peers should be flushed
 *
 * Deletes (unless @unmap_only) and unmaps every peer belonging to @vdev_handle
 * by scanning the soc-wide peer-id table. Used when the target is hung or in
 * reset and unmap events will never arrive from firmware.
 *
 * Return: void
 */
static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
				bool unmap_only,
				bool mlo_peers_only)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_peer *peer;
	uint32_t i = 0;


	if (!unmap_only) {
		if (!mlo_peers_only)
			dp_vdev_iterate_peer_lock_safe(vdev,
						       dp_peer_delete,
						       NULL,
						       DP_MOD_ID_CDP);
		else
			dp_vdev_iterate_peer_lock_safe(vdev,
						       dp_mlo_peer_delete,
						       NULL,
						       DP_MOD_ID_CDP);
	}

	for (i = 0; i < soc->max_peer_id ; i++) {
		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);

		if (!peer)
			continue;

		if (peer->vdev != vdev) {
			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
			continue;
		}

		if (!mlo_peers_only) {
			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
			dp_mlo_link_peer_flush(soc, peer);
			dp_rx_peer_unmap_handler(soc, i,
						 vdev->vdev_id,
						 peer->mac_addr.raw, 0,
						 DP_PEER_WDS_COUNT_INVALID);
			if (!IS_MLO_DP_MLD_PEER(peer))
				SET_PEER_REF_CNT_ONE(peer);
		} else if (IS_MLO_DP_LINK_PEER(peer) ||
			   IS_MLO_DP_MLD_PEER(peer)) {
			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
				QDF_MAC_ADDR_REF(peer->mac_addr.raw));

			/* skip deleting the SLO peers */
			if (dp_mlo_get_num_link_peer(soc, peer) == 1) {
				dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
				continue;
			}

			dp_mlo_link_peer_flush(soc, peer);
			dp_rx_peer_unmap_handler(soc, i,
						 vdev->vdev_id,
						 peer->mac_addr.raw, 0,
						 DP_PEER_WDS_COUNT_INVALID);
			if (!IS_MLO_DP_MLD_PEER(peer))
				SET_PEER_REF_CNT_ONE(peer);
		}

		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}
}

#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/**
 * dp_txrx_alloc_vdev_stats_id()- Allocate vdev_stats_id
 * @soc_hdl: Datapath soc handle
 * @vdev_stats_id: Address of vdev_stats_id
 *
 * Atomically claims the first free id in soc->vdev_stats_id_map; writes
 * CDP_INVALID_VDEV_STATS_ID when the feature is disabled or the map is full.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
					      uint8_t *vdev_stats_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	uint8_t id = 0;

	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
		return QDF_STATUS_E_FAILURE;
	}

	while (id < CDP_MAX_VDEV_STATS_ID) {
		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
			*vdev_stats_id = id;
			return QDF_STATUS_SUCCESS;
		}
		id++;
	}

	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
	return QDF_STATUS_E_FAILURE;
}
/**
 * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
 * @soc_hdl: Datapath soc handle
 * @vdev_stats_id: vdev_stats_id to reset in dp_soc
 *
 * Return: none
 */
static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
					uint8_t vdev_stats_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	/* nothing to clear if the feature is off or the id was never valid */
	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
		return;

	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
}
#else
static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
					uint8_t vdev_stats_id)
{}
#endif
/**
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @cdp_soc: Datapath soc handle
 * @vdev_id: VDEV Id
 * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 * Deletes the STA self peer (if any), flushes peers when the target is hung
 * or in reset, removes the vdev from the id map and pdev list, and moves it
 * to the inactive list; the final free happens when the last reference drops.
 */
static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t vdev_id,
				       ol_txrx_vdev_delete_cb callback,
				       void *cb_context)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_peer *vap_self_peer = NULL;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	soc->arch_ops.txrx_vdev_detach(soc, vdev);

	pdev = vdev->pdev;

	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
							DP_MOD_ID_CONFIG);
	if (vap_self_peer) {
		qdf_spin_lock_bh(&soc->ast_lock);
		if (vap_self_peer->self_ast_entry) {
			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
			vap_self_peer->self_ast_entry = NULL;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);

		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
				     vap_self_peer->mac_addr.raw, 0,
				     CDP_LINK_PEER_TYPE);
		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
	}

	/*
	 * If Target is hung, flush all peers before detaching vdev
	 * this will free all references held due to missing
	 * unmap commands from Target
	 */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, false);
	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true, false);

	/* indicate that the vdev needs to be deleted */
	vdev->delete.pending = 1;
	dp_rx_vdev_detach(vdev);
	/*
	 * move it after dp_rx_vdev_detach(),
	 * as the call back done in dp_rx_vdev_detach()
	 * still need to get vdev pointer by vdev_id.
	 */
	dp_vdev_id_map_tbl_remove(soc, vdev);

	/* NOTE(review): 'peer' is always NULL here - confirm the
	 * neighbour-peer removal call intends a NULL peer argument.
	 */
	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);

	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);

	dp_tx_vdev_multipass_deinit(vdev);
	dp_tx_vdev_traffic_end_indication_detach(vdev);

	if (vdev->vdev_dp_ext_handle) {
		qdf_mem_free(vdev->vdev_dp_ext_handle);
		vdev->vdev_dp_ext_handle = NULL;
	}
	vdev->delete.callback = callback;
	vdev->delete.context = cb_context;

	if (vdev->opmode != wlan_op_mode_monitor)
		dp_vdev_pdev_list_remove(soc, pdev, vdev);

	pdev->vdev_count--;
	/* release reference taken above for find */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_DETACH, vdev);
	dp_info("detach vdev %pK id %d pending refs %d",
		vdev, vdev->vdev_id, qdf_atomic_read(&vdev->ref_cnt));

	/* release reference taken at dp_vdev_create */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * is_dp_peer_can_reuse() - check if the dp_peer match condition to be reused
 * @vdev: Target DP vdev handle
 * @peer: DP peer handle to be checked
 * @peer_mac_addr: Target peer mac address
 * @peer_type: Target peer type
 *
 * MLO build: the peer type must also match for reuse.
 *
 * Return: true - if match, false - not match
 */
static inline
bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
			  struct dp_peer *peer,
			  uint8_t *peer_mac_addr,
			  enum cdp_peer_type peer_type)
{
	if (peer->bss_peer && (peer->vdev == vdev) &&
	    (peer->peer_type == peer_type) &&
	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
			 QDF_MAC_ADDR_SIZE) == 0))
		return true;

	return false;
}
return false; 5358 } 5359 #else 5360 static inline 5361 bool is_dp_peer_can_reuse(struct dp_vdev *vdev, 5362 struct dp_peer *peer, 5363 uint8_t *peer_mac_addr, 5364 enum cdp_peer_type peer_type) 5365 { 5366 if (peer->bss_peer && (peer->vdev == vdev) && 5367 (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw, 5368 QDF_MAC_ADDR_SIZE) == 0)) 5369 return true; 5370 5371 return false; 5372 } 5373 #endif 5374 5375 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev, 5376 uint8_t *peer_mac_addr, 5377 enum cdp_peer_type peer_type) 5378 { 5379 struct dp_peer *peer; 5380 struct dp_soc *soc = vdev->pdev->soc; 5381 5382 qdf_spin_lock_bh(&soc->inactive_peer_list_lock); 5383 TAILQ_FOREACH(peer, &soc->inactive_peer_list, 5384 inactive_list_elem) { 5385 5386 /* reuse bss peer only when vdev matches*/ 5387 if (is_dp_peer_can_reuse(vdev, peer, 5388 peer_mac_addr, peer_type)) { 5389 /* increment ref count for cdp_peer_create*/ 5390 if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) == 5391 QDF_STATUS_SUCCESS) { 5392 TAILQ_REMOVE(&soc->inactive_peer_list, peer, 5393 inactive_list_elem); 5394 qdf_spin_unlock_bh 5395 (&soc->inactive_peer_list_lock); 5396 return peer; 5397 } 5398 } 5399 } 5400 5401 qdf_spin_unlock_bh(&soc->inactive_peer_list_lock); 5402 return NULL; 5403 } 5404 5405 #ifdef FEATURE_AST 5406 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc, 5407 struct dp_pdev *pdev, 5408 uint8_t *peer_mac_addr) 5409 { 5410 struct dp_ast_entry *ast_entry; 5411 5412 if (soc->ast_offload_support) 5413 return; 5414 5415 qdf_spin_lock_bh(&soc->ast_lock); 5416 if (soc->ast_override_support) 5417 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr, 5418 pdev->pdev_id); 5419 else 5420 ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr); 5421 5422 if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress) 5423 dp_peer_del_ast(soc, ast_entry); 5424 5425 qdf_spin_unlock_bh(&soc->ast_lock); 5426 } 5427 #else 5428 static inline void 
dp_peer_ast_handle_roam_del(struct dp_soc *soc, 5429 struct dp_pdev *pdev, 5430 uint8_t *peer_mac_addr) 5431 { 5432 } 5433 #endif 5434 5435 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT 5436 /** 5437 * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_peer 5438 * @soc: Datapath soc handle 5439 * @txrx_peer: Datapath peer handle 5440 * 5441 * Return: none 5442 */ 5443 static inline 5444 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc, 5445 struct dp_txrx_peer *txrx_peer) 5446 { 5447 txrx_peer->hw_txrx_stats_en = 5448 wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx); 5449 } 5450 #else 5451 static inline 5452 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc, 5453 struct dp_txrx_peer *txrx_peer) 5454 { 5455 txrx_peer->hw_txrx_stats_en = 0; 5456 } 5457 #endif 5458 5459 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer) 5460 { 5461 struct dp_txrx_peer *txrx_peer; 5462 struct dp_pdev *pdev; 5463 struct cdp_txrx_peer_params_update params = {0}; 5464 5465 /* dp_txrx_peer exists for mld peer and legacy peer */ 5466 if (peer->txrx_peer) { 5467 txrx_peer = peer->txrx_peer; 5468 peer->txrx_peer = NULL; 5469 pdev = txrx_peer->vdev->pdev; 5470 5471 if ((peer->vdev->opmode != wlan_op_mode_sta) && 5472 !peer->bss_peer) { 5473 params.vdev_id = peer->vdev->vdev_id; 5474 params.peer_mac = peer->mac_addr.raw; 5475 5476 dp_wdi_event_handler(WDI_EVENT_PEER_DELETE, soc, 5477 (void *)¶ms, peer->peer_id, 5478 WDI_NO_VAL, pdev->pdev_id); 5479 } 5480 5481 dp_peer_defrag_rx_tids_deinit(txrx_peer); 5482 /* 5483 * Deallocate the extended stats contenxt 5484 */ 5485 dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer); 5486 dp_peer_rx_bufq_resources_deinit(txrx_peer); 5487 dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer); 5488 dp_peer_sawf_stats_ctx_free(soc, txrx_peer); 5489 5490 qdf_mem_free(txrx_peer); 5491 } 5492 5493 return QDF_STATUS_SUCCESS; 5494 } 5495 5496 static inline 5497 uint8_t dp_txrx_peer_calculate_stats_size(struct dp_soc *soc, 
					  struct dp_peer *peer)
{
	if ((wlan_cfg_is_peer_link_stats_enabled(soc->wlan_cfg_ctx)) &&
	    IS_MLO_DP_MLD_PEER(peer)) {
		return (DP_MAX_MLO_LINKS + 1);
	}
	return 1;
}

/**
 * dp_txrx_peer_attach() - Allocate and attach a dp_txrx_peer to a dp_peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer the new txrx_peer is attached to
 *
 * Allocates the txrx_peer together with its trailing variable-size
 * stats array, initializes the per-peer stats/defrag/isolation state
 * and, for non-STA, non-BSS peers, announces the peer via
 * WDI_EVENT_TXRX_PEER_CREATE.  Failures of the optional delay/jitter/
 * sawf stats contexts are logged but non-fatal.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM if the
 *	   txrx_peer allocation fails
 */
static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
{
	struct dp_txrx_peer *txrx_peer;
	struct dp_pdev *pdev;
	struct cdp_txrx_peer_params_update params = {0};
	uint8_t stats_arr_size = 0;

	stats_arr_size = dp_txrx_peer_calculate_stats_size(soc, peer);

	/* single allocation: struct + flexible stats array */
	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer) +
							  (stats_arr_size *
							   sizeof(struct dp_peer_stats)));

	if (!txrx_peer)
		return QDF_STATUS_E_NOMEM; /* failure */

	txrx_peer->peer_id = HTT_INVALID_PEER;
	/* initialize the peer_id */
	txrx_peer->vdev = peer->vdev;
	pdev = peer->vdev->pdev;
	txrx_peer->stats_arr_size = stats_arr_size;

	DP_TXRX_PEER_STATS_INIT(txrx_peer,
				(txrx_peer->stats_arr_size *
				 sizeof(struct dp_peer_stats)));

	if (!IS_DP_LEGACY_PEER(peer))
		txrx_peer->is_mld_peer = 1;

	dp_wds_ext_peer_init(txrx_peer);
	dp_peer_rx_bufq_resources_init(txrx_peer);
	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
	/*
	 * Allocate peer extended stats context. Fall through in
	 * case of failure as its not an implicit requirement to have
	 * this object for regular statistics updates.
	 */
	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
	    QDF_STATUS_SUCCESS)
		dp_warn("peer delay_stats ctx alloc failed");

	/*
	 * Allocate memory for jitter stats. Fall through in
	 * case of failure as its not an implicit requirement to have
	 * this object for regular statistics updates.
	 */
	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
	    QDF_STATUS_SUCCESS)
		dp_warn("peer jitter_stats ctx alloc failed");

	dp_set_peer_isolation(txrx_peer, false);

	dp_peer_defrag_rx_tids_init(txrx_peer);

	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
		dp_warn("peer sawf stats alloc failed");

	dp_txrx_peer_attach_add(soc, peer, txrx_peer);

	/* no WDI create event for STA-mode or BSS peers */
	if ((peer->vdev->opmode == wlan_op_mode_sta) || peer->bss_peer)
		return QDF_STATUS_SUCCESS;

	params.peer_mac = peer->mac_addr.raw;
	params.vdev_id = peer->vdev->vdev_id;
	params.chip_id = dp_get_chip_id(soc);
	params.pdev_id = peer->vdev->pdev->pdev_id;

	dp_wdi_event_handler(WDI_EVENT_TXRX_PEER_CREATE, soc,
			     (void *)&params, peer->peer_id,
			     WDI_NO_VAL, params.pdev_id);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_peer_stats_clr() - Reset all cumulative stats of a txrx_peer
 * @txrx_peer: Datapath txrx peer handle (may be NULL; then a no-op)
 *
 * Clears the tx-completion/to-stack counters, the per-link stats array
 * and the delay/jitter stats contexts.  Used when a peer object is
 * reused for a fresh association.
 *
 * Return: None
 */
static inline
void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
{
	if (!txrx_peer)
		return;

	txrx_peer->tx_failed = 0;
	txrx_peer->comp_pkt.num = 0;
	txrx_peer->comp_pkt.bytes = 0;
	txrx_peer->to_stack.num = 0;
	txrx_peer->to_stack.bytes = 0;

	DP_TXRX_PEER_STATS_CLR(txrx_peer,
			       (txrx_peer->stats_arr_size *
				sizeof(struct dp_peer_stats)));
	dp_peer_delay_stats_ctx_clr(txrx_peer);
	dp_peer_jitter_stats_ctx_clr(txrx_peer);
}

#if defined WLAN_FEATURE_11BE_MLO && defined DP_MLO_LINK_STATS_SUPPORT
/**
 * dp_txrx_peer_reset_local_link_id() - Reset local link id
 * @txrx_peer: txrx peer handle
 *
 * Return: None
 */
static inline void
dp_txrx_peer_reset_local_link_id(struct dp_txrx_peer *txrx_peer)
{
	int i;

	/* NOTE(review): writes DP_MAX_MLO_LINKS + 1 entries (<=), which
	 * assumes ll_band[] is sized DP_MAX_MLO_LINKS + 1 — confirm
	 * against the dp_txrx_peer declaration in dp_types.h.
	 */
	for (i = 0; i <= DP_MAX_MLO_LINKS; i++)
		txrx_peer->ll_band[i] = DP_BAND_INVALID;
}
#else
static inline void
dp_txrx_peer_reset_local_link_id(struct dp_txrx_peer *txrx_peer)
{
}
#endif

/**
 * dp_peer_create_wifi3() - attach txrx peer
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @peer_mac_addr: Peer MAC address
 * @peer_type: link or MLD peer type
 *
 * Reuses an existing inactive peer with the same MAC when possible,
 * otherwise allocates and initializes a fresh dp_peer.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
 */
static QDF_STATUS
dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
{
	struct dp_peer *peer;
	int i;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
	struct dp_vdev *vdev = NULL;

	if (!peer_mac_addr)
		return QDF_STATUS_E_FAILURE;

	/* hold a vdev ref for the duration of the create */
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;
	soc = pdev->soc;

	/*
	 * If a peer entry with given MAC address already exists,
	 * reuse the peer and reset the state of peer.
	 */
	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);

	if (peer) {
		/* ---- reuse path: re-initialize the recycled peer ---- */
		qdf_atomic_init(&peer->is_default_route_set);
		dp_peer_cleanup(vdev, peer);

		dp_peer_vdev_list_add(soc, vdev, peer);
		dp_peer_find_hash_add(soc, peer);

		if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS) {
			dp_alert("RX tid alloc fail for peer %pK (" QDF_MAC_ADDR_FMT ")",
				 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
			return QDF_STATUS_E_FAILURE;
		}

		if (IS_MLO_DP_MLD_PEER(peer))
			dp_mld_peer_init_link_peers_info(peer);

		/* stale AST entries from the previous association */
		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_delete_ast_entries(soc, peer);
		qdf_spin_unlock_bh(&soc->ast_lock);

		/* STA self-peer gets a SELF AST entry, not STATIC */
		if ((vdev->opmode == wlan_op_mode_sta) &&
		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
				 QDF_MAC_ADDR_SIZE)) {
			ast_type = CDP_TXRX_AST_TYPE_SELF;
		}
		dp_peer_add_ast(soc, peer, peer_mac_addr,
				ast_type, 0);

		peer->valid = 1;
		peer->is_tdls_peer = false;
		dp_local_peer_id_alloc(pdev, peer);

		qdf_spinlock_create(&peer->peer_info_lock);

		DP_STATS_INIT(peer);

		/*
		 * In tx_monitor mode, filter may be set for unassociated peer
		 * when unassociated peer get associated peer need to
		 * update tx_cap_enabled flag to support peer filter.
		 */
		if (!IS_MLO_DP_MLD_PEER(peer)) {
			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
			dp_monitor_peer_reset_stats(soc, peer);
		}

		if (peer->txrx_peer) {
			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
			dp_txrx_peer_stats_clr(peer->txrx_peer);
			dp_set_peer_isolation(peer->txrx_peer, false);
			dp_wds_ext_peer_init(peer->txrx_peer);
			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
			dp_txrx_peer_reset_local_link_id(peer->txrx_peer);
		}

		dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_CREATE,
					     peer, vdev, 1);
		dp_info("vdev %pK Reused peer %pK ("QDF_MAC_ADDR_FMT
			") vdev_ref_cnt "
			"%d peer_ref_cnt: %d",
			vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			qdf_atomic_read(&vdev->ref_cnt),
			qdf_atomic_read(&peer->ref_cnt));
		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);

		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_SUCCESS;
	} else {
		/*
		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
		 * need to remove the AST entry which was earlier added as a WDS
		 * entry.
		 * If an AST entry exists, but no peer entry exists with a given
		 * MAC addresses, we could deduce it as a WDS entry
		 */
		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
	}

	/* ---- fresh-allocation path ---- */
#ifdef notyet
	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
		soc->mempool_ol_ath_peer);
#else
	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
#endif
	/* NOTE(review): minidump registration happens before the NULL
	 * check — presumably wlan_minidump_log() tolerates NULL; confirm.
	 */
	wlan_minidump_log(peer,
			  sizeof(*peer),
			  soc->ctrl_psoc,
			  WLAN_MD_DP_PEER, "dp_peer");
	if (!peer) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE; /* failure */
	}

	qdf_mem_zero(peer, sizeof(struct dp_peer));

	/* store provided params */
	peer->vdev = vdev;

	/* initialize the peer_id */
	peer->peer_id = HTT_INVALID_PEER;

	qdf_mem_copy(
		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);

	DP_PEER_SET_TYPE(peer, peer_type);
	if (IS_MLO_DP_MLD_PEER(peer)) {
		if (dp_txrx_peer_attach(soc, peer) !=
				QDF_STATUS_SUCCESS)
			goto fail; /* failure */

		dp_mld_peer_init_link_peers_info(peer);
	}

	if (dp_monitor_peer_attach(soc, peer) != QDF_STATUS_SUCCESS)
		dp_warn("peer monitor ctx alloc failed");

	TAILQ_INIT(&peer->ast_entry_list);

	/* get the vdev reference for new peer */
	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);

	if ((vdev->opmode == wlan_op_mode_sta) &&
	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
			 QDF_MAC_ADDR_SIZE)) {
		ast_type = CDP_TXRX_AST_TYPE_SELF;
	}
	qdf_spinlock_create(&peer->peer_state_lock);
	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
	qdf_spinlock_create(&peer->peer_info_lock);

	/* reset the ast index to flowid table */
	dp_peer_reset_flowq_map(peer);

	qdf_atomic_init(&peer->ref_cnt);

	for (i = 0; i < DP_MOD_ID_MAX; i++)
		qdf_atomic_init(&peer->mod_refs[i]);

	/* keep one
	 * reference for attach */
	qdf_atomic_inc(&peer->ref_cnt);
	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);

	dp_peer_vdev_list_add(soc, vdev, peer);

	/* TODO: See if hash based search is required */
	dp_peer_find_hash_add(soc, peer);

	/* Initialize the peer state */
	peer->state = OL_TXRX_PEER_STATE_DISC;

	dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_CREATE,
				     peer, vdev, 0);
	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") vdev_ref_cnt "
		"%d peer_ref_cnt: %d",
		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		qdf_atomic_read(&vdev->ref_cnt),
		qdf_atomic_read(&peer->ref_cnt));
	/*
	 * For every peer MAP message search and set if bss_peer
	 */
	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
			QDF_MAC_ADDR_SIZE) == 0 &&
	    (wlan_op_mode_sta != vdev->opmode)) {
		dp_info("vdev bss_peer!!");
		peer->bss_peer = 1;
		if (peer->txrx_peer)
			peer->txrx_peer->bss_peer = 1;
	}

	/* STA whose MAC equals the vdev MAC is the self peer */
	if (wlan_op_mode_sta == vdev->opmode &&
	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
			QDF_MAC_ADDR_SIZE) == 0) {
		peer->sta_self_peer = 1;
	}

	if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS) {
		dp_alert("RX tid alloc fail for peer %pK (" QDF_MAC_ADDR_FMT ")",
			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		goto fail;
	}

	peer->valid = 1;
	dp_local_peer_id_alloc(pdev, peer);
	DP_STATS_INIT(peer);

	if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS)
		dp_warn("peer sawf context alloc failed");

	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
fail:
	qdf_mem_free(peer);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_peer_legacy_setup() - Attach a txrx_peer for a legacy (non-MLO) peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 *
 * Return: QDF_STATUS_SUCCESS on success or if a txrx_peer already
 *	   exists (peer reuse), QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
5860 { 5861 /* txrx_peer might exist already in peer reuse case */ 5862 if (peer->txrx_peer) 5863 return QDF_STATUS_SUCCESS; 5864 5865 if (dp_txrx_peer_attach(soc, peer) != 5866 QDF_STATUS_SUCCESS) { 5867 dp_err("peer txrx ctx alloc failed"); 5868 return QDF_STATUS_E_FAILURE; 5869 } 5870 5871 return QDF_STATUS_SUCCESS; 5872 } 5873 5874 #ifdef WLAN_FEATURE_11BE_MLO 5875 static QDF_STATUS dp_mld_peer_change_vdev(struct dp_soc *soc, 5876 struct dp_peer *mld_peer, 5877 uint8_t new_vdev_id) 5878 { 5879 struct dp_vdev *prev_vdev; 5880 5881 prev_vdev = mld_peer->vdev; 5882 /* release the ref to original dp_vdev */ 5883 dp_vdev_unref_delete(soc, mld_peer->vdev, 5884 DP_MOD_ID_CHILD); 5885 /* 5886 * get the ref to new dp_vdev, 5887 * increase dp_vdev ref_cnt 5888 */ 5889 mld_peer->vdev = dp_vdev_get_ref_by_id(soc, new_vdev_id, 5890 DP_MOD_ID_CHILD); 5891 mld_peer->txrx_peer->vdev = mld_peer->vdev; 5892 5893 dp_info("Change vdev for ML peer " QDF_MAC_ADDR_FMT 5894 " old vdev %pK id %d new vdev %pK id %d", 5895 QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw), 5896 prev_vdev, prev_vdev->vdev_id, mld_peer->vdev, new_vdev_id); 5897 5898 dp_cfg_event_record_mlo_setup_vdev_update_evt( 5899 soc, mld_peer, prev_vdev, 5900 mld_peer->vdev); 5901 5902 return QDF_STATUS_SUCCESS; 5903 } 5904 5905 QDF_STATUS dp_peer_mlo_setup( 5906 struct dp_soc *soc, 5907 struct dp_peer *peer, 5908 uint8_t vdev_id, 5909 struct cdp_peer_setup_info *setup_info) 5910 { 5911 struct dp_peer *mld_peer = NULL; 5912 struct cdp_txrx_peer_params_update params = {0}; 5913 5914 /* Non-MLO connection */ 5915 if (!setup_info || !setup_info->mld_peer_mac) { 5916 /* To handle downgrade scenarios */ 5917 if (peer->vdev->opmode == wlan_op_mode_sta) { 5918 struct cdp_txrx_peer_params_update params = {0}; 5919 5920 params.chip_id = dp_get_chip_id(soc); 5921 params.pdev_id = peer->vdev->pdev->pdev_id; 5922 params.vdev_id = peer->vdev->vdev_id; 5923 5924 dp_wdi_event_handler( 5925 WDI_EVENT_STA_PRIMARY_UMAC_UPDATE, 5926 soc, 5927 
(void *)¶ms, peer->peer_id, 5928 WDI_NO_VAL, params.pdev_id); 5929 } 5930 return QDF_STATUS_SUCCESS; 5931 } 5932 5933 dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_MLO_SETUP, 5934 peer, NULL, vdev_id, setup_info); 5935 5936 /* if this is the first link peer */ 5937 if (setup_info->is_first_link) 5938 /* create MLD peer */ 5939 dp_peer_create_wifi3((struct cdp_soc_t *)soc, 5940 vdev_id, 5941 setup_info->mld_peer_mac, 5942 CDP_MLD_PEER_TYPE); 5943 5944 if (peer->vdev->opmode == wlan_op_mode_sta && 5945 setup_info->is_primary_link) { 5946 struct cdp_txrx_peer_params_update params = {0}; 5947 5948 params.chip_id = dp_get_chip_id(soc); 5949 params.pdev_id = peer->vdev->pdev->pdev_id; 5950 params.vdev_id = peer->vdev->vdev_id; 5951 5952 dp_wdi_event_handler( 5953 WDI_EVENT_STA_PRIMARY_UMAC_UPDATE, 5954 soc, 5955 (void *)¶ms, peer->peer_id, 5956 WDI_NO_VAL, params.pdev_id); 5957 } 5958 5959 peer->first_link = setup_info->is_first_link; 5960 peer->primary_link = setup_info->is_primary_link; 5961 mld_peer = dp_mld_peer_find_hash_find(soc, 5962 setup_info->mld_peer_mac, 5963 0, vdev_id, DP_MOD_ID_CDP); 5964 5965 dp_info("Peer %pK MAC " QDF_MAC_ADDR_FMT " mld peer %pK MAC " 5966 QDF_MAC_ADDR_FMT " first_link %d, primary_link %d", peer, 5967 QDF_MAC_ADDR_REF(peer->mac_addr.raw), mld_peer, 5968 QDF_MAC_ADDR_REF(setup_info->mld_peer_mac), 5969 peer->first_link, 5970 peer->primary_link); 5971 5972 if (mld_peer) { 5973 if (setup_info->is_first_link) { 5974 /* assign rx_tid to mld peer */ 5975 mld_peer->rx_tid = peer->rx_tid; 5976 /* no cdp_peer_setup for MLD peer, 5977 * set it for addba processing 5978 */ 5979 qdf_atomic_set(&mld_peer->is_default_route_set, 1); 5980 } else { 5981 /* free link peer original rx_tids mem */ 5982 dp_peer_rx_tids_destroy(peer); 5983 /* assign mld peer rx_tid to link peer */ 5984 peer->rx_tid = mld_peer->rx_tid; 5985 } 5986 5987 if (setup_info->is_primary_link && 5988 !setup_info->is_first_link) { 5989 /* 5990 * if first link is not the primary 
link, 5991 * then need to change mld_peer->vdev as 5992 * primary link dp_vdev is not same one 5993 * during mld peer creation. 5994 */ 5995 dp_info("Primary link is not the first link. vdev: %pK " 5996 "vdev_id %d vdev_ref_cnt %d", 5997 mld_peer->vdev, vdev_id, 5998 qdf_atomic_read(&mld_peer->vdev->ref_cnt)); 5999 6000 dp_mld_peer_change_vdev(soc, mld_peer, vdev_id); 6001 6002 params.vdev_id = peer->vdev->vdev_id; 6003 params.peer_mac = mld_peer->mac_addr.raw; 6004 params.chip_id = dp_get_chip_id(soc); 6005 params.pdev_id = peer->vdev->pdev->pdev_id; 6006 6007 dp_wdi_event_handler( 6008 WDI_EVENT_PEER_PRIMARY_UMAC_UPDATE, 6009 soc, (void *)¶ms, peer->peer_id, 6010 WDI_NO_VAL, params.pdev_id); 6011 } 6012 6013 /* associate mld and link peer */ 6014 dp_link_peer_add_mld_peer(peer, mld_peer); 6015 dp_mld_peer_add_link_peer(mld_peer, peer, setup_info->is_bridge_peer); 6016 6017 mld_peer->txrx_peer->is_mld_peer = 1; 6018 dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP); 6019 } else { 6020 peer->mld_peer = NULL; 6021 dp_err("mld peer" QDF_MAC_ADDR_FMT "not found!", 6022 QDF_MAC_ADDR_REF(setup_info->mld_peer_mac)); 6023 return QDF_STATUS_E_FAILURE; 6024 } 6025 6026 return QDF_STATUS_SUCCESS; 6027 } 6028 6029 /** 6030 * dp_mlo_peer_authorize() - authorize MLO peer 6031 * @soc: soc handle 6032 * @peer: pointer to link peer 6033 * 6034 * Return: void 6035 */ 6036 static void dp_mlo_peer_authorize(struct dp_soc *soc, 6037 struct dp_peer *peer) 6038 { 6039 int i; 6040 struct dp_peer *link_peer = NULL; 6041 struct dp_peer *mld_peer = peer->mld_peer; 6042 struct dp_mld_link_peers link_peers_info; 6043 6044 if (!mld_peer) 6045 return; 6046 6047 /* get link peers with reference */ 6048 dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, 6049 &link_peers_info, 6050 DP_MOD_ID_CDP); 6051 6052 for (i = 0; i < link_peers_info.num_links; i++) { 6053 link_peer = link_peers_info.link_peers[i]; 6054 6055 if (!link_peer->authorize) { 6056 dp_release_link_peers_ref(&link_peers_info, 6057 
						  DP_MOD_ID_CDP);
			mld_peer->authorize = false;
			return;
		}
	}

	/* if we are here all link peers are authorized,
	 * authorize ml_peer also
	 */
	mld_peer->authorize = true;

	/* release link peers reference */
	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
}
#endif

/**
 * dp_peer_setup_wifi3_wrapper() - initialize the peer
 * @soc_hdl: soc handle object
 * @vdev_id : vdev_id of vdev object
 * @peer_mac: Peer's mac address
 * @setup_info: peer setup info for MLO
 *
 * Thin dispatch to the architecture-specific (LI/BE) peer setup op.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_peer_setup_wifi3_wrapper(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    uint8_t *peer_mac,
			    struct cdp_peer_setup_info *setup_info)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	return soc->arch_ops.txrx_peer_setup(soc_hdl, vdev_id,
					     peer_mac, setup_info);
}

/**
 * dp_cp_peer_del_resp_handler() - Handle the peer delete response
 * @soc_hdl: Datapath SOC handle
 * @vdev_id: id of virtual device object
 * @mac_addr: Mac address of the peer
 *
 * Completes a pending WDS AST entry delete once the control path has
 * acknowledged the peer deletion, invoking the entry's free callback
 * outside the AST lock.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
					      uint8_t vdev_id,
					      uint8_t *mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *cookie;

	/* firmware owns the AST table; nothing to free on the host */
	if (soc->ast_offload_support)
		return QDF_STATUS_E_INVAL;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry =
		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
						vdev_id);

	/* in case of qwrap we have multiple BSS peers
	 * with same mac address
	 *
	 * AST entry for this mac address will be created
	 * only for one peer hence it will be NULL here
	 */
	/* only proceed when a delete is in progress and the peer has
	 * already been unmapped (peer_id back to HTT_INVALID_PEER)
	 */
	if ((!ast_entry || !ast_entry->delete_in_progress) ||
	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);

	/* detach the callback so it can run without the AST lock held */
	cb = ast_entry->callback;
	cookie = ast_entry->cookie;
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	soc->num_ast_entries--;
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}
	qdf_mem_free(ast_entry);

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_SUPPORT_MSCS
/**
 * dp_record_mscs_params() - Record MSCS parameters sent by the STA in
 * the MSCS Request to the AP.
 * @soc_hdl: Datapath soc handle
 * @peer_mac: STA Mac address
 * @vdev_id: ID of the vdev handle
 * @mscs_params: Structure having MSCS parameters obtained
 * from handshake
 * @active: Flag to set MSCS active/inactive
 *
 * The AP makes a note of these parameters while comparing the MSDUs
 * sent by the STA, to send the downlink traffic with correct User
 * priority.
 *
 * Return: QDF_STATUS - Success/Invalid
 */
static QDF_STATUS
dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
		      bool active)
{
	struct dp_peer *peer;
	struct dp_peer *tgt_peer;
	QDF_STATUS status = QDF_STATUS_E_INVAL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
				      DP_MOD_ID_CDP);

	if (!peer) {
		dp_err("Peer is NULL!");
		goto fail;
	}

	/* for an MLO link peer the params live on the MLD peer */
	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	if (!tgt_peer)
		goto fail;

	if (!active) {
		/* NOTE(review): deactivation exits through 'fail' and so
		 * returns QDF_STATUS_E_INVAL even though mscs_active was
		 * updated — confirm this is the intended return value.
		 */
		dp_info("MSCS Procedure is terminated");
		tgt_peer->mscs_active = active;
		goto fail;
	}

	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
		/* Populate entries inside IPV4 database first */
		tgt_peer->mscs_ipv4_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		tgt_peer->mscs_ipv4_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		tgt_peer->mscs_ipv4_parameter.classifier_mask =
			mscs_params->classifier_mask;

		/* Populate entries inside IPV6 database */
		tgt_peer->mscs_ipv6_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		tgt_peer->mscs_ipv6_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		tgt_peer->mscs_ipv6_parameter.classifier_mask =
			mscs_params->classifier_mask;
		tgt_peer->mscs_active = 1;
		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
			"\tUser priority limit = %x\tClassifier mask = %x",
			QDF_MAC_ADDR_REF(peer_mac),
			mscs_params->classifier_type,
			tgt_peer->mscs_ipv4_parameter.user_priority_bitmap,
			tgt_peer->mscs_ipv4_parameter.user_priority_limit,
			tgt_peer->mscs_ipv4_parameter.classifier_mask);
	}

	status = QDF_STATUS_SUCCESS;
fail:
	/* common exit: 'fail' also serves the success path to drop
	 * the peer reference taken above
	 */
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
#endif

/**
 * dp_get_sec_type() - Get the security type
 * @soc: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @sec_idx: Security id (mcast, ucast)
 *
 * return sec_type: Security type
 */
static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
			   uint8_t *peer_mac, uint8_t sec_idx)
{
	int sec_type = 0;
	struct dp_peer *peer =
		dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
					       peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);

	if (!peer) {
		dp_cdp_err("%pK: Peer is NULL!", (struct dp_soc *)soc);
		return sec_type;
	}

	if (!peer->txrx_peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		dp_peer_debug("%pK: txrx peer is NULL!", soc);
		return sec_type;
	}
	/* NOTE(review): sec_idx indexes security[] without a range
	 * check — callers presumably pass only mcast/ucast; verify.
	 */
	sec_type = peer->txrx_peer->security[sec_idx].sec_type;

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return sec_type;
}

/**
 * dp_peer_authorize() - authorize txrx peer
 * @soc_hdl: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @authorize: non-zero to authorize, zero to de-authorize
 *
 * Return: QDF_STATUS
 *
 */
static QDF_STATUS
dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		  uint8_t *peer_mac, uint32_t authorize)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
							      0, vdev_id,
							      DP_MOD_ID_CDP);

	if (!peer) {
		dp_cdp_debug("%pK: Peer is NULL!", soc);
		status = QDF_STATUS_E_FAILURE;
	} else {
		peer->authorize = authorize ?
				  1 : 0;
		if (peer->txrx_peer)
			peer->txrx_peer->authorize = peer->authorize;

		/* de-authorizing: discard partially reassembled frames */
		if (!peer->authorize)
			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);

		dp_mlo_peer_authorize(soc, peer);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}

	return status;
}

/**
 * dp_peer_get_authorize() - get peer authorize status
 * @soc_hdl: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 *
 * Return: true is peer is authorized, false otherwise
 */
static bool
dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	bool authorize = false;
	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
						      0, vdev_id,
						      DP_MOD_ID_CDP);

	if (!peer) {
		dp_cdp_debug("%pK: Peer is NULL!", soc);
		return authorize;
	}

	authorize = peer->authorize;
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return authorize;
}

/*
 * dp_vdev_unref_delete() - Drop a module reference on a vdev and free
 * it when the overall reference count hits zero.  The actual teardown
 * only runs for the last reference and requires delete.pending to have
 * been set by cdp_vdev_detach().
 */
void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
			  enum dp_mod_id mod_id)
{
	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
	void *vdev_delete_context = NULL;
	uint8_t vdev_id = vdev->vdev_id;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_vdev *tmp_vdev = NULL;
	uint8_t found = 0;

	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);

	/* Return if this is not the last reference*/
	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
		return;

	/*
	 * This should be set as last reference need to released
	 * after cdp_vdev_detach() is called
	 *
	 * if this assert is hit there is a ref count issue
	 */
	QDF_ASSERT(vdev->delete.pending);

	/* capture the callback before the vdev memory goes away */
	vdev_delete_cb = vdev->delete.callback;
	vdev_delete_context = vdev->delete.context;

	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));

	if (wlan_op_mode_monitor == vdev->opmode) {
		/* monitor vdevs have no tx flow pools to unmap */
		dp_monitor_vdev_delete(soc, vdev);
		goto free_vdev;
	}

	/* all peers are gone, go ahead and delete it */
	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
	dp_tx_vdev_detach(vdev);
	dp_monitor_vdev_detach(vdev);

free_vdev:
	qdf_spinlock_destroy(&vdev->peer_list_lock);

	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
		      inactive_list_elem) {
		if (tmp_vdev == vdev) {
			found = 1;
			break;
		}
	}
	if (found)
		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
			     inactive_list_elem);
	/* delete this peer from the list */
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_UNREF_DEL,
				     vdev);
	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
			     WLAN_MD_DP_VDEV, "dp_vdev");
	qdf_mem_free(vdev);
	vdev = NULL;

	/* notify the control path after the vdev memory is gone */
	if (vdev_delete_cb)
		vdev_delete_cb(vdev_delete_context);
}

qdf_export_symbol(dp_vdev_unref_delete);

/*
 * dp_peer_unref_delete() - Drop a module reference on a peer; the
 * final reference frees the peer and releases its vdev CHILD ref.
 */
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	uint16_t peer_id;
	struct dp_peer *tmp_peer;
	bool found = false;

	/* per-module ref bookkeeping is skipped for the RX fastpath ids */
	if (mod_id > DP_MOD_ID_RX)
		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		peer_id = peer->peer_id;

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		QDF_ASSERT(peer_id == HTT_INVALID_PEER);

		dp_peer_info("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));

		dp_peer_sawf_ctx_free(soc, peer);

		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
				     WLAN_MD_DP_PEER, "dp_peer");

		/* unlink from the inactive list it was parked on by
		 * dp_peer_delete_wifi3()
		 */
		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
			      inactive_list_elem) {
			if (tmp_peer == peer) {
				found = 1;
				break;
			}
		}
		if (found)
			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
				     inactive_list_elem);
		/* delete this peer from the list */
		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);

		/* cleanup the peer data */
		dp_peer_cleanup(vdev, peer);

		dp_monitor_peer_detach(soc, peer);

		qdf_spinlock_destroy(&peer->peer_state_lock);

		dp_txrx_peer_detach(soc, peer);
		dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_UNREF_DEL,
					     peer, vdev, 0);
		qdf_mem_free(peer);

		/*
		 * Decrement ref count taken at peer create
		 */
		dp_peer_info("Deleted peer. Unref vdev %pK, vdev_ref_cnt %d",
			     vdev, qdf_atomic_read(&vdev->ref_cnt));
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
	}
}

qdf_export_symbol(dp_peer_unref_delete);

/*
 * dp_txrx_peer_unref_delete() - CDP-facing wrapper around
 * dp_peer_unref_delete() taking an opaque ref handle.
 */
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
			       enum dp_mod_id mod_id)
{
	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
}

qdf_export_symbol(dp_txrx_peer_unref_delete);

/**
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @soc_hdl: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @bitmap: bitmap indicating special handling of request.
 * @peer_type: peer type (link or MLD)
 *
 * NOTE(review): @bitmap is not consumed in this function body —
 * presumably kept for CDP API compatibility; confirm.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
				       uint8_t vdev_id,
				       uint8_t *peer_mac, uint32_t bitmap,
				       enum cdp_peer_type peer_type)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer;
	struct cdp_peer_info peer_info = { 0 };
	struct dp_vdev *vdev = NULL;

	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
				 false, peer_type);
	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);

	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* already being deleted: drop only the lookup reference */
	if (!peer->valid) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer_mac));
		return QDF_STATUS_E_ALREADY;
	}

	vdev = peer->vdev;

	if (!vdev) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	peer->valid = 0;

	dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_DELETE, peer,
				     vdev, 0);
	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ") pending-refs %d",
		     soc, peer,
QDF_MAC_ADDR_REF(peer->mac_addr.raw), 6546 qdf_atomic_read(&peer->ref_cnt)); 6547 6548 dp_peer_rx_reo_shared_qaddr_delete(soc, peer); 6549 6550 dp_local_peer_id_free(peer->vdev->pdev, peer); 6551 6552 /* Drop all rx packets before deleting peer */ 6553 dp_clear_peer_internal(soc, peer); 6554 6555 qdf_spinlock_destroy(&peer->peer_info_lock); 6556 dp_peer_multipass_list_remove(peer); 6557 6558 /* remove the reference to the peer from the hash table */ 6559 dp_peer_find_hash_remove(soc, peer); 6560 6561 dp_peer_vdev_list_remove(soc, vdev, peer); 6562 6563 dp_peer_mlo_delete(peer); 6564 6565 qdf_spin_lock_bh(&soc->inactive_peer_list_lock); 6566 TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer, 6567 inactive_list_elem); 6568 qdf_spin_unlock_bh(&soc->inactive_peer_list_lock); 6569 6570 /* 6571 * Remove the reference added during peer_attach. 6572 * The peer will still be left allocated until the 6573 * PEER_UNMAP message arrives to remove the other 6574 * reference, added by the PEER_MAP message. 
6575 */ 6576 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 6577 /* 6578 * Remove the reference taken above 6579 */ 6580 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 6581 6582 return QDF_STATUS_SUCCESS; 6583 } 6584 6585 #ifdef DP_RX_UDP_OVER_PEER_ROAM 6586 static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl, 6587 uint8_t vdev_id, 6588 uint8_t *peer_mac, 6589 uint32_t auth_status) 6590 { 6591 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 6592 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 6593 DP_MOD_ID_CDP); 6594 if (!vdev) 6595 return QDF_STATUS_E_FAILURE; 6596 6597 vdev->roaming_peer_status = auth_status; 6598 qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac, 6599 QDF_MAC_ADDR_SIZE); 6600 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 6601 6602 return QDF_STATUS_SUCCESS; 6603 } 6604 #endif 6605 /** 6606 * dp_get_vdev_mac_addr_wifi3() - Detach txrx peer 6607 * @soc_hdl: Datapath soc handle 6608 * @vdev_id: virtual interface id 6609 * 6610 * Return: MAC address on success, NULL on failure. 
6611 * 6612 */ 6613 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl, 6614 uint8_t vdev_id) 6615 { 6616 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 6617 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 6618 DP_MOD_ID_CDP); 6619 uint8_t *mac = NULL; 6620 6621 if (!vdev) 6622 return NULL; 6623 6624 mac = vdev->mac_addr.raw; 6625 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 6626 6627 return mac; 6628 } 6629 6630 /** 6631 * dp_vdev_set_wds() - Enable per packet stats 6632 * @soc_hdl: DP soc handle 6633 * @vdev_id: id of DP VDEV handle 6634 * @val: value 6635 * 6636 * Return: none 6637 */ 6638 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 6639 uint32_t val) 6640 { 6641 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 6642 struct dp_vdev *vdev = 6643 dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id, 6644 DP_MOD_ID_CDP); 6645 6646 if (!vdev) 6647 return QDF_STATUS_E_FAILURE; 6648 6649 vdev->wds_enabled = val; 6650 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 6651 6652 return QDF_STATUS_SUCCESS; 6653 } 6654 6655 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id) 6656 { 6657 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 6658 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 6659 DP_MOD_ID_CDP); 6660 int opmode; 6661 6662 if (!vdev) { 6663 dp_err_rl("vdev for id %d is NULL", vdev_id); 6664 return -EINVAL; 6665 } 6666 opmode = vdev->opmode; 6667 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 6668 6669 return opmode; 6670 } 6671 6672 /** 6673 * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev 6674 * @soc_hdl: ol_txrx_soc_handle handle 6675 * @vdev_id: vdev id for which os rx handles are needed 6676 * @stack_fn_p: pointer to stack function pointer 6677 * @osif_vdev_p: pointer to ol_osif_vdev_handle 6678 * 6679 * Return: void 6680 */ 6681 static 6682 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl, 6683 uint8_t vdev_id, 6684 
ol_txrx_rx_fp *stack_fn_p, 6685 ol_osif_vdev_handle *osif_vdev_p) 6686 { 6687 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 6688 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 6689 DP_MOD_ID_CDP); 6690 6691 if (qdf_unlikely(!vdev)) { 6692 *stack_fn_p = NULL; 6693 *osif_vdev_p = NULL; 6694 return; 6695 } 6696 *stack_fn_p = vdev->osif_rx_stack; 6697 *osif_vdev_p = vdev->osif_vdev; 6698 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 6699 } 6700 6701 /** 6702 * dp_get_ctrl_pdev_from_vdev_wifi3() - Get control pdev of vdev 6703 * @soc_hdl: datapath soc handle 6704 * @vdev_id: virtual device/interface id 6705 * 6706 * Return: Handle to control pdev 6707 */ 6708 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3( 6709 struct cdp_soc_t *soc_hdl, 6710 uint8_t vdev_id) 6711 { 6712 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 6713 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 6714 DP_MOD_ID_CDP); 6715 struct dp_pdev *pdev; 6716 6717 if (!vdev) 6718 return NULL; 6719 6720 pdev = vdev->pdev; 6721 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 6722 return pdev ? 
(struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL; 6723 } 6724 6725 int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle) 6726 { 6727 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 6728 6729 return qdf_atomic_read(&pdev->num_tx_outstanding); 6730 } 6731 6732 /** 6733 * dp_get_peer_mac_from_peer_id() - get peer mac 6734 * @soc: CDP SoC handle 6735 * @peer_id: Peer ID 6736 * @peer_mac: MAC addr of PEER 6737 * 6738 * Return: QDF_STATUS 6739 */ 6740 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc, 6741 uint32_t peer_id, 6742 uint8_t *peer_mac) 6743 { 6744 struct dp_peer *peer; 6745 6746 if (soc && peer_mac) { 6747 peer = dp_peer_get_ref_by_id((struct dp_soc *)soc, 6748 (uint16_t)peer_id, 6749 DP_MOD_ID_CDP); 6750 if (peer) { 6751 qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6752 QDF_MAC_ADDR_SIZE); 6753 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 6754 return QDF_STATUS_SUCCESS; 6755 } 6756 } 6757 6758 return QDF_STATUS_E_FAILURE; 6759 } 6760 6761 #ifdef MESH_MODE_SUPPORT 6762 static 6763 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val) 6764 { 6765 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl; 6766 6767 dp_cdp_info("%pK: val %d", vdev->pdev->soc, val); 6768 vdev->mesh_vdev = val; 6769 if (val) 6770 vdev->skip_sw_tid_classification |= 6771 DP_TX_MESH_ENABLED; 6772 else 6773 vdev->skip_sw_tid_classification &= 6774 ~DP_TX_MESH_ENABLED; 6775 } 6776 6777 /** 6778 * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter 6779 * @vdev_hdl: virtual device object 6780 * @val: value to be set 6781 * 6782 * Return: void 6783 */ 6784 static 6785 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val) 6786 { 6787 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl; 6788 6789 dp_cdp_info("%pK: val %d", vdev->pdev->soc, val); 6790 vdev->mesh_rx_filter = val; 6791 } 6792 #endif 6793 6794 /** 6795 * dp_vdev_set_hlos_tid_override() - to set hlos tid override 6796 * @vdev: virtual device object 6797 * @val: value to be set 6798 
 *
 * Return: void
 */
static
void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
{
	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
	if (val)
		vdev->skip_sw_tid_classification |=
			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
	else
		vdev->skip_sw_tid_classification &=
			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
}

/**
 * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
 * @vdev_hdl: virtual device object
 *
 * Return: 1 if this flag is set
 */
static
uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	/* !! normalizes the masked bit to 0/1 */
	return !!(vdev->skip_sw_tid_classification &
		  DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
}

#ifdef VDEV_PEER_PROTOCOL_COUNT
/* Enable/disable per-peer protocol counting on the vdev */
static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
					       int8_t vdev_id,
					       bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return;

	dp_info("enable %d vdev_id %d", enable, vdev_id);
	vdev->peer_protocol_count_track = enable;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/* Set the drop mask used by per-peer protocol counting */
static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
						   int8_t vdev_id,
						   int drop_mask)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return;

	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
	vdev->peer_protocol_count_dropmask = drop_mask;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/* Return the protocol-count tracking flag; 0 if the vdev id is stale */
static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
						  int8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;
	int peer_protocol_count_track;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return 0;

	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
		vdev_id);
	peer_protocol_count_track =
		vdev->peer_protocol_count_track;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return peer_protocol_count_track;
}

/* Return the protocol-count drop mask; 0 if the vdev id is stale */
static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
					       int8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;
	int peer_protocol_count_dropmask;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return 0;

	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
		vdev_id);
	peer_protocol_count_dropmask =
		vdev->peer_protocol_count_dropmask;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return peer_protocol_count_dropmask;
}

#endif

/* True if @data is one of this soc's attached pdevs */
bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
{
	uint8_t pdev_count;

	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
		if (soc->pdev_list[pdev_count] &&
		    soc->pdev_list[pdev_count] == data)
			return true;
	}
	return false;
}

void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
			     struct cdp_vdev_stats *vdev_stats,
			     enum dp_pkt_xmit_type xmit_type)
{
	if (!vdev || !vdev->pdev)
		return;

	dp_update_vdev_ingress_stats(vdev);

	dp_copy_vdev_stats_to_tgt_buf(vdev_stats,
				      &vdev->stats, xmit_type);
	/* Fold each peer's stats into the vdev aggregate */
	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);

	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
			     vdev_stats,
			     vdev->vdev_id,
			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
}

void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	/* Scratch buffer reused for every vdev under this pdev */
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
			   pdev->soc);
		return;
	}

	soc = pdev->soc;

	/* Re-aggregated from scratch below, so clear the old totals first */
	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));

	if (dp_monitor_is_enable_mcopy_mode(pdev))
		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		dp_aggregate_vdev_stats(vdev, vdev_stats, DP_XMIT_TOTAL);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
#endif
}

/**
 * dp_vdev_getstats() - get vdev packet level stats
 * @vdev_handle: Datapath VDEV handle
 * @stats: cdp network device stats structure
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
				   struct cdp_dev_stats *stats)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_vdev_stats *vdev_stats;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	soc = pdev->soc;

	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
		       soc);
		return QDF_STATUS_E_FAILURE;
	}

	dp_aggregate_vdev_stats(vdev, vdev_stats, DP_XMIT_LINK);

	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;

	stats->tx_errors = vdev_stats->tx.tx_failed;
	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
			    vdev_stats->tx_i.sg.dropped_host.num +
			    vdev_stats->tx_i.mcast_en.dropped_map_error +
			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
			    vdev_stats->tx.nawds_mcast_drop;

	/* With hw offload the rx counters come from the ingress buckets
	 * instead of the to_stack counters.
	 */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		stats->rx_packets = vdev_stats->rx.to_stack.num;
		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
	} else {
		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
				    vdev_stats->rx_i.null_q_desc_pkt.num +
				    vdev_stats->rx_i.routed_eapol_pkt.num;
		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
	}

	stats->rx_errors = vdev_stats->rx.err.mic_err +
			   vdev_stats->rx.err.decrypt_err +
			   vdev_stats->rx.err.fcserr +
			   vdev_stats->rx.err.pn_err +
			   vdev_stats->rx.err.oor_err +
			   vdev_stats->rx.err.jump_2k_err +
			   vdev_stats->rx.err.rxdma_wifi_parse_err;

	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
			    vdev_stats->rx.multipass_rx_pkt_drop +
			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
			    vdev_stats->rx.policy_check_drop +
			    vdev_stats->rx.nawds_mcast_drop +
			    vdev_stats->rx.mcast_3addr_drop +
			    vdev_stats->rx.ppeds_drop.num;

	qdf_mem_free(vdev_stats);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_pdev_getstats() - get pdev packet level stats
 * @pdev_handle: Datapath PDEV handle
 * @stats: cdp network device stats structure
 *
 * Return: None
 */
static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
			     struct cdp_dev_stats *stats)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Refresh pdev->stats from all vdevs/peers before reading it */
	dp_aggregate_pdev_stats(pdev);

	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;

	stats->tx_errors = pdev->stats.tx.tx_failed;
	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
			    pdev->stats.tx_i.sg.dropped_host.num +
			    pdev->stats.tx_i.mcast_en.dropped_map_error +
			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
			    pdev->stats.tx.nawds_mcast_drop +
			    pdev->stats.tso_stats.dropped_host.num;

	/* With hw offload the rx counters come from the ingress buckets */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
		stats->rx_packets = pdev->stats.rx.to_stack.num;
		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
	} else {
		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
				    pdev->stats.rx_i.null_q_desc_pkt.num +
				    pdev->stats.rx_i.routed_eapol_pkt.num;
		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
	}

	stats->rx_errors = pdev->stats.err.ip_csum_err +
			   pdev->stats.err.tcp_udp_csum_err +
			   pdev->stats.rx.err.mic_err +
			   pdev->stats.rx.err.decrypt_err +
			   pdev->stats.rx.err.fcserr +
			   pdev->stats.rx.err.pn_err +
			   pdev->stats.rx.err.oor_err +
			   pdev->stats.rx.err.jump_2k_err +
			   pdev->stats.rx.err.rxdma_wifi_parse_err;
	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
			    pdev->stats.dropped.mec +
			    pdev->stats.dropped.mesh_filter +
			    pdev->stats.dropped.wifi_parse +
			    pdev->stats.dropped.mon_rx_drop +
			    pdev->stats.dropped.mon_radiotap_update_err +
			    pdev->stats.rx.mec_drop.num +
			    pdev->stats.rx.ppeds_drop.num +
			    pdev->stats.rx.multipass_rx_pkt_drop +
			    pdev->stats.rx.peer_unauth_rx_pkt_drop +
			    pdev->stats.rx.policy_check_drop +
			    pdev->stats.rx.nawds_mcast_drop +
			    pdev->stats.rx.mcast_3addr_drop;
}

/**
 * dp_get_device_stats() - get interface level packet stats
 * @soc_hdl: soc handle
 * @id: vdev_id or pdev_id based on type
 * @stats: cdp network device stats structure
 * @type: device type pdev/vdev
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
				      struct cdp_dev_stats *stats,
				      uint8_t type)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct dp_vdev *vdev;

	switch (type) {
	case UPDATE_VDEV_STATS:
		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);

		if (vdev) {
			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
						  stats);
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		}
		return status;
	case UPDATE_PDEV_STATS:
		{
			struct dp_pdev *pdev =
				dp_get_pdev_from_soc_pdev_id_wifi3(
						(struct dp_soc *)soc,
						 id);
			if (pdev) {
				dp_pdev_getstats((struct cdp_pdev *)pdev,
						 stats);
				return QDF_STATUS_SUCCESS;
			}
		}
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"apstats cannot be updated for this input "
			"type %d", type);
		break;
	}

	return QDF_STATUS_E_FAILURE;
}

/* Map a hal ring type to a printable name for stats output */
const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
{
	switch (ring_type) {
	case REO_DST:
		return "Reo_dst";
	case REO_EXCEPTION:
		return "Reo_exception";
	case REO_CMD:
		return "Reo_cmd";
	case REO_REINJECT:
		return "Reo_reinject";
	case REO_STATUS:
		return "Reo_status";
	case WBM2SW_RELEASE:
		return "wbm2sw_release";
	case TCL_DATA:
		return "tcl_data";
	case TCL_CMD_CREDIT:
		return "tcl_cmd_credit";
	case TCL_STATUS:
		return "tcl_status";
	case SW2WBM_RELEASE:
		return "sw2wbm_release";
	case RXDMA_BUF:
		return "Rxdma_buf";
	case RXDMA_DST:
		return "Rxdma_dst";
	case RXDMA_MONITOR_BUF:
		return "Rxdma_monitor_buf";
	case RXDMA_MONITOR_DESC:
		return "Rxdma_monitor_desc";
	case RXDMA_MONITOR_STATUS:
		return "Rxdma_monitor_status";
	case RXDMA_MONITOR_DST:
		return "Rxdma_monitor_destination";
	case WBM_IDLE_LINK:
		return "WBM_hw_idle_link";
	case PPE2TCL:
		return "PPE2TCL";
	case REO2PPE:
		return "REO2PPE";
	case TX_MONITOR_DST:
		return "tx_monitor_destination";
	case TX_MONITOR_BUF:
		return "tx_monitor_buf";
	default:
		dp_err("Invalid ring type: %u", ring_type);
		break;
	}
	return "Invalid";
}

void dp_print_napi_stats(struct dp_soc *soc)
{
	hif_print_napi_stats(soc->hif_handle);
}

/**
 * dp_txrx_host_peer_stats_clr() - Reinitialize the txrx peer stats
 * @soc: Datapath soc
 * @peer: Datatpath peer
 * @arg: argument to iter function
 *
 * Return: None
 */
static inline void
dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
			    struct dp_peer *peer,
			    void *arg)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *tgt_peer = NULL;
	struct cdp_interface_peer_stats peer_stats_intf = {0};

	peer_stats_intf.rx_avg_snr = CDP_INVALID_SNR;

	DP_STATS_CLR(peer);
	/* Clear monitor peer stats */
	dp_monitor_peer_reset_stats(soc, peer);

	/* Clear MLD peer stats only when link peer is primary */
	if (dp_peer_is_primary_link_peer(peer)) {
		tgt_peer = dp_get_tgt_peer_from_peer(peer);
		if (tgt_peer) {
			DP_STATS_CLR(tgt_peer);
			txrx_peer = tgt_peer->txrx_peer;
			dp_txrx_peer_stats_clr(txrx_peer);
		}
	}

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
			     &peer_stats_intf, peer->peer_id,
			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
#endif
}

#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
/* Reset the high-watermark usage stats of reo dest and tx comp rings */
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
	int ring;

	for (ring = 0; ring < soc->num_reo_dest_rings; ring++)
		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
					    soc->reo_dest_ring[ring].hal_srng);

	for (ring = 0; ring < soc->num_tcl_data_rings; ring++) {
		/* Rings mapped to INVALID_WBM_RING_NUM have no wbm ring */
		if (wlan_cfg_get_wbm_ring_num_for_index(
					soc->wlan_cfg_ctx, ring) ==
		    INVALID_WBM_RING_NUM)
			continue;

		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
					     soc->tx_comp_ring[ring].hal_srng);
	}
}
#else
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
}
#endif

#ifdef WLAN_SUPPORT_PPEDS
static void dp_clear_tx_ppeds_stats(struct dp_soc *soc)
{
	if (soc->arch_ops.dp_ppeds_clear_stats)
		soc->arch_ops.dp_ppeds_clear_stats(soc);
}

static void dp_ppeds_clear_ring_util_stats(struct dp_soc *soc)
{
	if (soc->arch_ops.dp_txrx_ppeds_clear_rings_stats)
		soc->arch_ops.dp_txrx_ppeds_clear_rings_stats(soc);
}
#else
static void dp_clear_tx_ppeds_stats(struct dp_soc *soc)
{
}

static void dp_ppeds_clear_ring_util_stats(struct dp_soc *soc)
{
}
#endif

/**
 * dp_txrx_host_stats_clr() - Reinitialize the txrx stats
 * @vdev: DP_VDEV handle
 * @soc: DP_SOC handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
{
	struct dp_vdev *var_vdev = NULL;

	if (!vdev || !vdev->pdev)
		return QDF_STATUS_E_FAILURE;

	/*
	 * if NSS offload is enabled, then send message
	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
	 * then clear host statistics.
	 */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (soc->cdp_soc.ol_ops->nss_stats_clr)
			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
							   vdev->vdev_id);
	}

	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
					      (1 << vdev->vdev_id));

	DP_STATS_CLR(vdev->pdev);
	DP_STATS_CLR(vdev->pdev->soc);

	dp_clear_tx_ppeds_stats(soc);
	dp_ppeds_clear_ring_util_stats(soc);

	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);

	/* Clear every vdev on this pdev, and every peer under each vdev */
	TAILQ_FOREACH(var_vdev, &vdev->pdev->vdev_list, vdev_list_elem) {
		DP_STATS_CLR(var_vdev);
		dp_vdev_iterate_peer(var_vdev, dp_txrx_host_peer_stats_clr,
				     NULL, DP_MOD_ID_GENERIC_STATS);
	}

	dp_srng_clear_ring_usage_wm_stats(soc);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
			     &vdev->stats, vdev->vdev_id,
			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_peer_calibr_stats()- Get peer calibrated stats
 * @peer: Datapath peer
 * @peer_stats: buffer for peer stats
 *
 * Return: none
 */
static inline
void dp_get_peer_calibr_stats(struct dp_peer *peer,
			      struct cdp_peer_stats *peer_stats)
{
	struct dp_peer *tgt_peer;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	if (!tgt_peer)
		return;

	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
	peer_stats->tx.tx_bytes_success_last =
				tgt_peer->stats.tx.tx_bytes_success_last;
	peer_stats->tx.tx_data_success_last =
				tgt_peer->stats.tx.tx_data_success_last;
	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate; 7389 peer_stats->tx.tx_data_ucast_last = 7390 tgt_peer->stats.tx.tx_data_ucast_last; 7391 peer_stats->tx.tx_data_ucast_rate = 7392 tgt_peer->stats.tx.tx_data_ucast_rate; 7393 peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time; 7394 peer_stats->rx.rx_bytes_success_last = 7395 tgt_peer->stats.rx.rx_bytes_success_last; 7396 peer_stats->rx.rx_data_success_last = 7397 tgt_peer->stats.rx.rx_data_success_last; 7398 peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate; 7399 peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate; 7400 } 7401 7402 /** 7403 * dp_get_peer_basic_stats()- Get peer basic stats 7404 * @peer: Datapath peer 7405 * @peer_stats: buffer for peer stats 7406 * 7407 * Return: none 7408 */ 7409 static inline 7410 void dp_get_peer_basic_stats(struct dp_peer *peer, 7411 struct cdp_peer_stats *peer_stats) 7412 { 7413 struct dp_txrx_peer *txrx_peer; 7414 7415 txrx_peer = dp_get_txrx_peer(peer); 7416 if (!txrx_peer) 7417 return; 7418 7419 peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num; 7420 peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes; 7421 peer_stats->tx.tx_failed += txrx_peer->tx_failed; 7422 peer_stats->rx.to_stack.num += txrx_peer->to_stack.num; 7423 peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes; 7424 } 7425 7426 #ifdef QCA_ENHANCED_STATS_SUPPORT 7427 /** 7428 * dp_get_peer_per_pkt_stats()- Get peer per pkt stats 7429 * @peer: Datapath peer 7430 * @peer_stats: buffer for peer stats 7431 * 7432 * Return: none 7433 */ 7434 static inline 7435 void dp_get_peer_per_pkt_stats(struct dp_peer *peer, 7436 struct cdp_peer_stats *peer_stats) 7437 { 7438 struct dp_txrx_peer *txrx_peer; 7439 struct dp_peer_per_pkt_stats *per_pkt_stats; 7440 uint8_t inx = 0, link_id = 0; 7441 struct dp_pdev *pdev; 7442 struct dp_soc *soc; 7443 uint8_t stats_arr_size; 7444 7445 txrx_peer = dp_get_txrx_peer(peer); 7446 pdev = peer->vdev->pdev; 7447 7448 if 
(!txrx_peer) 7449 return; 7450 7451 if (!IS_MLO_DP_LINK_PEER(peer)) { 7452 stats_arr_size = txrx_peer->stats_arr_size; 7453 for (inx = 0; inx < stats_arr_size; inx++) { 7454 per_pkt_stats = &txrx_peer->stats[inx].per_pkt_stats; 7455 DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats); 7456 } 7457 } else { 7458 soc = pdev->soc; 7459 link_id = dp_get_peer_hw_link_id(soc, pdev); 7460 per_pkt_stats = 7461 &txrx_peer->stats[link_id].per_pkt_stats; 7462 DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats); 7463 } 7464 } 7465 7466 #ifdef WLAN_FEATURE_11BE_MLO 7467 /** 7468 * dp_get_peer_extd_stats()- Get peer extd stats 7469 * @peer: Datapath peer 7470 * @peer_stats: buffer for peer stats 7471 * 7472 * Return: none 7473 */ 7474 static inline 7475 void dp_get_peer_extd_stats(struct dp_peer *peer, 7476 struct cdp_peer_stats *peer_stats) 7477 { 7478 struct dp_soc *soc = peer->vdev->pdev->soc; 7479 7480 if (IS_MLO_DP_MLD_PEER(peer)) { 7481 uint8_t i; 7482 struct dp_peer *link_peer; 7483 struct dp_soc *link_peer_soc; 7484 struct dp_mld_link_peers link_peers_info; 7485 7486 dp_get_link_peers_ref_from_mld_peer(soc, peer, 7487 &link_peers_info, 7488 DP_MOD_ID_CDP); 7489 for (i = 0; i < link_peers_info.num_links; i++) { 7490 link_peer = link_peers_info.link_peers[i]; 7491 link_peer_soc = link_peer->vdev->pdev->soc; 7492 dp_monitor_peer_get_stats(link_peer_soc, link_peer, 7493 peer_stats, 7494 UPDATE_PEER_STATS); 7495 } 7496 dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP); 7497 } else { 7498 dp_monitor_peer_get_stats(soc, peer, peer_stats, 7499 UPDATE_PEER_STATS); 7500 } 7501 } 7502 #else 7503 static inline 7504 void dp_get_peer_extd_stats(struct dp_peer *peer, 7505 struct cdp_peer_stats *peer_stats) 7506 { 7507 struct dp_soc *soc = peer->vdev->pdev->soc; 7508 7509 dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS); 7510 } 7511 #endif 7512 #else 7513 #if defined WLAN_FEATURE_11BE_MLO && defined DP_MLO_LINK_STATS_SUPPORT 7514 static inline 7515 void 
dp_get_peer_per_pkt_stats(struct dp_peer *peer, 7516 struct cdp_peer_stats *peer_stats) 7517 { 7518 uint8_t i, index; 7519 struct dp_mld_link_peers link_peers_info; 7520 struct dp_txrx_peer *txrx_peer; 7521 struct dp_peer_per_pkt_stats *per_pkt_stats; 7522 struct dp_soc *soc = peer->vdev->pdev->soc; 7523 7524 txrx_peer = dp_get_txrx_peer(peer); 7525 if (!txrx_peer) 7526 return; 7527 7528 if (IS_MLO_DP_MLD_PEER(peer)) { 7529 dp_get_link_peers_ref_from_mld_peer(soc, peer, 7530 &link_peers_info, 7531 DP_MOD_ID_GENERIC_STATS); 7532 for (i = 0; i < link_peers_info.num_links; i++) { 7533 if (i > txrx_peer->stats_arr_size) 7534 break; 7535 per_pkt_stats = &txrx_peer->stats[i].per_pkt_stats; 7536 DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats); 7537 } 7538 dp_release_link_peers_ref(&link_peers_info, 7539 DP_MOD_ID_GENERIC_STATS); 7540 } else { 7541 index = dp_get_peer_link_id(peer); 7542 per_pkt_stats = &txrx_peer->stats[index].per_pkt_stats; 7543 DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats); 7544 qdf_mem_copy(&peer_stats->mac_addr, 7545 &peer->mac_addr.raw[0], 7546 QDF_MAC_ADDR_SIZE); 7547 } 7548 } 7549 7550 static inline 7551 void dp_get_peer_extd_stats(struct dp_peer *peer, 7552 struct cdp_peer_stats *peer_stats) 7553 { 7554 uint8_t i, index; 7555 struct dp_mld_link_peers link_peers_info; 7556 struct dp_txrx_peer *txrx_peer; 7557 struct dp_peer_extd_stats *extd_stats; 7558 struct dp_soc *soc = peer->vdev->pdev->soc; 7559 7560 txrx_peer = dp_get_txrx_peer(peer); 7561 if (qdf_unlikely(!txrx_peer)) { 7562 dp_err_rl("txrx_peer NULL for peer MAC: " QDF_MAC_ADDR_FMT, 7563 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 7564 return; 7565 } 7566 7567 if (IS_MLO_DP_MLD_PEER(peer)) { 7568 dp_get_link_peers_ref_from_mld_peer(soc, peer, 7569 &link_peers_info, 7570 DP_MOD_ID_GENERIC_STATS); 7571 for (i = 0; i < link_peers_info.num_links; i++) { 7572 if (i > txrx_peer->stats_arr_size) 7573 break; 7574 extd_stats = &txrx_peer->stats[i].extd_stats; 7575 /* Return aggregated stats for 
MLD peer */ 7576 DP_UPDATE_EXTD_STATS(peer_stats, extd_stats); 7577 } 7578 dp_release_link_peers_ref(&link_peers_info, 7579 DP_MOD_ID_GENERIC_STATS); 7580 } else { 7581 index = dp_get_peer_link_id(peer); 7582 extd_stats = &txrx_peer->stats[index].extd_stats; 7583 DP_UPDATE_EXTD_STATS(peer_stats, extd_stats); 7584 qdf_mem_copy(&peer_stats->mac_addr, 7585 &peer->mac_addr.raw[0], 7586 QDF_MAC_ADDR_SIZE); 7587 } 7588 } 7589 #else 7590 static inline 7591 void dp_get_peer_per_pkt_stats(struct dp_peer *peer, 7592 struct cdp_peer_stats *peer_stats) 7593 { 7594 struct dp_txrx_peer *txrx_peer; 7595 struct dp_peer_per_pkt_stats *per_pkt_stats; 7596 7597 txrx_peer = dp_get_txrx_peer(peer); 7598 if (!txrx_peer) 7599 return; 7600 7601 per_pkt_stats = &txrx_peer->stats[0].per_pkt_stats; 7602 DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats); 7603 } 7604 7605 static inline 7606 void dp_get_peer_extd_stats(struct dp_peer *peer, 7607 struct cdp_peer_stats *peer_stats) 7608 { 7609 struct dp_txrx_peer *txrx_peer; 7610 struct dp_peer_extd_stats *extd_stats; 7611 7612 txrx_peer = dp_get_txrx_peer(peer); 7613 if (qdf_unlikely(!txrx_peer)) { 7614 dp_err_rl("txrx_peer NULL"); 7615 return; 7616 } 7617 7618 extd_stats = &txrx_peer->stats[0].extd_stats; 7619 DP_UPDATE_EXTD_STATS(peer_stats, extd_stats); 7620 } 7621 #endif 7622 #endif 7623 7624 /** 7625 * dp_get_peer_tx_per()- Get peer packet error ratio 7626 * @peer_stats: buffer for peer stats 7627 * 7628 * Return: none 7629 */ 7630 static inline 7631 void dp_get_peer_tx_per(struct cdp_peer_stats *peer_stats) 7632 { 7633 if (peer_stats->tx.tx_success.num + peer_stats->tx.retries > 0) 7634 peer_stats->tx.per = qdf_do_div((peer_stats->tx.retries * 100), 7635 (peer_stats->tx.tx_success.num + 7636 peer_stats->tx.retries)); 7637 else 7638 peer_stats->tx.per = 0; 7639 } 7640 7641 void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats) 7642 { 7643 dp_get_peer_calibr_stats(peer, peer_stats); 7644 7645 
dp_get_peer_basic_stats(peer, peer_stats); 7646 7647 dp_get_peer_per_pkt_stats(peer, peer_stats); 7648 7649 dp_get_peer_extd_stats(peer, peer_stats); 7650 7651 dp_get_peer_tx_per(peer_stats); 7652 } 7653 7654 /** 7655 * dp_get_host_peer_stats()- function to print peer stats 7656 * @soc: dp_soc handle 7657 * @mac_addr: mac address of the peer 7658 * 7659 * Return: QDF_STATUS 7660 */ 7661 static QDF_STATUS 7662 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr) 7663 { 7664 struct dp_peer *peer = NULL; 7665 struct cdp_peer_stats *peer_stats = NULL; 7666 struct cdp_peer_info peer_info = { 0 }; 7667 7668 if (!mac_addr) { 7669 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 7670 "%s: NULL peer mac addr\n", __func__); 7671 return QDF_STATUS_E_FAILURE; 7672 } 7673 7674 DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false, 7675 CDP_WILD_PEER_TYPE); 7676 7677 peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info, 7678 DP_MOD_ID_CDP); 7679 if (!peer) { 7680 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 7681 "%s: Invalid peer\n", __func__); 7682 return QDF_STATUS_E_FAILURE; 7683 } 7684 7685 peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats)); 7686 if (!peer_stats) { 7687 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 7688 "%s: Memory allocation failed for cdp_peer_stats\n", 7689 __func__); 7690 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 7691 return QDF_STATUS_E_NOMEM; 7692 } 7693 7694 qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats)); 7695 7696 dp_get_peer_stats(peer, peer_stats); 7697 dp_print_peer_stats(peer, peer_stats); 7698 7699 dp_peer_rxtid_stats(dp_get_tgt_peer_from_peer(peer), 7700 dp_rx_tid_stats_cb, NULL); 7701 7702 qdf_mem_free(peer_stats); 7703 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 7704 7705 return QDF_STATUS_SUCCESS; 7706 } 7707 7708 /** 7709 * dp_txrx_stats_help() - Helper function for Txrx_Stats 7710 * 7711 * Return: None 7712 */ 7713 static void dp_txrx_stats_help(void) 7714 { 7715 
dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>"); 7716 dp_info("stats_option:"); 7717 dp_info(" 1 -- HTT Tx Statistics"); 7718 dp_info(" 2 -- HTT Rx Statistics"); 7719 dp_info(" 3 -- HTT Tx HW Queue Statistics"); 7720 dp_info(" 4 -- HTT Tx HW Sched Statistics"); 7721 dp_info(" 5 -- HTT Error Statistics"); 7722 dp_info(" 6 -- HTT TQM Statistics"); 7723 dp_info(" 7 -- HTT TQM CMDQ Statistics"); 7724 dp_info(" 8 -- HTT TX_DE_CMN Statistics"); 7725 dp_info(" 9 -- HTT Tx Rate Statistics"); 7726 dp_info(" 10 -- HTT Rx Rate Statistics"); 7727 dp_info(" 11 -- HTT Peer Statistics"); 7728 dp_info(" 12 -- HTT Tx SelfGen Statistics"); 7729 dp_info(" 13 -- HTT Tx MU HWQ Statistics"); 7730 dp_info(" 14 -- HTT RING_IF_INFO Statistics"); 7731 dp_info(" 15 -- HTT SRNG Statistics"); 7732 dp_info(" 16 -- HTT SFM Info Statistics"); 7733 dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics"); 7734 dp_info(" 18 -- HTT Peer List Details"); 7735 dp_info(" 20 -- Clear Host Statistics"); 7736 dp_info(" 21 -- Host Rx Rate Statistics"); 7737 dp_info(" 22 -- Host Tx Rate Statistics"); 7738 dp_info(" 23 -- Host Tx Statistics"); 7739 dp_info(" 24 -- Host Rx Statistics"); 7740 dp_info(" 25 -- Host AST Statistics"); 7741 dp_info(" 26 -- Host SRNG PTR Statistics"); 7742 dp_info(" 27 -- Host Mon Statistics"); 7743 dp_info(" 28 -- Host REO Queue Statistics"); 7744 dp_info(" 29 -- Host Soc cfg param Statistics"); 7745 dp_info(" 30 -- Host pdev cfg param Statistics"); 7746 dp_info(" 31 -- Host NAPI stats"); 7747 dp_info(" 32 -- Host Interrupt stats"); 7748 dp_info(" 33 -- Host FISA stats"); 7749 dp_info(" 34 -- Host Register Work stats"); 7750 dp_info(" 35 -- HW REO Queue stats"); 7751 dp_info(" 36 -- Host WBM IDLE link desc ring HP/TP"); 7752 dp_info(" 37 -- Host SRNG usage watermark stats"); 7753 } 7754 7755 #ifdef DP_UMAC_HW_RESET_SUPPORT 7756 /** 7757 * dp_umac_rst_skel_enable_update() - Update skel dbg flag for umac reset 7758 * @soc: dp soc handle 7759 * @en: 
ebable/disable 7760 * 7761 * Return: void 7762 */ 7763 static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en) 7764 { 7765 soc->umac_reset_ctx.skel_enable = en; 7766 dp_cdp_debug("UMAC HW reset debug skeleton code enabled :%u", 7767 soc->umac_reset_ctx.skel_enable); 7768 } 7769 7770 /** 7771 * dp_umac_rst_skel_enable_get() - Get skel dbg flag for umac reset 7772 * @soc: dp soc handle 7773 * 7774 * Return: enable/disable flag 7775 */ 7776 static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc) 7777 { 7778 return soc->umac_reset_ctx.skel_enable; 7779 } 7780 #else 7781 static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en) 7782 { 7783 } 7784 7785 static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc) 7786 { 7787 return false; 7788 } 7789 #endif 7790 7791 #ifndef WLAN_SOFTUMAC_SUPPORT 7792 static void dp_print_reg_write_stats(struct dp_soc *soc) 7793 { 7794 hal_dump_reg_write_stats(soc->hal_soc); 7795 hal_dump_reg_write_srng_stats(soc->hal_soc); 7796 } 7797 #else 7798 static void dp_print_reg_write_stats(struct dp_soc *soc) 7799 { 7800 hif_print_reg_write_stats(soc->hif_handle); 7801 } 7802 #endif 7803 7804 /** 7805 * dp_print_host_stats()- Function to print the stats aggregated at host 7806 * @vdev: DP_VDEV handle 7807 * @req: host stats type 7808 * @soc: dp soc handler 7809 * 7810 * Return: 0 on success, print error message in case of failure 7811 */ 7812 static int 7813 dp_print_host_stats(struct dp_vdev *vdev, 7814 struct cdp_txrx_stats_req *req, 7815 struct dp_soc *soc) 7816 { 7817 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; 7818 enum cdp_host_txrx_stats type = 7819 dp_stats_mapping_table[req->stats][STATS_HOST]; 7820 7821 dp_aggregate_pdev_stats(pdev); 7822 7823 switch (type) { 7824 case TXRX_CLEAR_STATS: 7825 dp_txrx_host_stats_clr(vdev, soc); 7826 break; 7827 case TXRX_RX_RATE_STATS: 7828 dp_print_rx_rates(vdev); 7829 break; 7830 case TXRX_TX_RATE_STATS: 7831 dp_print_tx_rates(vdev); 7832 break; 7833 case 
TXRX_TX_HOST_STATS: 7834 dp_print_pdev_tx_stats(pdev); 7835 dp_print_soc_tx_stats(pdev->soc); 7836 dp_print_global_desc_count(); 7837 dp_print_vdev_mlo_mcast_tx_stats(vdev); 7838 break; 7839 case TXRX_RX_HOST_STATS: 7840 dp_print_pdev_rx_stats(pdev); 7841 dp_print_soc_rx_stats(pdev->soc); 7842 break; 7843 case TXRX_AST_STATS: 7844 dp_print_ast_stats(pdev->soc); 7845 dp_print_mec_stats(pdev->soc); 7846 dp_print_peer_table(vdev); 7847 if (soc->arch_ops.dp_mlo_print_ptnr_info) 7848 soc->arch_ops.dp_mlo_print_ptnr_info(vdev); 7849 break; 7850 case TXRX_SRNG_PTR_STATS: 7851 dp_print_ring_stats(pdev); 7852 break; 7853 case TXRX_RX_MON_STATS: 7854 dp_monitor_print_pdev_rx_mon_stats(pdev); 7855 break; 7856 case TXRX_REO_QUEUE_STATS: 7857 dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc, 7858 req->peer_addr); 7859 break; 7860 case TXRX_SOC_CFG_PARAMS: 7861 dp_print_soc_cfg_params(pdev->soc); 7862 break; 7863 case TXRX_PDEV_CFG_PARAMS: 7864 dp_print_pdev_cfg_params(pdev); 7865 break; 7866 case TXRX_NAPI_STATS: 7867 dp_print_napi_stats(pdev->soc); 7868 break; 7869 case TXRX_SOC_INTERRUPT_STATS: 7870 dp_print_soc_interrupt_stats(pdev->soc); 7871 break; 7872 case TXRX_SOC_FSE_STATS: 7873 if (soc->cdp_soc.ol_ops->dp_print_fisa_stats) 7874 soc->cdp_soc.ol_ops->dp_print_fisa_stats( 7875 CDP_FISA_STATS_ID_DUMP_HW_FST); 7876 break; 7877 case TXRX_HAL_REG_WRITE_STATS: 7878 dp_print_reg_write_stats(pdev->soc); 7879 break; 7880 case TXRX_SOC_REO_HW_DESC_DUMP: 7881 dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc, 7882 vdev->vdev_id); 7883 break; 7884 case TXRX_SOC_WBM_IDLE_HPTP_DUMP: 7885 dp_dump_wbm_idle_hptp(pdev->soc, pdev); 7886 break; 7887 case TXRX_SRNG_USAGE_WM_STATS: 7888 /* Dump usage watermark stats for all SRNGs */ 7889 dp_dump_srng_high_wm_stats(soc, DP_SRNG_WM_MASK_ALL); 7890 break; 7891 case TXRX_PEER_STATS: 7892 dp_print_per_link_stats((struct cdp_soc_t *)pdev->soc, 7893 vdev->vdev_id); 7894 break; 7895 default: 7896 dp_info("Wrong Input For TxRx Host Stats"); 
7897 dp_txrx_stats_help(); 7898 break; 7899 } 7900 return 0; 7901 } 7902 7903 /** 7904 * dp_pdev_tid_stats_ingress_inc() - increment ingress_stack counter 7905 * @pdev: pdev handle 7906 * @val: increase in value 7907 * 7908 * Return: void 7909 */ 7910 static void 7911 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val) 7912 { 7913 pdev->stats.tid_stats.ingress_stack += val; 7914 } 7915 7916 /** 7917 * dp_pdev_tid_stats_osif_drop() - increment osif_drop counter 7918 * @pdev: pdev handle 7919 * @val: increase in value 7920 * 7921 * Return: void 7922 */ 7923 static void 7924 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val) 7925 { 7926 pdev->stats.tid_stats.osif_drop += val; 7927 } 7928 7929 /** 7930 * dp_get_fw_peer_stats()- function to print peer stats 7931 * @soc: soc handle 7932 * @pdev_id: id of the pdev handle 7933 * @mac_addr: mac address of the peer 7934 * @cap: Type of htt stats requested 7935 * @is_wait: if set, wait on completion from firmware response 7936 * 7937 * Currently Supporting only MAC ID based requests Only 7938 * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY 7939 * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM 7940 * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM 7941 * 7942 * Return: QDF_STATUS 7943 */ 7944 static QDF_STATUS 7945 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id, 7946 uint8_t *mac_addr, 7947 uint32_t cap, uint32_t is_wait) 7948 { 7949 int i; 7950 uint32_t config_param0 = 0; 7951 uint32_t config_param1 = 0; 7952 uint32_t config_param2 = 0; 7953 uint32_t config_param3 = 0; 7954 struct dp_pdev *pdev = 7955 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 7956 pdev_id); 7957 7958 if (!pdev) 7959 return QDF_STATUS_E_FAILURE; 7960 7961 HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1); 7962 config_param0 |= (1 << (cap + 1)); 7963 7964 for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) { 7965 config_param1 |= (1 << i); 7966 } 7967 7968 config_param2 |= (mac_addr[0] & 0x000000ff); 7969 config_param2 |= 
((mac_addr[1] << 8) & 0x0000ff00); 7970 config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000); 7971 config_param2 |= ((mac_addr[3] << 24) & 0xff000000); 7972 7973 config_param3 |= (mac_addr[4] & 0x000000ff); 7974 config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00); 7975 7976 if (is_wait) { 7977 qdf_event_reset(&pdev->fw_peer_stats_event); 7978 dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO, 7979 config_param0, config_param1, 7980 config_param2, config_param3, 7981 0, DBG_STATS_COOKIE_DP_STATS, 0); 7982 qdf_wait_single_event(&pdev->fw_peer_stats_event, 7983 DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC); 7984 } else { 7985 dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO, 7986 config_param0, config_param1, 7987 config_param2, config_param3, 7988 0, DBG_STATS_COOKIE_DEFAULT, 0); 7989 } 7990 7991 return QDF_STATUS_SUCCESS; 7992 7993 } 7994 7995 /* This struct definition will be removed from here 7996 * once it get added in FW headers*/ 7997 struct httstats_cmd_req { 7998 uint32_t config_param0; 7999 uint32_t config_param1; 8000 uint32_t config_param2; 8001 uint32_t config_param3; 8002 int cookie; 8003 u_int8_t stats_id; 8004 }; 8005 8006 /** 8007 * dp_get_htt_stats: function to process the httstas request 8008 * @soc: DP soc handle 8009 * @pdev_id: id of pdev handle 8010 * @data: pointer to request data 8011 * @data_len: length for request data 8012 * 8013 * Return: QDF_STATUS 8014 */ 8015 static QDF_STATUS 8016 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data, 8017 uint32_t data_len) 8018 { 8019 struct httstats_cmd_req *req = (struct httstats_cmd_req *)data; 8020 struct dp_pdev *pdev = 8021 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 8022 pdev_id); 8023 8024 if (!pdev) 8025 return QDF_STATUS_E_FAILURE; 8026 8027 QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req)); 8028 dp_h2t_ext_stats_msg_send(pdev, req->stats_id, 8029 req->config_param0, req->config_param1, 8030 req->config_param2, req->config_param3, 8031 req->cookie, 
DBG_STATS_COOKIE_DEFAULT, 0); 8032 8033 return QDF_STATUS_SUCCESS; 8034 } 8035 8036 /** 8037 * dp_set_pdev_tidmap_prty_wifi3() - update tidmap priority in pdev 8038 * @pdev: DP_PDEV handle 8039 * @prio: tidmap priority value passed by the user 8040 * 8041 * Return: QDF_STATUS_SUCCESS on success 8042 */ 8043 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev, 8044 uint8_t prio) 8045 { 8046 struct dp_soc *soc = pdev->soc; 8047 8048 soc->tidmap_prty = prio; 8049 8050 hal_tx_set_tidmap_prty(soc->hal_soc, prio); 8051 return QDF_STATUS_SUCCESS; 8052 } 8053 8054 /** 8055 * dp_get_peer_param: function to get parameters in peer 8056 * @cdp_soc: DP soc handle 8057 * @vdev_id: id of vdev handle 8058 * @peer_mac: peer mac address 8059 * @param: parameter type to be set 8060 * @val: address of buffer 8061 * 8062 * Return: val 8063 */ 8064 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8065 uint8_t *peer_mac, 8066 enum cdp_peer_param_type param, 8067 cdp_config_param_type *val) 8068 { 8069 return QDF_STATUS_SUCCESS; 8070 } 8071 8072 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 8073 static inline void 8074 dp_check_map_link_id_band(struct dp_peer *peer) 8075 { 8076 if (peer->link_id_valid) 8077 dp_map_link_id_band(peer); 8078 } 8079 8080 /** 8081 * dp_map_local_link_id_band() - map local link id band 8082 * @peer: dp peer handle 8083 * 8084 * Return: None 8085 */ 8086 static inline 8087 void dp_map_local_link_id_band(struct dp_peer *peer) 8088 { 8089 struct dp_txrx_peer *txrx_peer = NULL; 8090 enum dp_bands band; 8091 8092 txrx_peer = dp_get_txrx_peer(peer); 8093 if (txrx_peer && peer->local_link_id) { 8094 band = dp_freq_to_band(peer->freq); 8095 txrx_peer->ll_band[peer->local_link_id] = band; 8096 } else { 8097 dp_info("txrx_peer NULL or local link id not set: %u " 8098 QDF_MAC_ADDR_FMT, peer->local_link_id, 8099 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 8100 } 8101 } 8102 #else 8103 static inline void 
8104 dp_check_map_link_id_band(struct dp_peer *peer) 8105 { 8106 } 8107 8108 static inline 8109 void dp_map_local_link_id_band(struct dp_peer *peer) 8110 { 8111 } 8112 #endif 8113 8114 /** 8115 * dp_set_peer_freq() - Set peer frequency 8116 * @cdp_soc: DP soc handle 8117 * @vdev_id: id of vdev handle 8118 * @peer_mac: peer mac address 8119 * @param: parameter type to be set 8120 * @val: value of parameter to be set 8121 * 8122 * Return: QDF_STATUS_SUCCESS for success. error code for failure. 8123 */ 8124 static inline QDF_STATUS 8125 dp_set_peer_freq(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8126 uint8_t *peer_mac, enum cdp_peer_param_type param, 8127 cdp_config_param_type val) 8128 { 8129 struct dp_peer *peer = NULL; 8130 struct cdp_peer_info peer_info = { 0 }; 8131 8132 DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, 8133 false, CDP_LINK_PEER_TYPE); 8134 8135 peer = dp_peer_hash_find_wrapper((struct dp_soc *)cdp_soc, 8136 &peer_info, DP_MOD_ID_CDP); 8137 if (!peer) { 8138 dp_err("peer NULL,MAC " QDF_MAC_ADDR_FMT ", vdev_id %u", 8139 QDF_MAC_ADDR_REF(peer_mac), vdev_id); 8140 8141 return QDF_STATUS_E_FAILURE; 8142 } 8143 8144 peer->freq = val.cdp_peer_param_freq; 8145 dp_check_map_link_id_band(peer); 8146 dp_map_local_link_id_band(peer); 8147 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 8148 8149 dp_info("Peer " QDF_MAC_ADDR_FMT " vdev_id %u, frequency %u", 8150 QDF_MAC_ADDR_REF(peer_mac), vdev_id, 8151 peer->freq); 8152 8153 return QDF_STATUS_SUCCESS; 8154 } 8155 8156 /** 8157 * dp_set_peer_param: function to set parameters in peer 8158 * @cdp_soc: DP soc handle 8159 * @vdev_id: id of vdev handle 8160 * @peer_mac: peer mac address 8161 * @param: parameter type to be set 8162 * @val: value of parameter to be set 8163 * 8164 * Return: 0 for success. nonzero for failure. 
 */
static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
				    uint8_t *peer_mac,
				    enum cdp_peer_param_type param,
				    cdp_config_param_type val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
						       peer_mac, 0, vdev_id,
						       DP_MOD_ID_CDP);
	struct dp_txrx_peer *txrx_peer;

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	txrx_peer = peer->txrx_peer;
	if (!txrx_peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	switch (param) {
	case CDP_CONFIG_NAWDS:
		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
		break;
	case CDP_CONFIG_ISOLATION:
		dp_info("Peer " QDF_MAC_ADDR_FMT " vdev_id %d, isolation %d",
			QDF_MAC_ADDR_REF(peer_mac), vdev_id,
			val.cdp_peer_param_isolation);
		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
		break;
	case CDP_CONFIG_IN_TWT:
		/* Normalize to 0/1 before storing in the bitfield/flag */
		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
		break;
	case CDP_CONFIG_PEER_FREQ:
		status = dp_set_peer_freq(cdp_soc, vdev_id,
					  peer_mac, param, val);
		break;
	default:
		/* Unknown params are silently accepted (status stays SUCCESS) */
		break;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_set_mld_peer_param: function to set parameters in MLD peer
 * @cdp_soc: DP soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_set_mld_peer_param(struct cdp_soc_t *cdp_soc,
					uint8_t vdev_id,
					uint8_t *peer_mac,
					enum cdp_peer_param_type param,
					cdp_config_param_type val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
	struct dp_peer *peer;
	struct dp_txrx_peer *txrx_peer;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	peer = dp_mld_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
					  DP_MOD_ID_CDP);
	if (!peer)
		return QDF_STATUS_E_FAILURE;

	txrx_peer = peer->txrx_peer;
	if (!txrx_peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	switch (param) {
	case CDP_CONFIG_MLD_PEER_VDEV:
		status = dp_mld_peer_change_vdev(soc, peer, val.new_vdev_id);
		break;
	default:
		break;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}

/**
 * dp_set_peer_param_wrapper: wrapper function to set parameters in
 *                            legacy/link/MLD peer
 * @cdp_soc: DP soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS
dp_set_peer_param_wrapper(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
			  uint8_t *peer_mac, enum cdp_peer_param_type param,
			  cdp_config_param_type val)
{
	QDF_STATUS status;

	switch (param) {
	case CDP_CONFIG_MLD_PEER_VDEV:
		/* MLD-scoped param: route to the MLD peer handler */
		status = dp_set_mld_peer_param(cdp_soc, vdev_id, peer_mac,
					       param, val);
		break;
	default:
		status = dp_set_peer_param(cdp_soc, vdev_id, peer_mac,
					   param, val);
		break;
	}

	return status;
}
#endif

/**
 * dp_get_pdev_param() - function to get parameters from pdev
 * @cdp_soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @param: parameter type to be get
 * @val: buffer for value
 *
 * Return: status
 */
static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
				    enum cdp_pdev_param_type param,
				    cdp_config_param_type *val)
{
	struct cdp_pdev *pdev = (struct cdp_pdev *)
			dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
							   pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	switch (param) {
	case CDP_CONFIG_VOW:
		val->cdp_pdev_param_cfg_vow =
				((struct dp_pdev *)pdev)->vow_stats;
		break;
	case CDP_TX_PENDING:
		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
		break;
	case CDP_FILTER_MCAST_DATA:
		val->cdp_pdev_param_fltr_mcast =
				dp_monitor_pdev_get_filter_mcast_data(pdev);
		break;
	case CDP_FILTER_NO_DATA:
		val->cdp_pdev_param_fltr_none =
				dp_monitor_pdev_get_filter_non_data(pdev);
		break;
	case CDP_FILTER_UCAST_DATA:
		val->cdp_pdev_param_fltr_ucast =
				dp_monitor_pdev_get_filter_ucast_data(pdev);
		break;
	case CDP_MONITOR_CHANNEL:
		val->cdp_pdev_param_monitor_chan =
			dp_monitor_get_chan_num((struct dp_pdev *)pdev);
		break;
	case CDP_MONITOR_FREQUENCY:
		val->cdp_pdev_param_mon_freq =
			dp_monitor_get_chan_freq((struct dp_pdev *)pdev);
		break;
	case CDP_CONFIG_RXDMA_BUF_RING_SIZE:
		val->cdp_rxdma_buf_ring_size =
			wlan_cfg_get_rx_dma_buf_ring_size(((struct dp_pdev *)pdev)->wlan_cfg_ctx);
		break;
	case CDP_CONFIG_DELAY_STATS:
		val->cdp_pdev_param_cfg_delay_stats =
				((struct dp_pdev *)pdev)->delay_stats_flag;
		break;
	default:
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_set_pdev_param() - function to set parameters in pdev
 * @cdp_soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
				    enum cdp_pdev_param_type param,
				    cdp_config_param_type val)
{
	int target_type;
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);
	enum reg_wifi_band chan_band;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	/* Band-to-LMAC mapping is target specific; single-LMAC targets
	 * map every band to MAC0.
	 */
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_WCN6450:
		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
		break;
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_PEACH:
		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
		break;
	default:
		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
		break;
	}

	switch (param) {
	case CDP_CONFIG_TX_CAPTURE:
		return dp_monitor_config_debug_sniffer(pdev,
						val.cdp_pdev_param_tx_capture);
	case CDP_CONFIG_DEBUG_SNIFFER:
		return dp_monitor_config_debug_sniffer(pdev,
						val.cdp_pdev_param_dbg_snf);
	case CDP_CONFIG_BPR_ENABLE:
		return dp_monitor_set_bpr_enable(pdev,
						val.cdp_pdev_param_bpr_enable);
	case CDP_CONFIG_PRIMARY_RADIO:
		pdev->is_primary = val.cdp_pdev_param_primary_radio;
		break;
	case CDP_CONFIG_CAPTURE_LATENCY:
		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
		break;
	case CDP_INGRESS_STATS:
		dp_pdev_tid_stats_ingress_inc(pdev,
					      val.cdp_pdev_param_ingrs_stats);
		break;
	case CDP_OSIF_DROP:
		dp_pdev_tid_stats_osif_drop(pdev,
					    val.cdp_pdev_param_osif_drop);
		break;
	case CDP_CONFIG_ENH_RX_CAPTURE:
		return dp_monitor_config_enh_rx_capture(pdev,
						val.cdp_pdev_param_en_rx_cap);
	case CDP_CONFIG_ENH_TX_CAPTURE:
		return dp_monitor_config_enh_tx_capture(pdev,
						val.cdp_pdev_param_en_tx_cap);
	case CDP_CONFIG_HMMC_TID_OVERRIDE:
		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
		break;
	case CDP_CONFIG_HMMC_TID_VALUE:
		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
		break;
	case CDP_CHAN_NOISE_FLOOR:
		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
		break;
	case CDP_TIDMAP_PRTY:
		dp_set_pdev_tidmap_prty_wifi3(pdev,
					      val.cdp_pdev_param_tidmap_prty);
		break;
	case CDP_FILTER_NEIGH_PEERS:
		dp_monitor_set_filter_neigh_peers(pdev,
					val.cdp_pdev_param_fltr_neigh_peers);
		break;
	case CDP_MONITOR_CHANNEL:
		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
		break;
	case CDP_MONITOR_FREQUENCY:
		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq); 8452 dp_monitor_set_chan_band(pdev, chan_band); 8453 break; 8454 case CDP_CONFIG_BSS_COLOR: 8455 dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color); 8456 break; 8457 case CDP_SET_ATF_STATS_ENABLE: 8458 dp_monitor_set_atf_stats_enable(pdev, 8459 val.cdp_pdev_param_atf_stats_enable); 8460 break; 8461 case CDP_CONFIG_SPECIAL_VAP: 8462 dp_monitor_pdev_config_scan_spcl_vap(pdev, 8463 val.cdp_pdev_param_config_special_vap); 8464 dp_monitor_vdev_set_monitor_mode_buf_rings(pdev); 8465 break; 8466 case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE: 8467 dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev, 8468 val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable); 8469 break; 8470 case CDP_CONFIG_ENHANCED_STATS_ENABLE: 8471 pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable; 8472 break; 8473 case CDP_ISOLATION: 8474 pdev->isolation = val.cdp_pdev_param_isolation; 8475 break; 8476 case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE: 8477 return dp_monitor_config_undecoded_metadata_capture(pdev, 8478 val.cdp_pdev_param_undecoded_metadata_enable); 8479 break; 8480 case CDP_CONFIG_RXDMA_BUF_RING_SIZE: 8481 wlan_cfg_set_rx_dma_buf_ring_size(pdev->wlan_cfg_ctx, 8482 val.cdp_rxdma_buf_ring_size); 8483 break; 8484 case CDP_CONFIG_VOW: 8485 pdev->vow_stats = val.cdp_pdev_param_cfg_vow; 8486 break; 8487 default: 8488 return QDF_STATUS_E_INVAL; 8489 } 8490 return QDF_STATUS_SUCCESS; 8491 } 8492 8493 #ifdef QCA_UNDECODED_METADATA_SUPPORT 8494 static 8495 QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc, 8496 uint8_t pdev_id, uint32_t mask, 8497 uint32_t mask_cont) 8498 { 8499 struct dp_pdev *pdev = 8500 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, 8501 pdev_id); 8502 8503 if (!pdev) 8504 return QDF_STATUS_E_FAILURE; 8505 8506 return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev, 8507 mask, mask_cont); 8508 } 8509 8510 static 8511 QDF_STATUS 
dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc, 8512 uint8_t pdev_id, uint32_t *mask, 8513 uint32_t *mask_cont) 8514 { 8515 struct dp_pdev *pdev = 8516 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, 8517 pdev_id); 8518 8519 if (!pdev) 8520 return QDF_STATUS_E_FAILURE; 8521 8522 return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev, 8523 mask, mask_cont); 8524 } 8525 #endif 8526 8527 #ifdef QCA_PEER_EXT_STATS 8528 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc, 8529 qdf_nbuf_t nbuf) 8530 { 8531 struct dp_peer *peer = NULL; 8532 uint16_t peer_id, ring_id; 8533 uint8_t tid = qdf_nbuf_get_tid_val(nbuf); 8534 struct dp_peer_delay_stats *delay_stats = NULL; 8535 8536 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); 8537 if (peer_id > soc->max_peer_id) 8538 return; 8539 8540 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP); 8541 if (qdf_unlikely(!peer)) 8542 return; 8543 8544 if (qdf_unlikely(!peer->txrx_peer)) { 8545 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 8546 return; 8547 } 8548 8549 if (qdf_likely(peer->txrx_peer->delay_stats)) { 8550 delay_stats = peer->txrx_peer->delay_stats; 8551 ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); 8552 dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id], 8553 nbuf); 8554 } 8555 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 8556 } 8557 #else 8558 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc, 8559 qdf_nbuf_t nbuf) 8560 { 8561 } 8562 #endif 8563 8564 /** 8565 * dp_calculate_delay_stats() - function to get rx delay stats 8566 * @cdp_soc: DP soc handle 8567 * @vdev_id: id of DP vdev handle 8568 * @nbuf: skb 8569 * 8570 * Return: QDF_STATUS 8571 */ 8572 static QDF_STATUS 8573 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8574 qdf_nbuf_t nbuf) 8575 { 8576 struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc); 8577 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 8578 DP_MOD_ID_CDP); 8579 8580 if (!vdev) 8581 return QDF_STATUS_SUCCESS; 8582 
	if (vdev->pdev->delay_stats_flag)
		dp_rx_compute_delay(vdev, nbuf);
	else
		dp_rx_update_peer_delay_stats(soc, nbuf);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_vdev_param() - function to get parameters from vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to get value
 * @val: buffer address
 *
 * Return: status
 */
static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
				    enum cdp_vdev_param_type param,
				    cdp_config_param_type *val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	switch (param) {
	case CDP_ENABLE_WDS:
		val->cdp_vdev_param_wds = vdev->wds_enabled;
		break;
	case CDP_ENABLE_MEC:
		val->cdp_vdev_param_mec = vdev->mec_enabled;
		break;
	case CDP_ENABLE_DA_WAR:
		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
		break;
	case CDP_ENABLE_IGMP_MCAST_EN:
		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
		break;
	case CDP_ENABLE_MCAST_EN:
		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
		break;
	case CDP_ENABLE_HLOS_TID_OVERRIDE:
		val->cdp_vdev_param_hlos_tid_override =
			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
		break;
	case CDP_ENABLE_PEER_AUTHORIZE:
		val->cdp_vdev_param_peer_authorize =
			    vdev->peer_authorize;
		break;
	case CDP_TX_ENCAP_TYPE:
		val->cdp_vdev_param_tx_encap = vdev->tx_encap_type;
		break;
	case CDP_ENABLE_CIPHER:
		val->cdp_vdev_param_cipher_en = vdev->sec_type;
		break;
#ifdef WLAN_SUPPORT_MESH_LATENCY
	case CDP_ENABLE_PEER_TID_LATENCY:
		val->cdp_vdev_param_peer_tid_latency_enable =
			vdev->peer_tid_latency_enabled;
		break;
	case CDP_SET_VAP_MESH_TID:
		val->cdp_vdev_param_mesh_tid =
				vdev->mesh_tid_latency_config.latency_tid;
		break;
#endif
	case CDP_DROP_3ADDR_MCAST:
		val->cdp_drop_3addr_mcast = vdev->drop_3addr_mcast;
		break;
	case CDP_SET_MCAST_VDEV:
		/* Arch-specific: result written into val by the callback */
		soc->arch_ops.txrx_get_vdev_mcast_param(soc, vdev, val);
		break;
#ifdef QCA_SUPPORT_WDS_EXTENDED
	case CDP_DROP_TX_MCAST:
		val->cdp_drop_tx_mcast = vdev->drop_tx_mcast;
		break;
#endif

#ifdef MESH_MODE_SUPPORT
	case CDP_MESH_RX_FILTER:
		val->cdp_vdev_param_mesh_rx_filter = vdev->mesh_rx_filter;
		break;
	case CDP_MESH_MODE:
		val->cdp_vdev_param_mesh_mode = vdev->mesh_vdev;
		break;
#endif
	case CDP_ENABLE_NAWDS:
		val->cdp_vdev_param_nawds = vdev->nawds_enabled;
		break;

	case CDP_ENABLE_WRAP:
		val->cdp_vdev_param_wrap = vdev->wrap_vdev;
		break;

#ifdef DP_TRAFFIC_END_INDICATION
	case CDP_ENABLE_TRAFFIC_END_INDICATION:
		val->cdp_vdev_param_traffic_end_ind = vdev->traffic_end_ind_en;
		break;
#endif

	default:
		dp_cdp_err("%pK: param value %d is wrong",
			   soc, param);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_set_vdev_param() - function to set parameters in vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to get value
 * @val: value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
		  enum cdp_vdev_param_type param, cdp_config_param_type val)
{
	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev =
		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
	uint32_t var = 0;
8715 if (!vdev) 8716 return QDF_STATUS_E_FAILURE; 8717 8718 switch (param) { 8719 case CDP_ENABLE_WDS: 8720 dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)", 8721 dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id); 8722 vdev->wds_enabled = val.cdp_vdev_param_wds; 8723 break; 8724 case CDP_ENABLE_MEC: 8725 dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)", 8726 dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id); 8727 vdev->mec_enabled = val.cdp_vdev_param_mec; 8728 break; 8729 case CDP_ENABLE_DA_WAR: 8730 dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)", 8731 dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id); 8732 vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war; 8733 dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *) 8734 vdev->pdev->soc)); 8735 break; 8736 case CDP_ENABLE_NAWDS: 8737 vdev->nawds_enabled = val.cdp_vdev_param_nawds; 8738 break; 8739 case CDP_ENABLE_MCAST_EN: 8740 vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en; 8741 break; 8742 case CDP_ENABLE_IGMP_MCAST_EN: 8743 vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en; 8744 break; 8745 case CDP_ENABLE_PROXYSTA: 8746 vdev->proxysta_vdev = val.cdp_vdev_param_proxysta; 8747 break; 8748 case CDP_UPDATE_TDLS_FLAGS: 8749 vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags; 8750 break; 8751 case CDP_CFG_WDS_AGING_TIMER: 8752 var = val.cdp_vdev_param_aging_tmr; 8753 if (!var) 8754 qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer); 8755 else if (var != vdev->wds_aging_timer_val) 8756 qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var); 8757 8758 vdev->wds_aging_timer_val = var; 8759 break; 8760 case CDP_ENABLE_AP_BRIDGE: 8761 if (wlan_op_mode_sta != vdev->opmode) 8762 vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en; 8763 else 8764 vdev->ap_bridge_enabled = false; 8765 break; 8766 case CDP_ENABLE_CIPHER: 8767 vdev->sec_type = val.cdp_vdev_param_cipher_en; 8768 break; 8769 case CDP_ENABLE_QWRAP_ISOLATION: 8770 vdev->isolation_vdev = 
val.cdp_vdev_param_qwrap_isolation; 8771 break; 8772 case CDP_UPDATE_MULTIPASS: 8773 vdev->multipass_en = val.cdp_vdev_param_update_multipass; 8774 dp_info("vdev %d Multipass enable %d", vdev_id, 8775 vdev->multipass_en); 8776 break; 8777 case CDP_TX_ENCAP_TYPE: 8778 vdev->tx_encap_type = val.cdp_vdev_param_tx_encap; 8779 break; 8780 case CDP_RX_DECAP_TYPE: 8781 vdev->rx_decap_type = val.cdp_vdev_param_rx_decap; 8782 break; 8783 case CDP_TID_VDEV_PRTY: 8784 vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty; 8785 break; 8786 case CDP_TIDMAP_TBL_ID: 8787 vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id; 8788 break; 8789 #ifdef MESH_MODE_SUPPORT 8790 case CDP_MESH_RX_FILTER: 8791 dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev, 8792 val.cdp_vdev_param_mesh_rx_filter); 8793 break; 8794 case CDP_MESH_MODE: 8795 dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev, 8796 val.cdp_vdev_param_mesh_mode); 8797 break; 8798 #endif 8799 case CDP_ENABLE_HLOS_TID_OVERRIDE: 8800 dp_info("vdev_id %d enable hlod tid override %d", vdev_id, 8801 val.cdp_vdev_param_hlos_tid_override); 8802 dp_vdev_set_hlos_tid_override(vdev, 8803 val.cdp_vdev_param_hlos_tid_override); 8804 break; 8805 #ifdef QCA_SUPPORT_WDS_EXTENDED 8806 case CDP_CFG_WDS_EXT: 8807 if (vdev->opmode == wlan_op_mode_ap) 8808 vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext; 8809 break; 8810 case CDP_DROP_TX_MCAST: 8811 dp_info("vdev_id %d drop tx mcast :%d", vdev_id, 8812 val.cdp_drop_tx_mcast); 8813 vdev->drop_tx_mcast = val.cdp_drop_tx_mcast; 8814 break; 8815 #endif 8816 case CDP_ENABLE_PEER_AUTHORIZE: 8817 vdev->peer_authorize = val.cdp_vdev_param_peer_authorize; 8818 break; 8819 #ifdef WLAN_SUPPORT_MESH_LATENCY 8820 case CDP_ENABLE_PEER_TID_LATENCY: 8821 dp_info("vdev_id %d enable peer tid latency %d", vdev_id, 8822 val.cdp_vdev_param_peer_tid_latency_enable); 8823 vdev->peer_tid_latency_enabled = 8824 val.cdp_vdev_param_peer_tid_latency_enable; 8825 break; 8826 case CDP_SET_VAP_MESH_TID: 8827 dp_info("vdev_id %d 
enable peer tid latency %d", vdev_id, 8828 val.cdp_vdev_param_mesh_tid); 8829 vdev->mesh_tid_latency_config.latency_tid 8830 = val.cdp_vdev_param_mesh_tid; 8831 break; 8832 #endif 8833 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE 8834 case CDP_SKIP_BAR_UPDATE_AP: 8835 dp_info("vdev_id %d skip BAR update: %u", vdev_id, 8836 val.cdp_skip_bar_update); 8837 vdev->skip_bar_update = val.cdp_skip_bar_update; 8838 vdev->skip_bar_update_last_ts = 0; 8839 break; 8840 #endif 8841 case CDP_DROP_3ADDR_MCAST: 8842 dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id, 8843 val.cdp_drop_3addr_mcast); 8844 vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast; 8845 break; 8846 case CDP_ENABLE_WRAP: 8847 vdev->wrap_vdev = val.cdp_vdev_param_wrap; 8848 break; 8849 #ifdef DP_TRAFFIC_END_INDICATION 8850 case CDP_ENABLE_TRAFFIC_END_INDICATION: 8851 vdev->traffic_end_ind_en = val.cdp_vdev_param_traffic_end_ind; 8852 break; 8853 #endif 8854 #ifdef FEATURE_DIRECT_LINK 8855 case CDP_VDEV_TX_TO_FW: 8856 dp_info("vdev_id %d to_fw :%d", vdev_id, val.cdp_vdev_tx_to_fw); 8857 vdev->to_fw = val.cdp_vdev_tx_to_fw; 8858 break; 8859 #endif 8860 case CDP_VDEV_SET_MAC_ADDR: 8861 dp_info("set mac addr, old mac addr" QDF_MAC_ADDR_FMT 8862 " new mac addr: " QDF_MAC_ADDR_FMT " for vdev %d", 8863 QDF_MAC_ADDR_REF(vdev->mac_addr.raw), 8864 QDF_MAC_ADDR_REF(val.mac_addr), vdev->vdev_id); 8865 qdf_mem_copy(&vdev->mac_addr.raw[0], val.mac_addr, 8866 QDF_MAC_ADDR_SIZE); 8867 break; 8868 default: 8869 break; 8870 } 8871 8872 dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev); 8873 dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val); 8874 8875 /* Update PDEV flags as VDEV flags are updated */ 8876 dp_pdev_update_fast_rx_flag(dsoc, vdev->pdev); 8877 dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP); 8878 8879 return QDF_STATUS_SUCCESS; 8880 } 8881 8882 #if defined(FEATURE_WLAN_TDLS) && defined(WLAN_FEATURE_11BE_MLO) 8883 /** 8884 * dp_update_mlo_vdev_for_tdls() - update mlo vdev configuration 8885 * for TDLS 8886 * 
@cdp_soc: DP soc handle 8887 * @vdev_id: id of DP vdev handle 8888 * @param: parameter type for vdev 8889 * @val: value 8890 * 8891 * If TDLS connection is from secondary vdev, then copy osif_vdev from 8892 * primary vdev to support RX, update TX bank register info for primary 8893 * vdev as well. 8894 * If TDLS connection is from primary vdev, same as before. 8895 * 8896 * Return: None 8897 */ 8898 static void 8899 dp_update_mlo_vdev_for_tdls(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8900 enum cdp_vdev_param_type param, 8901 cdp_config_param_type val) 8902 { 8903 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 8904 struct dp_peer *peer; 8905 struct dp_peer *tmp_peer; 8906 struct dp_peer *mld_peer; 8907 struct dp_vdev *vdev = NULL; 8908 struct dp_vdev *pri_vdev = NULL; 8909 uint8_t pri_vdev_id = CDP_INVALID_VDEV_ID; 8910 8911 if (param != CDP_UPDATE_TDLS_FLAGS) 8912 return; 8913 8914 dp_info("update TDLS flag for vdev_id %d, val %d", 8915 vdev_id, val.cdp_vdev_param_tdls_flags); 8916 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MISC); 8917 /* only check for STA mode vdev */ 8918 if (!vdev || vdev->opmode != wlan_op_mode_sta) { 8919 dp_info("vdev is not as expected for TDLS"); 8920 goto comp_ret; 8921 } 8922 8923 /* Find primary vdev_id */ 8924 qdf_spin_lock_bh(&vdev->peer_list_lock); 8925 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, 8926 peer_list_elem, 8927 tmp_peer) { 8928 if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) == 8929 QDF_STATUS_SUCCESS) { 8930 /* do check only if MLO link peer exist */ 8931 if (IS_MLO_DP_LINK_PEER(peer)) { 8932 mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer); 8933 pri_vdev_id = mld_peer->vdev->vdev_id; 8934 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 8935 break; 8936 } 8937 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 8938 } 8939 } 8940 qdf_spin_unlock_bh(&vdev->peer_list_lock); 8941 8942 if (pri_vdev_id != CDP_INVALID_VDEV_ID) 8943 pri_vdev = dp_vdev_get_ref_by_id(soc, pri_vdev_id, 8944 DP_MOD_ID_MISC); 8945 8946 /* If current 
vdev is not same as primary vdev */ 8947 if (pri_vdev && pri_vdev != vdev) { 8948 dp_info("primary vdev [%d] %pK different with vdev [%d] %pK", 8949 pri_vdev->vdev_id, pri_vdev, 8950 vdev->vdev_id, vdev); 8951 /* update osif_vdev to support RX for vdev */ 8952 vdev->osif_vdev = pri_vdev->osif_vdev; 8953 dp_set_vdev_param(cdp_soc, pri_vdev->vdev_id, 8954 CDP_UPDATE_TDLS_FLAGS, val); 8955 } 8956 8957 comp_ret: 8958 if (pri_vdev) 8959 dp_vdev_unref_delete(soc, pri_vdev, DP_MOD_ID_MISC); 8960 if (vdev) 8961 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC); 8962 } 8963 8964 static QDF_STATUS 8965 dp_set_vdev_param_wrapper(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8966 enum cdp_vdev_param_type param, 8967 cdp_config_param_type val) 8968 { 8969 dp_update_mlo_vdev_for_tdls(cdp_soc, vdev_id, param, val); 8970 8971 return dp_set_vdev_param(cdp_soc, vdev_id, param, val); 8972 } 8973 #else 8974 static QDF_STATUS 8975 dp_set_vdev_param_wrapper(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, 8976 enum cdp_vdev_param_type param, 8977 cdp_config_param_type val) 8978 { 8979 return dp_set_vdev_param(cdp_soc, vdev_id, param, val); 8980 } 8981 #endif 8982 8983 /** 8984 * dp_rx_peer_metadata_ver_update() - update rx peer metadata version and 8985 * corresponding filed shift and mask 8986 * @soc: Handle to DP Soc structure 8987 * @peer_md_ver: RX peer metadata version value 8988 * 8989 * Return: None 8990 */ 8991 static void 8992 dp_rx_peer_metadata_ver_update(struct dp_soc *soc, uint8_t peer_md_ver) 8993 { 8994 dp_info("rx_peer_metadata version %d", peer_md_ver); 8995 8996 switch (peer_md_ver) { 8997 case 0: /* htt_rx_peer_metadata_v0 */ 8998 soc->htt_peer_id_s = HTT_RX_PEER_META_DATA_V0_PEER_ID_S; 8999 soc->htt_peer_id_m = HTT_RX_PEER_META_DATA_V0_PEER_ID_M; 9000 soc->htt_vdev_id_s = HTT_RX_PEER_META_DATA_V0_VDEV_ID_S; 9001 soc->htt_vdev_id_m = HTT_RX_PEER_META_DATA_V0_VDEV_ID_M; 9002 break; 9003 case 1: /* htt_rx_peer_metadata_v1 */ 9004 soc->htt_peer_id_s = 
HTT_RX_PEER_META_DATA_V1_PEER_ID_S; 9005 soc->htt_peer_id_m = HTT_RX_PEER_META_DATA_V1_PEER_ID_M; 9006 soc->htt_vdev_id_s = HTT_RX_PEER_META_DATA_V1_VDEV_ID_S; 9007 soc->htt_vdev_id_m = HTT_RX_PEER_META_DATA_V1_VDEV_ID_M; 9008 soc->htt_mld_peer_valid_s = 9009 HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S; 9010 soc->htt_mld_peer_valid_m = 9011 HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_M; 9012 break; 9013 case 2: /* htt_rx_peer_metadata_v1a */ 9014 soc->htt_peer_id_s = HTT_RX_PEER_META_DATA_V1A_PEER_ID_S; 9015 soc->htt_peer_id_m = HTT_RX_PEER_META_DATA_V1A_PEER_ID_M; 9016 soc->htt_vdev_id_s = HTT_RX_PEER_META_DATA_V1A_VDEV_ID_S; 9017 soc->htt_vdev_id_m = HTT_RX_PEER_META_DATA_V1A_VDEV_ID_M; 9018 soc->htt_mld_peer_valid_s = 9019 HTT_RX_PEER_META_DATA_V1A_ML_PEER_VALID_S; 9020 soc->htt_mld_peer_valid_m = 9021 HTT_RX_PEER_META_DATA_V1A_ML_PEER_VALID_M; 9022 break; 9023 case 3: /* htt_rx_peer_metadata_v1b */ 9024 soc->htt_peer_id_s = HTT_RX_PEER_META_DATA_V1B_PEER_ID_S; 9025 soc->htt_peer_id_m = HTT_RX_PEER_META_DATA_V1B_PEER_ID_M; 9026 soc->htt_vdev_id_s = HTT_RX_PEER_META_DATA_V1B_VDEV_ID_S; 9027 soc->htt_vdev_id_m = HTT_RX_PEER_META_DATA_V1B_VDEV_ID_M; 9028 soc->htt_mld_peer_valid_s = 9029 HTT_RX_PEER_META_DATA_V1B_ML_PEER_VALID_S; 9030 soc->htt_mld_peer_valid_m = 9031 HTT_RX_PEER_META_DATA_V1B_ML_PEER_VALID_M; 9032 break; 9033 default: 9034 dp_err("invliad rx_peer_metadata version %d", peer_md_ver); 9035 break; 9036 } 9037 9038 soc->rx_peer_metadata_ver = peer_md_ver; 9039 } 9040 9041 /** 9042 * dp_set_psoc_param: function to set parameters in psoc 9043 * @cdp_soc: DP soc handle 9044 * @param: parameter type to be set 9045 * @val: value of parameter to be set 9046 * 9047 * Return: QDF_STATUS 9048 */ 9049 static QDF_STATUS 9050 dp_set_psoc_param(struct cdp_soc_t *cdp_soc, 9051 enum cdp_psoc_param_type param, cdp_config_param_type val) 9052 { 9053 struct dp_soc *soc = (struct dp_soc *)cdp_soc; 9054 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx; 9055 9056 switch 
(param) { 9057 case CDP_ENABLE_RATE_STATS: 9058 soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats; 9059 break; 9060 case CDP_SET_NSS_CFG: 9061 wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, 9062 val.cdp_psoc_param_en_nss_cfg); 9063 /* 9064 * TODO: masked out based on the per offloaded radio 9065 */ 9066 switch (val.cdp_psoc_param_en_nss_cfg) { 9067 case dp_nss_cfg_default: 9068 break; 9069 case dp_nss_cfg_first_radio: 9070 /* 9071 * This configuration is valid for single band radio which 9072 * is also NSS offload. 9073 */ 9074 case dp_nss_cfg_dbdc: 9075 case dp_nss_cfg_dbtc: 9076 wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0); 9077 wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0); 9078 wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0); 9079 wlan_cfg_set_num_tx_spl_desc(soc->wlan_cfg_ctx, 0); 9080 wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0); 9081 break; 9082 default: 9083 dp_cdp_err("%pK: Invalid offload config %d", 9084 soc, val.cdp_psoc_param_en_nss_cfg); 9085 } 9086 9087 dp_cdp_err("%pK: nss-wifi<0> nss config is enabled" 9088 , soc); 9089 break; 9090 case CDP_SET_PREFERRED_HW_MODE: 9091 soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode; 9092 break; 9093 case CDP_IPA_ENABLE: 9094 soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled; 9095 break; 9096 case CDP_CFG_VDEV_STATS_HW_OFFLOAD: 9097 wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx, 9098 val.cdp_psoc_param_vdev_stats_hw_offload); 9099 break; 9100 case CDP_SAWF_ENABLE: 9101 wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled); 9102 break; 9103 case CDP_UMAC_RST_SKEL_ENABLE: 9104 dp_umac_rst_skel_enable_update(soc, val.cdp_umac_rst_skel); 9105 break; 9106 case CDP_UMAC_RESET_STATS: 9107 dp_umac_reset_stats_print(soc); 9108 break; 9109 case CDP_SAWF_STATS: 9110 wlan_cfg_set_sawf_stats_config(wlan_cfg_ctx, 9111 val.cdp_sawf_stats); 9112 break; 9113 case CDP_CFG_RX_PEER_METADATA_VER: 9114 dp_rx_peer_metadata_ver_update( 9115 soc, val.cdp_peer_metadata_ver); 9116 break; 9117 case 
CDP_CFG_TX_DESC_NUM: 9118 wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 9119 val.cdp_tx_desc_num); 9120 break; 9121 case CDP_CFG_TX_EXT_DESC_NUM: 9122 wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 9123 val.cdp_tx_ext_desc_num); 9124 break; 9125 case CDP_CFG_TX_RING_SIZE: 9126 wlan_cfg_set_tx_ring_size(wlan_cfg_ctx, 9127 val.cdp_tx_ring_size); 9128 break; 9129 case CDP_CFG_TX_COMPL_RING_SIZE: 9130 wlan_cfg_set_tx_comp_ring_size(wlan_cfg_ctx, 9131 val.cdp_tx_comp_ring_size); 9132 break; 9133 case CDP_CFG_RX_SW_DESC_NUM: 9134 wlan_cfg_set_dp_soc_rx_sw_desc_num(wlan_cfg_ctx, 9135 val.cdp_rx_sw_desc_num); 9136 break; 9137 case CDP_CFG_REO_DST_RING_SIZE: 9138 wlan_cfg_set_reo_dst_ring_size(wlan_cfg_ctx, 9139 val.cdp_reo_dst_ring_size); 9140 break; 9141 case CDP_CFG_RXDMA_REFILL_RING_SIZE: 9142 wlan_cfg_set_dp_soc_rxdma_refill_ring_size(wlan_cfg_ctx, 9143 val.cdp_rxdma_refill_ring_size); 9144 break; 9145 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL 9146 case CDP_CFG_RX_REFILL_POOL_NUM: 9147 wlan_cfg_set_rx_refill_buf_pool_size(wlan_cfg_ctx, 9148 val.cdp_rx_refill_buf_pool_size); 9149 break; 9150 #endif 9151 case CDP_CFG_AST_INDICATION_DISABLE: 9152 wlan_cfg_set_ast_indication_disable 9153 (wlan_cfg_ctx, val.cdp_ast_indication_disable); 9154 break; 9155 case CDP_CONFIG_DP_DEBUG_LOG: 9156 soc->dp_debug_log_en = val.cdp_psoc_param_dp_debug_log; 9157 break; 9158 default: 9159 break; 9160 } 9161 9162 return QDF_STATUS_SUCCESS; 9163 } 9164 9165 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) 9166 /** 9167 * dp_get_mldev_mode: function to get mlo operation mode 9168 * @soc: soc structure for data path 9169 * 9170 * Return: uint8_t 9171 */ 9172 static uint8_t dp_get_mldev_mode(struct dp_soc *soc) 9173 { 9174 return soc->mld_mode_ap; 9175 } 9176 #else 9177 static uint8_t dp_get_mldev_mode(struct dp_soc *cdp_soc) 9178 { 9179 return MLD_MODE_INVALID; 9180 } 9181 #endif 9182 9183 /** 9184 * dp_get_psoc_param: function to get parameters in soc 9185 * @cdp_soc: DP soc handle 
 * @param: parameter type to be get
 * @val: address of buffer
 *
 * Return: status
 */
static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
				    enum cdp_psoc_param_type param,
				    cdp_config_param_type *val)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;

	if (!soc)
		return QDF_STATUS_E_FAILURE;

	wlan_cfg_ctx = soc->wlan_cfg_ctx;

	switch (param) {
	case CDP_ENABLE_RATE_STATS:
		val->cdp_psoc_param_en_rate_stats = soc->peerstats_enabled;
		break;
	case CDP_CFG_PEER_EXT_STATS:
		val->cdp_psoc_param_pext_stats =
			wlan_cfg_is_peer_ext_stats_enabled(wlan_cfg_ctx);
		break;
	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
		val->cdp_psoc_param_vdev_stats_hw_offload =
			wlan_cfg_get_vdev_stats_hw_offload_config(wlan_cfg_ctx);
		break;
	case CDP_UMAC_RST_SKEL_ENABLE:
		val->cdp_umac_rst_skel = dp_umac_rst_skel_enable_get(soc);
		break;
	case CDP_TXRX_HAL_SOC_HDL:
		val->hal_soc_hdl = soc->hal_soc;
		break;
	case CDP_CFG_TX_DESC_NUM:
		val->cdp_tx_desc_num = wlan_cfg_get_num_tx_desc(wlan_cfg_ctx);
		break;
	case CDP_CFG_TX_EXT_DESC_NUM:
		val->cdp_tx_ext_desc_num =
			wlan_cfg_get_num_tx_ext_desc(wlan_cfg_ctx);
		break;
	case CDP_CFG_TX_RING_SIZE:
		val->cdp_tx_ring_size = wlan_cfg_tx_ring_size(wlan_cfg_ctx);
		break;
	case CDP_CFG_TX_COMPL_RING_SIZE:
		val->cdp_tx_comp_ring_size =
			wlan_cfg_tx_comp_ring_size(wlan_cfg_ctx);
		break;
	case CDP_CFG_RX_SW_DESC_NUM:
		val->cdp_rx_sw_desc_num =
			wlan_cfg_get_dp_soc_rx_sw_desc_num(wlan_cfg_ctx);
		break;
	case CDP_CFG_REO_DST_RING_SIZE:
		val->cdp_reo_dst_ring_size =
			wlan_cfg_get_reo_dst_ring_size(wlan_cfg_ctx);
		break;
	case CDP_CFG_RXDMA_REFILL_RING_SIZE:
		val->cdp_rxdma_refill_ring_size =
			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(wlan_cfg_ctx);
		break;
#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
	case CDP_CFG_RX_REFILL_POOL_NUM:
		val->cdp_rx_refill_buf_pool_size =
			wlan_cfg_get_rx_refill_buf_pool_size(wlan_cfg_ctx);
		break;
#endif
	case CDP_CFG_FISA_PARAMS:
		val->fisa_params.fisa_fst_size = wlan_cfg_get_rx_flow_search_table_size(soc->wlan_cfg_ctx);
		val->fisa_params.rx_flow_max_search =
			wlan_cfg_rx_fst_get_max_search(soc->wlan_cfg_ctx);
		val->fisa_params.rx_toeplitz_hash_key =
			wlan_cfg_rx_fst_get_hash_key(soc->wlan_cfg_ctx);
		break;
	case CDP_RX_PKT_TLV_SIZE:
		val->rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
		break;
	case CDP_CFG_GET_MLO_OPER_MODE:
		val->cdp_psoc_param_mlo_oper_mode = dp_get_mldev_mode(soc);
		break;
	case CDP_CFG_PEER_JITTER_STATS:
		val->cdp_psoc_param_jitter_stats =
			wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx);
		break;
	case CDP_CONFIG_DP_DEBUG_LOG:
		val->cdp_psoc_param_dp_debug_log = soc->dp_debug_log_en;
		break;
	default:
		dp_warn("Invalid param: %u", param);
		break;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_set_vdev_dscp_tid_map_wifi3() - Update Map ID selected for particular vdev
 * @cdp_soc: CDP SOC handle
 * @vdev_id: id of DP_VDEV handle
 * @map_id:ID of map that needs to be updated
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
						 uint8_t vdev_id,
						 uint8_t map_id)
{
	cdp_config_param_type val;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	if (vdev) {
		vdev->dscp_tid_map_id = map_id;
		val.cdp_vdev_param_dscp_tid_map_id = map_id;
		soc->arch_ops.txrx_set_vdev_param(soc,
						  vdev,
						  CDP_UPDATE_DSCP_TO_TID_MAP,
						  val);
		/* Update flag for transmit tid classification */
		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
			vdev->skip_sw_tid_classification |=
				DP_TX_HW_DSCP_TID_MAP_VALID;
		else
			vdev->skip_sw_tid_classification &=
				~DP_TX_HW_DSCP_TID_MAP_VALID;
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_FAILURE;
}

#ifdef DP_RATETABLE_SUPPORT
/* Translate (preamble, mcs, ht flag, guard interval) to a rate index;
 * htflag is currently unused by the lookup.
 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	uint32_t rix;
	uint16_t ratecode;
	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;

	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
			       (uint8_t)preamb, 1, punc_mode,
			       &rix, &ratecode);
}
#else
/* Rate table support compiled out: report 0 kbps */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	return 0;
}
#endif

/**
 * dp_txrx_get_pdev_stats() - Returns cdp_pdev_stats
 * @soc: DP soc handle
 * @pdev_id: id of DP pdev handle
 * @pdev_stats: buffer to copy to
 *
 * Return: status success/failure
 */
static QDF_STATUS
dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
		       struct cdp_pdev_stats *pdev_stats)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	/* Fold vdev/peer stats into the pdev aggregate before copying out */
	dp_aggregate_pdev_stats(pdev);

	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_update_vdev_me_stats() - Update vdev ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing specific stats structure
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: void
 */
static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
					 void *buf, uint8_t xmit_type)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
		return;
	}
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	/* Accumulate host-provided mcast-enhancement counters into
	 * the per-xmit-type vdev ingress stats.
	 */
	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].mcast_en.mcast_pkt,
			 host_stats->mcast_en.mcast_pkt.num,
			 host_stats->mcast_en.mcast_pkt.bytes);
	DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.dropped_map_error,
		     host_stats->mcast_en.dropped_map_error);
	DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.dropped_self_mac,
		     host_stats->mcast_en.dropped_self_mac);
	DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.dropped_send_fail,
		     host_stats->mcast_en.dropped_send_fail);
	DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.ucast,
		     host_stats->mcast_en.ucast);
	DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.fail_seg_alloc,
		     host_stats->mcast_en.fail_seg_alloc);
	DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.clone_fail,
		     host_stats->mcast_en.clone_fail);
}

/**
 * dp_txrx_update_vdev_igmp_me_stats() - Update vdev IGMP ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing specific stats structure
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: void
 */
static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
					      void *buf, uint8_t xmit_type)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
		return;
	}
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	DP_STATS_INC(vdev, tx_i[xmit_type].igmp_mcast_en.igmp_rcvd,
		     host_stats->igmp_mcast_en.igmp_rcvd);
	DP_STATS_INC(vdev, tx_i[xmit_type].igmp_mcast_en.igmp_ucast_converted,
		     host_stats->igmp_mcast_en.igmp_ucast_converted);
}

/**
 * dp_txrx_update_vdev_host_stats() - Update stats sent through CDP
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @buf: buffer containing specific stats structure
 * @stats_id: stats type
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
						 uint8_t vdev_id,
						 void *buf,
						 uint16_t stats_id,
						 uint8_t xmit_type)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		dp_cdp_err("%pK: Invalid vdev handle", soc);
		return QDF_STATUS_E_FAILURE;
	}

	switch (stats_id) {
	case DP_VDEV_STATS_PKT_CNT_ONLY:
		/* nothing beyond the packet counters to fold in */
		break;
	case DP_VDEV_STATS_TX_ME:
		dp_txrx_update_vdev_me_stats(vdev, buf, xmit_type);
		dp_txrx_update_vdev_igmp_me_stats(vdev, buf, xmit_type);
		break;
	default:
		qdf_info("Invalid stats_id %d", stats_id);
		break;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_get_peer_stats_wrapper() - will get cdp_peer_stats
 * @soc: soc handle
 * @peer_stats: destination buffer to copy to
 * @peer_info: peer info
 *
 * Return: status success/failure
 */
static QDF_STATUS
dp_txrx_get_peer_stats_wrapper(struct cdp_soc_t *soc,
			       struct cdp_peer_stats *peer_stats,
			       struct cdp_peer_info peer_info)
{
	struct dp_peer *peer = NULL;

	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
					 DP_MOD_ID_CDP);

	/* Buffer is zeroed even when the peer is not found */
	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	dp_get_peer_stats(peer, peer_stats);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_get_peer_stats() - will get cdp_peer_stats
 * @soc: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address of DP_PEER handle
 * @peer_stats: destination buffer to copy to
 *
 * Return: status success/failure
 */
static QDF_STATUS
dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
{
	struct cdp_peer_info peer_info = { 0 };

	/* wild peer type: match any peer (link or MLD) with this mac */
	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
				 CDP_WILD_PEER_TYPE);

	return dp_txrx_get_peer_stats_wrapper(soc, peer_stats, peer_info);
}

/**
 * dp_txrx_get_peer_stats_based_on_peer_type() - get peer stats based on the
 * peer type
 * @soc: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 * @peer_stats: buffer to copy to
 * @peer_type: type of peer
 *
 * Return: status success/failure
 */
static QDF_STATUS
dp_txrx_get_peer_stats_based_on_peer_type(struct cdp_soc_t *soc, uint8_t vdev_id,
					  uint8_t *peer_mac,
					  struct cdp_peer_stats *peer_stats,
					  enum cdp_peer_type peer_type)
{
	struct cdp_peer_info peer_info = { 0 };

	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
				 peer_type);

	return dp_txrx_get_peer_stats_wrapper(soc, peer_stats, peer_info);
}

#if defined WLAN_FEATURE_11BE_MLO && defined DP_MLO_LINK_STATS_SUPPORT
/**
 * dp_get_per_link_peer_stats() - Get per link stats
 * @peer: DP peer
 * @peer_stats: buffer to copy to
 * @peer_type: Peer type
 * @num_link: Number of ML links
 *
 * Return: status success/failure
 */
QDF_STATUS dp_get_per_link_peer_stats(struct dp_peer *peer,
				      struct cdp_peer_stats *peer_stats,
				      enum cdp_peer_type peer_type,
				      uint8_t num_link)
{
	uint8_t i, index = 0;
	struct dp_peer *link_peer;
	struct dp_mld_link_peers link_peers_info;
	struct cdp_peer_stats *stats;
	struct dp_soc *soc = peer->vdev->pdev->soc;

	/* Calibration and basic counters are filled from the given peer
	 * into the first stats entry before per-link iteration.
	 */
	dp_get_peer_calibr_stats(peer, peer_stats);
	dp_get_peer_basic_stats(peer, peer_stats);
dp_get_peer_tx_per(peer_stats); 9564 9565 if (IS_MLO_DP_MLD_PEER(peer)) { 9566 dp_get_link_peers_ref_from_mld_peer(soc, peer, 9567 &link_peers_info, 9568 DP_MOD_ID_GENERIC_STATS); 9569 for (i = 0; i < link_peers_info.num_links; i++) { 9570 link_peer = link_peers_info.link_peers[i]; 9571 if (qdf_unlikely(!link_peer)) 9572 continue; 9573 if (index > num_link) { 9574 dp_err("Request stats for %d link(s) is less than total link(s) %d", 9575 num_link, link_peers_info.num_links); 9576 break; 9577 } 9578 stats = &peer_stats[index]; 9579 dp_get_peer_per_pkt_stats(link_peer, stats); 9580 dp_get_peer_extd_stats(link_peer, stats); 9581 index++; 9582 } 9583 dp_release_link_peers_ref(&link_peers_info, 9584 DP_MOD_ID_GENERIC_STATS); 9585 } else { 9586 dp_get_peer_per_pkt_stats(peer, peer_stats); 9587 dp_get_peer_extd_stats(peer, peer_stats); 9588 } 9589 return QDF_STATUS_SUCCESS; 9590 } 9591 #else 9592 QDF_STATUS dp_get_per_link_peer_stats(struct dp_peer *peer, 9593 struct cdp_peer_stats *peer_stats, 9594 enum cdp_peer_type peer_type, 9595 uint8_t num_link) 9596 { 9597 dp_err("Per link stats not supported"); 9598 return QDF_STATUS_E_INVAL; 9599 } 9600 #endif 9601 9602 /** 9603 * dp_txrx_get_per_link_peer_stats() - Get per link peer stats 9604 * @soc: soc handle 9605 * @vdev_id: id of vdev handle 9606 * @peer_mac: peer mac address 9607 * @peer_stats: buffer to copy to 9608 * @peer_type: Peer type 9609 * @num_link: Number of ML links 9610 * 9611 * NOTE: For peer_type = CDP_MLD_PEER_TYPE peer_stats should point to 9612 * buffer of size = (sizeof(*peer_stats) * num_link) 9613 * 9614 * Return: status success/failure 9615 */ 9616 static QDF_STATUS 9617 dp_txrx_get_per_link_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id, 9618 uint8_t *peer_mac, 9619 struct cdp_peer_stats *peer_stats, 9620 enum cdp_peer_type peer_type, uint8_t num_link) 9621 { 9622 QDF_STATUS status; 9623 struct dp_peer *peer = NULL; 9624 struct cdp_peer_info peer_info = { 0 }; 9625 9626 
	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
				 peer_type);

	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
					 DP_MOD_ID_GENERIC_STATS);
	if (!peer)
		return QDF_STATUS_E_FAILURE;

	/* NOTE(review): only the first entry is zeroed here even though the
	 * MLD case documents a buffer of num_link entries — presumably the
	 * per-link fill overwrites the rest; confirm callers pre-clear.
	 */
	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));

	status = dp_get_per_link_peer_stats(peer, peer_stats, peer_type,
					    num_link);

	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);

	return status;
}

/**
 * dp_txrx_get_peer_stats_param() - will return specified cdp_peer_stats
 * @soc: soc handle
 * @vdev_id: vdev_id of vdev object
 * @peer_mac: mac address of the peer
 * @type: enum of required stats
 * @buf: buffer to hold the value
 *
 * Return: status success/failure
 */
static QDF_STATUS
dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
			     cdp_peer_stats_param_t *buf)
{
	QDF_STATUS ret;
	struct dp_peer *peer = NULL;
	struct cdp_peer_info peer_info = { 0 };

	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
				 CDP_WILD_PEER_TYPE);

	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
					 DP_MOD_ID_CDP);

	if (!peer) {
		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
			    soc, QDF_MAC_ADDR_REF(peer_mac));
		return QDF_STATUS_E_FAILURE;
	}

	/* Dispatch on which stats family the requested type falls into */
	if (type >= cdp_peer_per_pkt_stats_min &&
	    type < cdp_peer_per_pkt_stats_max) {
		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
	} else if (type >= cdp_peer_extd_stats_min &&
		   type < cdp_peer_extd_stats_max) {
		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
	} else {
		dp_err("%pK: Invalid stat type requested", soc);
		ret = QDF_STATUS_E_FAILURE;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return ret;
}

/**
 * dp_txrx_reset_peer_stats() - reset cdp_peer_stats for particular peer
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 *
 * Return: QDF_STATUS
 */
#ifdef WLAN_FEATURE_11BE_MLO
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
						       vdev_id, DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);

	/* For an MLD peer also clear every constituent link peer; link
	 * peers can live on different socs in multi-chip MLO.
	 */
	if (IS_MLO_DP_MLD_PEER(peer)) {
		uint8_t i;
		struct dp_peer *link_peer;
		struct dp_soc *link_peer_soc;
		struct dp_mld_link_peers link_peers_info;

		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;

			DP_STATS_CLR(link_peer);
			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
		}

		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_monitor_peer_reset_stats(soc, peer);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#else
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id,
						      DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);
	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#endif

/**
 * dp_txrx_get_vdev_stats() - Update buffer with cdp_vdev_stats
 * @soc_hdl: CDP SoC handle
 * @vdev_id: vdev Id
 * @buf: buffer for vdev stats
 * @is_aggregate: are aggregate stats being collected
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       void *buf, bool is_aggregate)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct cdp_vdev_stats *vdev_stats;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_RESOURCES;

	vdev_stats = (struct cdp_vdev_stats *)buf;

	/* Aggregate walks peers under the vdev; otherwise copy the
	 * vdev-local counters straight into the target buffer.
	 */
	if (is_aggregate) {
		dp_aggregate_vdev_stats(vdev, buf, DP_XMIT_LINK);
	} else {
		dp_copy_vdev_stats_to_tgt_buf(vdev_stats,
						    &vdev->stats, DP_XMIT_LINK);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_total_per() - get total per
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 *
 * Return: % error rate using retries per packet and success packets
 */
static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return 0;

	dp_aggregate_pdev_stats(pdev);
	/* guard the divide: no tx activity yet means 0% PER */
	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
		return 0;
	return qdf_do_div((pdev->stats.tx.retries * 100),
		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
}

/**
 * dp_txrx_stats_publish() - publish pdev stats into a buffer
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 * @buf: to hold pdev_stats
 *
 * Return: int
 */
static int
dp_txrx_stats_publish(struct cdp_soc_t
 *soc, uint8_t pdev_id,
		      struct cdp_stats_extd *buf)
{
	struct cdp_txrx_stats_req req = {0,};
	QDF_STATUS status;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return TXRX_STATS_LEVEL_OFF;

	/* only one FW stats request may be outstanding per pdev */
	if (pdev->pending_fw_stats_response) {
		dp_warn("pdev%d: prev req pending\n", pdev->pdev_id);
		return TXRX_STATS_LEVEL_OFF;
	}

	dp_aggregate_pdev_stats(pdev);

	pdev->pending_fw_stats_response = true;
	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
	pdev->fw_stats_tlv_bitmap_rcvd = 0;
	/* reset the event before sending; the response handler sets it */
	qdf_event_reset(&pdev->fw_stats_event);
	status = dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				req.param1, req.param2, req.param3, 0,
				req.cookie_val, 0);

	if (status != QDF_STATUS_SUCCESS) {
		dp_warn("pdev%d: tx stats req failed\n", pdev->pdev_id);
		pdev->pending_fw_stats_response = false;
		return TXRX_STATS_LEVEL_OFF;
	}

	/* second request: RX stats; both replies share fw_stats_event */
	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
	status = dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				req.param1, req.param2, req.param3, 0,
				req.cookie_val, 0);
	if (status != QDF_STATUS_SUCCESS) {
		dp_warn("pdev%d: rx stats req failed\n", pdev->pdev_id);
		pdev->pending_fw_stats_response = false;
		return TXRX_STATS_LEVEL_OFF;
	}

	/* The event may have already been signaled. Wait only if it's pending */
	if (!pdev->fw_stats_event.done) {
		status =
			qdf_wait_single_event(&pdev->fw_stats_event,
					      DP_MAX_SLEEP_TIME);

		if (status != QDF_STATUS_SUCCESS) {
			if (status == QDF_STATUS_E_TIMEOUT)
				dp_warn("pdev%d: fw stats timeout. TLVs rcvd 0x%llx\n",
					pdev->pdev_id,
					pdev->fw_stats_tlv_bitmap_rcvd);
			pdev->pending_fw_stats_response = false;
			return TXRX_STATS_LEVEL_OFF;
		}
	}

	/* NOTE(review): buf is struct cdp_stats_extd *, but a cdp_pdev_stats
	 * sized copy is done — assumes the extd struct begins with (or can
	 * hold) cdp_pdev_stats; confirm layout in cdp_txrx_stats_struct.h.
	 */
	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
	pdev->pending_fw_stats_response = false;

	return TXRX_STATS_LEVEL;
}

/**
 * dp_get_obss_stats() - Get Pdev OBSS stats from Fw
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 * @buf: to hold pdev obss stats
 * @req: Pointer to CDP TxRx stats
 *
 * Return: status
 */
static QDF_STATUS
dp_get_obss_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
		  struct cdp_pdev_obss_pd_stats_tlv *buf,
		  struct cdp_txrx_stats_req *req)
{
	QDF_STATUS status;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	/* only one OBSS stats request may be outstanding per pdev */
	if (pdev->pending_fw_obss_stats_response)
		return QDF_STATUS_E_AGAIN;

	pdev->pending_fw_obss_stats_response = true;
	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
	req->cookie_val = DBG_STATS_COOKIE_HTT_OBSS;
	qdf_event_reset(&pdev->fw_obss_stats_event);
	status = dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
					   req->param1, req->param2,
					   req->param3, 0, req->cookie_val,
					   req->mac_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		pdev->pending_fw_obss_stats_response = false;
		return status;
	}
	status =
		qdf_wait_single_event(&pdev->fw_obss_stats_event,
				      DP_MAX_SLEEP_TIME);

	if (status != QDF_STATUS_SUCCESS) {
		if (status == QDF_STATUS_E_TIMEOUT)
			qdf_debug("TIMEOUT_OCCURS");
		pdev->pending_fw_obss_stats_response = false;
		return QDF_STATUS_E_TIMEOUT;
	}
	/* copy out the TLV the response handler stored in pdev->stats */
	qdf_mem_copy(buf, &pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
		     sizeof(struct cdp_pdev_obss_pd_stats_tlv));
	pdev->pending_fw_obss_stats_response = false;
	return status;
}

/**
 * dp_clear_pdev_obss_pd_stats() - Clear pdev obss stats
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 * @req: Pointer to CDP TxRx stats request mac_id will be
 *	 pre-filled and should not be overwritten
 *
 * Return: status
 */
static QDF_STATUS
dp_clear_pdev_obss_pd_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
			    struct cdp_txrx_stats_req *req)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	/*
	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
	 * from param0 to param3 according to below rule:
	 *
	 * PARAM:
	 * - config_param0 : start_offset (stats type)
	 * - config_param1 : stats bmask from start offset
	 * - config_param2 : stats bmask from start offset + 32
	 * - config_param3 : stats bmask from start offset + 64
	 */
	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_RESET;
	req->param0 = HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
	req->param1 = 0x00000001;

	return dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
					 req->param1, req->param2, req->param3, 0,
					 cookie_val, req->mac_id);
}

/**
 * dp_set_pdev_dscp_tid_map_wifi3() - update dscp tid map in pdev
 * @soc_handle: soc handle
 * @pdev_id: id of DP_PDEV handle
 * @map_id: ID of map that needs to be updated
 * @tos: index value in map
 * @tid: tid value passed by the user
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
			       uint8_t pdev_id,
			       uint8_t map_id,
			       uint8_t tos, uint8_t tid)
{
	uint8_t dscp;
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	/* DSCP lives in the upper bits of the TOS byte */
	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
	pdev->dscp_tid_map[map_id][dscp] = tid;

	/* mirror into HW only for maps that HW actually backs */
	if (map_id < soc->num_hw_dscp_tid_map)
		hal_tx_update_dscp_tid(soc->hal_soc, tid,
				       map_id, dscp);
	else
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_SYSFS_DP_STATS
/**
 * dp_sysfs_event_trigger() - Trigger event to wait for firmware
 * stats request response.
 * @soc: soc handle
 * @cookie_val: cookie value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	/* wait for firmware response for sysfs stats request */
	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
		if (!soc) {
			dp_cdp_err("soc is NULL");
			return QDF_STATUS_E_FAILURE;
		}
		/* wait for event completion */
		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
		if (status == QDF_STATUS_SUCCESS)
			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
		else if (status == QDF_STATUS_E_TIMEOUT)
			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
		else
			dp_cdp_warn("sysfs_txrx_fw_request_done event error code %d", status);
	}

	return status;
}
#else /* WLAN_SYSFS_DP_STATS */
/* stub: without sysfs DP stats there is no FW response to wait for */
static QDF_STATUS
dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SYSFS_DP_STATS */

/**
 * dp_fw_stats_process() - Process TXRX FW stats request.
 * @vdev: DP VDEV handle
 * @req: stats request
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_fw_stats_process(struct dp_vdev *vdev,
		    struct cdp_txrx_stats_req *req)
{
	struct dp_pdev *pdev = NULL;
	struct dp_soc *soc = NULL;
	uint32_t stats = req->stats;
	uint8_t mac_id = req->mac_id;
	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;

	if (!vdev) {
		DP_TRACE(NONE, "VDEV not found");
		return QDF_STATUS_E_FAILURE;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		DP_TRACE(NONE, "PDEV not found");
		return QDF_STATUS_E_FAILURE;
	}

	soc = pdev->soc;
	if (!soc) {
		DP_TRACE(NONE, "soc not found");
		return QDF_STATUS_E_FAILURE;
	}

	/* In case request is from host sysfs for displaying stats on console */
	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
		cookie_val = DBG_SYSFS_STATS_COOKIE;

	/*
	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
	 * from param0 to param3 according to below rule:
	 *
	 * PARAM:
	 * - config_param0 : start_offset (stats type)
	 * - config_param1 : stats bmask from start offset
	 * - config_param2 : stats bmask from start offset + 32
	 * - config_param3 : stats bmask from start offset + 64
	 */
	if (req->stats == CDP_TXRX_STATS_0) {
		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
		req->param1 = 0xFFFFFFFF;
		req->param2 = 0xFFFFFFFF;
		req->param3 = 0xFFFFFFFF;
	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
	}

	/* NOTE(review): the send status of dp_h2t_ext_stats_msg_send is
	 * ignored on both branches; this function always reports success.
	 */
	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
		dp_h2t_ext_stats_msg_send(pdev,
					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
					  req->param0, req->param1, req->param2,
					  req->param3, 0, cookie_val,
					  mac_id);
	} else {
		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
					  req->param1, req->param2, req->param3,
					  0, cookie_val, mac_id);
	}

	/* for sysfs-originated requests, block until FW responds */
	dp_sysfs_event_trigger(soc, cookie_val);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_stats_request - function to map to firmware and host stats
 * @soc_handle: soc handle
 * @vdev_id: virtual device ID
 * @req: stats request
 *
 * Return: QDF_STATUS
 */
static
QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
				 uint8_t vdev_id,
				 struct cdp_txrx_stats_req *req)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
	int host_stats;
	int fw_stats;
	enum cdp_stats stats;
	int num_stats;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	QDF_STATUS status = QDF_STATUS_E_INVAL;

	if (!vdev || !req) {
		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
		dp_err("Invalid mac_id: %u request", req->mac_id);
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	stats = req->stats;
	if (stats >= CDP_TXRX_MAX_STATS) {
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	/*
	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
	 * has to be updated if new FW HTT stats added
	 */
	if (stats > CDP_TXRX_STATS_HTT_MAX)
		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;

	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);

	if (stats >= num_stats) {
		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	req->stats = stats;
	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
	host_stats = dp_stats_mapping_table[stats][STATS_HOST];

	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
		stats, fw_stats, host_stats);

	if (fw_stats != TXRX_FW_STATS_INVALID) {
		/* update request with FW stats type */
		req->stats = fw_stats;
		status = dp_fw_stats_process(vdev, req);
	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
			(host_stats <= TXRX_HOST_STATS_MAX))
		status = dp_print_host_stats(vdev, req, soc);
	else
		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
fail0:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}

/**
 * dp_soc_notify_asserted_soc() - API to notify asserted soc info
 * @psoc: CDP soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_soc_notify_asserted_soc(struct cdp_soc_t *psoc)
{
	struct dp_soc *soc = (struct dp_soc *)psoc;

	if (!soc) {
		dp_cdp_err("%pK: soc is NULL", soc);
		return QDF_STATUS_E_INVAL;
	}

	return dp_umac_reset_notify_asserted_soc(soc);
}

/**
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc: CDP soc handle
 * @value: Statistics option
 * @level: verbosity level
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
				     enum qdf_stats_verbosity_level level)
{
	struct dp_soc *soc =
		(struct dp_soc *)psoc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!soc) {
		dp_cdp_err("%pK: soc is NULL", soc);
		return QDF_STATUS_E_INVAL;
	}

	switch (value) {
	case CDP_TXRX_PATH_STATS:
		dp_txrx_path_stats(soc);
		dp_print_soc_interrupt_stats(soc);
		dp_print_reg_write_stats(soc);
		dp_pdev_print_tx_delay_stats(soc);
		/* Dump usage watermark stats for core TX/RX SRNGs */
		dp_dump_srng_high_wm_stats(soc,
					   DP_SRNG_WM_MASK_REO_DST |
					   DP_SRNG_WM_MASK_TX_COMP);
		if (soc->cdp_soc.ol_ops->dp_print_fisa_stats)
			soc->cdp_soc.ol_ops->dp_print_fisa_stats(
				CDP_FISA_STATS_ID_ERR_STATS);
		break;

	case CDP_RX_RING_STATS:
		dp_print_per_ring_stats(soc);
		break;

	case CDP_TXRX_TSO_STATS:
		dp_print_tso_stats(soc, level);
		break;

	case CDP_DUMP_TX_FLOW_POOL_INFO:
		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
		else
			dp_tx_dump_flow_pool_info_compact(soc);
		break;

	case CDP_DP_NAPI_STATS:
		dp_print_napi_stats(soc);
		break;

	case CDP_TXRX_DESC_STATS:
		/* TODO: NOT IMPLEMENTED */
		break;

	case CDP_DP_RX_FISA_STATS:
		if (soc->cdp_soc.ol_ops->dp_print_fisa_stats)
			soc->cdp_soc.ol_ops->dp_print_fisa_stats(
				CDP_FISA_STATS_ID_DUMP_SW_FST);
		break;

	case CDP_DP_SWLM_STATS:
		dp_print_swlm_stats(soc);
		break;

	case CDP_DP_TX_HW_LATENCY_STATS:
		dp_pdev_print_tx_delay_stats(soc);
		break;

	default:
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;

}

#ifdef WLAN_SYSFS_DP_STATS
/* Read the last stat type/mac_id the user requested, under rw_stats_lock */
static
void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
			    uint32_t *stat_type)
{
	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
	*stat_type = soc->sysfs_config->stat_type_requested;
	*mac_id = soc->sysfs_config->mac_id;

	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
}

/* Publish the user buffer the stats printer should write into */
static
void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
				       uint32_t curr_len,
				       uint32_t max_buf_len,
				       char *buf)
{
	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
	/* set sysfs_config parameters */
	soc->sysfs_config->buf = buf;
	soc->sysfs_config->curr_buffer_length = curr_len;
	soc->sysfs_config->max_buffer_length = max_buf_len;
qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer); 10338 } 10339 10340 static 10341 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl, 10342 char *buf, uint32_t buf_size) 10343 { 10344 uint32_t mac_id = 0; 10345 uint32_t stat_type = 0; 10346 uint32_t fw_stats = 0; 10347 uint32_t host_stats = 0; 10348 enum cdp_stats stats; 10349 struct cdp_txrx_stats_req req; 10350 uint32_t num_stats; 10351 struct dp_soc *soc = NULL; 10352 10353 if (!soc_hdl) { 10354 dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl); 10355 return QDF_STATUS_E_INVAL; 10356 } 10357 10358 soc = cdp_soc_t_to_dp_soc(soc_hdl); 10359 10360 if (!soc) { 10361 dp_cdp_err("%pK: soc is NULL", soc); 10362 return QDF_STATUS_E_INVAL; 10363 } 10364 10365 dp_sysfs_get_stat_type(soc, &mac_id, &stat_type); 10366 10367 stats = stat_type; 10368 if (stats >= CDP_TXRX_MAX_STATS) { 10369 dp_cdp_info("sysfs stat type requested is invalid"); 10370 return QDF_STATUS_E_INVAL; 10371 } 10372 /* 10373 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available 10374 * has to be updated if new FW HTT stats added 10375 */ 10376 if (stats > CDP_TXRX_MAX_STATS) 10377 stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX; 10378 10379 num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table); 10380 10381 if (stats >= num_stats) { 10382 dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d", 10383 soc, stats, num_stats); 10384 return QDF_STATUS_E_INVAL; 10385 } 10386 10387 /* build request */ 10388 fw_stats = dp_stats_mapping_table[stats][STATS_FW]; 10389 host_stats = dp_stats_mapping_table[stats][STATS_HOST]; 10390 10391 req.stats = stat_type; 10392 req.mac_id = mac_id; 10393 /* request stats to be printed */ 10394 qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock); 10395 10396 if (fw_stats != TXRX_FW_STATS_INVALID) { 10397 /* update request with FW stats type */ 10398 req.cookie_val = DBG_SYSFS_STATS_COOKIE; 10399 } else if ((host_stats != TXRX_HOST_STATS_INVALID) && 10400 (host_stats <= 
TXRX_HOST_STATS_MAX)) { 10401 req.cookie_val = DBG_STATS_COOKIE_DEFAULT; 10402 soc->sysfs_config->process_id = qdf_get_current_pid(); 10403 soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED; 10404 } 10405 10406 dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf); 10407 10408 dp_txrx_stats_request(soc_hdl, mac_id, &req); 10409 soc->sysfs_config->process_id = 0; 10410 soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED; 10411 10412 dp_sysfs_update_config_buf_params(soc, 0, 0, NULL); 10413 10414 qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock); 10415 return QDF_STATUS_SUCCESS; 10416 } 10417 10418 static 10419 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl, 10420 uint32_t stat_type, uint32_t mac_id) 10421 { 10422 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 10423 10424 if (!soc_hdl) { 10425 dp_cdp_err("%pK: soc is NULL", soc); 10426 return QDF_STATUS_E_INVAL; 10427 } 10428 10429 qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock); 10430 10431 soc->sysfs_config->stat_type_requested = stat_type; 10432 soc->sysfs_config->mac_id = mac_id; 10433 10434 qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock); 10435 10436 return QDF_STATUS_SUCCESS; 10437 } 10438 10439 static 10440 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl) 10441 { 10442 struct dp_soc *soc; 10443 QDF_STATUS status; 10444 10445 if (!soc_hdl) { 10446 dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl); 10447 return QDF_STATUS_E_INVAL; 10448 } 10449 10450 soc = soc_hdl; 10451 10452 soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config)); 10453 if (!soc->sysfs_config) { 10454 dp_cdp_err("failed to allocate memory for sysfs_config no memory"); 10455 return QDF_STATUS_E_NOMEM; 10456 } 10457 10458 status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done); 10459 /* create event for fw stats request from sysfs */ 10460 if (status != QDF_STATUS_SUCCESS) { 10461 dp_cdp_err("failed to create event sysfs_txrx_fw_request_done"); 10462 
qdf_mem_free(soc->sysfs_config); 10463 soc->sysfs_config = NULL; 10464 return QDF_STATUS_E_FAILURE; 10465 } 10466 10467 qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock); 10468 qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock); 10469 qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer); 10470 10471 return QDF_STATUS_SUCCESS; 10472 } 10473 10474 static 10475 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl) 10476 { 10477 struct dp_soc *soc; 10478 QDF_STATUS status; 10479 10480 if (!soc_hdl) { 10481 dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl); 10482 return QDF_STATUS_E_INVAL; 10483 } 10484 10485 soc = soc_hdl; 10486 if (!soc->sysfs_config) { 10487 dp_cdp_err("soc->sysfs_config is NULL"); 10488 return QDF_STATUS_E_FAILURE; 10489 } 10490 10491 status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done); 10492 if (status != QDF_STATUS_SUCCESS) 10493 dp_cdp_err("Failed to destroy event sysfs_txrx_fw_request_done"); 10494 10495 qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock); 10496 qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock); 10497 qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer); 10498 10499 qdf_mem_free(soc->sysfs_config); 10500 10501 return QDF_STATUS_SUCCESS; 10502 } 10503 10504 #else /* WLAN_SYSFS_DP_STATS */ 10505 10506 static 10507 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl) 10508 { 10509 return QDF_STATUS_SUCCESS; 10510 } 10511 10512 static 10513 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl) 10514 { 10515 return QDF_STATUS_SUCCESS; 10516 } 10517 #endif /* WLAN_SYSFS_DP_STATS */ 10518 10519 /** 10520 * dp_txrx_clear_dump_stats() - clear dumpStats 10521 * @soc_hdl: soc handle 10522 * @pdev_id: pdev ID 10523 * @value: stats option 10524 * 10525 * Return: 0 - Success, non-zero - failure 10526 */ 10527 static 10528 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 10529 uint8_t value) 10530 { 10531 struct dp_soc *soc = 
cdp_soc_t_to_dp_soc(soc_hdl); 10532 QDF_STATUS status = QDF_STATUS_SUCCESS; 10533 10534 if (!soc) { 10535 dp_err("soc is NULL"); 10536 return QDF_STATUS_E_INVAL; 10537 } 10538 10539 switch (value) { 10540 case CDP_TXRX_TSO_STATS: 10541 dp_txrx_clear_tso_stats(soc); 10542 break; 10543 10544 case CDP_DP_TX_HW_LATENCY_STATS: 10545 dp_pdev_clear_tx_delay_stats(soc); 10546 break; 10547 10548 default: 10549 status = QDF_STATUS_E_INVAL; 10550 break; 10551 } 10552 10553 return status; 10554 } 10555 10556 static QDF_STATUS 10557 dp_txrx_get_interface_stats(struct cdp_soc_t *soc_hdl, 10558 uint8_t vdev_id, 10559 void *buf, 10560 bool is_aggregate) 10561 { 10562 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 10563 10564 if (soc && soc->arch_ops.dp_get_interface_stats) 10565 return soc->arch_ops.dp_get_interface_stats(soc_hdl, 10566 vdev_id, 10567 buf, 10568 is_aggregate); 10569 return QDF_STATUS_E_FAILURE; 10570 } 10571 10572 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 10573 /** 10574 * dp_update_flow_control_parameters() - API to store datapath 10575 * config parameters 10576 * @soc: soc handle 10577 * @params: ini parameter handle 10578 * 10579 * Return: void 10580 */ 10581 static inline 10582 void dp_update_flow_control_parameters(struct dp_soc *soc, 10583 struct cdp_config_params *params) 10584 { 10585 soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold = 10586 params->tx_flow_stop_queue_threshold; 10587 soc->wlan_cfg_ctx->tx_flow_start_queue_offset = 10588 params->tx_flow_start_queue_offset; 10589 } 10590 #else 10591 static inline 10592 void dp_update_flow_control_parameters(struct dp_soc *soc, 10593 struct cdp_config_params *params) 10594 { 10595 } 10596 #endif 10597 10598 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT 10599 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */ 10600 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024 10601 10602 /* Max packet limit for RX REAP Loop (dp_rx_process) */ 10603 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024 10604 10605 static 10606 void 
dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
				   struct cdp_config_params *params)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
		params->tx_comp_loop_pkt_limit;

	/* enable the end-of-loop data check only when the configured limit
	 * is below the hard maximum
	 */
	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
	else
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;

	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
		params->rx_reap_loop_pkt_limit;

	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
	else
		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;

	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
		params->rx_hp_oos_update_limit;

	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
}

#else
static inline
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{ }

#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */

/**
 * dp_update_config_parameters() - API to store datapath
 * config parameters
 * @psoc: soc handle
 * @params: ini parameter handle
 *
 * Return: status
 */
static
QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
				       struct cdp_config_params *params)
{
	struct dp_soc *soc = (struct dp_soc *)psoc;

	if (!(soc)) {
		dp_cdp_err("%pK: Invalid handle", soc);
		return QDF_STATUS_E_INVAL;
	}

	/* copy INI-derived feature switches into the soc wlan cfg context */
	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
		params->p2p_tcp_udp_checksumoffload;
	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
		params->nan_tcp_udp_checksumoffload;
	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
		params->tcp_udp_checksumoffload;
	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;

	dp_update_rx_soft_irq_limit_params(soc, params);
	dp_update_flow_control_parameters(soc, params);

	return QDF_STATUS_SUCCESS;
}

static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};

/**
 * dp_txrx_data_tx_cb_set() - set the callback for non standard tx
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual interface id
 * @callback: callback function
 * @ctxt: callback context
 *
 */
static void
dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       ol_txrx_data_tx_cb callback, void *ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return;

	vdev->tx_non_std_data_callback.func = callback;
	vdev->tx_non_std_data_callback.ctxt = ctxt;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/**
 * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
 * @soc: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 *
 * Return: opaque
pointer to dp txrx handle 10721 */ 10722 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id) 10723 { 10724 struct dp_pdev *pdev = 10725 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 10726 pdev_id); 10727 if (qdf_unlikely(!pdev)) 10728 return NULL; 10729 10730 return pdev->dp_txrx_handle; 10731 } 10732 10733 /** 10734 * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev 10735 * @soc: datapath soc handle 10736 * @pdev_id: id of datapath pdev handle 10737 * @dp_txrx_hdl: opaque pointer for dp_txrx_handle 10738 * 10739 * Return: void 10740 */ 10741 static void 10742 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id, 10743 void *dp_txrx_hdl) 10744 { 10745 struct dp_pdev *pdev = 10746 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 10747 pdev_id); 10748 10749 if (!pdev) 10750 return; 10751 10752 pdev->dp_txrx_handle = dp_txrx_hdl; 10753 } 10754 10755 /** 10756 * dp_vdev_get_dp_ext_handle() - get dp handle from vdev 10757 * @soc_hdl: datapath soc handle 10758 * @vdev_id: vdev id 10759 * 10760 * Return: opaque pointer to dp txrx handle 10761 */ 10762 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl, 10763 uint8_t vdev_id) 10764 { 10765 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 10766 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 10767 DP_MOD_ID_CDP); 10768 void *dp_ext_handle; 10769 10770 if (!vdev) 10771 return NULL; 10772 dp_ext_handle = vdev->vdev_dp_ext_handle; 10773 10774 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); 10775 return dp_ext_handle; 10776 } 10777 10778 /** 10779 * dp_vdev_set_dp_ext_handle() - set dp handle in vdev 10780 * @soc_hdl: datapath soc handle 10781 * @vdev_id: vdev id 10782 * @size: size of advance dp handle 10783 * 10784 * Return: QDF_STATUS 10785 */ 10786 static QDF_STATUS 10787 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id, 10788 uint16_t size) 10789 { 10790 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 10791 
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	void *dp_ext_handle;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	dp_ext_handle = qdf_mem_malloc(size);

	if (!dp_ext_handle) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	/* NOTE(review): any previously stored vdev_dp_ext_handle is
	 * overwritten here without being freed — presumably callers set
	 * this exactly once per vdev lifetime; confirm to rule out a leak.
	 */
	vdev->vdev_dp_ext_handle = dp_ext_handle;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
 * connection for this vdev
 * @soc_hdl: CDP soc handle
 * @vdev_id: vdev ID
 * @action: Add/Delete action
 *
 * Adjusts the per-vdev count of latency critical connections. The vdev
 * reference taken at entry is released on every path.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       enum vdev_ll_conn_actions action)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		dp_err("LL connection action for invalid vdev %d", vdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	switch (action) {
	case CDP_VDEV_LL_CONN_ADD:
		vdev->num_latency_critical_conn++;
		break;

	case CDP_VDEV_LL_CONN_DEL:
		/* NOTE(review): no underflow guard — assumes callers always
		 * balance ADD/DEL; confirm against the caller contract.
		 */
		vdev->num_latency_critical_conn--;
		break;

	default:
		/* Unknown action is logged but still reported as success */
		dp_err("LL connection action invalid %d", action);
		break;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
 * @soc_hdl: CDP Soc handle
 * @value: Enable/Disable value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
					 uint8_t value)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	/* SWLM can only be toggled after it has been initialized */
	if (!soc->swlm.is_init) {
		dp_err("SWLM is not initialized");
		return QDF_STATUS_E_FAILURE;
	}

	/* Normalize any non-zero value to 1 */
	soc->swlm.is_enabled = !!value;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
 * @soc_hdl: CDP Soc handle
 *
 * Return: 1 if SWLM is enabled, 0 otherwise
 */
static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	return soc->swlm.is_enabled;
}
#endif

/**
 * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
 * @soc_handle: datapath soc handle
 *
 * Return: opaque pointer to external dp (non-core DP)
 */
static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	return soc->external_txrx_handle;
}

/**
 * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
 * @soc_handle: datapath soc handle
 * @txrx_handle: opaque pointer to external dp (non-core DP)
 *
 * Return: void
 */
static void
dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->external_txrx_handle = txrx_handle;
}

/**
 * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the datapath pdev handle
 * @lmac_id: lmac id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_soc_map_pdev_to_lmac
(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 10927 uint32_t lmac_id) 10928 { 10929 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 10930 10931 wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx, 10932 pdev_id, 10933 lmac_id); 10934 10935 /*Set host PDEV ID for lmac_id*/ 10936 wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx, 10937 pdev_id, 10938 lmac_id); 10939 10940 return QDF_STATUS_SUCCESS; 10941 } 10942 10943 /** 10944 * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping 10945 * @soc_hdl: datapath soc handle 10946 * @pdev_id: id of the datapath pdev handle 10947 * @lmac_id: lmac id 10948 * 10949 * In the event of a dynamic mode change, update the pdev to lmac mapping 10950 * 10951 * Return: QDF_STATUS 10952 */ 10953 static QDF_STATUS 10954 dp_soc_handle_pdev_mode_change 10955 (struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 10956 uint32_t lmac_id) 10957 { 10958 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 10959 struct dp_vdev *vdev = NULL; 10960 uint8_t hw_pdev_id, mac_id; 10961 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, 10962 pdev_id); 10963 int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx); 10964 10965 if (qdf_unlikely(!pdev)) 10966 return QDF_STATUS_E_FAILURE; 10967 10968 pdev->lmac_id = lmac_id; 10969 pdev->target_pdev_id = 10970 dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id); 10971 dp_info("mode change %d %d", pdev->pdev_id, pdev->lmac_id); 10972 10973 /*Set host PDEV ID for lmac_id*/ 10974 wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx, 10975 pdev->pdev_id, 10976 lmac_id); 10977 10978 hw_pdev_id = 10979 dp_get_target_pdev_id_for_host_pdev_id(soc, 10980 pdev->pdev_id); 10981 10982 /* 10983 * When NSS offload is enabled, send pdev_id->lmac_id 10984 * and pdev_id to hw_pdev_id to NSS FW 10985 */ 10986 if (nss_config) { 10987 mac_id = pdev->lmac_id; 10988 if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id) 10989 soc->cdp_soc.ol_ops-> 10990 pdev_update_lmac_n_target_pdev_id( 10991 soc->ctrl_psoc, 10992 &pdev_id, &mac_id, 
&hw_pdev_id); 10993 } 10994 10995 qdf_spin_lock_bh(&pdev->vdev_list_lock); 10996 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 10997 DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, 10998 hw_pdev_id); 10999 vdev->lmac_id = pdev->lmac_id; 11000 } 11001 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 11002 11003 return QDF_STATUS_SUCCESS; 11004 } 11005 11006 /** 11007 * dp_soc_set_pdev_status_down() - set pdev down/up status 11008 * @soc: datapath soc handle 11009 * @pdev_id: id of datapath pdev handle 11010 * @is_pdev_down: pdev down/up status 11011 * 11012 * Return: QDF_STATUS 11013 */ 11014 static QDF_STATUS 11015 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id, 11016 bool is_pdev_down) 11017 { 11018 struct dp_pdev *pdev = 11019 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 11020 pdev_id); 11021 if (!pdev) 11022 return QDF_STATUS_E_FAILURE; 11023 11024 pdev->is_pdev_down = is_pdev_down; 11025 return QDF_STATUS_SUCCESS; 11026 } 11027 11028 /** 11029 * dp_get_cfg_capabilities() - get dp capabilities 11030 * @soc_handle: datapath soc handle 11031 * @dp_caps: enum for dp capabilities 11032 * 11033 * Return: bool to determine if dp caps is enabled 11034 */ 11035 static bool 11036 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle, 11037 enum cdp_capabilities dp_caps) 11038 { 11039 struct dp_soc *soc = (struct dp_soc *)soc_handle; 11040 11041 return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps); 11042 } 11043 11044 #ifdef FEATURE_AST 11045 static QDF_STATUS 11046 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 11047 uint8_t *peer_mac) 11048 { 11049 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 11050 QDF_STATUS status = QDF_STATUS_SUCCESS; 11051 struct dp_peer *peer = 11052 dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id, 11053 DP_MOD_ID_CDP); 11054 11055 /* Peer can be null for monitor vap mac address */ 11056 if (!peer) { 11057 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 11058 "%s: Invalid 
peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* Mark the peer for logical delete before tearing down AST state */
	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);

	/* AST entries must be flushed under the soc-wide AST lock */
	qdf_spin_lock_bh(&soc->ast_lock);
	dp_peer_send_wds_disconnect(soc, peer);
	dp_peer_delete_ast_entries(soc, peer);
	qdf_spin_unlock_bh(&soc->ast_lock);

	/* Drop the reference taken by dp_peer_find_hash_find() */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
#endif

#ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * dp_dump_pdev_rx_protocol_tag_stats() - dump the number of packets tagged for
 * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
 * @soc: cdp_soc handle
 * @pdev_id: id of cdp_pdev handle
 * @protocol_type: protocol type for which stats should be displayed
 *
 * Return: none
 */
static inline void
dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
				   uint16_t protocol_type)
{
	/* No-op stub: RX tag statistics support is compiled out */
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_update_pdev_rx_protocol_tag() - Add/remove a protocol tag that should be
 * applied to the desired protocol type packets
 * @soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask that indicates what protocol types
 * are enabled for tagging. 
zero indicates disable feature, non-zero indicates
 * enable feature
 * @protocol_type: new protocol type for which the tag is being added
 * @tag: user configured tag for the new protocol
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_update_pdev_rx_protocol_tag(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint32_t enable_rx_protocol_tag,
			       uint16_t protocol_type,
			       uint16_t tag)
{
	/* No-op stub: protocol-type tagging support is compiled out */
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_set_rx_flow_tag() - add/delete a flow
 * @cdp_soc: CDP soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow tuple that is to be added to/deleted from flow search table
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
		   struct cdp_rx_flow_info *flow_info)
{
	/* No-op stub: RX flow tagging support is compiled out */
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_dump_rx_flow_tag_stats() - dump the number of packets tagged for
 * given flow 5-tuple
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow 5-tuple for which stats should be displayed
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
			  struct cdp_rx_flow_info *flow_info)
{
	/* No-op stub: RX flow tagging support is compiled out */
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

/**
 * dp_peer_map_attach_wifi3() - allocate peer map / AST tables for the soc
 * @soc_hdl: datapath soc handle
 * @max_peers: maximum number of peers supported
 * @max_ast_index: maximum AST index to configure in wlan_cfg
 * @peer_map_unmap_versions: HTT peer map/unmap event version support
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
					   uint32_t max_peers,
					   uint32_t max_ast_index,
					   uint8_t peer_map_unmap_versions)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status;

	soc->max_peers = max_peers;

	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);

	status = 
soc->arch_ops.txrx_peer_map_attach(soc); 11160 if (!QDF_IS_STATUS_SUCCESS(status)) { 11161 dp_err("failure in allocating peer tables"); 11162 return QDF_STATUS_E_FAILURE; 11163 } 11164 11165 dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u", 11166 max_peers, soc->max_peer_id, max_ast_index); 11167 11168 status = dp_peer_find_attach(soc); 11169 if (!QDF_IS_STATUS_SUCCESS(status)) { 11170 dp_err("Peer find attach failure"); 11171 goto fail; 11172 } 11173 11174 soc->peer_map_unmap_versions = peer_map_unmap_versions; 11175 soc->peer_map_attach_success = TRUE; 11176 11177 return QDF_STATUS_SUCCESS; 11178 fail: 11179 soc->arch_ops.txrx_peer_map_detach(soc); 11180 11181 return status; 11182 } 11183 11184 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t *soc_hdl, 11185 enum cdp_soc_param_t param, 11186 uint32_t value) 11187 { 11188 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 11189 11190 switch (param) { 11191 case DP_SOC_PARAM_MSDU_EXCEPTION_DESC: 11192 soc->num_msdu_exception_desc = value; 11193 dp_info("num_msdu exception_desc %u", 11194 value); 11195 break; 11196 case DP_SOC_PARAM_CMEM_FSE_SUPPORT: 11197 if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx)) 11198 soc->fst_in_cmem = !!value; 11199 dp_info("FW supports CMEM FSE %u", value); 11200 break; 11201 case DP_SOC_PARAM_MAX_AST_AGEOUT: 11202 soc->max_ast_ageout_count = value; 11203 dp_info("Max ast ageout count %u", soc->max_ast_ageout_count); 11204 break; 11205 case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT: 11206 soc->eapol_over_control_port = value; 11207 dp_info("Eapol over control_port:%d", 11208 soc->eapol_over_control_port); 11209 break; 11210 case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT: 11211 soc->multi_peer_grp_cmd_supported = value; 11212 dp_info("Multi Peer group command support:%d", 11213 soc->multi_peer_grp_cmd_supported); 11214 break; 11215 case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT: 11216 soc->features.rssi_dbm_conv_support = value; 11217 dp_info("Rssi dbm conversion support:%u", 
			soc->features.rssi_dbm_conv_support);
		break;
	case DP_SOC_PARAM_UMAC_HW_RESET_SUPPORT:
		soc->features.umac_hw_reset_support = value;
		dp_info("UMAC HW reset support :%u",
			soc->features.umac_hw_reset_support);
		break;
	case DP_SOC_PARAM_MULTI_RX_REORDER_SETUP_SUPPORT:
		soc->features.multi_rx_reorder_q_setup_support = value;
		dp_info("Multi rx reorder queue setup support: %u",
			soc->features.multi_rx_reorder_q_setup_support);
		break;
	default:
		/* Unknown params are logged and ignored, not failed */
		dp_info("not handled param %d ", param);
		break;
	}

	return QDF_STATUS_SUCCESS;
}

static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
				      void *stats_ctx)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
}

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
 * dp_peer_flush_rate_stats_req() - Flush peer rate stats
 * @soc: Datapath SOC handle
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Per-peer iterator callback (see dp_pdev_iterate_peer() in
 * dp_flush_rate_stats_req()) that raises a WDI flush-rate-stats event
 * for each peer, skipping the vdev's self peer.
 *
 * Return: void
 */
static void
dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
			     void *arg)
{
	/* Skip self peer */
	if (!qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
			 QDF_MAC_ADDR_SIZE))
		return;

	dp_wdi_event_handler(
		WDI_EVENT_FLUSH_RATE_STATS_REQ,
		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
		peer->peer_id,
		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
}

/**
 * dp_flush_rate_stats_req() - Flush peer rate stats in pdev
 * @soc_hdl: Datapath SOC handle
 * @pdev_id: pdev_id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev 
*pdev = 11283 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 11284 pdev_id); 11285 if (!pdev) 11286 return QDF_STATUS_E_FAILURE; 11287 11288 dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL, 11289 DP_MOD_ID_CDP); 11290 11291 return QDF_STATUS_SUCCESS; 11292 } 11293 #else 11294 static inline QDF_STATUS 11295 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl, 11296 uint8_t pdev_id) 11297 { 11298 return QDF_STATUS_SUCCESS; 11299 } 11300 #endif 11301 11302 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE 11303 #ifdef WLAN_FEATURE_11BE_MLO 11304 /** 11305 * dp_get_peer_extd_rate_link_stats() - function to get peer 11306 * extended rate and link stats 11307 * @soc_hdl: dp soc handler 11308 * @mac_addr: mac address of peer 11309 * 11310 * Return: QDF_STATUS 11311 */ 11312 static QDF_STATUS 11313 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr) 11314 { 11315 uint8_t i; 11316 struct dp_peer *link_peer; 11317 struct dp_soc *link_peer_soc; 11318 struct dp_mld_link_peers link_peers_info; 11319 struct dp_peer *peer = NULL; 11320 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 11321 struct cdp_peer_info peer_info = { 0 }; 11322 11323 if (!mac_addr) { 11324 dp_err("NULL peer mac addr"); 11325 return QDF_STATUS_E_FAILURE; 11326 } 11327 11328 DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false, 11329 CDP_WILD_PEER_TYPE); 11330 11331 peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP); 11332 if (!peer) { 11333 dp_err("Peer is NULL"); 11334 return QDF_STATUS_E_FAILURE; 11335 } 11336 11337 if (IS_MLO_DP_MLD_PEER(peer)) { 11338 dp_get_link_peers_ref_from_mld_peer(soc, peer, 11339 &link_peers_info, 11340 DP_MOD_ID_CDP); 11341 for (i = 0; i < link_peers_info.num_links; i++) { 11342 link_peer = link_peers_info.link_peers[i]; 11343 link_peer_soc = link_peer->vdev->pdev->soc; 11344 dp_wdi_event_handler(WDI_EVENT_FLUSH_RATE_STATS_REQ, 11345 link_peer_soc, 11346 dp_monitor_peer_get_peerstats_ctx 11347 
(link_peer_soc, link_peer), 11348 link_peer->peer_id, 11349 WDI_NO_VAL, 11350 link_peer->vdev->pdev->pdev_id); 11351 } 11352 dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP); 11353 } else { 11354 dp_wdi_event_handler( 11355 WDI_EVENT_FLUSH_RATE_STATS_REQ, soc, 11356 dp_monitor_peer_get_peerstats_ctx(soc, peer), 11357 peer->peer_id, 11358 WDI_NO_VAL, peer->vdev->pdev->pdev_id); 11359 } 11360 11361 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 11362 return QDF_STATUS_SUCCESS; 11363 } 11364 #else 11365 static QDF_STATUS 11366 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr) 11367 { 11368 struct dp_peer *peer = NULL; 11369 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 11370 11371 if (!mac_addr) { 11372 dp_err("NULL peer mac addr"); 11373 return QDF_STATUS_E_FAILURE; 11374 } 11375 11376 peer = dp_peer_find_hash_find(soc, mac_addr, 0, 11377 DP_VDEV_ALL, DP_MOD_ID_CDP); 11378 if (!peer) { 11379 dp_err("Peer is NULL"); 11380 return QDF_STATUS_E_FAILURE; 11381 } 11382 11383 dp_wdi_event_handler( 11384 WDI_EVENT_FLUSH_RATE_STATS_REQ, soc, 11385 dp_monitor_peer_get_peerstats_ctx(soc, peer), 11386 peer->peer_id, 11387 WDI_NO_VAL, peer->vdev->pdev->pdev_id); 11388 11389 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 11390 return QDF_STATUS_SUCCESS; 11391 } 11392 #endif 11393 #else 11394 static inline QDF_STATUS 11395 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr) 11396 { 11397 return QDF_STATUS_SUCCESS; 11398 } 11399 #endif 11400 11401 static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl, 11402 uint8_t vdev_id, 11403 uint8_t *mac_addr) 11404 { 11405 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 11406 struct dp_peer *peer; 11407 void *peerstats_ctx = NULL; 11408 11409 if (mac_addr) { 11410 peer = dp_peer_find_hash_find(soc, mac_addr, 11411 0, vdev_id, 11412 DP_MOD_ID_CDP); 11413 if (!peer) 11414 return NULL; 11415 11416 if (!IS_MLO_DP_MLD_PEER(peer)) 11417 peerstats_ctx = 
dp_monitor_peer_get_peerstats_ctx(soc, 11418 peer); 11419 11420 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 11421 } 11422 11423 return peerstats_ctx; 11424 } 11425 11426 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE 11427 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc, 11428 uint8_t pdev_id, 11429 void *buf) 11430 { 11431 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS, 11432 (struct dp_soc *)soc, buf, HTT_INVALID_PEER, 11433 WDI_NO_VAL, pdev_id); 11434 return QDF_STATUS_SUCCESS; 11435 } 11436 #else 11437 static inline QDF_STATUS 11438 dp_peer_flush_rate_stats(struct cdp_soc_t *soc, 11439 uint8_t pdev_id, 11440 void *buf) 11441 { 11442 return QDF_STATUS_SUCCESS; 11443 } 11444 #endif 11445 11446 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle) 11447 { 11448 struct dp_soc *soc = (struct dp_soc *)soc_handle; 11449 11450 return soc->rate_stats_ctx; 11451 } 11452 11453 /** 11454 * dp_get_cfg() - get dp cfg 11455 * @soc: cdp soc handle 11456 * @cfg: cfg enum 11457 * 11458 * Return: cfg value 11459 */ 11460 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg) 11461 { 11462 struct dp_soc *dpsoc = (struct dp_soc *)soc; 11463 uint32_t value = 0; 11464 11465 switch (cfg) { 11466 case cfg_dp_enable_data_stall: 11467 value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection; 11468 break; 11469 case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload: 11470 value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload; 11471 break; 11472 case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload: 11473 value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload; 11474 break; 11475 case cfg_dp_enable_ip_tcp_udp_checksum_offload: 11476 value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload; 11477 break; 11478 case cfg_dp_disable_legacy_mode_csum_offload: 11479 value = dpsoc->wlan_cfg_ctx-> 11480 legacy_mode_checksumoffload_disable; 11481 break; 11482 case cfg_dp_tso_enable: 11483 value = dpsoc->wlan_cfg_ctx->tso_enabled; 11484 break; 11485 
case cfg_dp_lro_enable: 11486 value = dpsoc->wlan_cfg_ctx->lro_enabled; 11487 break; 11488 case cfg_dp_gro_enable: 11489 value = dpsoc->wlan_cfg_ctx->gro_enabled; 11490 break; 11491 case cfg_dp_tc_based_dyn_gro_enable: 11492 value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro; 11493 break; 11494 case cfg_dp_tc_ingress_prio: 11495 value = dpsoc->wlan_cfg_ctx->tc_ingress_prio; 11496 break; 11497 case cfg_dp_sg_enable: 11498 value = dpsoc->wlan_cfg_ctx->sg_enabled; 11499 break; 11500 case cfg_dp_tx_flow_start_queue_offset: 11501 value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset; 11502 break; 11503 case cfg_dp_tx_flow_stop_queue_threshold: 11504 value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold; 11505 break; 11506 case cfg_dp_disable_intra_bss_fwd: 11507 value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd; 11508 break; 11509 case cfg_dp_pktlog_buffer_size: 11510 value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size; 11511 break; 11512 case cfg_dp_wow_check_rx_pending: 11513 value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable; 11514 break; 11515 case cfg_dp_local_pkt_capture: 11516 value = wlan_cfg_get_local_pkt_capture(dpsoc->wlan_cfg_ctx); 11517 break; 11518 default: 11519 value = 0; 11520 } 11521 11522 return value; 11523 } 11524 11525 #ifdef PEER_FLOW_CONTROL 11526 /** 11527 * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params 11528 * @soc_handle: datapath soc handle 11529 * @pdev_id: id of datapath pdev handle 11530 * @param: ol ath params 11531 * @value: value of the flag 11532 * @buff: Buffer to be passed 11533 * 11534 * Implemented this function same as legacy function. In legacy code, single 11535 * function is used to display stats and update pdev params. 11536 * 11537 * Return: 0 for success. nonzero for failure. 
11538 */ 11539 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle, 11540 uint8_t pdev_id, 11541 enum _dp_param_t param, 11542 uint32_t value, void *buff) 11543 { 11544 struct dp_soc *soc = (struct dp_soc *)soc_handle; 11545 struct dp_pdev *pdev = 11546 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, 11547 pdev_id); 11548 11549 if (qdf_unlikely(!pdev)) 11550 return 1; 11551 11552 soc = pdev->soc; 11553 if (!soc) 11554 return 1; 11555 11556 switch (param) { 11557 #ifdef QCA_ENH_V3_STATS_SUPPORT 11558 case DP_PARAM_VIDEO_DELAY_STATS_FC: 11559 if (value) 11560 pdev->delay_stats_flag = true; 11561 else 11562 pdev->delay_stats_flag = false; 11563 break; 11564 case DP_PARAM_VIDEO_STATS_FC: 11565 qdf_print("------- TID Stats ------\n"); 11566 dp_pdev_print_tid_stats(pdev); 11567 qdf_print("------ Delay Stats ------\n"); 11568 dp_pdev_print_delay_stats(pdev); 11569 qdf_print("------ Rx Error Stats ------\n"); 11570 dp_pdev_print_rx_error_stats(pdev); 11571 break; 11572 #endif 11573 case DP_PARAM_TOTAL_Q_SIZE: 11574 { 11575 uint32_t tx_min, tx_max; 11576 11577 tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx); 11578 tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); 11579 11580 if (!buff) { 11581 if ((value >= tx_min) && (value <= tx_max)) { 11582 pdev->num_tx_allowed = value; 11583 } else { 11584 dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d", 11585 soc, tx_min, tx_max); 11586 break; 11587 } 11588 } else { 11589 *(int *)buff = pdev->num_tx_allowed; 11590 } 11591 } 11592 break; 11593 default: 11594 dp_tx_info("%pK: not handled param %d ", soc, param); 11595 break; 11596 } 11597 11598 return 0; 11599 } 11600 #endif 11601 11602 #ifdef DP_UMAC_HW_RESET_SUPPORT 11603 /** 11604 * dp_reset_interrupt_ring_masks() - Reset rx interrupt masks 11605 * @soc: dp soc handle 11606 * 11607 * Return: void 11608 */ 11609 static void dp_reset_interrupt_ring_masks(struct dp_soc *soc) 11610 { 11611 struct dp_intr_bkp *intr_bkp; 
11612 struct dp_intr *intr_ctx; 11613 int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); 11614 int i; 11615 11616 intr_bkp = 11617 (struct dp_intr_bkp *)qdf_mem_malloc_atomic(sizeof(struct dp_intr_bkp) * 11618 num_ctxt); 11619 11620 qdf_assert_always(intr_bkp); 11621 11622 soc->umac_reset_ctx.intr_ctx_bkp = intr_bkp; 11623 for (i = 0; i < num_ctxt; i++) { 11624 intr_ctx = &soc->intr_ctx[i]; 11625 11626 intr_bkp->tx_ring_mask = intr_ctx->tx_ring_mask; 11627 intr_bkp->rx_ring_mask = intr_ctx->rx_ring_mask; 11628 intr_bkp->rx_mon_ring_mask = intr_ctx->rx_mon_ring_mask; 11629 intr_bkp->rx_err_ring_mask = intr_ctx->rx_err_ring_mask; 11630 intr_bkp->rx_wbm_rel_ring_mask = intr_ctx->rx_wbm_rel_ring_mask; 11631 intr_bkp->reo_status_ring_mask = intr_ctx->reo_status_ring_mask; 11632 intr_bkp->rxdma2host_ring_mask = intr_ctx->rxdma2host_ring_mask; 11633 intr_bkp->host2rxdma_ring_mask = intr_ctx->host2rxdma_ring_mask; 11634 intr_bkp->host2rxdma_mon_ring_mask = 11635 intr_ctx->host2rxdma_mon_ring_mask; 11636 intr_bkp->tx_mon_ring_mask = intr_ctx->tx_mon_ring_mask; 11637 11638 intr_ctx->tx_ring_mask = 0; 11639 intr_ctx->rx_ring_mask = 0; 11640 intr_ctx->rx_mon_ring_mask = 0; 11641 intr_ctx->rx_err_ring_mask = 0; 11642 intr_ctx->rx_wbm_rel_ring_mask = 0; 11643 intr_ctx->reo_status_ring_mask = 0; 11644 intr_ctx->rxdma2host_ring_mask = 0; 11645 intr_ctx->host2rxdma_ring_mask = 0; 11646 intr_ctx->host2rxdma_mon_ring_mask = 0; 11647 intr_ctx->tx_mon_ring_mask = 0; 11648 11649 intr_bkp++; 11650 } 11651 } 11652 11653 /** 11654 * dp_restore_interrupt_ring_masks() - Restore rx interrupt masks 11655 * @soc: dp soc handle 11656 * 11657 * Return: void 11658 */ 11659 static void dp_restore_interrupt_ring_masks(struct dp_soc *soc) 11660 { 11661 struct dp_intr_bkp *intr_bkp = soc->umac_reset_ctx.intr_ctx_bkp; 11662 struct dp_intr_bkp *intr_bkp_base = intr_bkp; 11663 struct dp_intr *intr_ctx; 11664 int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); 11665 int i; 11666 11667 
	/* Nothing to restore if the masks were never backed up */
	if (!intr_bkp)
		return;

	for (i = 0; i < num_ctxt; i++) {
		intr_ctx = &soc->intr_ctx[i];

		intr_ctx->tx_ring_mask = intr_bkp->tx_ring_mask;
		intr_ctx->rx_ring_mask = intr_bkp->rx_ring_mask;
		intr_ctx->rx_mon_ring_mask = intr_bkp->rx_mon_ring_mask;
		intr_ctx->rx_err_ring_mask = intr_bkp->rx_err_ring_mask;
		intr_ctx->rx_wbm_rel_ring_mask = intr_bkp->rx_wbm_rel_ring_mask;
		intr_ctx->reo_status_ring_mask = intr_bkp->reo_status_ring_mask;
		intr_ctx->rxdma2host_ring_mask = intr_bkp->rxdma2host_ring_mask;
		intr_ctx->host2rxdma_ring_mask = intr_bkp->host2rxdma_ring_mask;
		intr_ctx->host2rxdma_mon_ring_mask =
					intr_bkp->host2rxdma_mon_ring_mask;
		intr_ctx->tx_mon_ring_mask = intr_bkp->tx_mon_ring_mask;

		intr_bkp++;
	}

	/* Backup array was allocated in the pre-reset path; release it and
	 * clear the reference so a second restore becomes a no-op.
	 */
	qdf_mem_free(intr_bkp_base);
	soc->umac_reset_ctx.intr_ctx_bkp = NULL;
}

/**
 * dp_resume_tx_hardstart() - Restore the old Tx hardstart functions
 * @soc: dp soc handle
 *
 * Re-fetches the per-vdev Tx handlers and pushes them back to the OS
 * interface layer, undoing dp_pause_tx_hardstart().
 *
 * Return: void
 */
static void dp_resume_tx_hardstart(struct dp_soc *soc)
{
	struct dp_vdev *vdev;
	struct ol_txrx_hardtart_ctxt ctxt = {0};
	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
	int i;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			uint8_t vdev_id = vdev->vdev_id;

			dp_vdev_fetch_tx_handler(vdev, soc, &ctxt);
			/* NOTE(review): assumes ol_ops->dp_update_tx_hardstart
			 * is always registered during umac reset — confirm.
			 */
			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
								    vdev_id,
								    &ctxt);
		}
	}
}

/**
 * dp_pause_tx_hardstart() - Register Tx hardstart functions to drop packets
 * @soc: dp soc handle
 *
 * Points every vdev's Tx entry points at the drop handlers so no new
 * frames enter the datapath while the Umac reset is in progress.
 *
 * Return: void
 */
static void dp_pause_tx_hardstart(struct dp_soc *soc)
{
	struct dp_vdev *vdev;
	struct ol_txrx_hardtart_ctxt ctxt;
	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
	int i;

	/* All Tx paths are redirected to the drop stubs */
	ctxt.tx = &dp_tx_drop;
	ctxt.tx_fast = &dp_tx_drop;
	ctxt.tx_exception = &dp_tx_exc_drop;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			uint8_t vdev_id = vdev->vdev_id;

			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
								    vdev_id,
								    &ctxt);
		}
	}
}

/**
 * dp_unregister_notify_umac_pre_reset_fw_callback() - unregister notify_fw_cb
 * @soc: dp soc handle
 *
 * Return: void
 */
static inline
void dp_unregister_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
{
	soc->notify_fw_callback = NULL;
}

/**
 * dp_check_n_notify_umac_prereset_done() - Send pre reset done to firmware
 * @soc: dp soc handle
 *
 * Called from the ring-service path; only notifies the firmware once no
 * CPU is still servicing the umac rings.
 *
 * Return: void
 */
static inline
void dp_check_n_notify_umac_prereset_done(struct dp_soc *soc)
{
	/* Some Cpu(s) is processing the umac rings*/
	if (soc->service_rings_running)
		return;

	/* Unregister the callback */
	dp_unregister_notify_umac_pre_reset_fw_callback(soc);

	/* Check if notify was already sent by any other thread.
	 * NOTE(review): service_rings_running doubles as a counter and a
	 * bit-field here (DP_UMAC_RESET_NOTIFY_DONE) — confirm intent.
	 */
	if (qdf_atomic_test_and_set_bit(DP_UMAC_RESET_NOTIFY_DONE,
					&soc->service_rings_running))
		return;

	/* Notify the firmware that Umac pre reset is complete */
	dp_umac_reset_notify_action_completion(soc,
					       UMAC_RESET_ACTION_DO_PRE_RESET);
}

/**
 * dp_register_notify_umac_pre_reset_fw_callback() - register notify_fw_cb
 * @soc: dp soc handle
 *
 * Return: void
 */
static inline
void dp_register_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
{
	soc->notify_fw_callback =
dp_check_n_notify_umac_prereset_done; 11803 } 11804 11805 #ifdef DP_UMAC_HW_HARD_RESET 11806 /** 11807 * dp_set_umac_regs() - Reinitialize host umac registers 11808 * @soc: dp soc handle 11809 * 11810 * Return: void 11811 */ 11812 static void dp_set_umac_regs(struct dp_soc *soc) 11813 { 11814 int i; 11815 struct hal_reo_params reo_params; 11816 11817 qdf_mem_zero(&reo_params, sizeof(reo_params)); 11818 11819 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { 11820 if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0, 11821 &reo_params.remap1, 11822 &reo_params.remap2)) 11823 reo_params.rx_hash_enabled = true; 11824 else 11825 reo_params.rx_hash_enabled = false; 11826 } 11827 11828 reo_params.reo_qref = &soc->reo_qref; 11829 hal_reo_setup(soc->hal_soc, &reo_params, 0); 11830 11831 soc->arch_ops.dp_cc_reg_cfg_init(soc, true); 11832 11833 for (i = 0; i < PCP_TID_MAP_MAX; i++) 11834 hal_tx_update_pcp_tid_map(soc->hal_soc, soc->pcp_tid_map[i], i); 11835 11836 for (i = 0; i < MAX_PDEV_CNT; i++) { 11837 struct dp_vdev *vdev = NULL; 11838 struct dp_pdev *pdev = soc->pdev_list[i]; 11839 11840 if (!pdev) 11841 continue; 11842 11843 for (i = 0; i < soc->num_hw_dscp_tid_map; i++) 11844 hal_tx_set_dscp_tid_map(soc->hal_soc, 11845 pdev->dscp_tid_map[i], i); 11846 11847 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { 11848 soc->arch_ops.dp_bank_reconfig(soc, vdev); 11849 soc->arch_ops.dp_reconfig_tx_vdev_mcast_ctrl(soc, 11850 vdev); 11851 } 11852 } 11853 } 11854 #else 11855 static void dp_set_umac_regs(struct dp_soc *soc) 11856 { 11857 } 11858 #endif 11859 11860 /** 11861 * dp_reinit_rings() - Reinitialize host managed rings 11862 * @soc: dp soc handle 11863 * 11864 * Return: QDF_STATUS 11865 */ 11866 static void dp_reinit_rings(struct dp_soc *soc) 11867 { 11868 unsigned long end; 11869 11870 dp_soc_srng_deinit(soc); 11871 dp_hw_link_desc_ring_deinit(soc); 11872 11873 /* Busy wait for 2 ms to make sure the rings are in idle state 11874 * before we enable them 
	 * again
	 */
	end = jiffies + msecs_to_jiffies(2);
	/* NOTE(review): deliberate busy-wait (no sleep) — this path must not
	 * reschedule; confirm 2 ms is sufficient for ring quiescence.
	 */
	while (time_before(jiffies, end))
		;

	dp_hw_link_desc_ring_init(soc);
	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
	dp_soc_srng_init(soc);
}

/**
 * dp_umac_reset_action_trigger_recovery() - Handle FW Umac recovery trigger
 * @soc: dp soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_umac_reset_action_trigger_recovery(struct dp_soc *soc)
{
	enum umac_reset_action action = UMAC_RESET_ACTION_DO_TRIGGER_RECOVERY;

	return dp_umac_reset_notify_action_completion(soc, action);
}

#ifdef WLAN_SUPPORT_PPEDS
/**
 * dp_umac_reset_service_handle_n_notify_done()
 *	Handle Umac pre reset for direct switch
 * @soc: dp soc handle
 *
 * When PPE direct switch is active, stops PPEDS servicing and interrupts
 * before arming the pre-reset-done notification; otherwise falls back to
 * the plain notify path.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_umac_reset_service_handle_n_notify_done(struct dp_soc *soc)
{
	if (!soc->arch_ops.txrx_soc_ppeds_enabled_check ||
	    !soc->arch_ops.txrx_soc_ppeds_service_status_update ||
	    !soc->arch_ops.txrx_soc_ppeds_interrupt_stop)
		goto non_ppeds;

	/*
	 * Check if ppeds is enabled on SoC.
	 */
	if (!soc->arch_ops.txrx_soc_ppeds_enabled_check(soc))
		goto non_ppeds;

	/*
	 * Start the UMAC pre reset done service.
	 */
	soc->arch_ops.txrx_soc_ppeds_service_status_update(soc, true);

	dp_register_notify_umac_pre_reset_fw_callback(soc);

	soc->arch_ops.txrx_soc_ppeds_interrupt_stop(soc);

	dp_soc_ppeds_stop((struct cdp_soc_t *)soc);

	/*
	 * UMAC pre reset service complete
	 */
	soc->arch_ops.txrx_soc_ppeds_service_status_update(soc, false);

	soc->umac_reset_ctx.nbuf_list = NULL;
	return QDF_STATUS_SUCCESS;

non_ppeds:
	dp_register_notify_umac_pre_reset_fw_callback(soc);
	dp_umac_reset_trigger_pre_reset_notify_cb(soc);
	soc->umac_reset_ctx.nbuf_list = NULL;
	return QDF_STATUS_SUCCESS;
}

/* Reset PPEDS Tx descriptor pools, collecting freed buffers in @nbuf_list;
 * no-op when PPEDS arch ops are absent or the feature is disabled.
 */
static inline void dp_umac_reset_ppeds_txdesc_pool_reset(struct dp_soc *soc,
							 qdf_nbuf_t *nbuf_list)
{
	if (!soc->arch_ops.txrx_soc_ppeds_enabled_check ||
	    !soc->arch_ops.txrx_soc_ppeds_txdesc_pool_reset)
		return;

	/*
	 * Deinit of PPEDS Tx desc rings.
	 */
	if (soc->arch_ops.txrx_soc_ppeds_enabled_check(soc))
		soc->arch_ops.txrx_soc_ppeds_txdesc_pool_reset(soc, nbuf_list);
}

/* Restart the PPEDS node and its interrupts after the reset completes */
static inline void dp_umac_reset_ppeds_start(struct dp_soc *soc)
{
	if (!soc->arch_ops.txrx_soc_ppeds_enabled_check ||
	    !soc->arch_ops.txrx_soc_ppeds_start ||
	    !soc->arch_ops.txrx_soc_ppeds_interrupt_start)
		return;

	/*
	 * Start PPEDS node and enable interrupt.
	 */
	if (soc->arch_ops.txrx_soc_ppeds_enabled_check(soc)) {
		soc->arch_ops.txrx_soc_ppeds_start(soc);
		soc->arch_ops.txrx_soc_ppeds_interrupt_start(soc);
	}
}
#else
static QDF_STATUS dp_umac_reset_service_handle_n_notify_done(struct dp_soc *soc)
{
	dp_register_notify_umac_pre_reset_fw_callback(soc);
	dp_umac_reset_trigger_pre_reset_notify_cb(soc);
	soc->umac_reset_ctx.nbuf_list = NULL;
	return QDF_STATUS_SUCCESS;
}

static inline void dp_umac_reset_ppeds_txdesc_pool_reset(struct dp_soc *soc,
							 qdf_nbuf_t *nbuf_list)
{
}

static inline void dp_umac_reset_ppeds_start(struct dp_soc *soc)
{
}
#endif

/**
 * dp_umac_reset_handle_pre_reset() - Handle Umac prereset interrupt from FW
 * @soc: dp soc handle
 *
 * Masks all datapath interrupts, redirects Tx to drop handlers, pauses
 * REO commands, then arms/sends the pre-reset-done notification.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc)
{
	dp_reset_interrupt_ring_masks(soc);

	dp_pause_tx_hardstart(soc);
	dp_pause_reo_send_cmd(soc);
	dp_umac_reset_service_handle_n_notify_done(soc);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_handle_post_reset() - Handle Umac postreset interrupt from FW
 * @soc: dp soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc)
{
	/* Skeleton mode skips the full host-side reinit */
	if (!soc->umac_reset_ctx.skel_enable) {
		bool cleanup_needed;
		qdf_nbuf_t *nbuf_list = &soc->umac_reset_ctx.nbuf_list;

		dp_set_umac_regs(soc);

		dp_reinit_rings(soc);

		dp_rx_desc_reuse(soc, nbuf_list);

		dp_cleanup_reo_cmd_module(soc);

		dp_umac_reset_ppeds_txdesc_pool_reset(soc, nbuf_list);

		cleanup_needed = dp_get_global_tx_desc_cleanup_flag(soc);

		dp_tx_desc_pool_cleanup(soc, nbuf_list, cleanup_needed);

		dp_reset_tid_q_setup(soc);

	}

	return dp_umac_reset_notify_action_completion(soc,
			UMAC_RESET_ACTION_DO_POST_RESET_START);
}

/**
 * dp_umac_reset_handle_post_reset_complete() - Handle Umac postreset_complete
 *                                              interrupt from FW
 * @soc: dp soc handle
 *
 * Resumes REO commands, PPEDS, interrupts and Tx, frees the nbufs staged
 * during pre/post reset, tops up the Rx refill rings, and reports the
 * end-to-end reset timing.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
{
	QDF_STATUS status;
	qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list;
	uint8_t mac_id;

	soc->umac_reset_ctx.nbuf_list = NULL;

	/* Also clears the DP_UMAC_RESET_NOTIFY_DONE bit set in the
	 * pre-reset path (service_rings_running carries both).
	 */
	soc->service_rings_running = 0;

	dp_resume_reo_send_cmd(soc);

	dp_umac_reset_ppeds_start(soc);

	dp_restore_interrupt_ring_masks(soc);

	dp_resume_tx_hardstart(soc);

	dp_reset_global_tx_desc_cleanup_flag(soc);

	status = dp_umac_reset_notify_action_completion(soc,
				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);

	/* Free the singly-linked chain of staged nbufs */
	while (nbuf_list) {
		qdf_nbuf_t nbuf = nbuf_list->next;

		qdf_nbuf_free(nbuf_list);
		nbuf_list = nbuf;
	}

	/*
	 * at pre-reset if in_use descriptors are not sufficient we replenish
	 * only 1/3 of the ring. Try to replenish full ring here.
	 */
	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct dp_srng *dp_rxdma_srng =
				&soc->rx_refill_buf_ring[mac_id];
		struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_lt_replenish_simple(soc, mac_id, dp_rxdma_srng,
						  rx_desc_pool, true);
	}

	dp_umac_reset_info("Umac reset done on soc %pK\n trigger start : %u us "
			   "trigger done : %u us prereset : %u us\n"
			   "postreset : %u us \n postreset complete: %u us \n",
			   soc,
			   soc->umac_reset_ctx.ts.trigger_done -
			   soc->umac_reset_ctx.ts.trigger_start,
			   soc->umac_reset_ctx.ts.pre_reset_done -
			   soc->umac_reset_ctx.ts.pre_reset_start,
			   soc->umac_reset_ctx.ts.post_reset_done -
			   soc->umac_reset_ctx.ts.post_reset_start,
			   soc->umac_reset_ctx.ts.post_reset_complete_done -
			   soc->umac_reset_ctx.ts.post_reset_complete_start);

	return status;
}
#endif
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/* Set the pkt capture mode flag in the wlan cfg context */
static void
dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->wlan_cfg_ctx->pkt_capture_mode = val;
}
#endif

#ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_enable_disable_vdev_tx_delay_stats() - Start/Stop tx delay stats capture
 * @soc_hdl: DP soc handle
 * @vdev_id: vdev id
 * @value: value
 *
 * Return: None
 */
static void
dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id,
				      uint8_t value)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = NULL;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return;

	vdev->hw_tx_delay_stats_enabled = value;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/**
 * dp_check_vdev_tx_delay_stats_enabled() - check the feature is enabled or not
 * @soc_hdl: DP soc handle
 * @vdev_id: vdev id
 *
 * Return: 1 if enabled, 0 if disabled
 */
static uint8_t
dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
				     uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;
	uint8_t ret_val = 0;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return ret_val;

	ret_val = vdev->hw_tx_delay_stats_enabled;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return ret_val;
}
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/* Flush (MLO) peers on a vdev during recovery; unmap_only is false here */
static void
dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc,
			     uint8_t vdev_id,
			     bool mlo_peers_only)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return;

	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, mlo_peers_only);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
#endif
#ifdef QCA_GET_TSF_VIA_REG
/**
 * dp_get_tsf_time() - get tsf time
 * @soc_hdl: Datapath soc handle
 * @tsf_id: TSF identifier
 * @mac_id: mac_id
 * @tsf: pointer to update tsf value
 * @tsf_sync_soc_time: pointer to update tsf sync time
 *
 * Return: None.
 */
static inline void
dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
{
	hal_get_tsf_time(((struct dp_soc *)soc_hdl)->hal_soc, tsf_id, mac_id,
			 tsf, tsf_sync_soc_time);
}
#else
/* Stub: TSF via register not supported; outputs are left untouched */
static inline void
dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
{
}
#endif

/**
 * dp_get_tsf2_scratch_reg() - get tsf2 offset from the scratch register
 * @soc_hdl: Datapath soc handle
 * @mac_id: mac_id
 * @value: pointer to update tsf2 offset value
 *
 * Return: None.
 */
static inline void
dp_get_tsf2_scratch_reg(struct cdp_soc_t *soc_hdl, uint8_t mac_id,
			uint64_t *value)
{
	hal_get_tsf2_offset(((struct dp_soc *)soc_hdl)->hal_soc, mac_id, value);
}

/**
 * dp_get_tqm_scratch_reg() - get tqm offset from the scratch register
 * @soc_hdl: Datapath soc handle
 * @value: pointer to update tqm offset value
 *
 * Return: None.
 */
static inline void
dp_get_tqm_scratch_reg(struct cdp_soc_t *soc_hdl, uint64_t *value)
{
	hal_get_tqm_offset(((struct dp_soc *)soc_hdl)->hal_soc, value);
}

/**
 * dp_set_tx_pause() - Pause or resume tx path
 * @soc_hdl: Datapath soc handle
 * @flag: set or clear is_tx_pause
 *
 * Return: None.
 */
static inline
void dp_set_tx_pause(struct cdp_soc_t *soc_hdl, bool flag)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	soc->is_tx_pause = flag;
}

/* Return the CMEM base reserved for the RX FISA FST, or 0 when the arch
 * does not provide one.
 */
static inline uint64_t dp_rx_fisa_get_cmem_base(struct cdp_soc_t *soc_hdl,
						uint64_t size)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	if (soc->arch_ops.dp_get_fst_cmem_base)
		return soc->arch_ops.dp_get_fst_cmem_base(soc, size);

	return 0;
}

#ifdef DP_TX_PACKET_INSPECT_FOR_ILP
/**
 * dp_evaluate_update_tx_ilp_config() - Evaluate and update DP TX
 *					ILP configuration
 * @soc_hdl: CDP SOC handle
 * @num_msdu_idx_map: Number of HTT msdu index to qtype map in array
 * @msdu_idx_map_arr: Pointer to HTT msdu index to qtype map array
 *
 * This function will check: (a) TX ILP INI configuration,
 * (b) value at index HTT_MSDUQ_INDEX_CUSTOM_PRIO_1 in the array same as
 * HTT_MSDU_QTYPE_USER_SPECIFIED (matching the comparison done in the body;
 * the earlier doc referenced a different qtype), only if both (a) and (b)
 * condition is met, then TX ILP feature is considered to be enabled.
 *
 * Return: Final updated TX ILP enable result in dp_soc,
 *	   true is enabled, false is not
 */
static
bool dp_evaluate_update_tx_ilp_config(struct cdp_soc_t *soc_hdl,
				      uint8_t num_msdu_idx_map,
				      uint8_t *msdu_idx_map_arr)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	bool enable_tx_ilp = false;

	/*
	 * Check INI configuration firstly, if it's disabled,
	 * then keep feature disabled.
	 */
	if (!wlan_cfg_get_tx_ilp_inspect_config(soc->wlan_cfg_ctx)) {
		dp_info("TX ILP INI is disabled already");
		goto update_tx_ilp;
	}

	/* Check if the msdu index to qtype map table is valid */
	if (num_msdu_idx_map != HTT_MSDUQ_MAX_INDEX || !msdu_idx_map_arr) {
		dp_info("Invalid msdu_idx qtype map num: 0x%x, arr_addr %pK",
			num_msdu_idx_map, msdu_idx_map_arr);
		goto update_tx_ilp;
	}

	dp_info("msdu_idx_map_arr idx 0x%x value 0x%x",
		HTT_MSDUQ_INDEX_CUSTOM_PRIO_1,
		msdu_idx_map_arr[HTT_MSDUQ_INDEX_CUSTOM_PRIO_1]);

	/* Enabled only when the custom-priority queue maps to the
	 * user-specified qtype.
	 */
	if (HTT_MSDU_QTYPE_USER_SPECIFIED ==
	    msdu_idx_map_arr[HTT_MSDUQ_INDEX_CUSTOM_PRIO_1])
		enable_tx_ilp = true;

update_tx_ilp:
	soc->tx_ilp_enable = enable_tx_ilp;
	dp_info("configure tx ilp enable %d", soc->tx_ilp_enable);

	return soc->tx_ilp_enable;
}
#endif

/* Common CDP ops table registered with the converged txrx layer */
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3_wrapper,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_HMWDS_ast_delete = dp_peer_HMWDS_ast_entry_del,
	.txrx_peer_delete = dp_peer_delete_wifi3,
#ifdef DP_RX_UDP_OVER_PEER_ROAM
	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
#endif
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.tx_send = dp_tx_send,
	.tx_send_exc = dp_tx_send_exception,
#endif
	.set_tx_pause = dp_set_tx_pause,
	.txrx_pdev_init = dp_pdev_init_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
#ifndef WLAN_SOFTUMAC_SUPPORT
	/* Block-ack session management */
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
#endif
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.notify_asserted_soc = dp_soc_notify_asserted_soc,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach_wrapper,
	.txrx_ppeds_stop = dp_soc_ppeds_stop,
	.set_key_sec_type = dp_set_key_sec_type_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.handle_mode_change = dp_soc_handle_pdev_mode_change,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.set_soc_param = dp_soc_set_param,
	.txrx_get_os_rx_handles_from_vdev =
		dp_get_os_rx_handles_from_vdev_wifi3,
#ifndef WLAN_SOFTUMAC_SUPPORT
	.set_pn_check = dp_set_pn_check_wifi3,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
#endif
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
	.get_peer_mac_list = dp_get_peer_mac_list,
	.get_peer_id = dp_get_peer_id,
#ifdef QCA_SUPPORT_WDS_EXTENDED
	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
	.get_wds_ext_peer_osif_handle = dp_wds_ext_get_peer_osif_handle,
	.set_wds_ext_peer_bit = dp_wds_ext_set_peer_bit,
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
	.txrx_drain = dp_drain_txrx,
#endif
#if defined(FEATURE_RUNTIME_PM)
	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
#endif
#ifdef WLAN_SYSFS_DP_STATS
	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
#endif /* WLAN_SYSFS_DP_STATS */
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
#endif
	.txrx_umac_reset_deinit = dp_soc_umac_reset_deinit,
	.txrx_umac_reset_init = dp_soc_umac_reset_init,
	.txrx_get_tsf_time = dp_get_tsf_time,
	.txrx_get_tsf2_offset = dp_get_tsf2_scratch_reg,
	.txrx_get_tqm_offset = dp_get_tqm_scratch_reg,
#ifdef WLAN_SUPPORT_RX_FISA
	.get_fst_cmem_base = dp_rx_fisa_get_cmem_base,
#endif
#ifdef WLAN_SUPPORT_DPDK
	.dpdk_get_ring_info = dp_dpdk_get_ring_info,
	.cfgmgr_get_soc_info = dp_cfgmgr_get_soc_info,
	.cfgmgr_get_vdev_info = dp_cfgmgr_get_vdev_info,
	.cfgmgr_get_peer_info = dp_cfgmgr_get_peer_info,
	.cfgmgr_get_vdev_create_evt_info = dp_cfgmgr_get_vdev_create_evt_info,
	.cfgmgr_get_peer_create_evt_info = dp_cfgmgr_get_peer_create_evt_info,
#endif
};

/* Control-plane CDP ops table */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_peer_get_authorize = dp_peer_get_authorize,
#ifdef
VDEV_PEER_PROTOCOL_COUNT
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param_wrapper,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
#ifndef WLAN_SOFTUMAC_SUPPORT
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
#endif
	.txrx_get_sec_type = dp_get_sec_type,
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
#ifdef WLAN_FEATURE_11BE_MLO
	.txrx_set_peer_param = dp_set_peer_param_wrapper,
#else
	.txrx_set_peer_param = dp_set_peer_param,
#endif
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.txrx_record_mscs_params = dp_record_mscs_params,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
		dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /*QCA_MULTIPASS_SUPPORT*/
#if defined(WLAN_FEATURE_TSF_AUTO_REPORT) || defined(WLAN_CONFIG_TX_DELAY)
	.txrx_set_delta_tsf = dp_set_delta_tsf,
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
	.txrx_get_uplink_delay = dp_get_uplink_delay,
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
#endif
	.txrx_peer_flush_frags = dp_peer_flush_frags,
#ifdef DP_UMAC_HW_RESET_SUPPORT
	.get_umac_reset_in_progress_state = dp_get_umac_reset_in_progress_state,
#endif
#ifdef WLAN_SUPPORT_RX_FISA
	.txrx_fisa_config = dp_fisa_config,
#endif
};

/* Mcast-enhancement ops table */
static struct cdp_me_ops dp_ops_me = {
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
#endif
};

/* Host statistics ops table */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_peer_stats_based_on_peer_type =
		dp_txrx_get_peer_stats_based_on_peer_type,
	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_get_per_link_stats = dp_txrx_get_per_link_peer_stats,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
#if defined(IPA_OFFLOAD) && defined(QCA_ENHANCED_STATS_SUPPORT)
	/* IPA builds override the peer/vdev/pdev stats getters above */
	.txrx_get_peer_stats = dp_ipa_txrx_get_peer_stats,
	.txrx_get_vdev_stats = dp_ipa_txrx_get_vdev_stats,
	.txrx_get_pdev_stats = dp_ipa_txrx_get_pdev_stats,
#endif
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
#endif
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
#endif /* WLAN_TX_PKT_CAPTURE_ENH */
#ifdef HW_TX_DELAY_STATS_ENABLE
	.enable_disable_vdev_tx_delay_stats =
		dp_enable_disable_vdev_tx_delay_stats,
	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
#endif
	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
#ifdef WLAN_CONFIG_TELEMETRY_AGENT
	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
	.txrx_pdev_deter_stats = dp_get_pdev_deter_stats,
	.txrx_peer_deter_stats = dp_get_peer_deter_stats,
	.txrx_update_pdev_chan_util_stats = dp_update_pdev_chan_util_stats,
#endif
	.txrx_get_peer_extd_rate_link_stats =
		dp_get_peer_extd_rate_link_stats,
	.get_pdev_obss_stats = dp_get_obss_stats,
	.clear_pdev_obss_pd_stats = dp_clear_pdev_obss_pd_stats,
	.txrx_get_interface_stats = dp_txrx_get_interface_stats,
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
	.tx_latency_stats_fetch = dp_tx_latency_stats_fetch,
	.tx_latency_stats_config =
		dp_tx_latency_stats_config,
	.tx_latency_stats_register_cb = dp_tx_latency_stats_register_cb,
#endif
	/* TODO */
};

static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};

#ifdef PEER_FLOW_CONTROL
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static struct cdp_cfr_ops dp_ops_cfr = {
	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
};
#endif

#ifdef WLAN_SUPPORT_MSCS
static struct cdp_mscs_ops dp_ops_mscs = {
	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
};
#endif

#ifdef WLAN_SUPPORT_MESH_LATENCY
static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
	.mesh_latency_update_peer_parameter =
		dp_mesh_latency_update_peer_parameter,
};
#endif

#ifdef WLAN_SUPPORT_SCS
static struct cdp_scs_ops dp_ops_scs = {
	.scs_peer_lookup_n_rule_match = dp_scs_peer_lookup_n_rule_match,
};
#endif

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
static struct cdp_fse_ops dp_ops_fse = {
	.fse_rule_add = dp_rx_sfe_add_flow_entry,
	.fse_rule_delete = dp_rx_sfe_delete_flow_entry,
};
#endif

#ifdef CONFIG_SAWF_DEF_QUEUES
static struct cdp_sawf_ops dp_ops_sawf = {
	.sawf_def_queues_map_req = dp_sawf_def_queues_map_req,
	.sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req,
	.sawf_def_queues_get_map_report =
		dp_sawf_def_queues_get_map_report,
#ifdef CONFIG_SAWF_STATS
	.sawf_get_peer_msduq_info = dp_sawf_get_peer_msduq_info,
	.txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats,
	.txrx_get_peer_sawf_tx_stats =
		dp_sawf_get_peer_tx_stats,
	.sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req,
	.sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req,
	.txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params,
	.txrx_sawf_set_sla_params = dp_sawf_set_sla_params,
	.txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params,
	.telemetry_get_throughput_stats = dp_sawf_get_tx_stats,
	.telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats,
	.telemetry_get_drop_stats = dp_sawf_get_drop_stats,
	.peer_config_ul = dp_sawf_peer_config_ul,
	.swaf_peer_sla_configuration = dp_swaf_peer_sla_configuration,
	.sawf_peer_flow_count = dp_sawf_peer_flow_count,
#endif
#ifdef WLAN_FEATURE_11BE_MLO_3_LINK_TX
	.get_peer_msduq = dp_sawf_get_peer_msduq,
	.sawf_3_link_peer_flow_count = dp_sawf_3_link_peer_flow_count,
#endif
};
#endif

#ifdef DP_TX_TRACKING

/* Tx completion latency beyond this (ms) is treated as a stuck descriptor */
#define DP_TX_COMP_MAX_LATENCY_MS 60000
/**
 * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
 * @tx_desc: tx descriptor
 *
 * Calculate time latency for tx completion per pkt and trigger self recovery
 * when the delay is more than threshold value.
12692 * 12693 * Return: True if delay is more than threshold 12694 */ 12695 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc) 12696 { 12697 uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick; 12698 qdf_ktime_t current_time = qdf_ktime_real_get(); 12699 qdf_ktime_t timestamp = tx_desc->timestamp; 12700 12701 if (dp_tx_pkt_tracepoints_enabled()) { 12702 if (!timestamp) 12703 return false; 12704 12705 time_latency = qdf_ktime_to_ms(current_time) - 12706 qdf_ktime_to_ms(timestamp); 12707 if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) { 12708 dp_err_rl("enqueued: %llu ms, current : %llu ms", 12709 timestamp, current_time); 12710 return true; 12711 } 12712 } else { 12713 if (!timestamp_tick) 12714 return false; 12715 12716 current_time = qdf_system_ticks(); 12717 time_latency = qdf_system_ticks_to_msecs(current_time - 12718 timestamp_tick); 12719 if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) { 12720 dp_err_rl("enqueued: %u ms, current : %u ms", 12721 qdf_system_ticks_to_msecs(timestamp_tick), 12722 qdf_system_ticks_to_msecs(current_time)); 12723 return true; 12724 } 12725 } 12726 12727 return false; 12728 } 12729 12730 void dp_find_missing_tx_comp(struct dp_soc *soc) 12731 { 12732 uint8_t i; 12733 uint32_t j; 12734 uint32_t num_desc, page_id, offset; 12735 uint16_t num_desc_per_page; 12736 struct dp_tx_desc_s *tx_desc = NULL; 12737 struct dp_tx_desc_pool_s *tx_desc_pool = NULL; 12738 12739 for (i = 0; i < MAX_TXDESC_POOLS; i++) { 12740 tx_desc_pool = &soc->tx_desc[i]; 12741 if (!(tx_desc_pool->pool_size) || 12742 IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) || 12743 !(tx_desc_pool->desc_pages.cacheable_pages)) 12744 continue; 12745 12746 num_desc = tx_desc_pool->pool_size; 12747 num_desc_per_page = 12748 tx_desc_pool->desc_pages.num_element_per_page; 12749 for (j = 0; j < num_desc; j++) { 12750 page_id = j / num_desc_per_page; 12751 offset = j % num_desc_per_page; 12752 12753 if (qdf_unlikely(!(tx_desc_pool-> 12754 
desc_pages.cacheable_pages))) 12755 break; 12756 12757 tx_desc = dp_tx_desc_find(soc, i, page_id, offset, 12758 false); 12759 if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) { 12760 continue; 12761 } else if (tx_desc->magic == 12762 DP_TX_MAGIC_PATTERN_INUSE) { 12763 if (dp_tx_comp_delay_check(tx_desc)) { 12764 dp_err_rl("Tx completion not rcvd for id: %u", 12765 tx_desc->id); 12766 if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) { 12767 tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH; 12768 dp_err_rl("Freed tx_desc %u", 12769 tx_desc->id); 12770 dp_tx_comp_free_buf(soc, 12771 tx_desc, 12772 false); 12773 dp_tx_desc_release(soc, tx_desc, 12774 i); 12775 DP_STATS_INC(soc, 12776 tx.tx_comp_force_freed, 1); 12777 } 12778 } 12779 } else { 12780 dp_err_rl("tx desc %u corrupted, flags: 0x%x", 12781 tx_desc->id, tx_desc->flags); 12782 } 12783 } 12784 } 12785 } 12786 #else 12787 inline void dp_find_missing_tx_comp(struct dp_soc *soc) 12788 { 12789 } 12790 #endif 12791 12792 #ifdef FEATURE_RUNTIME_PM 12793 /** 12794 * dp_runtime_suspend() - ensure DP is ready to runtime suspend 12795 * @soc_hdl: Datapath soc handle 12796 * @pdev_id: id of data path pdev handle 12797 * 12798 * DP is ready to runtime suspend if there are no pending TX packets. 
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	int32_t tx_pending;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
	if (tx_pending) {
		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
			   soc, tx_pending);
		/* Try to recover descriptors whose completion was lost */
		dp_find_missing_tx_comp(soc);
		/* perform a force flush if tx is pending */
		soc->arch_ops.dp_update_ring_hptp(soc, true);
		qdf_atomic_set(&soc->tx_pending_rtpm, 0);

		return QDF_STATUS_E_AGAIN;
	}

	/* Outstanding DP runtime references also block suspend */
	if (dp_runtime_get_refcount(soc)) {
		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));

		return QDF_STATUS_E_AGAIN;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	return QDF_STATUS_SUCCESS;
}

#define DP_FLUSH_WAIT_CNT 10
#define DP_RUNTIME_SUSPEND_WAIT_MS 10
/**
 * dp_runtime_resume() - ensure DP is ready to runtime resume
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Resume DP for runtime PM.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	int suspend_wait = 0;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	/*
	 * Wait until dp runtime refcount becomes zero or time out, then flush
	 * pending tx for runtime suspend.
	 */
	while (dp_runtime_get_refcount(soc) &&
	       suspend_wait < DP_FLUSH_WAIT_CNT) {
		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
		suspend_wait++;
	}

	soc->arch_ops.dp_update_ring_hptp(soc, false);
	qdf_atomic_set(&soc->tx_pending_rtpm, 0);

	return QDF_STATUS_SUCCESS;
}
#endif /* FEATURE_RUNTIME_PM */

/**
 * dp_tx_get_success_ack_stats() - get tx success completion count
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev identifier
 *
 * Return: tx success ack count
 */
static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct cdp_vdev_stats *vdev_stats = NULL;
	uint32_t tx_success;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
		return 0;
	}

	/* Atomic alloc: may be called from a context that cannot sleep */
	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return 0;
	}

	dp_aggregate_vdev_stats(vdev, vdev_stats, DP_XMIT_TOTAL);

	tx_success = vdev_stats->tx.tx_success.num;
	qdf_mem_free(vdev_stats);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return tx_success;
}

#ifdef WLAN_SUPPORT_DATA_STALL
/**
 * dp_register_data_stall_detect_cb() - register data stall callback
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @data_stall_detect_callback: data stall callback function
 *
 * Return: QDF_STATUS Enumeration
 */
static
QDF_STATUS dp_register_data_stall_detect_cb(
			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			data_stall_detect_cb data_stall_detect_callback)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return QDF_STATUS_E_INVAL;
	}

	pdev->data_stall_detect_callback = data_stall_detect_callback;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_deregister_data_stall_detect_cb() - de-register data stall callback
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @data_stall_detect_callback: data stall callback function
 *
 * Return: QDF_STATUS Enumeration
 */
static
QDF_STATUS dp_deregister_data_stall_detect_cb(
			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			data_stall_detect_cb data_stall_detect_callback)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return QDF_STATUS_E_INVAL;
	}

	pdev->data_stall_detect_callback = NULL;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_post_data_stall_event() - post data stall event
 * @soc_hdl: Datapath soc handle
 * @indicator: Module triggering data stall
 * @data_stall_type: data stall event type
 * @pdev_id: pdev id
 * @vdev_id_bitmap: vdev id bitmap
 * @recovery_type: data stall recovery type
 *
 * Return: None
 */
static void
dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
			      enum data_stall_log_event_indicator indicator,
			      enum data_stall_log_event_type data_stall_type,
			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
			      enum data_stall_log_recovery_type recovery_type)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct data_stall_event_info data_stall_info;
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev NULL!");
		return;
	}

	if (!pdev->data_stall_detect_callback) {
		dp_err("data stall cb not registered!");
		return;
	}

	dp_info("data_stall_type: %x pdev_id: %d",
		data_stall_type, pdev_id);

	data_stall_info.indicator = indicator;
	data_stall_info.data_stall_type = data_stall_type;
	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
	data_stall_info.pdev_id = pdev_id;
	data_stall_info.recovery_type = recovery_type;

	pdev->data_stall_detect_callback(&data_stall_info);
}
#endif /* WLAN_SUPPORT_DATA_STALL */

#ifdef WLAN_FEATURE_STATS_EXT
/**
 * dp_txrx_ext_stats_request() - request dp txrx extended stats request
 * @soc_hdl: soc handle
 * @pdev_id: pdev id
 * @req: stats request
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			  struct cdp_txrx_ext_stats *req)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int i = 0;
	int tcl_ring_full = 0;

	if (!pdev) {
		dp_err("pdev is null");
		return QDF_STATUS_E_INVAL;
	}

	dp_aggregate_pdev_stats(pdev);

	/* Sum TCL ring-full events across all data rings */
	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];

	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
	req->tx_msdu_overflow = tcl_ring_full;
	/* Error rate at LMAC */
	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received +
				pdev->stats.err.fw_reported_rxdma_error;
	/* only count error source from RXDMA */
	req->rx_mpdu_error = pdev->stats.err.fw_reported_rxdma_error;

	/* Error rate at above the MAC */
	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
	req->rx_mpdu_missed = pdev->stats.err.reo_error;

	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
		"rx_mpdu_receive = %u, rx_mpdu_delivered = %u, "
		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
		req->tx_msdu_enqueue,
		req->tx_msdu_overflow,
		req->rx_mpdu_received,
		req->rx_mpdu_delivered,
		req->rx_mpdu_missed,
		req->rx_mpdu_error);

	return QDF_STATUS_SUCCESS;
}

#endif /* WLAN_FEATURE_STATS_EXT */

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
/**
 * dp_mark_first_wakeup_packet() - set flag to indicate that
 *    fw is compatible for marking first packet after wow wakeup
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id, uint8_t value)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	pdev->is_first_wakeup_packet = value;
}
#endif

#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
/**
 * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
 * @soc_hdl: Opaque handle to the DP soc object
 * @vdev_id: VDEV identifier
 * @mac: MAC address of the peer
 * @ac: access category mask
 * @tid: TID mask
 * @policy: Flush policy
 *
 * Return: 0 on success, errno on failure
 */
static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
					uint8_t vdev_id, uint8_t *mac,
					uint8_t ac, uint32_t tid,
					enum cdp_peer_txq_flush_policy policy)
{
	struct dp_soc *soc;

	if (!soc_hdl) {
		dp_err("soc is null");
		return -EINVAL;
	}
	soc = cdp_soc_t_to_dp_soc(soc_hdl);
	/* Configuration is applied by the target-if layer */
	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
					       mac, ac, tid, policy);
}
#endif

#ifdef CONNECTIVITY_PKTLOG
/**
 * dp_register_packetdump_callback() - registers
 *    tx data packet, tx mgmt. packet and rx data packet
 *    dump callback handler.
 *
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @dp_tx_packetdump_cb: tx packetdump cb
 * @dp_rx_packetdump_cb: rx packetdump cb
 *
 * This function is used to register tx data pkt, tx mgmt.
 * pkt and rx data pkt dump callback
 *
 * Return: None
 *
 */
static inline
void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL!");
		return;
	}

	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
}

/**
 * dp_deregister_packetdump_callback() - deregisters
 *    tx data packet, tx mgmt. packet and rx data packet
 *    dump callback handler
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * This function is used to deregister tx data pkt.,
 * tx mgmt. pkt and rx data pkt.
 * dump callback
 *
 * Return: None
 *
 */
static inline
void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
				       uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL!");
		return;
	}

	pdev->dp_tx_packetdump_cb = NULL;
	pdev->dp_rx_packetdump_cb = NULL;
}
#endif

#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_set_bus_vote_lvl_high() - Take a vote on bus bandwidth from dp
 * @soc_hdl: Datapath soc handle
 * @high: whether the bus bw is high or not
 *
 * Return: void
 */
static void
dp_set_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl, bool high)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	soc->high_throughput = high;
}

/**
 * dp_get_bus_vote_lvl_high() - get bus bandwidth vote to dp
 * @soc_hdl: Datapath soc handle
 *
 * Return: bool
 */
static bool
dp_get_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	return soc->high_throughput;
}
#endif

#ifdef DP_PEER_EXTENDED_API
/* Miscellaneous CDP ops exported to the OS-interface layer */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
#ifndef WLAN_SOFTUMAC_SUPPORT
	.request_rx_hw_stats = dp_request_rx_hw_stats,
	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
#endif
#endif /* WLAN_FEATURE_STATS_EXT */
	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	.set_swlm_enable = dp_soc_set_swlm_enable,
	.is_swlm_enabled = dp_soc_is_swlm_enabled,
#endif
	.display_txrx_hw_info = dp_display_srng_info,
#ifndef WLAN_SOFTUMAC_SUPPORT
	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
#endif
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
#endif
#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
#endif
#ifdef CONNECTIVITY_PKTLOG
	.register_pktdump_cb = dp_register_packetdump_callback,
	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
#endif
#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
	.set_bus_vote_lvl_high = dp_set_bus_vote_lvl_high,
	.get_bus_vote_lvl_high = dp_get_bus_vote_lvl_high,
#endif
#ifdef DP_TX_PACKET_INSPECT_FOR_ILP
	.evaluate_update_tx_ilp_cfg = dp_evaluate_update_tx_ilp_config,
#endif
};
#endif

#ifdef DP_FLOW_CTL
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#ifndef WLAN_SOFTUMAC_SUPPORT
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
#endif /*WLAN_SOFTUMAC_SUPPORT */
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};

static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
#endif

#ifdef IPA_OFFLOAD
/* IPA (IP accelerator) offload ops */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
	.ipa_rx_buf_smmu_pool_mapping = dp_ipa_rx_buf_pool_smmu_mapping,
	.ipa_set_smmu_mapped = dp_ipa_set_smmu_mapped,
	.ipa_get_smmu_mapped = dp_ipa_get_smmu_mapped,
#ifdef QCA_SUPPORT_WDS_EXTENDED
	.ipa_rx_wdsext_iface = dp_ipa_rx_wdsext_iface,
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
	.ipa_update_peer_rx_stats = dp_ipa_update_peer_rx_stats,
#endif
#ifdef IPA_OPT_WIFI_DP
	.ipa_rx_super_rule_setup = dp_ipa_rx_super_rule_setup,
	.ipa_pcie_link_up = dp_ipa_pcie_link_up,
	.ipa_pcie_link_down = dp_ipa_pcie_link_down,
#endif
#ifdef IPA_WDS_EASYMESH_FEATURE
	.ipa_ast_create = dp_ipa_ast_create,
#endif
	.ipa_get_wdi_version = dp_ipa_get_wdi_version,
};
#endif

#ifdef DP_POWER_SAVE
/**
 * dp_bus_suspend() - prepare DP for bus suspend
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Drain pending TX (bounded by SUSPEND_DRAIN_WAIT), stop the poll-mode
 * interrupt timer and the monitor reap timer.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int timeout = SUSPEND_DRAIN_WAIT;
	int drain_wait_delay = 50; /* 50 ms */
	int32_t tx_pending;

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
		qdf_sleep(drain_wait_delay);
		if (timeout <= 0) {
			dp_info("TX frames are pending %d, abort suspend",
				tx_pending);
			dp_find_missing_tx_comp(soc);
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - drain_wait_delay;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	/* Stop monitor reap timer and reap any pending frames in ring */
	dp_monitor_reap_timer_suspend(soc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_bus_resume() - resume DP after bus resume
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Restart the poll-mode interrupt timer and the monitor reap timer and
 * sync ring head/tail pointers.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	/* Start monitor reap timer */
	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);

	soc->arch_ops.dp_update_ring_hptp(soc, false);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_process_wow_ack_rsp() - process wow ack response
 * @soc_hdl: datapath soc handle
 * @pdev_id: data path pdev handle id
 *
 * Return: none
 */
static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return;
	}

	/*
	 * As part of wow enable FW disables the mon status ring and in wow ack
	 * response from FW reap mon status ring to make sure no packets pending
	 * in the ring.
	 */
	dp_monitor_reap_timer_suspend(soc);
}

/**
 * dp_process_target_suspend_req() - process target suspend request
 * @soc_hdl: datapath soc handle
 * @pdev_id: data path pdev handle id
 *
 * Return: none
 */
static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return;
	}

	/* Stop monitor reap timer and reap any pending frames in ring */
	dp_monitor_reap_timer_suspend(soc);
}

static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
#endif

#ifdef DP_FLOW_CTL
13446 static struct cdp_throttle_ops dp_ops_throttle = { 13447 /* WIFI 3.0 DP NOT IMPLEMENTED YET */ 13448 }; 13449 13450 static struct cdp_cfg_ops dp_ops_cfg = { 13451 /* WIFI 3.0 DP NOT IMPLEMENTED YET */ 13452 }; 13453 #endif 13454 13455 #ifdef DP_PEER_EXTENDED_API 13456 static struct cdp_ocb_ops dp_ops_ocb = { 13457 /* WIFI 3.0 DP NOT IMPLEMENTED YET */ 13458 }; 13459 13460 static struct cdp_mob_stats_ops dp_ops_mob_stats = { 13461 .clear_stats = dp_txrx_clear_dump_stats, 13462 }; 13463 13464 static struct cdp_peer_ops dp_ops_peer = { 13465 .register_peer = dp_register_peer, 13466 .clear_peer = dp_clear_peer, 13467 .find_peer_exist = dp_find_peer_exist, 13468 .find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev, 13469 .find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev, 13470 .peer_state_update = dp_peer_state_update, 13471 .get_vdevid = dp_get_vdevid, 13472 .get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr, 13473 .peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr, 13474 .get_peer_state = dp_get_peer_state, 13475 .peer_flush_frags = dp_peer_flush_frags, 13476 .set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer, 13477 }; 13478 #endif 13479 13480 static void dp_soc_txrx_ops_attach(struct dp_soc *soc) 13481 { 13482 soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn; 13483 soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl; 13484 soc->cdp_soc.ops->me_ops = &dp_ops_me; 13485 soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats; 13486 soc->cdp_soc.ops->wds_ops = &dp_ops_wds; 13487 soc->cdp_soc.ops->raw_ops = &dp_ops_raw; 13488 #ifdef PEER_FLOW_CONTROL 13489 soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow; 13490 #endif /* PEER_FLOW_CONTROL */ 13491 #ifdef DP_PEER_EXTENDED_API 13492 soc->cdp_soc.ops->misc_ops = &dp_ops_misc; 13493 soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb; 13494 soc->cdp_soc.ops->peer_ops = &dp_ops_peer; 13495 soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats; 13496 #endif 13497 #ifdef DP_FLOW_CTL 13498 soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg; 13499 
soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl; 13500 soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl; 13501 soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle; 13502 #endif 13503 #ifdef IPA_OFFLOAD 13504 soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa; 13505 #endif 13506 #ifdef DP_POWER_SAVE 13507 soc->cdp_soc.ops->bus_ops = &dp_ops_bus; 13508 #endif 13509 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) 13510 soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr; 13511 #endif 13512 #ifdef WLAN_SUPPORT_MSCS 13513 soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs; 13514 #endif 13515 #ifdef WLAN_SUPPORT_MESH_LATENCY 13516 soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency; 13517 #endif 13518 #ifdef CONFIG_SAWF_DEF_QUEUES 13519 soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf; 13520 #endif 13521 #ifdef WLAN_SUPPORT_SCS 13522 soc->cdp_soc.ops->scs_ops = &dp_ops_scs; 13523 #endif 13524 #ifdef WLAN_SUPPORT_RX_FLOW_TAG 13525 soc->cdp_soc.ops->fse_ops = &dp_ops_fse; 13526 #endif 13527 }; 13528 13529 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \ 13530 defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \ 13531 defined(QCA_WIFI_QCA5332) 13532 13533 /** 13534 * dp_soc_attach_wifi3() - Attach txrx SOC 13535 * @ctrl_psoc: Opaque SOC handle from control plane 13536 * @params: SOC attach params 13537 * 13538 * Return: DP SOC handle on success, NULL on failure 13539 */ 13540 struct cdp_soc_t * 13541 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, 13542 struct cdp_soc_attach_params *params) 13543 { 13544 struct dp_soc *dp_soc = NULL; 13545 13546 dp_soc = dp_soc_attach(ctrl_psoc, params); 13547 13548 return dp_soc_to_cdp_soc_t(dp_soc); 13549 } 13550 13551 static inline void dp_soc_set_def_pdev(struct dp_soc *soc) 13552 { 13553 int lmac_id; 13554 13555 for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) { 13556 /*Set default host PDEV ID for lmac_id*/ 13557 wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx, 13558 INVALID_PDEV_ID, lmac_id); 13559 } 13560 } 13561 13562 
static void dp_soc_unset_qref_debug_list(struct dp_soc *soc) 13563 { 13564 uint32_t max_list_size = soc->wlan_cfg_ctx->qref_control_size; 13565 13566 if (max_list_size == 0) 13567 return; 13568 13569 qdf_mem_free(soc->list_shared_qaddr_del); 13570 qdf_mem_free(soc->reo_write_list); 13571 qdf_mem_free(soc->list_qdesc_addr_free); 13572 qdf_mem_free(soc->list_qdesc_addr_alloc); 13573 } 13574 13575 static void dp_soc_set_qref_debug_list(struct dp_soc *soc) 13576 { 13577 uint32_t max_list_size = soc->wlan_cfg_ctx->qref_control_size; 13578 13579 if (max_list_size == 0) 13580 return; 13581 13582 soc->list_shared_qaddr_del = 13583 (struct test_qaddr_del *) 13584 qdf_mem_malloc(sizeof(struct test_qaddr_del) * 13585 max_list_size); 13586 soc->reo_write_list = 13587 (struct test_qaddr_del *) 13588 qdf_mem_malloc(sizeof(struct test_qaddr_del) * 13589 max_list_size); 13590 soc->list_qdesc_addr_free = 13591 (struct test_mem_free *) 13592 qdf_mem_malloc(sizeof(struct test_mem_free) * 13593 max_list_size); 13594 soc->list_qdesc_addr_alloc = 13595 (struct test_mem_free *) 13596 qdf_mem_malloc(sizeof(struct test_mem_free) * 13597 max_list_size); 13598 } 13599 13600 static uint32_t 13601 dp_get_link_desc_id_start(uint16_t arch_id) 13602 { 13603 switch (arch_id) { 13604 case CDP_ARCH_TYPE_LI: 13605 case CDP_ARCH_TYPE_RH: 13606 return LINK_DESC_ID_START_21_BITS_COOKIE; 13607 case CDP_ARCH_TYPE_BE: 13608 return LINK_DESC_ID_START_20_BITS_COOKIE; 13609 default: 13610 dp_err("unknown arch_id 0x%x", arch_id); 13611 QDF_BUG(0); 13612 return LINK_DESC_ID_START_21_BITS_COOKIE; 13613 } 13614 } 13615 13616 #ifdef DP_TX_PACKET_INSPECT_FOR_ILP 13617 static inline 13618 void dp_soc_init_tx_ilp(struct dp_soc *soc) 13619 { 13620 soc->tx_ilp_enable = false; 13621 } 13622 #else 13623 static inline 13624 void dp_soc_init_tx_ilp(struct dp_soc *soc) 13625 { 13626 } 13627 #endif 13628 13629 /** 13630 * dp_soc_attach() - Attach txrx SOC 13631 * @ctrl_psoc: Opaque SOC handle from control plane 13632 * 
@params: SOC attach params
 *
 * Return: DP SOC handle on success, NULL on failure
 */
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	      struct cdp_soc_attach_params *params)
{
	struct dp_soc *soc = NULL;
	uint16_t arch_id;
	struct hif_opaque_softc *hif_handle = params->hif_handle;
	qdf_device_t qdf_osdev = params->qdf_osdev;
	struct ol_if_ops *ol_ops = params->ol_ops;
	uint16_t device_id = params->device_id;

	if (!hif_handle) {
		dp_err("HIF handle is NULL");
		goto fail0;
	}
	/* SOC context size and arch type are derived from the device id */
	arch_id = cdp_get_arch_type_from_devid(device_id);
	soc = qdf_mem_common_alloc(dp_get_soc_context_size(device_id));
	if (!soc) {
		dp_err("DP SOC memory allocation failed");
		goto fail0;
	}

	dp_info("soc memory allocated %pK", soc);
	soc->hif_handle = hif_handle;
	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		goto fail1;

	hif_get_cmem_info(soc->hif_handle,
			  &soc->cmem_base,
			  &soc->cmem_total_size);
	soc->cmem_avail_size = soc->cmem_total_size;
	soc->device_id = device_id;
	soc->cdp_soc.ops =
		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
	if (!soc->cdp_soc.ops)
		goto fail1;

	/* Populate the common CDP ops table before arch-specific setup */
	dp_soc_txrx_ops_attach(soc);
	soc->cdp_soc.ol_ops = ol_ops;
	soc->ctrl_psoc = ctrl_psoc;
	soc->osdev = qdf_osdev;
	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
	dp_soc_init_tx_ilp(soc);
	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
			    &soc->rx_mon_pkt_tlv_size);
	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
						       params->mlo_chip_id);
	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
	soc->arch_id = arch_id;
	soc->link_desc_id_start =
		dp_get_link_desc_id_start(soc->arch_id);
	dp_configure_arch_ops(soc);

	/* Reset wbm sg list and flags */
	dp_rx_wbm_sg_list_reset(soc);

	/* Attach the various debug history buffers */
	dp_soc_cfg_history_attach(soc);
	dp_soc_tx_hw_desc_history_attach(soc);
	dp_soc_rx_history_attach(soc);
	dp_soc_mon_status_ring_history_attach(soc);
	dp_soc_tx_history_attach(soc);
	dp_soc_msdu_done_fail_desc_list_attach(soc);
	dp_soc_msdu_done_fail_history_attach(soc);
	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_ctx failed");
		goto fail2;
	}

	qdf_ssr_driver_dump_register_region("wlan_cfg_ctx", soc->wlan_cfg_ctx,
					    sizeof(*soc->wlan_cfg_ctx));

	/*sync DP soc cfg items with profile support after cfg_soc_attach*/
	wlan_dp_soc_cfg_sync_profile((struct cdp_soc_t *)soc);

	soc->arch_ops.soc_cfg_attach(soc);

	qdf_ssr_driver_dump_register_region("tcl_wbm_map_array",
					    &soc->wlan_cfg_ctx->tcl_wbm_map_array,
					    sizeof(struct wlan_cfg_tcl_wbm_ring_num_map));

	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
		dp_err("failed to allocate link desc pool banks");
		goto fail3;
	}

	if (dp_hw_link_desc_ring_alloc(soc)) {
		dp_err("failed to allocate link_desc_ring");
		goto fail4;
	}

	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
								 params))) {
		dp_err("unable to do target specific attach");
		goto fail5;
	}

	if (dp_soc_srng_alloc(soc)) {
		dp_err("failed to allocate soc srng rings");
		goto fail6;
	}

	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
		goto fail7;
	}

	if (!dp_monitor_modularized_enable()) {
		if (dp_mon_soc_attach_wrapper(soc)) {
			dp_err("failed to attach monitor");
			goto fail8;
		}
	}

	if (hal_reo_shared_qaddr_setup((hal_soc_handle_t)soc->hal_soc,
				       &soc->reo_qref)
	    != QDF_STATUS_SUCCESS) {
		dp_err("unable to setup reo shared qaddr");
		goto fail9;
	}

	/* sysfs stats failure is non-fatal: log, clean up, continue attach */
	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
		dp_err("failed to initialize dp stats sysfs file");
		dp_sysfs_deinitialize_stats(soc);
	}

	dp_soc_swlm_attach(soc);
	dp_soc_set_interrupt_mode(soc);
	dp_soc_set_def_pdev(soc);
	dp_soc_set_qref_debug_list(soc);
	qdf_ssr_driver_dump_register_region("dp_soc", soc, sizeof(*soc));
	qdf_nbuf_ssr_register_region();

	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	return soc;
	/*
	 * Unwind in reverse order of setup. NOTE(review): the history
	 * buffers attached above (cfg/tx/rx/mon-status) are not explicitly
	 * detached on these error paths (only msdu_done_fail history is) -
	 * confirm they are torn down elsewhere.
	 */
fail9:
	if (!dp_monitor_modularized_enable())
		dp_mon_soc_detach_wrapper(soc);
fail8:
	dp_soc_tx_desc_sw_pools_free(soc);
fail7:
	dp_soc_srng_free(soc);
fail6:
	soc->arch_ops.txrx_soc_detach(soc);
fail5:
	dp_hw_link_desc_ring_free(soc);
fail4:
	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
fail3:
	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
fail2:
	dp_soc_msdu_done_fail_history_detach(soc);
	qdf_mem_free(soc->cdp_soc.ops);
fail1:
	qdf_mem_common_free(soc);
fail0:
	return NULL;
}

/**
 * dp_soc_init_wifi3() - Initialize txrx SOC
 * @cdp_soc: Opaque DP SOC handle
 * @ctrl_psoc: Opaque SOC handle from control plane (unused here)
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device (unused here)
 * @ol_ops: Offload operations (unused here)
 * @device_id: Device id (unused here)
 *
 * Delegates to the arch-specific txrx_soc_init op.
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init_wifi3(struct cdp_soc_t *cdp_soc,
			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			struct hif_opaque_softc *hif_handle,
			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;

	return soc->arch_ops.txrx_soc_init(soc, htc_handle, hif_handle);
}

#endif

/**
 * dp_get_pdev_for_mac_id() - Map a MAC id to its dp_pdev
 * @soc: Datapath SOC handle
 * @mac_id: MAC/LMAC index
 *
 * Return: pdev for @mac_id when per-pdev LMAC rings are configured (NULL
 * for an out-of-range id); otherwise the sole pdev (typical for MCL).
 */
void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	if
 (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;

	/* Typically for MCL as there only 1 PDEV*/
	return soc->pdev_list[0];
}

/**
 * dp_update_num_mac_rings_for_dbs() - Adjust MAC-ring count for DBS
 * @soc: Datapath SOC handle
 * @max_mac_rings: in/out ring count; clamped to 1 when DBS is not capable
 *
 * Queries the control plane's is_hw_dbs_capable op (when provided) and
 * leaves *max_mac_rings unchanged only on DBS-capable hardware.
 *
 * Return: none
 */
void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
				     int *max_mac_rings)
{
	bool dbs_enable = false;

	/* Only the op pointer is checked; ol_ops itself is assumed valid */
	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
				is_hw_dbs_capable((void *)soc->ctrl_psoc);

	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
	dp_info("dbs_enable %d, max_mac_rings %d",
		dbs_enable, *max_mac_rings);
}

qdf_export_symbol(dp_update_num_mac_rings_for_dbs);

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/**
 * dp_get_cfr_rcc() - get cfr rcc config
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of objmgr pdev
 *
 * Return: true/false based on cfr mode setting
 */
static
bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = NULL;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return false;
	}

	return pdev->cfr_rcc_mode;
}

/**
 * dp_set_cfr_rcc() - enable/disable cfr rcc config
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of objmgr pdev
 * @enable: Enable/Disable cfr rcc mode
 *
 * Return: none
 */
static
void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = NULL;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	pdev->cfr_rcc_mode = enable;
}

/**
 * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @cfr_rcc_stats: CFR RCC debug statistics buffer
 *
 * Return: none
 */
static inline void
dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
		     sizeof(struct cdp_cfr_rcc_stats));
}

/**
 * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: none
 */
static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("dp pdev is NULL");
		return;
	}

	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
}
#endif

/**
 * dp_bucket_index() - Return index from array
 *
 * @delay: delay measured
 * @array: array used to index corresponding delay
 * @delay_in_us: flag to indicate whether the delay in ms or us
 *
 * Return: index
 */
static uint8_t
dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
{
	uint8_t i = CDP_DELAY_BUCKET_0;
	uint32_t thr_low, thr_high;

	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
		thr_low = array[i];
		thr_high = array[i + 1];

		/* Threshold tables are in ms; scale when delay is in us */
		if (delay_in_us) {
			thr_low = thr_low * USEC_PER_MSEC;
			thr_high = thr_high * USEC_PER_MSEC;
		}
		if (delay >= thr_low && delay <= thr_high)
			return i;
	}
	/* Delay is beyond the last threshold: use the final bucket */
	return (CDP_DELAY_BUCKET_MAX - 1);
}

#ifdef HW_TX_DELAY_STATS_ENABLE
/*
 * cdp_fw_to_hw_delay_range
 * Fw to hw delay ranges in milliseconds
 */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
#else
/* Fw to hw delay ranges in milliseconds (finer buckets at the low end) */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
#endif

/*
 * cdp_sw_enq_delay_range
 * Software enqueue delay ranges in milliseconds
 */
static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};

/*
 * cdp_intfrm_delay_range
 * Interframe delay ranges in milliseconds
 */
static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};

/**
 * dp_fill_delay_buckets() - Fill delay statistics bucket for each
 *			     type of delay
 * @tstats: tid tx stats
 * @rstats: tid rx stats
 * @delay: delay in ms
 * @tid: tid value
 * @mode: type of tx delay mode
 * @ring_id: ring number
 * @delay_in_us: flag to indicate whether the delay in ms or us
 *
 * Return: pointer to cdp_delay_stats structure
 */
static struct cdp_delay_stats *
dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
		      uint8_t tid, uint8_t mode, uint8_t ring_id,
		      bool delay_in_us)
{
	uint8_t delay_index = 0;
	struct cdp_delay_stats *stats = NULL;

	/*
	 * Update delay stats in proper bucket
	 */
	switch (mode) {
	/* Software Enqueue delay ranges */
	case CDP_DELAY_STATS_SW_ENQ:
		if (!tstats)
			break;

		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
					      delay_in_us);
		tstats->swq_delay.delay_bucket[delay_index]++;
		stats = &tstats->swq_delay;
		break;

	/* Tx Completion delay ranges */
	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
		if (!tstats)
			break;

		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
					      delay_in_us);
		tstats->hwtx_delay.delay_bucket[delay_index]++;
		stats = &tstats->hwtx_delay;
		break;

	/* Interframe tx delay ranges */
	case CDP_DELAY_STATS_TX_INTERFRAME:
		if (!tstats)
			break;

		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
					      delay_in_us);
		tstats->intfrm_delay.delay_bucket[delay_index]++;
		stats = &tstats->intfrm_delay;
		break;

	/* Interframe rx delay ranges */
	case CDP_DELAY_STATS_RX_INTERFRAME:
		if (!rstats)
			break;

		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
					      delay_in_us);
		rstats->intfrm_delay.delay_bucket[delay_index]++;
		stats = &rstats->intfrm_delay;
		break;

	/* Ring reap to indication to network stack */
	case CDP_DELAY_STATS_REAP_STACK:
		if (!rstats)
			break;

		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
					      delay_in_us);
		rstats->to_stack_delay.delay_bucket[delay_index]++;
		stats = &rstats->to_stack_delay;
		break;
	default:
		dp_debug("Incorrect delay mode: %d", mode);
	}

	return stats;
}

/**
 * dp_update_delay_stats() - Update bucket and min/max/avg for a delay sample
 * @tstats: tid tx stats (may be NULL for rx-only modes)
 * @rstats: tid rx stats (may be NULL for tx-only modes)
 * @delay: measured delay (ms, or us when @delay_in_us is set)
 * @tid: tid value
 * @mode: type of delay mode (CDP_DELAY_STATS_*)
 * @ring_id: ring number
 * @delay_in_us: flag to indicate whether the delay is in ms or us
 *
 * Return: none
 */
void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
			   uint8_t tid, uint8_t mode, uint8_t ring_id,
			   bool delay_in_us)
{
	struct cdp_delay_stats *dstats = NULL;

	/*
	 * Delay ranges are different for different delay modes
	 * Get the correct index to update delay bucket
	 */
	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
				       ring_id, delay_in_us);
	if (qdf_unlikely(!dstats))
		return;

	if (delay != 0) {
		/*
		 * Compute minimum,average and maximum
		 * delay
		 */
		if (delay < dstats->min_delay)
			dstats->min_delay = delay;

		if (delay > dstats->max_delay)
			dstats->max_delay = delay;

		/*
		 * Average over delay measured till now
		 * (running average: new = (sample + old) / 2)
		 */
		if (!dstats->avg_delay)
			dstats->avg_delay = delay;
		else
			dstats->avg_delay = ((delay + dstats->avg_delay) >> 1);
	}
}

/**
 * dp_get_peer_mac_list() - Collect MAC addresses of peers on a vdev
 * @soc: CDP SOC handle
 * @vdev_id: vdev id
 * @newmac: output array for the MAC addresses
 * @mac_cnt: capacity of @newmac
 * @limit: when true, return 0 if the vdev's peer count exceeds @mac_cnt
 *
 * BSS (self) peers are skipped.
 *
 * Return: number of MAC addresses copied into @newmac
 */
uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
			      u_int16_t mac_cnt, bool limit)
{
	struct dp_soc *dp_soc = (struct dp_soc *)soc;
	struct dp_vdev *vdev =
		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
	struct dp_peer *peer;
	uint16_t new_mac_cnt = 0;

	if (!vdev)
		return new_mac_cnt;

	if (limit && (vdev->num_peers > mac_cnt)) {
		dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
		return 0;
	}

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;
		if (new_mac_cnt < mac_cnt) {
			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
			new_mac_cnt++;
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
	return new_mac_cnt;
}

/**
 * dp_get_peer_id() - Look up the peer id for a MAC address on a vdev
 * @soc: CDP SOC handle
 * @vdev_id: vdev id
 * @mac: peer MAC address
 *
 * Return: peer id, or HTT_INVALID_PEER when the peer is not found
 */
uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac)
{
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      mac, 0, vdev_id,
						      DP_MOD_ID_CDP);
	uint16_t peer_id = HTT_INVALID_PEER;

	if (!peer) {
		dp_cdp_debug("%pK: Peer is NULL!", (struct dp_soc *)soc);
		return peer_id;
	}

	peer_id = peer->peer_id;
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return peer_id;
}

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_set_peer_rx() - Set/clear the OSIF rx callback on a WDS-ext peer
 * @soc: CDP SOC handle
 * @vdev_id: vdev id
 * @mac: peer MAC address
 * @rx: rx callback to install, or NULL to remove the current one
 * @osif_peer: OSIF peer handle to record on the peer
 *
 * Return: QDF_STATUS_SUCCESS on change; QDF_STATUS_E_ALREADY when the
 *	   callback is already in the requested state; QDF_STATUS_E_INVAL
 *	   when the peer (or its txrx peer) cannot be found
 */
QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
				  uint8_t vdev_id,
				  uint8_t *mac,
				  ol_txrx_rx_fp rx,
				  ol_osif_peer_handle osif_peer)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      mac, 0, vdev_id,
						      DP_MOD_ID_CDP);
	QDF_STATUS status = QDF_STATUS_E_INVAL;

	if (!peer) {
		dp_cdp_debug("%pK: Peer is NULL!", (struct dp_soc *)soc);
		return status;
	}

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return status;
	}

	if (rx) {
		/* Install only when no callback is currently set */
		if (txrx_peer->osif_rx) {
			status = QDF_STATUS_E_ALREADY;
		} else {
			txrx_peer->osif_rx = rx;
			status = QDF_STATUS_SUCCESS;
		}
	} else {
		/* Clear only when a callback is currently set */
		if (txrx_peer->osif_rx) {
			txrx_peer->osif_rx = NULL;
			status = QDF_STATUS_SUCCESS;
		} else {
			status = QDF_STATUS_E_ALREADY;
		}
	}

	/* osif_peer is recorded even when status is E_ALREADY */
	txrx_peer->wds_ext.osif_peer = osif_peer;
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}

/**
 * dp_wds_ext_get_peer_osif_handle() - Fetch the OSIF peer handle of a peer
 * @soc: CDP SOC handle
 * @vdev_id: vdev id
 * @mac: peer MAC address
 * @osif_peer: out parameter for the stored OSIF peer handle
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL when the peer or its
 *	   txrx peer cannot be found
 */
QDF_STATUS dp_wds_ext_get_peer_osif_handle(
				ol_txrx_soc_handle soc,
				uint8_t vdev_id,
				uint8_t *mac,
				ol_osif_peer_handle *osif_peer)
{
	struct dp_soc *dp_soc = (struct dp_soc *)soc;
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find(dp_soc,
						      mac, 0, vdev_id,
						      DP_MOD_ID_CDP);

	if (!peer) {
		dp_cdp_debug("%pK: Peer is NULL!", dp_soc);
		return QDF_STATUS_E_INVAL;
	}

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer) {
		dp_cdp_debug("%pK: TXRX Peer is NULL!", dp_soc);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_INVAL;
	}

	*osif_peer = txrx_peer->wds_ext.osif_peer;
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_wds_ext_set_peer_bit() - Set the WDS-ext init bit on a peer
 * @soc: CDP SOC handle
 * @mac: peer MAC address (looked up across all vdevs via DP_VDEV_ALL)
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL when the peer or its
 *	   txrx peer cannot be found
 */
QDF_STATUS dp_wds_ext_set_peer_bit(ol_txrx_soc_handle soc, uint8_t *mac)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      mac, 0, DP_VDEV_ALL,
						      DP_MOD_ID_IPA);
	if (!peer) {
		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
		return QDF_STATUS_E_INVAL;
	}

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
		return QDF_STATUS_E_INVAL;
	}
	qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
				    &txrx_peer->wds_ext.init);
	dp_peer_unref_delete(peer, DP_MOD_ID_IPA);

	return QDF_STATUS_SUCCESS;
}
#endif /* QCA_SUPPORT_WDS_EXTENDED */

/**
 * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
 *			   monitor rings
 * @pdev: Datapath pdev handle
 *
 */
static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint8_t i;

	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
			       RXDMA_BUF,
			       pdev->lmac_id);

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);

			/* Drop the minidump record before ring teardown */
			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
					     base_vaddr_unaligned,
					     soc->rxdma_err_dst_ring[lmac_id].
					     alloc_size,
					     soc->ctrl_psoc,
					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
					     "rxdma_err_dst");
			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
				       RXDMA_DST, lmac_id);
		}
	}


}

/**
 * dp_pdev_srng_init() - initialize all pdev srng rings including
 *			 monitor rings
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
 */
static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint32_t i;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
				 RXDMA_BUF, 0, pdev->lmac_id)) {
			dp_init_err("%pK: dp_srng_init failed rx refill ring",
				    soc);
			goto fail1;
		}
	}

	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
		/* Only valid for MCL */
		pdev = soc->pdev_list[0];

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);
			struct dp_srng *srng =
				&soc->rxdma_err_dst_ring[lmac_id];

			/* Skip rings that are already initialized */
			if (srng->hal_srng)
				continue;

			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
					    soc);
				goto fail1;
			}
			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
					  base_vaddr_unaligned,
					  soc->rxdma_err_dst_ring[lmac_id].
					  alloc_size,
					  soc->ctrl_psoc,
					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
					  "rxdma_err_dst");
		}
	}
	return QDF_STATUS_SUCCESS;

fail1:
	/* Deinit tolerates rings that were never initialized */
	dp_pdev_srng_deinit(pdev);
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
 * @pdev: Datapath pdev handle
 *
 */
static void dp_pdev_srng_free(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint8_t i;

	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);

			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
		}
	}
}

/**
 * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
 *			  monitor rings
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
 */
static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint32_t ring_size;
	uint32_t i;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
				  RXDMA_BUF, ring_size, 0)) {
			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
				    soc);
			goto fail1;
		}
	}

	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
		/*
Only valid for MCL */
		pdev = soc->pdev_list[0];

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);
			struct dp_srng *srng =
				&soc->rxdma_err_dst_ring[lmac_id];

			/* Skip rings that are already allocated */
			if (srng->base_vaddr_unaligned)
				continue;

			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
					    soc);
				goto fail1;
			}
		}
	}

	return QDF_STATUS_SUCCESS;
fail1:
	dp_pdev_srng_free(pdev);
	return QDF_STATUS_E_NOMEM;
}

#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
/**
 * dp_init_link_peer_stats_enabled() - Init link_peer_stats as per config
 * @pdev: DP pdev
 *
 * Return: None
 */
static inline void
dp_init_link_peer_stats_enabled(struct dp_pdev *pdev)
{
	pdev->link_peer_stats = wlan_cfg_is_peer_link_stats_enabled(
						pdev->soc->wlan_cfg_ctx);
}
#else
/* Stub when MLO link stats support is compiled out */
static inline void
dp_init_link_peer_stats_enabled(struct dp_pdev *pdev)
{
}
#endif

/**
 * dp_pdev_init() - Initialize txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: pdev Id
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
			       HTC_HANDLE htc_handle,
			       qdf_device_t qdf_osdev,
			       uint8_t pdev_id)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	void *sojourn_buf;

	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->soc = soc;
	pdev->pdev_id = pdev_id;

	/*
	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution .i.e. in the absence of any vdev.
	 */
	pdev->pdev_deinit = 0;

	if (dp_wdi_event_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_evet_attach failed");
		goto fail0;
	}

	if (dp_pdev_srng_init(pdev)) {
		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
		goto fail1;
	}

	/* Initialize descriptors in TCL Rings used by IPA */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
		dp_ipa_hal_tx_init_alt_data_ring(soc);
	}

	/*
	 * Initialize command/credit ring descriptor
	 * Command/CREDIT ring also used for sending DATA cmds
	 */
	dp_tx_init_cmd_credit_ring(soc);

	dp_tx_pdev_init(pdev);

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));
	pdev->target_pdev_id =
		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);

	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
	}

	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	/* Reset the ring interrupt mask if DPDK is enabled */
	if (wlan_cfg_get_dp_soc_dpdk_cfg(soc->ctrl_psoc)) {
		dp_soc_reset_dpdk_intr_mask(soc);
	}
	/* Reset the IPA vlan ring interrupt mask */
	dp_soc_reset_ipa_vlan_intr_mask(soc);

	TAILQ_INIT(&pdev->vdev_list);
	qdf_spinlock_create(&pdev->vdev_list_lock);
	pdev->vdev_count = 0;
	pdev->is_lro_hash_configured = 0;

	qdf_spinlock_create(&pdev->tx_mutex);
	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;

	DP_STATS_INIT(pdev);

	dp_local_peer_id_pool_init(pdev);

	dp_dscp_tid_map_setup(pdev);
	dp_pcp_tid_map_setup(pdev);

	/* set the reo destination during initialization */
	dp_pdev_set_default_reo(pdev);

	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));

	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
					   sizeof(struct cdp_tx_sojourn_stats), 0, 4,
					   TRUE);

	if (!pdev->sojourn_buf) {
		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
		goto fail2;
	}
	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));

	qdf_event_create(&pdev->fw_peer_stats_event);
	qdf_event_create(&pdev->fw_stats_event);
	qdf_event_create(&pdev->fw_obss_stats_event);

	/* Regular TX descriptors = total minus the special-pool reservation */
	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	pdev->num_tx_spl_allowed =
		wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
	pdev->num_reg_tx_allowed =
		pdev->num_tx_allowed - pdev->num_tx_spl_allowed;
	if (dp_rxdma_ring_setup(soc, pdev)) {
		dp_init_err("%pK: RXDMA ring config failed", soc);
		goto fail3;
	}

	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
		goto fail3;

	if (dp_ipa_ring_resource_setup(soc, pdev))
		goto fail4;

	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
		goto fail4;
	}

	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_pdev_bkp_stats_attach failed"));
		goto fail5;
	}

	if (dp_monitor_pdev_init(pdev)) {
		dp_init_err("%pK: dp_monitor_pdev_init failed", soc);
		goto fail6;
	}

	/* initialize sw rx descriptors */
	dp_rx_pdev_desc_pool_init(pdev);
	/* allocate buffers and replenish the RxDMA ring */
	dp_rx_pdev_buffers_alloc(pdev);

	dp_init_tso_stats(pdev);
	dp_init_link_peer_stats_enabled(pdev);

	/* Initialize dp tx fast path flag */
	pdev->tx_fast_flag = DP_TX_DESC_FLAG_SIMPLE;
	if (soc->hw_txrx_stats_en)
		pdev->tx_fast_flag |= DP_TX_DESC_FLAG_FASTPATH_SIMPLE;

	pdev->rx_fast_flag = false;
	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	return QDF_STATUS_SUCCESS;
	/* Unwind in reverse order of setup */
fail6:
	dp_pdev_bkp_stats_detach(pdev);
fail5:
	dp_ipa_uc_detach(soc, pdev);
fail4:
	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
fail3:
	dp_rxdma_ring_cleanup(soc, pdev);
	qdf_nbuf_free(pdev->sojourn_buf);
fail2:
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);
	dp_pdev_srng_deinit(pdev);
fail1:
	dp_wdi_event_detach(pdev);
fail0:
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_pdev_init_wifi3() - Init txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: pdev Id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
				     HTC_HANDLE htc_handle,
				     qdf_device_t qdf_osdev,
				     uint8_t pdev_id)
{
	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
}

#ifdef FEATURE_DIRECT_LINK
/**
 * dp_setup_direct_link_refill_ring() - Allocate, init and HTT-register the
 *					direct-link refill ring
 *					(rx_refill_buf_ring4) for a pdev
 * @soc_hdl: CDP SOC handle
 * @pdev_id: pdev id
 *
 * Return: pointer to the initialized ring, NULL on failure
 */
struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
						 uint8_t pdev_id)
{
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 14666 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 14667 14668 if (!pdev) { 14669 dp_err("DP pdev is NULL"); 14670 return NULL; 14671 } 14672 14673 if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring4, 14674 RXDMA_BUF, DIRECT_LINK_REFILL_RING_ENTRIES, false)) { 14675 dp_err("SRNG alloc failed for rx_refill_buf_ring4"); 14676 return NULL; 14677 } 14678 14679 if (dp_srng_init(soc, &pdev->rx_refill_buf_ring4, 14680 RXDMA_BUF, DIRECT_LINK_REFILL_RING_IDX, 0)) { 14681 dp_err("SRNG init failed for rx_refill_buf_ring4"); 14682 dp_srng_free(soc, &pdev->rx_refill_buf_ring4); 14683 return NULL; 14684 } 14685 14686 if (htt_srng_setup(soc->htt_handle, pdev_id, 14687 pdev->rx_refill_buf_ring4.hal_srng, RXDMA_BUF)) { 14688 dp_srng_deinit(soc, &pdev->rx_refill_buf_ring4, RXDMA_BUF, 14689 DIRECT_LINK_REFILL_RING_IDX); 14690 dp_srng_free(soc, &pdev->rx_refill_buf_ring4); 14691 return NULL; 14692 } 14693 14694 return &pdev->rx_refill_buf_ring4; 14695 } 14696 14697 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl, 14698 uint8_t pdev_id) 14699 { 14700 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 14701 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); 14702 14703 if (!pdev) { 14704 dp_err("DP pdev is NULL"); 14705 return; 14706 } 14707 14708 dp_srng_deinit(soc, &pdev->rx_refill_buf_ring4, RXDMA_BUF, 0); 14709 dp_srng_free(soc, &pdev->rx_refill_buf_ring4); 14710 } 14711 #endif 14712 14713 #ifdef QCA_MULTIPASS_SUPPORT 14714 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 14715 uint16_t vlan_id, uint16_t group_key) 14716 { 14717 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 14718 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 14719 DP_MOD_ID_TX_MULTIPASS); 14720 QDF_STATUS status; 14721 14722 dp_info("Try: vdev_id %d, vdev %pK, multipass_en %d, vlan_id %d, group_key %d", 14723 vdev_id, vdev, vdev ? 
vdev->multipass_en : 0, vlan_id, 14724 group_key); 14725 if (!vdev || !vdev->multipass_en) { 14726 status = QDF_STATUS_E_INVAL; 14727 goto fail; 14728 } 14729 14730 if (!vdev->iv_vlan_map) { 14731 uint16_t vlan_map_size = (sizeof(uint16_t)) * DP_MAX_VLAN_IDS; 14732 14733 vdev->iv_vlan_map = (uint16_t *)qdf_mem_malloc(vlan_map_size); 14734 if (!vdev->iv_vlan_map) { 14735 QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "iv_vlan_map"); 14736 status = QDF_STATUS_E_NOMEM; 14737 goto fail; 14738 } 14739 14740 /* 14741 * 0 is invalid group key. 14742 * Initilalize array with invalid group keys. 14743 */ 14744 qdf_mem_zero(vdev->iv_vlan_map, vlan_map_size); 14745 } 14746 14747 if (vlan_id >= DP_MAX_VLAN_IDS) { 14748 status = QDF_STATUS_E_INVAL; 14749 goto fail; 14750 } 14751 14752 dp_info("Successful setting: vdev_id %d, vlan_id %d, group_key %d", 14753 vdev_id, vlan_id, group_key); 14754 vdev->iv_vlan_map[vlan_id] = group_key; 14755 status = QDF_STATUS_SUCCESS; 14756 fail: 14757 if (vdev) 14758 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS); 14759 return status; 14760 } 14761 14762 void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 14763 { 14764 struct vlan_ethhdr veth_hdr; 14765 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)nbuf->data; 14766 14767 /* 14768 * Extract VLAN header of 4 bytes: 14769 * Frame Format : {dst_addr[6], src_addr[6], 802.1Q header[4], 14770 * EtherType[2], Payload} 14771 * Before Removal : xx xx xx xx xx xx xx xx xx xx xx xx 81 00 00 02 14772 * 08 00 45 00 00... 14773 * After Removal : xx xx xx xx xx xx xx xx xx xx xx xx 08 00 45 00 14774 * 00... 
	 */
	/* Save the full VLAN ethernet header, pull the 4-byte 802.1Q tag,
	 * then restore the 12 bytes of dst/src MAC in front of the
	 * now-leading EtherType.
	 */
	qdf_mem_copy(&veth_hdr, veh, sizeof(veth_hdr));
	qdf_nbuf_pull_head(nbuf, ETHERTYPE_VLAN_LEN);
	veh = (struct vlan_ethhdr *)nbuf->data;
	qdf_mem_copy(veh, &veth_hdr, 2 * QDF_MAC_ADDR_SIZE);
}

void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
	struct dp_txrx_peer *txrx_peer = NULL;

	/* Multipass peer list is expected to be empty at deinit; any
	 * peer still linked here is logged as an error (list is only
	 * walked, not emptied, by this function).
	 */
	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
	TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list, mpass_peer_list_elem)
		qdf_err("Peers present in mpass list : %d", txrx_peer->peer_id);
	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);

	/* Release the lazily-allocated vlan_id -> group_key map */
	if (vdev->iv_vlan_map) {
		qdf_mem_free(vdev->iv_vlan_map);
		vdev->iv_vlan_map = NULL;
	}

	/* Destroy the lock created in dp_peer_multipass_list_init() */
	qdf_spinlock_destroy(&vdev->mpass_peer_mutex);
}

void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
	/*
	 * vdev->iv_vlan_map is allocated when the first configuration command
	 * is issued to avoid unnecessary allocation for regular mode VAP.
	 */
	TAILQ_INIT(&vdev->mpass_peer_list);
	qdf_spinlock_create(&vdev->mpass_peer_mutex);
}
#endif /* QCA_MULTIPASS_SUPPORT */

#ifdef WLAN_FEATURE_SSR_DRIVER_DUMP
#define MAX_STR_LEN 50
#define MAX_SRNG_STR_LEN 30

void dp_ssr_dump_srng_register(char *region_name, struct dp_srng *srng, int num)
{
	char ring[MAX_SRNG_STR_LEN], ring_handle[MAX_STR_LEN];

	/* Build "<name>_<num>" for indexed rings, plain "<name>" when
	 * num < 0 marks a singleton ring.
	 * NOTE(review): names longer than MAX_SRNG_STR_LEN are silently
	 * truncated by qdf_snprint — presumably acceptable; confirm callers
	 * keep region_name short enough to stay unique after truncation.
	 */
	if (num >= 0)
		qdf_snprint(ring, MAX_SRNG_STR_LEN, "%s%s%d",
			    region_name, "_", num);
	else
		qdf_snprint(ring, MAX_SRNG_STR_LEN, "%s", region_name);

	qdf_snprint(ring_handle, MAX_STR_LEN, "%s%s", ring, "_handle");

	/* Two regions per ring: the hal_srng control struct ("_handle")
	 * and the ring memory itself.
	 */
	qdf_ssr_driver_dump_register_region(ring_handle, srng->hal_srng,
					    sizeof(struct hal_srng));
	qdf_ssr_driver_dump_register_region(ring,
					    srng->base_vaddr_aligned,
					    srng->alloc_size);
}

void dp_ssr_dump_srng_unregister(char *region_name, int num)
{
	char ring[MAX_SRNG_STR_LEN], ring_handle[MAX_STR_LEN];

	/* Rebuild the exact names used at registration time so the
	 * regions can be looked up and removed.
	 */
	if (num >= 0)
		qdf_snprint(ring, MAX_SRNG_STR_LEN, "%s%s%d",
			    region_name, "_", num);
	else
		qdf_snprint(ring, MAX_SRNG_STR_LEN, "%s", region_name);

	qdf_snprint(ring_handle, MAX_STR_LEN, "%s%s", ring, "_handle");

	qdf_ssr_driver_dump_unregister_region(ring);
	qdf_ssr_driver_dump_unregister_region(ring_handle);
}

void dp_ssr_dump_pdev_register(struct dp_pdev *pdev, uint8_t pdev_id)
{
	char pdev_str[MAX_STR_LEN];

	/* Region name "dp_pdev_<id>"; covers the whole dp_pdev struct */
	qdf_snprint(pdev_str, MAX_STR_LEN, "%s%s%d", "dp_pdev", "_", pdev_id);
	qdf_ssr_driver_dump_register_region(pdev_str, pdev, sizeof(*pdev));
}

void dp_ssr_dump_pdev_unregister(uint8_t pdev_id)
{
	char pdev_str[MAX_STR_LEN];

	/* Must match the name format used in dp_ssr_dump_pdev_register() */
	qdf_snprint(pdev_str, MAX_STR_LEN, "%s%s%d", "dp_pdev", "_", pdev_id);
	qdf_ssr_driver_dump_unregister_region(pdev_str);
}
#endif