1 /* 2 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /** 21 * DOC: wma_main.c 22 * 23 * This file contains wma initialization and FW exchange 24 * related functions. 
25 */ 26 27 /* Header files */ 28 29 #include "wma.h" 30 #include "wma_api.h" 31 #include "cds_api.h" 32 #include "wmi_unified_api.h" 33 #include "wlan_qct_sys.h" 34 #include "wni_api.h" 35 #include "ani_global.h" 36 #include "wmi_unified.h" 37 #include "wni_cfg.h" 38 #if defined(CONFIG_HL_SUPPORT) 39 #include "wlan_tgt_def_config_hl.h" 40 #else 41 #include "wlan_tgt_def_config.h" 42 #endif 43 #include "qdf_nbuf.h" 44 #include "qdf_types.h" 45 #include "qdf_mem.h" 46 #include "wma_types.h" 47 #include "lim_api.h" 48 #include "lim_session_utils.h" 49 #include "wlan_cm_tgt_if_tx_api.h" 50 #include "wlan_cm_roam_api.h" 51 52 #include "cds_utils.h" 53 54 #if !defined(REMOVE_PKT_LOG) 55 #include "pktlog_ac.h" 56 #endif /* REMOVE_PKT_LOG */ 57 58 #include "dbglog_host.h" 59 #include "csr_api.h" 60 #include "ol_fw.h" 61 62 #include "wma_internal.h" 63 64 #include "wma_ocb.h" 65 #include "wlan_policy_mgr_api.h" 66 #include "cdp_txrx_cfg.h" 67 #include "cdp_txrx_flow_ctrl_legacy.h" 68 #include "cdp_txrx_flow_ctrl_v2.h" 69 #include "cdp_txrx_ipa.h" 70 #include "cdp_txrx_misc.h" 71 #include "wma_fips_api.h" 72 #include "wma_nan_datapath.h" 73 #include "wma_fw_state.h" 74 #include "wlan_lmac_if_def.h" 75 #include "wlan_lmac_if_api.h" 76 #include "target_if.h" 77 #include "target_if_scan.h" 78 #include "wlan_global_lmac_if_api.h" 79 #include "target_if_pmo.h" 80 #include "wma_he.h" 81 #include "wlan_pmo_obj_mgmt_api.h" 82 83 #include "wlan_reg_tgt_api.h" 84 #include "wlan_reg_services_api.h" 85 #include <cdp_txrx_handle.h> 86 #include <wlan_pmo_ucfg_api.h> 87 #include "wifi_pos_api.h" 88 #include "hif_main.h" 89 #ifdef WLAN_CONV_SPECTRAL_ENABLE 90 #include <target_if_spectral.h> 91 #include <wlan_spectral_utils_api.h> 92 #endif 93 #include "init_event_handler.h" 94 #include "init_deinit_lmac.h" 95 #include "target_if_green_ap.h" 96 #include "service_ready_param.h" 97 #include "wlan_cp_stats_mc_ucfg_api.h" 98 #include "cfg_nan_api.h" 99 #include "wlan_mlme_api.h" 100 #include 
"wlan_mlme_ucfg_api.h" 101 #include "cfg_ucfg_api.h" 102 #include "init_cmd_api.h" 103 #include "nan_ucfg_api.h" 104 #include "wma_coex.h" 105 #include "wma_twt.h" 106 #include "target_if_vdev_mgr_rx_ops.h" 107 #include "wlan_tdls_cfg_api.h" 108 #include "wlan_policy_mgr_i.h" 109 #include "target_if_psoc_timer_tx_ops.h" 110 #include <ftm_time_sync_ucfg_api.h> 111 #include "wlan_ipa_ucfg_api.h" 112 #include "wma_eht.h" 113 114 #ifdef DIRECT_BUF_RX_ENABLE 115 #include <target_if_direct_buf_rx_api.h> 116 #endif 117 118 #include "wlan_pkt_capture_ucfg_api.h" 119 #include "target_if_cm_roam_event.h" 120 #include "wlan_fwol_ucfg_api.h" 121 #include "wlan_tdls_api.h" 122 #include "wlan_twt_cfg_ext_api.h" 123 #include "wlan_mlo_mgr_sta.h" 124 #include "wlan_dp_api.h" 125 #include "wlan_dp_ucfg_api.h" 126 127 #define WMA_LOG_COMPLETION_TIMER 500 /* 500 msecs */ 128 #define WMI_TLV_HEADROOM 128 129 130 static uint32_t g_fw_wlan_feat_caps; 131 /** 132 * wma_get_fw_wlan_feat_caps() - get fw feature capability 133 * @feature: feature enum value 134 * 135 * Return: true/false 136 */ 137 bool wma_get_fw_wlan_feat_caps(enum cap_bitmap feature) 138 { 139 return (g_fw_wlan_feat_caps & (1 << feature)) ? true : false; 140 } 141 142 /** 143 * wma_set_fw_wlan_feat_caps() - set fw feature capability 144 * @feature: feature enum value 145 * 146 * Return: None 147 */ 148 void wma_set_fw_wlan_feat_caps(enum cap_bitmap feature) 149 { 150 g_fw_wlan_feat_caps |= (1 << feature); 151 } 152 153 /** 154 * wma_service_ready_ext_evt_timeout() - Service ready extended event timeout 155 * @data: Timeout handler data 156 * 157 * This function is called when the FW fails to send WMI_SERVICE_READY_EXT_EVENT 158 * message 159 * 160 * Return: None 161 */ 162 static void wma_service_ready_ext_evt_timeout(void *data) 163 { 164 wma_alert("Timeout waiting for WMI_SERVICE_READY_EXT_EVENT"); 165 166 /* Assert here. 
Panic is being called in insmod thread */ 167 QDF_ASSERT(0); 168 } 169 170 /** 171 * wma_get_ini_handle() - API to get WMA ini info handle 172 * @wma: WMA Handle 173 * 174 * Returns the pointer to WMA ini structure. 175 * Return: struct wma_ini_config 176 */ 177 struct wma_ini_config *wma_get_ini_handle(tp_wma_handle wma) 178 { 179 if (wma_validate_handle(wma)) 180 return NULL; 181 182 return &wma->ini_config; 183 } 184 185 int __wma_validate_handle(tp_wma_handle wma_handle, const char *func) 186 { 187 if (!wma_handle) { 188 wma_err("Invalid WMA handle (via %s)", func); 189 return -EINVAL; 190 } 191 192 return 0; 193 } 194 195 #define MAX_SUPPORTED_PEERS_REV1_1 14 196 #define MAX_SUPPORTED_PEERS_REV1_3 32 197 #ifdef WLAN_MAX_CLIENTS_ALLOWED 198 #define MAX_SUPPORTED_PEERS WLAN_MAX_CLIENTS_ALLOWED 199 #else 200 #define MAX_SUPPORTED_PEERS 32 201 #endif 202 #define MIN_NO_OF_PEERS 1 203 204 /** 205 * wma_get_number_of_peers_supported - API to query for number of peers 206 * supported 207 * @wma: WMA Handle 208 * 209 * Return: Max Number of Peers Supported 210 */ 211 static uint8_t wma_get_number_of_peers_supported(tp_wma_handle wma) 212 { 213 struct wma_ini_config *cfg = wma_get_ini_handle(wma); 214 uint8_t max_no_of_peers = cfg ? 
cfg->max_no_of_peers : MIN_NO_OF_PEERS; 215 216 return max_no_of_peers; 217 } 218 219 /** 220 * wma_get_number_of_tids_supported - API to query for number of tids supported 221 * @no_of_peers_supported: Number of peer supported 222 * 223 * Return: Max number of tids supported 224 */ 225 #if defined(CONFIG_HL_SUPPORT) 226 static uint32_t wma_get_number_of_tids_supported(uint8_t no_of_peers_supported, 227 uint8_t num_vdevs) 228 { 229 return 4 * no_of_peers_supported; 230 } 231 #else 232 static uint32_t wma_get_number_of_tids_supported(uint8_t no_of_peers_supported, 233 uint8_t num_vdevs) 234 { 235 return 2 * (no_of_peers_supported + num_vdevs + 2); 236 } 237 #endif 238 239 #if (defined(IPA_DISABLE_OVERRIDE)) && (!defined(IPA_OFFLOAD)) 240 static void wma_set_ipa_disable_config( 241 target_resource_config *tgt_cfg) 242 { 243 tgt_cfg->ipa_disable = true; 244 } 245 #else 246 static void wma_set_ipa_disable_config( 247 target_resource_config *tgt_cfg) 248 { 249 tgt_cfg->ipa_disable = ucfg_ipa_is_enabled() ? false : true; 250 } 251 #endif 252 253 #ifndef NUM_OF_ADDITIONAL_FW_PEERS 254 #define NUM_OF_ADDITIONAL_FW_PEERS 2 255 #endif 256 257 /** 258 * wma_update_num_peers_tids() - Update num_peers and tids based on num_vdevs 259 * @wma_handle: wma handle 260 * @tgt_cfg: Resource config given to target 261 * 262 * Get num_vdevs from tgt_cfg and update num_peers and tids based on it. 263 * 264 * Return: none 265 */ 266 static void wma_update_num_peers_tids(t_wma_handle *wma_handle, 267 target_resource_config *tgt_cfg) 268 269 { 270 uint8_t no_of_peers_supported; 271 272 no_of_peers_supported = wma_get_number_of_peers_supported(wma_handle); 273 274 tgt_cfg->num_peers = no_of_peers_supported + tgt_cfg->num_vdevs + 275 NUM_OF_ADDITIONAL_FW_PEERS; 276 /* The current firmware implementation requires the number of 277 * offload peers should be (number of vdevs + 1). 
278 */ 279 tgt_cfg->num_tids = 280 wma_get_number_of_tids_supported(no_of_peers_supported, 281 tgt_cfg->num_vdevs); 282 } 283 284 #ifdef FEATURE_WDS 285 /** 286 * wma_set_peer_map_unmap_v2_config() - Update peer_map_unmap_v2 287 * @psoc: Object manager psoc 288 * @tgt_cfg: Resource config given to target 289 * 290 * This function enables Peer map/unmap v2 feature. 291 * 292 * Return: none 293 */ 294 static void wma_set_peer_map_unmap_v2_config(struct wlan_objmgr_psoc *psoc, 295 target_resource_config *tgt_cfg) 296 { 297 tgt_cfg->peer_map_unmap_v2 = 298 wlan_mlme_get_wds_mode(psoc) ? true : false; 299 } 300 #else 301 static void wma_set_peer_map_unmap_v2_config(struct wlan_objmgr_psoc *psoc, 302 target_resource_config *tgt_cfg) 303 { 304 tgt_cfg->peer_map_unmap_v2 = false; 305 } 306 #endif 307 308 #ifdef FEATURE_SET 309 /** 310 * wma_get_concurrency_support() - Get concurrency support 311 * @psoc: Object manager psoc 312 * 313 * Return: WMI_HOST_BAND_CONCURRENCY 314 */ 315 static WMI_HOST_BAND_CONCURRENCY 316 wma_get_concurrency_support(struct wlan_objmgr_psoc *psoc) 317 { 318 bool is_sbs_enabled = false; 319 320 if (policy_mgr_is_dual_mac_disabled_in_ini(psoc)) 321 return WMI_HOST_BAND_CONCURRENCY_NONE; 322 323 policy_mgr_get_sbs_cfg(psoc, &is_sbs_enabled); 324 325 if (is_sbs_enabled) 326 return WMI_HOST_BAND_CONCURRENCY_DBS_SBS; 327 else 328 return WMI_HOST_BAND_CONCURRENCY_DBS; 329 } 330 331 /** 332 * wma_update_set_feature_version() - Update the set feature version 333 * 334 * @fs: Feature set structure in which version needs to be updated. 335 * 336 * Version 1 - Base feature version 337 * Version 2 - WMI_HOST_VENDOR1_REQ1_VERSION_3_30 updated. 338 * Version 3 - min sleep period for TWT and Scheduled PM in FW updated 339 * Version 4 - WMI_HOST_VENDOR1_REQ1_VERSION_3_40 updated. 
340 * Version 5 - INI based 11BE support updated 341 * Version 6 - sta dump info updated 342 * 343 * Return: None 344 */ 345 static void wma_update_set_feature_version(struct target_feature_set *fs) 346 { 347 fs->feature_set_version = 6; 348 } 349 350 /** 351 * wma_set_feature_set_info() - Set feature set info 352 * @wma_handle: WMA handle 353 * @feature_set: Feature set structure which needs to be filled 354 * 355 * Return: WMI_HOST_BAND_CONCURRENCY 356 */ 357 static void wma_set_feature_set_info(tp_wma_handle wma_handle, 358 struct target_feature_set *feature_set) 359 { 360 struct cds_context *cds_ctx = 361 (struct cds_context *)(wma_handle->cds_context); 362 struct wlan_objmgr_psoc *psoc; 363 struct wlan_scan_features scan_feature_set = {0}; 364 struct wlan_twt_features twt_feature_set = {0}; 365 struct wlan_mlme_features mlme_feature_set = {0}; 366 struct wlan_tdls_features tdls_feature_set = {0}; 367 368 psoc = wma_handle->psoc; 369 if (!psoc) { 370 wma_err("Invalid psoc"); 371 return; 372 } 373 374 if (!cds_ctx) { 375 wma_err("Invalid cds context"); 376 return; 377 } 378 379 if (!cds_ctx->cds_cfg) { 380 wma_err("Invalid cds config"); 381 return; 382 } 383 384 feature_set->wifi_standard = 385 cds_ctx->cds_cfg->cds_feature_set.wifi_standard; 386 feature_set->sap_5g_supported = 387 cds_ctx->cds_cfg->cds_feature_set.sap_5g_supported; 388 feature_set->sap_6g_supported = 389 cds_ctx->cds_cfg->cds_feature_set.sap_6g_supported; 390 feature_set->band_capability = 391 cds_ctx->cds_cfg->cds_feature_set.band_capability; 392 393 feature_set->concurrency_support = wma_get_concurrency_support(psoc); 394 395 wlan_scan_get_feature_info(psoc, &scan_feature_set); 396 feature_set->pno_in_unassoc_state = 397 scan_feature_set.pno_in_unassoc_state; 398 if (feature_set->pno_in_unassoc_state) 399 feature_set->pno_in_assoc_state = 400 scan_feature_set.pno_in_assoc_state; 401 402 wlan_twt_get_feature_info(psoc, &twt_feature_set); 403 feature_set->enable_twt = 
twt_feature_set.enable_twt; 404 if (feature_set->enable_twt) { 405 feature_set->enable_twt_requester = 406 twt_feature_set.enable_twt_requester; 407 feature_set->enable_twt_broadcast = 408 twt_feature_set.enable_twt_broadcast; 409 feature_set->enable_twt_flexible = 410 twt_feature_set.enable_twt_flexible; 411 } 412 413 feature_set->enable_rfc835 = true; 414 415 wlan_mlme_get_feature_info(psoc, &mlme_feature_set); 416 417 feature_set->enable_wifi_optimizer = 418 mlme_feature_set.enable_wifi_optimizer; 419 feature_set->sap_max_num_clients = 420 mlme_feature_set.sap_max_num_clients; 421 422 feature_set->vendor_req_1_version = 423 mlme_feature_set.vendor_req_1_version; 424 feature_set->roaming_high_cu_roam_trigger = 425 mlme_feature_set.roaming_high_cu_roam_trigger; 426 feature_set->roaming_emergency_trigger = 427 mlme_feature_set.roaming_emergency_trigger; 428 feature_set->roaming_btm_trihgger = 429 mlme_feature_set.roaming_btm_trihgger; 430 feature_set->roaming_idle_trigger = 431 mlme_feature_set.roaming_idle_trigger; 432 feature_set->roaming_wtc_trigger = 433 mlme_feature_set.roaming_wtc_trigger; 434 feature_set->roaming_btcoex_trigger = 435 mlme_feature_set.roaming_btcoex_trigger; 436 feature_set->roaming_btw_wpa_wpa2 = 437 mlme_feature_set.roaming_btw_wpa_wpa2; 438 feature_set->roaming_manage_chan_list_api = 439 mlme_feature_set.roaming_manage_chan_list_api; 440 441 feature_set->roaming_adaptive_11r = 442 mlme_feature_set.roaming_adaptive_11r; 443 feature_set->roaming_ctrl_api_get_set = 444 mlme_feature_set.roaming_ctrl_api_get_set; 445 feature_set->roaming_ctrl_api_reassoc = 446 mlme_feature_set.roaming_ctrl_api_reassoc; 447 feature_set->roaming_ctrl_get_cu = 448 mlme_feature_set.roaming_ctrl_get_cu; 449 feature_set->vendor_req_2_version = 450 mlme_feature_set.vendor_req_2_version; 451 feature_set->sta_dual_p2p_support = 452 mlme_feature_set.sta_dual_p2p_support; 453 if (mlme_feature_set.enable2x2) 454 feature_set->num_antennas = WMI_HOST_MIMO_2X2; 455 else 456 
feature_set->num_antennas = WMI_HOST_SISO; 457 458 feature_set->set_country_code_hal_supported = true; 459 feature_set->get_valid_channel_supported = true; 460 feature_set->supported_dot11mode = feature_set->wifi_standard; 461 feature_set->sap_wpa3_support = true; 462 feature_set->assurance_disconnect_reason_api = true; 463 feature_set->frame_pcap_log_mgmt = 464 ucfg_dp_is_local_pkt_capture_enabled(psoc); 465 feature_set->frame_pcap_log_ctrl = feature_set->frame_pcap_log_mgmt; 466 feature_set->frame_pcap_log_data = feature_set->frame_pcap_log_mgmt; 467 468 /* 469 * This information is hardcoded based on hdd_sta_akm_suites, 470 *wlan_crypto_key_mgmt and wlan_crypto_rsnx_cap 471 */ 472 473 /* WLAN_CRYPTO_RSNX_CAP_SAE_H2E support*/ 474 feature_set->security_wpa3_sae_h2e = true; 475 feature_set->security_wpa3_sae_ft = true; 476 feature_set->security_wpa3_enterp_suitb = true; 477 feature_set->security_wpa3_enterp_suitb_192bit = true; 478 feature_set->security_fills_sha_256 = true; 479 feature_set->security_fills_sha_384 = true; 480 feature_set->security_fills_sha_256_FT = true; 481 feature_set->security_fills_sha_384_FT = true; 482 /* This is OWE security support */ 483 feature_set->security_enhanced_open = true; 484 485 feature_set->enable_nan = cfg_nan_get_enable(psoc); 486 487 wlan_tdls_get_features_info(psoc, &tdls_feature_set); 488 feature_set->enable_tdls = tdls_feature_set.enable_tdls; 489 if (feature_set->enable_tdls) { 490 feature_set->enable_tdls_offchannel = 491 tdls_feature_set.enable_tdls_offchannel; 492 feature_set->max_tdls_peers = tdls_feature_set.max_tdls_peers; 493 feature_set->enable_tdls_capability_enhance = 494 tdls_feature_set.enable_tdls_capability_enhance; 495 } 496 497 if (feature_set->sap_6g_supported) 498 feature_set->enable_p2p_6e = 499 policy_mgr_is_6ghz_conc_mode_supported( 500 psoc, 501 PM_P2P_CLIENT_MODE); 502 503 feature_set->peer_bigdata_getbssinfo_support = true; 504 feature_set->peer_bigdata_assocreject_info_support = true; 505 
feature_set->peer_getstainfo_support = true; 506 feature_set->sta_dump_support = true; 507 wma_update_set_feature_version(feature_set); 508 } 509 510 /** 511 * wma_send_feature_set_cmd() - Send feature set command to FW 512 * @wma_handle: WMA handle 513 * 514 * Return: None 515 */ 516 static void wma_send_feature_set_cmd(tp_wma_handle wma_handle) 517 { 518 struct target_feature_set feature_set; 519 520 if (!wma_handle) { 521 wma_err("Invalid wma_handle"); 522 return; 523 } 524 525 wma_set_feature_set_info(wma_handle, &feature_set); 526 527 wmi_feature_set_cmd_send(wma_handle->wmi_handle, 528 &feature_set); 529 } 530 531 /** 532 * wma_is_feature_set_supported() - Check if feaure set is supported or not 533 * @wma_handle: WMA handle 534 * 535 * Return: True, if feature set is supported else return false 536 */ 537 static bool wma_is_feature_set_supported(tp_wma_handle wma_handle) 538 { 539 struct cds_context *cds_ctx = 540 (struct cds_context *)(wma_handle->cds_context); 541 bool is_feature_enabled_from_fw; 542 543 if (!cds_ctx) { 544 wma_err("Invalid cds context"); 545 return false; 546 } 547 548 if (!cds_ctx->cds_cfg) { 549 wma_err("Invalid cds config"); 550 return false; 551 } 552 553 is_feature_enabled_from_fw = 554 wmi_service_enabled(wma_handle->wmi_handle, 555 wmi_service_feature_set_event_support); 556 557 if (!is_feature_enabled_from_fw) 558 wma_debug("Get wifi feature is disabled from fw"); 559 560 return (is_feature_enabled_from_fw && 561 cds_ctx->cds_cfg->get_wifi_features); 562 } 563 #else 564 static inline void wma_send_feature_set_cmd(tp_wma_handle wma_handle) 565 { 566 } 567 568 static bool wma_is_feature_set_supported(tp_wma_handle wma_handle) 569 { 570 return false; 571 } 572 573 #endif 574 575 /** 576 * wma_set_default_tgt_config() - set default tgt config 577 * @wma_handle: wma handle 578 * @tgt_cfg: Resource config given to target 579 * 580 * Return: none 581 */ 582 static void wma_set_default_tgt_config(tp_wma_handle wma_handle, 583 
target_resource_config *tgt_cfg, 584 struct cds_config_info *cds_cfg) 585 { 586 enum QDF_GLOBAL_MODE con_mode; 587 588 qdf_mem_zero(tgt_cfg, sizeof(target_resource_config)); 589 590 tgt_cfg->num_vdevs = cds_cfg->num_vdevs; 591 wma_update_num_peers_tids(wma_handle, tgt_cfg); 592 593 /* The current firmware implementation requires the number of 594 * offload peers should be (number of vdevs + 1). 595 */ 596 tgt_cfg->num_offload_peers = cds_cfg->ap_maxoffload_peers + 1; 597 tgt_cfg->num_offload_reorder_buffs = 598 cds_cfg->ap_maxoffload_reorderbuffs + 1; 599 tgt_cfg->num_peer_keys = CFG_TGT_NUM_PEER_KEYS; 600 tgt_cfg->ast_skid_limit = CFG_TGT_AST_SKID_LIMIT; 601 tgt_cfg->tx_chain_mask = CFG_TGT_DEFAULT_TX_CHAIN_MASK; 602 tgt_cfg->rx_chain_mask = CFG_TGT_DEFAULT_RX_CHAIN_MASK; 603 tgt_cfg->rx_timeout_pri[0] = CFG_TGT_RX_TIMEOUT_LO_PRI; 604 tgt_cfg->rx_timeout_pri[1] = CFG_TGT_RX_TIMEOUT_LO_PRI; 605 tgt_cfg->rx_timeout_pri[2] = CFG_TGT_RX_TIMEOUT_LO_PRI; 606 tgt_cfg->rx_timeout_pri[3] = CFG_TGT_RX_TIMEOUT_HI_PRI; 607 tgt_cfg->rx_decap_mode = CFG_TGT_RX_DECAP_MODE; 608 tgt_cfg->scan_max_pending_req = WLAN_MAX_ACTIVE_SCANS_ALLOWED; 609 tgt_cfg->bmiss_offload_max_vdev = 610 CFG_TGT_DEFAULT_BMISS_OFFLOAD_MAX_VDEV; 611 tgt_cfg->roam_offload_max_vdev = CFG_TGT_DEFAULT_ROAM_OFFLOAD_MAX_VDEV; 612 tgt_cfg->roam_offload_max_ap_profiles = 613 CFG_TGT_DEFAULT_ROAM_OFFLOAD_MAX_PROFILES; 614 tgt_cfg->num_mcast_groups = CFG_TGT_DEFAULT_NUM_MCAST_GROUPS; 615 tgt_cfg->num_mcast_table_elems = CFG_TGT_DEFAULT_NUM_MCAST_TABLE_ELEMS; 616 tgt_cfg->mcast2ucast_mode = CFG_TGT_DEFAULT_MCAST2UCAST_MODE; 617 tgt_cfg->tx_dbg_log_size = CFG_TGT_DEFAULT_TX_DBG_LOG_SIZE; 618 tgt_cfg->num_wds_entries = CFG_TGT_WDS_ENTRIES; 619 tgt_cfg->dma_burst_size = CFG_TGT_DEFAULT_DMA_BURST_SIZE; 620 tgt_cfg->mac_aggr_delim = CFG_TGT_DEFAULT_MAC_AGGR_DELIM; 621 tgt_cfg->rx_skip_defrag_timeout_dup_detection_check = 622 CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK, 623 tgt_cfg->vow_config = 
CFG_TGT_DEFAULT_VOW_CONFIG; 624 tgt_cfg->gtk_offload_max_vdev = CFG_TGT_DEFAULT_GTK_OFFLOAD_MAX_VDEV; 625 tgt_cfg->num_msdu_desc = CFG_TGT_NUM_MSDU_DESC; 626 tgt_cfg->max_frag_entries = CFG_TGT_MAX_FRAG_TABLE_ENTRIES; 627 tgt_cfg->num_tdls_vdevs = CFG_TGT_NUM_TDLS_VDEVS; 628 tgt_cfg->num_tdls_conn_table_entries = 629 QDF_MIN(CFG_TGT_NUM_TDLS_CONN_TABLE_ENTRIES, 630 cfg_tdls_get_max_peer_count(wma_handle->psoc)); 631 tgt_cfg->beacon_tx_offload_max_vdev = 632 CFG_TGT_DEFAULT_BEACON_TX_OFFLOAD_MAX_VDEV; 633 tgt_cfg->num_multicast_filter_entries = 634 CFG_TGT_MAX_MULTICAST_FILTER_ENTRIES; 635 tgt_cfg->num_wow_filters = 0; 636 tgt_cfg->num_keep_alive_pattern = MAXNUM_PERIODIC_TX_PTRNS; 637 tgt_cfg->num_max_sta_vdevs = CFG_TGT_DEFAULT_MAX_STA_VDEVS; 638 tgt_cfg->keep_alive_pattern_size = 0; 639 tgt_cfg->max_tdls_concurrent_sleep_sta = 640 CFG_TGT_NUM_TDLS_CONC_SLEEP_STAS; 641 tgt_cfg->max_tdls_concurrent_buffer_sta = 642 CFG_TGT_NUM_TDLS_CONC_BUFFER_STAS; 643 tgt_cfg->wmi_send_separate = 0; 644 tgt_cfg->num_ocb_vdevs = CFG_TGT_NUM_OCB_VDEVS; 645 tgt_cfg->num_ocb_channels = CFG_TGT_NUM_OCB_CHANNELS; 646 tgt_cfg->num_ocb_schedules = CFG_TGT_NUM_OCB_SCHEDULES; 647 tgt_cfg->twt_ap_sta_count = CFG_TGT_DEFAULT_TWT_AP_STA_COUNT; 648 tgt_cfg->enable_pci_gen = cfg_get(wma_handle->psoc, CFG_ENABLE_PCI_GEN); 649 650 tgt_cfg->mgmt_comp_evt_bundle_support = true; 651 tgt_cfg->tx_msdu_new_partition_id_support = true; 652 tgt_cfg->is_sap_connected_d3wow_enabled = 653 ucfg_pmo_get_sap_mode_bus_suspend(wma_handle->psoc); 654 tgt_cfg->is_go_connected_d3wow_enabled = 655 ucfg_pmo_get_go_mode_bus_suspend(wma_handle->psoc); 656 tgt_cfg->num_max_active_vdevs = 657 policy_mgr_get_max_conc_cxns(wma_handle->psoc); 658 tgt_cfg->num_max_mlo_link_per_ml_bss = 659 wlan_mlme_get_sta_mlo_conn_max_num(wma_handle->psoc); 660 cfg_nan_get_max_ndi(wma_handle->psoc, 661 &tgt_cfg->max_ndi); 662 663 con_mode = cds_get_conparam(); 664 if (con_mode == QDF_GLOBAL_MONITOR_MODE) 665 tgt_cfg->rx_decap_mode = 
CFG_TGT_RX_DECAP_MODE_RAW; 666 667 if (con_mode == QDF_GLOBAL_FTM_MODE) { 668 tgt_cfg->num_offload_peers = 0; 669 tgt_cfg->num_offload_reorder_buffs = 0; 670 tgt_cfg->bmiss_offload_max_vdev = 0; 671 tgt_cfg->roam_offload_max_vdev = 0; 672 tgt_cfg->roam_offload_max_ap_profiles = 0; 673 tgt_cfg->beacon_tx_offload_max_vdev = 1; 674 tgt_cfg->num_multicast_filter_entries = 0; 675 tgt_cfg->gtk_offload_max_vdev = 0; 676 } 677 cfg_nan_get_ndp_max_sessions(wma_handle->psoc, 678 &tgt_cfg->max_ndp_sessions); 679 680 wma_set_ipa_disable_config(tgt_cfg); 681 wma_set_peer_map_unmap_v2_config(wma_handle->psoc, tgt_cfg); 682 683 tgt_cfg->notify_frame_support = DP_MARK_NOTIFY_FRAME_SUPPORT; 684 } 685 686 /** 687 * wma_cli_get_command() - WMA "get" command processor 688 * @vdev_id: virtual device for the command 689 * @param_id: parameter id 690 * @vpdev: parameter category 691 * 692 * Return: parameter value on success, -EINVAL on failure 693 */ 694 int wma_cli_get_command(int vdev_id, int param_id, int vpdev) 695 { 696 int ret = 0; 697 tp_wma_handle wma; 698 struct wma_txrx_node *intr = NULL; 699 700 wma = cds_get_context(QDF_MODULE_ID_WMA); 701 if (!wma) 702 return -EINVAL; 703 704 intr = wma->interfaces; 705 706 if (VDEV_CMD == vpdev) { 707 switch (param_id) { 708 case wmi_vdev_param_nss: 709 ret = intr[vdev_id].config.nss; 710 break; 711 #ifdef QCA_SUPPORT_GTX 712 case wmi_vdev_param_gtx_ht_mcs: 713 ret = intr[vdev_id].config.gtx_info.gtxRTMask[0]; 714 break; 715 case wmi_vdev_param_gtx_vht_mcs: 716 ret = intr[vdev_id].config.gtx_info.gtxRTMask[1]; 717 break; 718 case wmi_vdev_param_gtx_usr_cfg: 719 ret = intr[vdev_id].config.gtx_info.gtxUsrcfg; 720 break; 721 case wmi_vdev_param_gtx_thre: 722 ret = intr[vdev_id].config.gtx_info.gtxPERThreshold; 723 break; 724 case wmi_vdev_param_gtx_margin: 725 ret = intr[vdev_id].config.gtx_info.gtxPERMargin; 726 break; 727 case wmi_vdev_param_gtx_step: 728 ret = intr[vdev_id].config.gtx_info.gtxTPCstep; 729 break; 730 case 
wmi_vdev_param_gtx_mintpc: 731 ret = intr[vdev_id].config.gtx_info.gtxTPCMin; 732 break; 733 case wmi_vdev_param_gtx_bw_mask: 734 ret = intr[vdev_id].config.gtx_info.gtxBWMask; 735 break; 736 #endif /* QCA_SUPPORT_GTX */ 737 case wmi_vdev_param_ldpc: 738 ret = intr[vdev_id].config.ldpc; 739 break; 740 case wmi_vdev_param_tx_stbc: 741 ret = intr[vdev_id].config.tx_stbc; 742 break; 743 case wmi_vdev_param_rx_stbc: 744 ret = intr[vdev_id].config.rx_stbc; 745 break; 746 case wmi_vdev_param_sgi: 747 ret = intr[vdev_id].config.shortgi; 748 break; 749 case wmi_vdev_param_enable_rtscts: 750 ret = intr[vdev_id].config.rtscts_en; 751 break; 752 case wmi_vdev_param_chwidth: 753 ret = intr[vdev_id].config.chwidth; 754 break; 755 case wmi_vdev_param_fixed_rate: 756 ret = intr[vdev_id].config.tx_rate; 757 break; 758 case wmi_vdev_param_he_dcm_enable: 759 case wmi_vdev_param_he_range_ext: 760 ret = wma_get_he_vdev_param(&intr[vdev_id], param_id); 761 break; 762 default: 763 wma_err("Invalid cli_get vdev command/Not yet implemented 0x%x", 764 param_id); 765 return -EINVAL; 766 } 767 } else if (PDEV_CMD == vpdev) { 768 switch (param_id) { 769 case wmi_pdev_param_ani_enable: 770 ret = wma->pdevconfig.ani_enable; 771 break; 772 case wmi_pdev_param_ani_poll_period: 773 ret = wma->pdevconfig.ani_poll_len; 774 break; 775 case wmi_pdev_param_ani_listen_period: 776 ret = wma->pdevconfig.ani_listen_len; 777 break; 778 case wmi_pdev_param_ani_ofdm_level: 779 ret = wma->pdevconfig.ani_ofdm_level; 780 break; 781 case wmi_pdev_param_ani_cck_level: 782 ret = wma->pdevconfig.ani_cck_level; 783 break; 784 case wmi_pdev_param_dynamic_bw: 785 ret = wma->pdevconfig.cwmenable; 786 break; 787 case wmi_pdev_param_cts_cbw: 788 ret = wma->pdevconfig.cts_cbw; 789 break; 790 case wmi_pdev_param_tx_chain_mask: 791 ret = wma->pdevconfig.txchainmask; 792 break; 793 case wmi_pdev_param_rx_chain_mask: 794 ret = wma->pdevconfig.rxchainmask; 795 break; 796 case wmi_pdev_param_txpower_limit2g: 797 ret = 
wma->pdevconfig.txpow2g; 798 break; 799 case wmi_pdev_param_txpower_limit5g: 800 ret = wma->pdevconfig.txpow5g; 801 break; 802 default: 803 wma_err("Invalid cli_get pdev command/Not yet implemented 0x%x", 804 param_id); 805 return -EINVAL; 806 } 807 } else if (GEN_CMD == vpdev) { 808 switch (param_id) { 809 case GEN_VDEV_PARAM_AMPDU: 810 ret = intr[vdev_id].config.ampdu; 811 break; 812 case GEN_VDEV_PARAM_AMSDU: 813 ret = intr[vdev_id].config.amsdu; 814 break; 815 case GEN_VDEV_ROAM_SYNCH_DELAY: 816 ret = intr[vdev_id].roam_synch_delay; 817 break; 818 case GEN_VDEV_PARAM_TX_AMPDU: 819 ret = intr[vdev_id].config.tx_ampdu; 820 break; 821 case GEN_VDEV_PARAM_RX_AMPDU: 822 ret = intr[vdev_id].config.rx_ampdu; 823 break; 824 case GEN_VDEV_PARAM_TX_AMSDU: 825 ret = intr[vdev_id].config.tx_amsdu; 826 break; 827 case GEN_VDEV_PARAM_RX_AMSDU: 828 ret = intr[vdev_id].config.rx_amsdu; 829 break; 830 default: 831 wma_warn("Invalid generic vdev command/Not yet implemented 0x%x", 832 param_id); 833 return -EINVAL; 834 } 835 } else if (PPS_CMD == vpdev) { 836 switch (param_id) { 837 case WMI_VDEV_PPS_PAID_MATCH: 838 ret = intr[vdev_id].config.pps_params.paid_match_enable; 839 break; 840 case WMI_VDEV_PPS_GID_MATCH: 841 ret = intr[vdev_id].config.pps_params.gid_match_enable; 842 break; 843 case WMI_VDEV_PPS_EARLY_TIM_CLEAR: 844 ret = intr[vdev_id].config.pps_params.tim_clear; 845 break; 846 case WMI_VDEV_PPS_EARLY_DTIM_CLEAR: 847 ret = intr[vdev_id].config.pps_params.dtim_clear; 848 break; 849 case WMI_VDEV_PPS_EOF_PAD_DELIM: 850 ret = intr[vdev_id].config.pps_params.eof_delim; 851 break; 852 case WMI_VDEV_PPS_MACADDR_MISMATCH: 853 ret = intr[vdev_id].config.pps_params.mac_match; 854 break; 855 case WMI_VDEV_PPS_DELIM_CRC_FAIL: 856 ret = intr[vdev_id].config.pps_params.delim_fail; 857 break; 858 case WMI_VDEV_PPS_GID_NSTS_ZERO: 859 ret = intr[vdev_id].config.pps_params.nsts_zero; 860 break; 861 case WMI_VDEV_PPS_RSSI_CHECK: 862 ret = intr[vdev_id].config.pps_params.rssi_chk; 863 
break; 864 default: 865 wma_err("Invalid pps vdev command/Not yet implemented 0x%x", 866 param_id); 867 return -EINVAL; 868 } 869 } else if (QPOWER_CMD == vpdev) { 870 switch (param_id) { 871 case WMI_STA_PS_PARAM_QPOWER_PSPOLL_COUNT: 872 ret = intr[vdev_id].config.qpower_params. 873 max_ps_poll_cnt; 874 break; 875 case WMI_STA_PS_PARAM_QPOWER_MAX_TX_BEFORE_WAKE: 876 ret = intr[vdev_id].config.qpower_params. 877 max_tx_before_wake; 878 break; 879 case WMI_STA_PS_PARAM_QPOWER_SPEC_PSPOLL_WAKE_INTERVAL: 880 ret = intr[vdev_id].config.qpower_params. 881 spec_ps_poll_wake_interval; 882 break; 883 case WMI_STA_PS_PARAM_QPOWER_SPEC_MAX_SPEC_NODATA_PSPOLL: 884 ret = intr[vdev_id].config.qpower_params. 885 max_spec_nodata_ps_poll; 886 break; 887 default: 888 wma_warn("Invalid generic vdev command/Not yet implemented 0x%x", 889 param_id); 890 return -EINVAL; 891 } 892 } else if (GTX_CMD == vpdev) { 893 switch (param_id) { 894 case wmi_vdev_param_gtx_ht_mcs: 895 ret = intr[vdev_id].config.gtx_info.gtxRTMask[0]; 896 break; 897 case wmi_vdev_param_gtx_vht_mcs: 898 ret = intr[vdev_id].config.gtx_info.gtxRTMask[1]; 899 break; 900 case wmi_vdev_param_gtx_usr_cfg: 901 ret = intr[vdev_id].config.gtx_info.gtxUsrcfg; 902 break; 903 case wmi_vdev_param_gtx_thre: 904 ret = intr[vdev_id].config.gtx_info.gtxPERThreshold; 905 break; 906 case wmi_vdev_param_gtx_margin: 907 ret = intr[vdev_id].config.gtx_info.gtxPERMargin; 908 break; 909 case wmi_vdev_param_gtx_step: 910 ret = intr[vdev_id].config.gtx_info.gtxTPCstep; 911 break; 912 case wmi_vdev_param_gtx_mintpc: 913 ret = intr[vdev_id].config.gtx_info.gtxTPCMin; 914 break; 915 case wmi_vdev_param_gtx_bw_mask: 916 ret = intr[vdev_id].config.gtx_info.gtxBWMask; 917 break; 918 default: 919 wma_warn("Invalid generic vdev command/Not yet implemented 0x%x", 920 param_id); 921 return -EINVAL; 922 } 923 } 924 return ret; 925 } 926 927 /** 928 * wma_cli_set2_command() - WMA "set 2 params" command processor 929 * @vdev_id: virtual device for the 
command 930 * @param_id: parameter id 931 * @sval1: first parameter value 932 * @sval2: second parameter value 933 * @vpdev: parameter category 934 * 935 * Command handler for set operations which require 2 parameters 936 * 937 * Return: 0 on success, errno on failure 938 */ 939 int wma_cli_set2_command(int vdev_id, int param_id, int sval1, 940 int sval2, int vpdev) 941 { 942 struct scheduler_msg msg = { 0 }; 943 wma_cli_set_cmd_t *iwcmd; 944 945 iwcmd = qdf_mem_malloc(sizeof(*iwcmd)); 946 if (!iwcmd) 947 return -ENOMEM; 948 949 qdf_mem_zero(iwcmd, sizeof(*iwcmd)); 950 iwcmd->param_value = sval1; 951 iwcmd->param_sec_value = sval2; 952 iwcmd->param_vdev_id = vdev_id; 953 iwcmd->param_id = param_id; 954 iwcmd->param_vp_dev = vpdev; 955 msg.type = WMA_CLI_SET_CMD; 956 msg.reserved = 0; 957 msg.bodyptr = iwcmd; 958 959 if (QDF_STATUS_SUCCESS != 960 scheduler_post_message(QDF_MODULE_ID_WMA, 961 QDF_MODULE_ID_WMA, 962 QDF_MODULE_ID_WMA, &msg)) { 963 qdf_mem_free(iwcmd); 964 return -EIO; 965 } 966 return 0; 967 } 968 969 /** 970 * wma_cli_set_command() - WMA "set" command processor 971 * @vdev_id: virtual device for the command 972 * @param_id: parameter id 973 * @sval: parameter value 974 * @vpdev: parameter category 975 * 976 * Command handler for set operations 977 * 978 * Return: 0 on success, errno on failure 979 */ 980 int wma_cli_set_command(int vdev_id, int param_id, int sval, int vpdev) 981 { 982 return wma_cli_set2_command(vdev_id, param_id, sval, 0, vpdev); 983 984 } 985 986 QDF_STATUS wma_form_unit_test_cmd_and_send(uint32_t vdev_id, 987 uint32_t module_id, uint32_t arg_count, uint32_t *arg) 988 { 989 struct wmi_unit_test_cmd *unit_test_args; 990 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 991 uint32_t i; 992 QDF_STATUS status; 993 struct wmi_unified *wmi_handle; 994 995 wma_debug("enter"); 996 997 if (QDF_GLOBAL_FTM_MODE != cds_get_conparam()) { 998 if (!wma_is_vdev_valid(vdev_id)) 999 return QDF_STATUS_E_FAILURE; 1000 } 1001 1002 if 
(arg_count > WMI_UNIT_TEST_MAX_NUM_ARGS) { 1003 wma_err("arg_count is crossed the boundary"); 1004 return QDF_STATUS_E_FAILURE; 1005 } 1006 1007 if (wma_validate_handle(wma_handle)) 1008 return QDF_STATUS_E_FAILURE; 1009 1010 wmi_handle = wma_handle->wmi_handle; 1011 if (wmi_validate_handle(wmi_handle)) 1012 return QDF_STATUS_E_FAILURE; 1013 1014 unit_test_args = qdf_mem_malloc(sizeof(*unit_test_args)); 1015 if (!unit_test_args) 1016 return QDF_STATUS_E_NOMEM; 1017 1018 unit_test_args->vdev_id = vdev_id; 1019 unit_test_args->module_id = module_id; 1020 unit_test_args->num_args = arg_count; 1021 for (i = 0; i < arg_count; i++) 1022 unit_test_args->args[i] = arg[i]; 1023 1024 status = wmi_unified_unit_test_cmd(wmi_handle, 1025 unit_test_args); 1026 qdf_mem_free(unit_test_args); 1027 wma_debug("exit"); 1028 1029 return status; 1030 } 1031 1032 static void wma_process_send_addba_req(tp_wma_handle wma_handle, 1033 struct send_add_ba_req *send_addba) 1034 { 1035 QDF_STATUS status; 1036 struct wmi_unified *wmi_handle; 1037 1038 if (wma_validate_handle(wma_handle)) { 1039 qdf_mem_free(send_addba); 1040 return; 1041 } 1042 1043 wmi_handle = wma_handle->wmi_handle; 1044 if (wmi_validate_handle(wmi_handle)) { 1045 qdf_mem_free(send_addba); 1046 return; 1047 } 1048 1049 status = wmi_unified_addba_send_cmd_send(wmi_handle, 1050 send_addba->mac_addr, 1051 &send_addba->param); 1052 if (QDF_STATUS_SUCCESS != status) { 1053 wma_err("Failed to process WMA_SEND_ADDBA_REQ"); 1054 } 1055 wma_debug("sent ADDBA req to" QDF_MAC_ADDR_FMT "tid %d buff_size %d", 1056 QDF_MAC_ADDR_REF(send_addba->mac_addr), 1057 send_addba->param.tidno, 1058 send_addba->param.buffersize); 1059 1060 qdf_mem_free(send_addba); 1061 } 1062 1063 /** 1064 * wma_set_priv_cfg() - set private config parameters 1065 * @wma_handle: wma handle 1066 * @privcmd: private command 1067 * 1068 * Return: 0 for success or error code 1069 */ 1070 static int32_t wma_set_priv_cfg(tp_wma_handle wma_handle, 1071 wma_cli_set_cmd_t 
*privcmd) 1072 { 1073 int32_t ret = 0; 1074 1075 switch (privcmd->param_id) { 1076 case WMA_VDEV_TXRX_FWSTATS_ENABLE_CMDID: 1077 ret = wma_set_txrx_fw_stats_level(wma_handle, 1078 privcmd->param_vdev_id, 1079 privcmd->param_value); 1080 break; 1081 case WMA_VDEV_TXRX_FWSTATS_RESET_CMDID: 1082 ret = wma_txrx_fw_stats_reset(wma_handle, 1083 privcmd->param_vdev_id, 1084 privcmd->param_value); 1085 break; 1086 case WMI_STA_SMPS_FORCE_MODE_CMDID: 1087 ret = wma_set_mimops(wma_handle, 1088 privcmd->param_vdev_id, 1089 privcmd->param_value); 1090 break; 1091 case WMI_STA_SMPS_PARAM_CMDID: 1092 wma_set_smps_params(wma_handle, privcmd->param_vdev_id, 1093 privcmd->param_value); 1094 break; 1095 case WMA_VDEV_MCC_SET_TIME_LATENCY: 1096 { 1097 /* Extract first MCC adapter/vdev channel number and latency */ 1098 uint8_t mcc_channel = privcmd->param_value & 0x000000FF; 1099 uint8_t mcc_channel_latency = 1100 (privcmd->param_value & 0x0000FF00) >> 8; 1101 int ret = -1; 1102 1103 wma_debug("Parsed input: Channel #1:%d, latency:%dms", 1104 mcc_channel, mcc_channel_latency); 1105 ret = wma_set_mcc_channel_time_latency(wma_handle, 1106 mcc_channel, 1107 mcc_channel_latency); 1108 } 1109 break; 1110 case WMA_VDEV_MCC_SET_TIME_QUOTA: 1111 { 1112 /* Extract the MCC 2 adapters/vdevs channel numbers and time 1113 * quota value for the first adapter only (which is specified 1114 * in iwpriv command. 
1115 */ 1116 uint8_t adapter_2_chan_number = 1117 privcmd->param_value & 0x000000FF; 1118 uint8_t adapter_1_chan_number = 1119 (privcmd->param_value & 0x0000FF00) >> 8; 1120 uint8_t adapter_1_quota = 1121 (privcmd->param_value & 0x00FF0000) >> 16; 1122 int ret = -1; 1123 1124 wma_debug("Parsed input: Channel #1:%d, Channel #2:%d, quota 1:%dms", 1125 adapter_1_chan_number, 1126 adapter_2_chan_number, adapter_1_quota); 1127 1128 ret = wma_set_mcc_channel_time_quota(wma_handle, 1129 adapter_1_chan_number, 1130 adapter_1_quota, 1131 adapter_2_chan_number); 1132 } 1133 break; 1134 default: 1135 wma_err("Invalid wma config command id:%d", privcmd->param_id); 1136 ret = -EINVAL; 1137 } 1138 return ret; 1139 } 1140 1141 /** 1142 * wma_set_dtim_period() - set dtim period to FW 1143 * @wma: wma handle 1144 * @dtim_params: dtim params 1145 * 1146 * Return: none 1147 */ 1148 static void wma_set_dtim_period(tp_wma_handle wma, 1149 struct set_dtim_params *dtim_params) 1150 { 1151 struct wma_txrx_node *iface = 1152 &wma->interfaces[dtim_params->session_id]; 1153 if (!wma_is_vdev_valid(dtim_params->session_id)) { 1154 wma_err("invalid VDEV"); 1155 return; 1156 } 1157 wma_debug("set dtim_period %d", dtim_params->dtim_period); 1158 iface->dtimPeriod = dtim_params->dtim_period; 1159 1160 } 1161 1162 static inline bool wma_is_tx_chainmask_valid(int value, 1163 struct target_psoc_info *tgt_hdl) 1164 { 1165 struct wlan_psoc_host_mac_phy_caps *mac_phy_cap; 1166 uint8_t total_mac_phy_cnt, i; 1167 1168 mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl); 1169 if (!mac_phy_cap) { 1170 wma_err("Invalid MAC PHY capabilities handle"); 1171 return false; 1172 } 1173 1174 total_mac_phy_cnt = target_psoc_get_total_mac_phy_cnt(tgt_hdl); 1175 for (i = 0; i < total_mac_phy_cnt; i++) { 1176 if (((mac_phy_cap[i].tx_chain_mask_5G) & (value))) { 1177 return true; 1178 } 1179 } 1180 1181 return false; 1182 } 1183 1184 /** 1185 * wma_convert_ac_value() - map ac setting to the value to be used in FW. 
 * @ac_value: ac value to be mapped.
 *
 * Return: enum wmi_traffic_ac
 */
static inline wmi_traffic_ac wma_convert_ac_value(uint32_t ac_value)
{
	switch (ac_value) {
	case QCA_WLAN_AC_BE:
		return WMI_AC_BE;
	case QCA_WLAN_AC_BK:
		return WMI_AC_BK;
	case QCA_WLAN_AC_VI:
		return WMI_AC_VI;
	case QCA_WLAN_AC_VO:
		return WMI_AC_VO;
	case QCA_WLAN_AC_ALL:
		return WMI_AC_MAX;
	}
	/* Out-of-range input from caller: report it and fall back to
	 * WMI_AC_MAX, the same value used for QCA_WLAN_AC_ALL.
	 */
	wma_err("invalid enum: %u", ac_value);
	return WMI_AC_MAX;
}

#ifdef WLAN_FEATURE_11BE
/**
 * wma_set_per_link_amsdu_cap() - Set AMSDU/AMPDU capability per link to FW.
 * @wma: wma handle
 * @privcmd: pointer to set command parameters
 * @aggr_type: aggregation type
 *
 * Return: QDF_STATUS_SUCCESS if set command is sent successfully, else
 * QDF_STATUS_E_FAILURE
 */
static QDF_STATUS
wma_set_per_link_amsdu_cap(tp_wma_handle wma, wma_cli_set_cmd_t *privcmd,
			   wmi_vdev_custom_aggr_type_t aggr_type)
{
	uint8_t vdev_id;
	uint8_t op_mode;
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	/* Apply the requested aggregation size to every STA-mode vdev.
	 * NOTE(review): if no STA-mode vdev exists, ret stays
	 * QDF_STATUS_E_FAILURE — confirm callers expect that.
	 */
	for (vdev_id = 0; vdev_id < WLAN_MAX_VDEVS; vdev_id++) {
		op_mode = wlan_get_opmode_from_vdev_id(wma->pdev, vdev_id);
		if (op_mode == QDF_STA_MODE) {
			ret = wma_set_tx_rx_aggr_size(vdev_id,
						      privcmd->param_value,
						      privcmd->param_value,
						      aggr_type);
			if (QDF_IS_STATUS_ERROR(ret)) {
				wma_err("set_aggr_size failed for vdev: %d, ret %d",
					vdev_id, ret);
				return ret;
			}
		}
	}

	return ret;
}
#else
/* 11BE disabled: per-link AMSDU/AMPDU configuration is a no-op */
static inline QDF_STATUS
wma_set_per_link_amsdu_cap(tp_wma_handle wma, wma_cli_set_cmd_t *privcmd,
			   wmi_vdev_custom_aggr_type_t aggr_type)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * wma_process_cli_set_cmd() - set parameters to fw
 * @wma: wma handle
 * @privcmd: command
 *
 * Return: none
 */
static void
wma_process_cli_set_cmd(tp_wma_handle wma,
			wma_cli_set_cmd_t *privcmd)
{
	int vid = privcmd->param_vdev_id, pps_val = 0;
	QDF_STATUS ret;
	struct wma_txrx_node *intr = wma->interfaces;
	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
	struct qpower_params *qparams = &intr[vid].config.qpower_params;
	struct pdev_params pdev_param = {0};
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	struct target_psoc_info *tgt_hdl;
	enum wlan_eht_mode eht_mode;

	if (!mac) {
		wma_err("Failed to get mac");
		return;
	}

	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma->psoc);
	if (!tgt_hdl) {
		wma_err("target psoc info is NULL");
		return;
	}

	if (privcmd->param_id >= WMI_CMDID_MAX) {
		/*
		 * This configuration setting is not done using any wmi
		 * command, call appropriate handler.
		 */
		if (wma_set_priv_cfg(wma, privcmd))
			wma_err("Failed to set wma priv configuration");
		return;
	}

	/* Dispatch on the parameter category carried in param_vp_dev */
	switch (privcmd->param_vp_dev) {
	case VDEV_CMD:
		/* Forward a per-vdev WMI parameter as-is */
		if (!wma_is_vdev_valid(privcmd->param_vdev_id)) {
			wma_err("Vdev id is not valid");
			return;
		}

		wma_debug("vdev id %d pid %d pval %d", privcmd->param_vdev_id,
			 privcmd->param_id, privcmd->param_value);
		ret = wma_vdev_set_param(wma->wmi_handle,
					 privcmd->param_vdev_id,
					 privcmd->param_id,
					 privcmd->param_value);
		if (QDF_IS_STATUS_ERROR(ret)) {
			wma_err("wma_vdev_set_param failed ret %d", ret);
			return;
		}
		break;
	case PDEV_CMD:
		/* Validate chainmask values against target capabilities
		 * before sending any pdev parameter.
		 */
		wma_debug("pdev pid %d pval %d", privcmd->param_id,
			 privcmd->param_value);
		if ((privcmd->param_id == wmi_pdev_param_rx_chain_mask) ||
		    (privcmd->param_id == wmi_pdev_param_tx_chain_mask)) {
			if (QDF_STATUS_SUCCESS !=
				wma_check_txrx_chainmask(
				target_if_get_num_rf_chains(tgt_hdl),
				privcmd->param_value)) {
				wma_debug("Chainmask value is invalid");
				return;
			}
		}

		if (privcmd->param_id == wmi_pdev_param_tx_chain_mask) {
			if (!wma_is_tx_chainmask_valid(privcmd->param_value,
						       tgt_hdl)) {
				wma_debug("Chainmask value is invalid");
				return;
			}
		}
		pdev_param.param_id = privcmd->param_id;
		pdev_param.param_value = privcmd->param_value;
		/* TWT AC config carries a userspace enum; translate it */
		if (privcmd->param_id == wmi_pdev_param_twt_ac_config)
			pdev_param.param_value =
				wma_convert_ac_value(pdev_param.param_value);
		ret = wmi_unified_pdev_param_send(wma->wmi_handle,
						 &pdev_param,
						 privcmd->param_sec_value);
		if (QDF_IS_STATUS_ERROR(ret)) {
			wma_err("wma_vdev_set_param failed ret %d", ret);
			return;
		}
		break;
	case GEN_CMD:
	{
		/* NOTE(review): this 'intr' shadows the function-scope
		 * 'intr' with the same value; kept byte-identical.
		 */
		struct wma_txrx_node *intr = wma->interfaces;
		wmi_vdev_custom_aggr_type_t aggr_type =
			WMI_VDEV_CUSTOM_AGGR_TYPE_AMSDU;

		wma_debug("gen pid %d pval %d", privcmd->param_id,
			 privcmd->param_value);

		switch (privcmd->param_id) {
		case GEN_VDEV_PARAM_AMSDU:
		case GEN_VDEV_PARAM_AMPDU:
			if (!soc) {
				wma_err("SOC context is NULL");
				return;
			}

			if (privcmd->param_id == GEN_VDEV_PARAM_AMPDU) {
				ret = cdp_aggr_cfg(soc, privcmd->param_vdev_id,
						   privcmd->param_value, 0);
				if (ret)
					wma_err("cdp_aggr_cfg set ampdu failed ret %d",
						ret);
				else
					intr[privcmd->param_vdev_id].config.
						ampdu = privcmd->param_value;

				aggr_type =
					WMI_VDEV_CUSTOM_AGGR_TYPE_AMPDU;
			}

			/* MLO (MLSR/MLMR) needs the size applied per link */
			wlan_mlme_get_eht_mode(wma->psoc, &eht_mode);
			if (eht_mode == WLAN_EHT_MODE_MLSR ||
			    eht_mode == WLAN_EHT_MODE_MLMR) {
				ret = wma_set_per_link_amsdu_cap(wma, privcmd,
								 aggr_type);
				if (QDF_IS_STATUS_ERROR(ret))
					return;
			} else {
				ret = wma_set_tx_rx_aggr_size(
						vid,
						privcmd->param_value,
						privcmd->param_value,
						aggr_type);
				if (QDF_IS_STATUS_ERROR(ret)) {
					wma_err("set_aggr_size failed ret %d",
						ret);
					return;
				}
			}
			break;
		case GEN_PARAM_CRASH_INJECT:
			if (QDF_GLOBAL_FTM_MODE == cds_get_conparam())
				wma_err("Crash inject not allowed in FTM mode");
			else
				ret = wma_crash_inject(wma,
						privcmd->param_value,
						privcmd->param_sec_value);
			break;
		case GEN_PARAM_CAPTURE_TSF:
			ret = wma_capture_tsf(wma, privcmd->param_value);
			break;
		case GEN_PARAM_RESET_TSF_GPIO:
			ret = wma_reset_tsf_gpio(wma, privcmd->param_value);
			break;
		default:
			ret = wma_set_tsf_auto_report(wma,
						      privcmd->param_vdev_id,
						      privcmd->param_id,
						      privcmd->param_value);
			if (ret == QDF_STATUS_E_FAILURE)
				wma_err("Invalid param id 0x%x",
					privcmd->param_id);
			break;
		}
		break;
	}
	case DBG_CMD:
		/* Debug-log and FW-profiling controls */
		wma_debug("dbg pid %d pval %d", privcmd->param_id,
			 privcmd->param_value);
		switch (privcmd->param_id) {
		case WMI_DBGLOG_LOG_LEVEL:
			ret = dbglog_set_log_lvl(wma->wmi_handle,
						 privcmd->param_value);
			if (ret)
				wma_err("dbglog_set_log_lvl failed ret %d",
					ret);
			break;
		case WMI_DBGLOG_VAP_ENABLE:
			ret = dbglog_vap_log_enable(wma->wmi_handle,
						    privcmd->param_value, true);
			if (ret)
				wma_err("dbglog_vap_log_enable failed ret %d",
					ret);
			break;
		case WMI_DBGLOG_VAP_DISABLE:
			ret = dbglog_vap_log_enable(wma->wmi_handle,
						privcmd->param_value, false);
			if (ret)
				wma_err("dbglog_vap_log_enable failed ret %d",
					ret);
			break;
		case WMI_DBGLOG_MODULE_ENABLE:
			ret = dbglog_module_log_enable(wma->wmi_handle,
						privcmd->param_value, true);
			if (ret)
				wma_err("dbglog_module_log_enable failed ret %d",
					ret);
			break;
		case WMI_DBGLOG_MODULE_DISABLE:
			ret = dbglog_module_log_enable(wma->wmi_handle,
						privcmd->param_value, false);
			if (ret)
				wma_err("dbglog_module_log_enable failed ret %d",
					ret);
			break;
		case WMI_DBGLOG_MOD_LOG_LEVEL:
			ret = dbglog_set_mod_log_lvl(wma->wmi_handle,
						     privcmd->param_value);
			if (ret)
				wma_err("dbglog_module_log_enable failed ret %d",
					ret);
			break;
		case WMI_DBGLOG_MOD_WOW_LOG_LEVEL:
			ret = dbglog_set_mod_wow_log_lvl(wma->wmi_handle,
							 privcmd->param_value);
			if (ret)
				wma_err("WMI_DBGLOG_MOD_WOW_LOG_LEVEL failed ret %d",
					ret);
			break;
		case WMI_DBGLOG_TYPE:
			ret = dbglog_parser_type_init(wma->wmi_handle,
						      privcmd->param_value);
			if (ret)
				wma_err("dbglog_parser_type_init failed ret %d",
					ret);
			break;
		case WMI_DBGLOG_REPORT_ENABLE:
			ret = dbglog_report_enable(wma->wmi_handle,
						   privcmd->param_value);
			if (ret)
				wma_err("dbglog_report_enable failed ret %d",
					ret);
			break;
		case WMI_WLAN_PROFILE_TRIGGER_CMDID:
			ret = wma_unified_fw_profiling_cmd(wma->wmi_handle,
					 WMI_WLAN_PROFILE_TRIGGER_CMDID,
					 privcmd->param_value, 0);
			if (ret)
				wma_err("Profile cmd failed for %d ret %d",
					WMI_WLAN_PROFILE_TRIGGER_CMDID, ret);
			break;
		case WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID:
			ret = wma_unified_fw_profiling_cmd(wma->wmi_handle,
				  WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
				  privcmd->param_value,
				  privcmd->param_sec_value);
			if (ret)
				wma_err("Profile cmd failed for %d ret %d",
					WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
					ret);
			break;
		case WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID:
			ret = wma_unified_fw_profiling_cmd(wma->wmi_handle,
					WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
					privcmd->param_value,
					privcmd->param_sec_value);
			if (ret)
				wma_err("Profile cmd failed for %d ret %d",
					WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
					ret);
			break;
		case WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID:
			ret = wma_unified_fw_profiling_cmd(wma->wmi_handle,
					WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
					0, 0);
			if (ret)
				wma_err("Profile cmd failed for %d ret %d",
					WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
					ret);
			break;
		case WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID:
			ret = wma_unified_fw_profiling_cmd(wma->wmi_handle,
				       WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
				       0, 0);
			if (ret)
				wma_err("Profile cmd failed for %d ret %d",
					WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
					ret);
			break;
		case WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID:
			/* Set the Green AP */
			ret = wmi_unified_green_ap_ps_send
					(wma->wmi_handle, privcmd->param_value,
					 WMA_WILDCARD_PDEV_ID);
			if (ret) {
				wma_err("Set GreenAP Failed val %d",
					privcmd->param_value);
			}
			break;

		default:
			wma_err("Invalid param id 0x%x", privcmd->param_id);
			break;
		}
		break;
	case PPS_CMD:
		/* Packet power save: build pps_val here; it is sent to FW
		 * below only when param_vp_dev == 5.
		 * NOTE(review): ((param_value << 31) & 0xffff0000) keeps only
		 * bit 31 — looks odd but is long-standing behavior; confirm
		 * against the PPS WMI encoding before changing.
		 */
		wma_debug("dbg pid %d pval %d", privcmd->param_id,
			 privcmd->param_value);
		switch (privcmd->param_id) {

		case WMI_VDEV_PPS_PAID_MATCH:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_PAID_MATCH & 0xffff);
			intr[vid].config.pps_params.paid_match_enable =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_GID_MATCH:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_GID_MATCH & 0xffff);
			intr[vid].config.pps_params.gid_match_enable =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_EARLY_TIM_CLEAR:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_EARLY_TIM_CLEAR & 0xffff);
			intr[vid].config.pps_params.tim_clear =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_EARLY_DTIM_CLEAR:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_EARLY_DTIM_CLEAR & 0xffff);
			intr[vid].config.pps_params.dtim_clear =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_EOF_PAD_DELIM:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_EOF_PAD_DELIM & 0xffff);
			intr[vid].config.pps_params.eof_delim =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_MACADDR_MISMATCH:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_MACADDR_MISMATCH & 0xffff);
			intr[vid].config.pps_params.mac_match =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_DELIM_CRC_FAIL:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_DELIM_CRC_FAIL & 0xffff);
			intr[vid].config.pps_params.delim_fail =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_GID_NSTS_ZERO:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_GID_NSTS_ZERO & 0xffff);
			intr[vid].config.pps_params.nsts_zero =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_RSSI_CHECK:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_RSSI_CHECK & 0xffff);
			intr[vid].config.pps_params.rssi_chk =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_5G_EBT:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_5G_EBT & 0xffff);
			intr[vid].config.pps_params.ebt_5g =
				privcmd->param_value;
			break;
		default:
			wma_err("Invalid param id 0x%x", privcmd->param_id);
			break;
		}
		break;

	case QPOWER_CMD:
		/* QPower station power-save tuning; cache on success */
		wma_debug("QPOWER CLI CMD pid %d pval %d", privcmd->param_id,
			 privcmd->param_value);
		switch (privcmd->param_id) {
		case WMI_STA_PS_PARAM_QPOWER_PSPOLL_COUNT:
			wma_debug("QPOWER CLI CMD:Ps Poll Cnt val %d",
				 privcmd->param_value);
			/* Set the QPower Ps Poll Count */
			ret = wma_unified_set_sta_ps_param(wma->wmi_handle,
				vid, WMI_STA_PS_PARAM_QPOWER_PSPOLL_COUNT,
				privcmd->param_value);
			if (ret) {
				wma_err("Set Q-PsPollCnt Failed vdevId %d val %d",
					vid, privcmd->param_value);
			} else {
				qparams->max_ps_poll_cnt = privcmd->param_value;
			}
			break;
		case WMI_STA_PS_PARAM_QPOWER_MAX_TX_BEFORE_WAKE:
			wma_debug("QPOWER CLI CMD:Max Tx Before wake val %d",
				 privcmd->param_value);
			/* Set the QPower Max Tx Before Wake */
			ret = wma_unified_set_sta_ps_param(wma->wmi_handle,
				vid, WMI_STA_PS_PARAM_QPOWER_MAX_TX_BEFORE_WAKE,
				privcmd->param_value);
			if (ret) {
				wma_err("Set Q-MaxTxBefWake Failed vId %d val %d",
					vid, privcmd->param_value);
			} else {
				qparams->max_tx_before_wake =
						privcmd->param_value;
			}
			break;
		case WMI_STA_PS_PARAM_QPOWER_SPEC_PSPOLL_WAKE_INTERVAL:
			wma_debug("QPOWER CLI CMD:Ps Poll Wake Inv val %d",
				 privcmd->param_value);
			/* Set the QPower Spec Ps Poll Wake Inv */
			ret = wma_unified_set_sta_ps_param(wma->wmi_handle, vid,
				WMI_STA_PS_PARAM_QPOWER_SPEC_PSPOLL_WAKE_INTERVAL,
				privcmd->param_value);
			if (ret) {
				wma_err("Set Q-PsPoll WakeIntv Failed vId %d val %d",
					vid, privcmd->param_value);
			} else {
				qparams->spec_ps_poll_wake_interval =
					privcmd->param_value;
			}
			break;
		case WMI_STA_PS_PARAM_QPOWER_SPEC_MAX_SPEC_NODATA_PSPOLL:
			wma_debug("QPOWER CLI CMD:Spec NoData Ps Poll val %d",
				 privcmd->param_value);
			/* Set the QPower Spec NoData PsPoll */
			ret = wma_unified_set_sta_ps_param(wma->wmi_handle, vid,
				WMI_STA_PS_PARAM_QPOWER_SPEC_MAX_SPEC_NODATA_PSPOLL,
				privcmd->param_value);
			if (ret) {
				wma_err("Set Q-SpecNoDataPsPoll Failed vId %d val %d",
					vid, privcmd->param_value);
			} else {
				qparams->max_spec_nodata_ps_poll =
					privcmd->param_value;
			}
			break;

		default:
			wma_err("Invalid param id 0x%x", privcmd->param_id);
			break;
		}
		break;
	case GTX_CMD:
		/* Update the cached GTX config, then push the whole
		 * gtx_info block to firmware.
		 */
		wma_debug("vdev id %d pid %d pval %d", privcmd->param_vdev_id,
			 privcmd->param_id, privcmd->param_value);
		switch (privcmd->param_id) {
		case wmi_vdev_param_gtx_ht_mcs:
			intr[vid].config.gtx_info.gtxRTMask[0] =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;
		case wmi_vdev_param_gtx_vht_mcs:
			intr[vid].config.gtx_info.gtxRTMask[1] =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case wmi_vdev_param_gtx_usr_cfg:
			intr[vid].config.gtx_info.gtxUsrcfg =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case wmi_vdev_param_gtx_thre:
			intr[vid].config.gtx_info.gtxPERThreshold =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case wmi_vdev_param_gtx_margin:
			intr[vid].config.gtx_info.gtxPERMargin =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case wmi_vdev_param_gtx_step:
			intr[vid].config.gtx_info.gtxTPCstep =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case wmi_vdev_param_gtx_mintpc:
			intr[vid].config.gtx_info.gtxTPCMin =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case wmi_vdev_param_gtx_bw_mask:
			intr[vid].config.gtx_info.gtxBWMask =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			if (ret) {
				wma_err("wma_vdev_set_param failed ret %d",
					ret);
				return;
			}
			break;
		default:
			break;
		}
		break;

	default:
		wma_err("Invalid vpdev command id");
	}
	/* Mirror accepted values into the host-side config caches.
	 * param_vp_dev: 1 == VDEV_CMD cache, 2 == PDEV_CMD cache,
	 * 5 == PPS_CMD (send the pps_val built above to firmware).
	 */
	if (1 == privcmd->param_vp_dev) {
		switch (privcmd->param_id) {
		case wmi_vdev_param_nss:
			intr[vid].config.nss = privcmd->param_value;
			break;
		case wmi_vdev_param_ldpc:
			intr[vid].config.ldpc = privcmd->param_value;
			break;
		case wmi_vdev_param_tx_stbc:
			intr[vid].config.tx_stbc = privcmd->param_value;
			break;
		case wmi_vdev_param_rx_stbc:
			intr[vid].config.rx_stbc = privcmd->param_value;
			break;
		case wmi_vdev_param_sgi:
			intr[vid].config.shortgi = privcmd->param_value;
			break;
		case wmi_vdev_param_enable_rtscts:
			intr[vid].config.rtscts_en = privcmd->param_value;
			break;
		case wmi_vdev_param_chwidth:
			intr[vid].config.chwidth = privcmd->param_value;
			break;
		case wmi_vdev_param_fixed_rate:
			intr[vid].config.tx_rate = privcmd->param_value;
			break;
		case wmi_vdev_param_early_rx_adjust_enable:
			intr[vid].config.erx_adjust = privcmd->param_value;
			break;
		case wmi_vdev_param_early_rx_tgt_bmiss_num:
			intr[vid].config.erx_bmiss_num = privcmd->param_value;
			break;
		case wmi_vdev_param_early_rx_bmiss_sample_cycle:
			intr[vid].config.erx_bmiss_cycle = privcmd->param_value;
			break;
		case wmi_vdev_param_early_rx_slop_step:
			intr[vid].config.erx_slop_step = privcmd->param_value;
			break;
		case wmi_vdev_param_early_rx_init_slop:
			intr[vid].config.erx_init_slop = privcmd->param_value;
			break;
		case wmi_vdev_param_early_rx_adjust_pause:
			intr[vid].config.erx_adj_pause = privcmd->param_value;
			break;
		case wmi_vdev_param_early_rx_drift_sample:
			intr[vid].config.erx_dri_sample = privcmd->param_value;
			break;
		case wmi_vdev_param_he_dcm_enable:
		case wmi_vdev_param_he_range_ext:
			wma_set_he_vdev_param(&intr[vid], privcmd->param_id,
					      privcmd->param_value);
			break;
		default:
			wma_debug("vdev cmd is not part vdev_cli_config 0x%x",
				 privcmd->param_id);
			break;
		}
	} else if (2 == privcmd->param_vp_dev) {
		switch (privcmd->param_id) {
		case wmi_pdev_param_ani_enable:
			wma->pdevconfig.ani_enable = privcmd->param_value;
			break;
		case wmi_pdev_param_ani_poll_period:
			wma->pdevconfig.ani_poll_len = privcmd->param_value;
			break;
		case wmi_pdev_param_ani_listen_period:
			wma->pdevconfig.ani_listen_len = privcmd->param_value;
			break;
		case wmi_pdev_param_ani_ofdm_level:
			wma->pdevconfig.ani_ofdm_level = privcmd->param_value;
			break;
		case wmi_pdev_param_ani_cck_level:
			wma->pdevconfig.ani_cck_level = privcmd->param_value;
			break;
		case wmi_pdev_param_dynamic_bw:
			wma->pdevconfig.cwmenable = privcmd->param_value;
			break;
		case wmi_pdev_param_cts_cbw:
			wma->pdevconfig.cts_cbw = privcmd->param_value;
			break;
		case wmi_pdev_param_tx_chain_mask:
			wma->pdevconfig.txchainmask = privcmd->param_value;
			break;
		case wmi_pdev_param_rx_chain_mask:
			wma->pdevconfig.rxchainmask = privcmd->param_value;
			break;
		case wmi_pdev_param_txpower_limit2g:
			wma->pdevconfig.txpow2g = privcmd->param_value;
			if (mac->mlme_cfg->gen.band_capability & BIT(REG_BAND_2G))
				mac->mlme_cfg->power.current_tx_power_level =
					(uint8_t)privcmd->param_value;
			else
				wma_err("Current band is not 2G");
			break;
		case wmi_pdev_param_txpower_limit5g:
			wma->pdevconfig.txpow5g = privcmd->param_value;
			if (mac->mlme_cfg->gen.band_capability & BIT(REG_BAND_5G))
				mac->mlme_cfg->power.current_tx_power_level =
					(uint8_t)privcmd->param_value;
			else
				wma_err("Current band is not 5G");
			break;
		default:
			wma_debug("Invalid wma_cli_set pdev command/Not yet implemented 0x%x",
				 privcmd->param_id);
			break;
		}
	} else if (5 == privcmd->param_vp_dev) {
		ret = wma_vdev_set_param(wma->wmi_handle,
					privcmd->param_vdev_id,
					wmi_vdev_param_packet_powersave,
					pps_val);
		if (ret)
			wma_err("Failed to send wmi packet power save cmd");
		else
			wma_debug("Sent packet power save cmd %d value %x to target",
				privcmd->param_id, pps_val);
	}
}

uint32_t wma_critical_events_in_flight(void)
{
	t_wma_handle *wma;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma)
		return 0;

	if (wmi_validate_handle(wma->wmi_handle))
		return 0;

	return wmi_critical_events_in_flight(wma->wmi_handle);
}

/**
 * wma_process_hal_pwr_dbg_cmd() - send hal pwr dbg cmd to fw.
 * @handle: wma handle
 * @sir_pwr_dbg_params: power debug command parameters
 *
 * This function sends the hal power debug command to fw.
1910 * 1911 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error 1912 */ 1913 QDF_STATUS wma_process_hal_pwr_dbg_cmd(WMA_HANDLE handle, 1914 struct sir_mac_pwr_dbg_cmd * 1915 sir_pwr_dbg_params) 1916 { 1917 tp_wma_handle wma_handle = (tp_wma_handle)handle; 1918 int i; 1919 struct wmi_power_dbg_params wmi_pwr_dbg_params; 1920 QDF_STATUS status; 1921 1922 if (!sir_pwr_dbg_params) { 1923 wma_err("sir_pwr_dbg_params is null"); 1924 return QDF_STATUS_E_INVAL; 1925 } 1926 wmi_pwr_dbg_params.module_id = sir_pwr_dbg_params->module_id; 1927 wmi_pwr_dbg_params.pdev_id = sir_pwr_dbg_params->pdev_id; 1928 wmi_pwr_dbg_params.num_args = sir_pwr_dbg_params->num_args; 1929 1930 for (i = 0; i < wmi_pwr_dbg_params.num_args; i++) 1931 wmi_pwr_dbg_params.args[i] = sir_pwr_dbg_params->args[i]; 1932 1933 status = wmi_unified_send_power_dbg_cmd(wma_handle->wmi_handle, 1934 &wmi_pwr_dbg_params); 1935 1936 return status; 1937 } 1938 1939 static QDF_STATUS wma_discard_fw_event(struct scheduler_msg *msg) 1940 { 1941 if (!msg->bodyptr) 1942 return QDF_STATUS_E_INVAL; 1943 1944 qdf_mem_free(msg->bodyptr); 1945 msg->bodyptr = NULL; 1946 msg->bodyval = 0; 1947 msg->type = 0; 1948 1949 return QDF_STATUS_SUCCESS; 1950 } 1951 1952 QDF_STATUS 1953 wma_vdev_nss_chain_params_send(uint8_t vdev_id, 1954 struct wlan_mlme_nss_chains *user_cfg) 1955 { 1956 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 1957 struct vdev_nss_chains vdev_user_cfg; 1958 if (!wma_handle) 1959 return QDF_STATUS_E_FAILURE; 1960 1961 vdev_user_cfg.disable_rx_mrc[NSS_CHAINS_BAND_2GHZ] = 1962 user_cfg->disable_rx_mrc[NSS_CHAINS_BAND_2GHZ]; 1963 vdev_user_cfg.disable_tx_mrc[NSS_CHAINS_BAND_2GHZ] = 1964 user_cfg->disable_tx_mrc[NSS_CHAINS_BAND_2GHZ]; 1965 vdev_user_cfg.disable_rx_mrc[NSS_CHAINS_BAND_5GHZ] = 1966 user_cfg->disable_rx_mrc[NSS_CHAINS_BAND_5GHZ]; 1967 vdev_user_cfg.disable_tx_mrc[NSS_CHAINS_BAND_5GHZ] = 1968 user_cfg->disable_tx_mrc[NSS_CHAINS_BAND_5GHZ]; 1969 1970 
vdev_user_cfg.num_rx_chains[NSS_CHAINS_BAND_2GHZ] 1971 = user_cfg->num_rx_chains[NSS_CHAINS_BAND_2GHZ]; 1972 vdev_user_cfg.num_tx_chains[NSS_CHAINS_BAND_2GHZ] 1973 = user_cfg->num_tx_chains[NSS_CHAINS_BAND_2GHZ]; 1974 vdev_user_cfg.num_rx_chains[NSS_CHAINS_BAND_5GHZ] = 1975 user_cfg->num_rx_chains[NSS_CHAINS_BAND_5GHZ]; 1976 vdev_user_cfg.num_tx_chains[NSS_CHAINS_BAND_5GHZ] = 1977 user_cfg->num_tx_chains[NSS_CHAINS_BAND_5GHZ]; 1978 1979 vdev_user_cfg.rx_nss[NSS_CHAINS_BAND_2GHZ] = 1980 user_cfg->rx_nss[NSS_CHAINS_BAND_2GHZ]; 1981 vdev_user_cfg.tx_nss[NSS_CHAINS_BAND_2GHZ] = 1982 user_cfg->tx_nss[NSS_CHAINS_BAND_2GHZ]; 1983 vdev_user_cfg.rx_nss[NSS_CHAINS_BAND_5GHZ] = 1984 user_cfg->rx_nss[NSS_CHAINS_BAND_5GHZ]; 1985 vdev_user_cfg.tx_nss[NSS_CHAINS_BAND_5GHZ] = 1986 user_cfg->tx_nss[NSS_CHAINS_BAND_5GHZ]; 1987 1988 vdev_user_cfg.num_tx_chains_11a = user_cfg->num_tx_chains_11a; 1989 vdev_user_cfg.num_tx_chains_11b = user_cfg->num_tx_chains_11b; 1990 vdev_user_cfg.num_tx_chains_11g = user_cfg->num_tx_chains_11g; 1991 1992 return wmi_unified_vdev_nss_chain_params_send(wma_handle->wmi_handle, 1993 vdev_id, 1994 &vdev_user_cfg); 1995 } 1996 1997 /** 1998 * wma_antenna_isolation_event_handler() - antenna isolation event handler 1999 * @handle: wma handle 2000 * @param: event data 2001 * @len: length 2002 * 2003 * Return: 0 for success or error code 2004 */ 2005 static int wma_antenna_isolation_event_handler(void *handle, 2006 u8 *param, 2007 u32 len) 2008 { 2009 struct scheduler_msg cds_msg = {0}; 2010 wmi_coex_report_isolation_event_fixed_param *event; 2011 WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID_param_tlvs *param_buf; 2012 struct sir_isolation_resp *pisolation; 2013 struct mac_context *mac = NULL; 2014 2015 wma_debug("handle %pK param %pK len %d", handle, param, len); 2016 2017 mac = (struct mac_context *)cds_get_context(QDF_MODULE_ID_PE); 2018 if (!mac) { 2019 wma_err("Invalid mac context"); 2020 return -EINVAL; 2021 } 2022 2023 pisolation = 
qdf_mem_malloc(sizeof(*pisolation)); 2024 if (!pisolation) 2025 return 0; 2026 2027 param_buf = 2028 (WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID_param_tlvs *)param; 2029 if (!param_buf) { 2030 wma_err("Invalid isolation event"); 2031 return -EINVAL; 2032 } 2033 event = param_buf->fixed_param; 2034 pisolation->isolation_chain0 = event->isolation_chain0; 2035 pisolation->isolation_chain1 = event->isolation_chain1; 2036 pisolation->isolation_chain2 = event->isolation_chain2; 2037 pisolation->isolation_chain3 = event->isolation_chain3; 2038 2039 wma_debug("chain1 %d chain2 %d chain3 %d chain4 %d", 2040 pisolation->isolation_chain0, pisolation->isolation_chain1, 2041 pisolation->isolation_chain2, pisolation->isolation_chain3); 2042 2043 cds_msg.type = eWNI_SME_ANTENNA_ISOLATION_RSP; 2044 cds_msg.bodyptr = pisolation; 2045 cds_msg.bodyval = 0; 2046 if (QDF_STATUS_SUCCESS != 2047 scheduler_post_message(QDF_MODULE_ID_WMA, 2048 QDF_MODULE_ID_SME, 2049 QDF_MODULE_ID_SME, &cds_msg)) { 2050 wma_err("could not post peer info rsp msg to SME"); 2051 /* free the mem and return */ 2052 qdf_mem_free(pisolation); 2053 } 2054 2055 return 0; 2056 } 2057 2058 /** 2059 * wma_init_max_no_of_peers - API to initialize wma configuration params 2060 * @wma_handle: WMA Handle 2061 * @max_peers: Max Peers supported 2062 * 2063 * Return: void 2064 */ 2065 static uint8_t wma_init_max_no_of_peers(tp_wma_handle wma_handle, 2066 uint16_t max_peers) 2067 { 2068 struct wma_ini_config *cfg = wma_get_ini_handle(wma_handle); 2069 struct hif_opaque_softc *scn = cds_get_context(QDF_MODULE_ID_HIF); 2070 uint32_t tgt_version = hif_get_target_info_handle(scn)->target_version; 2071 uint8_t max_no_of_peers; 2072 uint8_t max_supported_peers; 2073 2074 if (!cfg) { 2075 wma_err("NULL WMA ini handle"); 2076 return 0; 2077 } 2078 2079 switch (tgt_version) { 2080 case AR6320_REV1_1_VERSION: 2081 max_supported_peers = MAX_SUPPORTED_PEERS_REV1_1; 2082 break; 2083 case AR6320_REV1_3_VERSION: 2084 max_supported_peers = 
MAX_SUPPORTED_PEERS_REV1_3; 2085 break; 2086 default: 2087 max_supported_peers = MAX_SUPPORTED_PEERS; 2088 break; 2089 } 2090 max_no_of_peers = (max_peers > max_supported_peers) ? 2091 max_supported_peers : max_peers; 2092 cfg->max_no_of_peers = max_no_of_peers; 2093 2094 return max_no_of_peers; 2095 } 2096 2097 /** 2098 * wma_cleanup_hold_req() - cleanup hold request queue 2099 * @wma: wma handle 2100 * 2101 * Return: none 2102 */ 2103 static void wma_cleanup_hold_req(tp_wma_handle wma) 2104 { 2105 struct wma_target_req *req_msg = NULL; 2106 qdf_list_node_t *node1 = NULL; 2107 2108 qdf_spin_lock_bh(&wma->wma_hold_req_q_lock); 2109 if (!qdf_list_size(&wma->wma_hold_req_queue)) { 2110 qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock); 2111 wma_debug("request queue is empty"); 2112 return; 2113 } 2114 2115 /* peek front, and then cleanup it in wma_hold_req_timer */ 2116 while (QDF_STATUS_SUCCESS == 2117 qdf_list_peek_front(&wma->wma_hold_req_queue, &node1)) { 2118 req_msg = qdf_container_of(node1, struct wma_target_req, node); 2119 qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock); 2120 /* Cleanup timeout handler */ 2121 qdf_mc_timer_stop(&req_msg->event_timeout); 2122 wma_hold_req_timer(req_msg); 2123 qdf_spin_lock_bh(&wma->wma_hold_req_q_lock); 2124 } 2125 qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock); 2126 } 2127 2128 /** 2129 * wma_cleanup_vdev_resp_and_hold_req() - cleaunup the vdev resp and hold req 2130 * queue 2131 * @msg :scheduler msg 2132 * 2133 * Return: QDF_STATUS 2134 */ 2135 static QDF_STATUS 2136 wma_cleanup_vdev_resp_and_hold_req(struct scheduler_msg *msg) 2137 { 2138 tp_wma_handle wma; 2139 2140 if (!msg || !msg->bodyptr) { 2141 wma_err("msg or body pointer is NULL"); 2142 return QDF_STATUS_E_INVAL; 2143 } 2144 2145 wma = msg->bodyptr; 2146 target_if_flush_psoc_vdev_timers(wma->psoc); 2147 wma_cleanup_hold_req(wma); 2148 2149 return QDF_STATUS_SUCCESS; 2150 } 2151 2152 /** 2153 * wma_cleanup_vdev_resp_and_hold_req_flush_cb() - flush cb for the msg to clean 
2154 * up vdev resp and hold req 2155 * @msg :scheduler msg 2156 * 2157 * As passed msg->bodyptr is wma in this case this is dummy flush cb so that 2158 * driver doesn't try to free msg->bodyptr when this msg is flushed. 2159 * 2160 * Return: QDF_STATUS 2161 */ 2162 static inline QDF_STATUS 2163 wma_cleanup_vdev_resp_and_hold_req_flush_cb(struct scheduler_msg *msg) 2164 { 2165 return QDF_STATUS_SUCCESS; 2166 } 2167 2168 /** 2169 * wma_shutdown_notifier_cb - Shutdown notifier call back 2170 * @priv : WMA handle 2171 * 2172 * During recovery, WMA may wait for resume to complete if the crash happens 2173 * while in suspend. This may cause delays in completing the recovery. This call 2174 * back would be called during recovery and the event is completed so that if 2175 * the resume is waiting on FW to respond then it can get out of the wait so 2176 * that recovery thread can start bringing down all the modules. 2177 * 2178 * Return: None 2179 */ 2180 static void wma_shutdown_notifier_cb(void *priv) 2181 { 2182 tp_wma_handle wma_handle = priv; 2183 struct scheduler_msg msg = { 0 }; 2184 QDF_STATUS status; 2185 2186 ucfg_pmo_psoc_wakeup_host_event_received(wma_handle->psoc); 2187 wmi_stop(wma_handle->wmi_handle); 2188 2189 msg.bodyptr = wma_handle; 2190 msg.callback = wma_cleanup_vdev_resp_and_hold_req; 2191 msg.flush_callback = wma_cleanup_vdev_resp_and_hold_req_flush_cb; 2192 status = scheduler_post_message(QDF_MODULE_ID_WMA, 2193 QDF_MODULE_ID_WMA, 2194 QDF_MODULE_ID_TARGET_IF, &msg); 2195 } 2196 2197 struct wma_version_info g_wmi_version_info; 2198 2199 #ifdef WLAN_FEATURE_MEMDUMP_ENABLE 2200 /** 2201 * wma_state_info_dump() - prints state information of wma layer 2202 * @buf: buffer pointer 2203 * @size: size of buffer to be filled 2204 * 2205 * This function is used to dump state information of wma layer 2206 * 2207 * Return: None 2208 */ 2209 static void wma_state_info_dump(char **buf_ptr, uint16_t *size) 2210 { 2211 uint8_t vdev_id; 2212 uint16_t len = 0; 2213 
t_wma_handle *wma; 2214 char *buf = *buf_ptr; 2215 struct wma_txrx_node *iface; 2216 struct wake_lock_stats stats; 2217 struct wlan_objmgr_vdev *vdev; 2218 uint32_t rate_flag; 2219 QDF_STATUS status; 2220 2221 wma = cds_get_context(QDF_MODULE_ID_WMA); 2222 if (!wma) 2223 return; 2224 2225 wma_debug("size of buffer: %d", *size); 2226 2227 for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) { 2228 iface = &wma->interfaces[vdev_id]; 2229 vdev = iface->vdev; 2230 if (!vdev) 2231 continue; 2232 2233 status = wma_get_vdev_rate_flag(iface->vdev, &rate_flag); 2234 if (QDF_IS_STATUS_ERROR(status)) 2235 continue; 2236 2237 vdev = wlan_objmgr_get_vdev_by_id_from_psoc(wma->psoc, 2238 vdev_id, WLAN_LEGACY_WMA_ID); 2239 if (!vdev) 2240 continue; 2241 ucfg_mc_cp_stats_get_vdev_wake_lock_stats(vdev, &stats); 2242 len += qdf_scnprintf(buf + len, *size - len, 2243 "\n" 2244 "vdev_id %d\n" 2245 "WoW Stats\n" 2246 "\tpno_match %u\n" 2247 "\tpno_complete %u\n" 2248 "\tgscan %u\n" 2249 "\tlow_rssi %u\n" 2250 "\trssi_breach %u\n" 2251 "\tucast %u\n" 2252 "\tbcast %u\n" 2253 "\ticmpv4 %u\n" 2254 "\ticmpv6 %u\n" 2255 "\tipv4_mcast %u\n" 2256 "\tipv6_mcast %u\n" 2257 "\tipv6_mcast_ra %u\n" 2258 "\tipv6_mcast_ns %u\n" 2259 "\tipv6_mcast_na %u\n" 2260 "\toem_response %u\n" 2261 "\tuc_drop %u\n" 2262 "\tfatal_event %u\n" 2263 "dtimPeriod %d\n" 2264 "chan_width %d\n" 2265 "vdev_active %d\n" 2266 "vdev_up %d\n" 2267 "aid %d\n" 2268 "rate_flags %d\n" 2269 "nss %d\n" 2270 "nwType %d\n" 2271 "tx_streams %d", 2272 vdev_id, 2273 stats.pno_match_wake_up_count, 2274 stats.pno_complete_wake_up_count, 2275 stats.gscan_wake_up_count, 2276 stats.low_rssi_wake_up_count, 2277 stats.rssi_breach_wake_up_count, 2278 stats.ucast_wake_up_count, 2279 stats.bcast_wake_up_count, 2280 stats.icmpv4_count, 2281 stats.icmpv6_count, 2282 stats.ipv4_mcast_wake_up_count, 2283 stats.ipv6_mcast_wake_up_count, 2284 stats.ipv6_mcast_ra_stats, 2285 stats.ipv6_mcast_ns_stats, 2286 stats.ipv6_mcast_na_stats, 2287 
stats.oem_response_wake_up_count, 2288 stats.uc_drop_wake_up_count, 2289 stats.fatal_event_wake_up_count, 2290 iface->dtimPeriod, 2291 iface->chan_width, 2292 iface->vdev_active, 2293 wma_is_vdev_up(vdev_id), 2294 iface->aid, 2295 rate_flag, 2296 iface->nss, 2297 iface->nwType, 2298 iface->tx_streams); 2299 wlan_objmgr_vdev_release_ref(vdev, WLAN_LEGACY_WMA_ID); 2300 } 2301 2302 *size -= len; 2303 *buf_ptr += len; 2304 } 2305 2306 /** 2307 * wma_register_debug_callback() - registration function for wma layer 2308 * to print wma state information 2309 */ 2310 static void wma_register_debug_callback(void) 2311 { 2312 qdf_register_debug_callback(QDF_MODULE_ID_WMA, &wma_state_info_dump); 2313 } 2314 #else /* WLAN_FEATURE_MEMDUMP_ENABLE */ 2315 static void wma_register_debug_callback(void) 2316 { 2317 } 2318 #endif /* WLAN_FEATURE_MEMDUMP_ENABLE */ 2319 /** 2320 * wma_register_tx_ops_handler() - register tx_ops of southbound 2321 * @tx_ops: tx_ops pointer in southbound 2322 * 2323 * Return: 0 on success, errno on failure 2324 */ 2325 static QDF_STATUS 2326 wma_register_tx_ops_handler(struct wlan_lmac_if_tx_ops *tx_ops) 2327 { 2328 /* 2329 * Assign tx_ops, it's up to UMAC modules to declare and define these 2330 * functions which are used to send wmi command to target. 2331 */ 2332 2333 if (!tx_ops) { 2334 wma_err("pointer to lmac if tx ops is NULL"); 2335 return QDF_STATUS_E_INVAL; 2336 } 2337 2338 /* mgmt_txrx component's tx ops */ 2339 tx_ops->mgmt_txrx_tx_ops.mgmt_tx_send = wma_mgmt_unified_cmd_send; 2340 2341 /* mgmt txrx component nbuf op for nbuf dma unmap */ 2342 tx_ops->mgmt_txrx_tx_ops.tx_drain_nbuf_op = wma_mgmt_nbuf_unmap_cb; 2343 2344 return QDF_STATUS_SUCCESS; 2345 } 2346 2347 /** 2348 * wma_target_if_open() - Attach UMAC modules' interface with wmi layer 2349 * @wma_handle: wma handle 2350 * 2351 * Separate module defines below functions: 2352 * 1. 
 * tgt_wmi_<module>_<action> api sends wmi command, assigned to south bound
 *    tx_ops function pointers;
 * 2. module's south dispatcher handles information from lower layer, assigned
 *    to south bound rx_ops function pointers;
 * 3. wmi event handler deals with wmi event, extracts umac needed information,
 *    and call rx_ops(module's dispatcher). It executes in tasklet context and
 *    is up to dispatcher to decide the context to reside in tasklet or in
 *    thread context.
 *
 * Return: None
 */
static void wma_target_if_open(tp_wma_handle wma_handle)
{
	struct wlan_objmgr_psoc *psoc = wma_handle->psoc;

	/* nothing to attach without an objmgr psoc */
	if (!psoc)
		return;

	wlan_global_lmac_if_set_txops_registration_cb(WLAN_DEV_OL,
					target_if_register_tx_ops);
	wlan_lmac_if_set_umac_txops_registration_cb(
		wma_register_tx_ops_handler);
	wlan_global_lmac_if_open(psoc);

}

/**
 * wma_legacy_service_ready_event_handler() - legacy (ext)service ready handler
 * @event_id: event_id
 * @handle: wma handle
 * @event_data: event data
 * @length: event length
 *
 * Dispatches the four FW bring-up events (service ready, service ready
 * ext/ext2, ready) to the corresponding legacy WMA handlers.
 *
 * Return: 0 for success, negative error code for failure
 */
static int wma_legacy_service_ready_event_handler(uint32_t event_id,
						  void *handle,
						  uint8_t *event_data,
						  uint32_t length)
{
	switch (event_id) {
	case wmi_service_ready_event_id:
		return wma_rx_service_ready_event(handle, event_data, length);
	case wmi_service_ready_ext_event_id:
		return wma_rx_service_ready_ext_event(handle, event_data,
						      length);
	case wmi_ready_event_id:
		return wma_rx_ready_event(handle, event_data, length);
	case wmi_service_ready_ext2_event_id:
		return wma_rx_service_ready_ext2_event(handle, event_data,
						       length);
	default:
		/* registration bug if we get here: assert in debug builds */
		wma_err("Legacy callback invoked with invalid event_id:%d",
			event_id);
		QDF_BUG(0);
	}

	return 0;
}

#ifdef WLAN_FEATURE_CAL_FAILURE_TRIGGER
/**
 * wma_process_cal_fail_info() - Process cal failure event and
 *                               send it to userspace
 * @wmi_event: Cal failure event data
 */
static void wma_process_cal_fail_info(uint8_t *wmi_event)
{
	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
	uint8_t *buf_ptr;
	wmi_debug_mesg_fw_cal_failure_param *cal_failure_event;

	if (!mac) {
		wma_err("Invalid mac context");
		return;
	}

	if (!mac->cal_failure_event_cb) {
		wma_err("Callback not registered for cal failure event");
		return;
	}

	/*
	 * NOTE(review): this assumes a fixed TLV layout -
	 * fixed_param, TLV hdr, data_stall param, TLV hdr, cal failure
	 * param - with no length validation against the event buffer;
	 * confirm the WMI event definition guarantees this layout.
	 */
	buf_ptr = wmi_event;
	buf_ptr = buf_ptr + sizeof(wmi_debug_mesg_flush_complete_fixed_param) +
		  WMI_TLV_HDR_SIZE +
		  sizeof(wmi_debug_mesg_fw_data_stall_param) + WMI_TLV_HDR_SIZE;

	cal_failure_event = (wmi_debug_mesg_fw_cal_failure_param *)buf_ptr;

	if (((cal_failure_event->tlv_header & 0xFFFF0000) >> 16 ==
			WMITLV_TAG_STRUC_wmi_debug_mesg_fw_cal_failure_param)) {
		/*
		 * Log calibration failure information received from FW
		 */
		wma_debug("Calibration failure event:");
		wma_debug("calType: %x calFailureReasonCode: %x",
			  cal_failure_event->cal_type,
			  cal_failure_event->cal_failure_reason_code);
		mac->cal_failure_event_cb(
				cal_failure_event->cal_type,
				cal_failure_event->cal_failure_reason_code);
	} else {
		wma_err("Invalid TLV header in cal failure event");
	}
}
#else
static inline void wma_process_cal_fail_info(uint8_t *wmi_event)
{
}
#endif

/**
 * wma_flush_complete_evt_handler() - FW log flush complete event handler
 * @handle: WMI handle
 * @event: Event received from FW
 * @len: Length of the event
 *
 * Return: 0 on success, error value otherwise
 */
static int wma_flush_complete_evt_handler(void *handle,
					  u_int8_t *event,
					  u_int32_t len)
{
	QDF_STATUS status;
	tp_wma_handle wma = (tp_wma_handle) handle;
WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID_param_tlvs *param_buf; 2478 wmi_debug_mesg_flush_complete_fixed_param *wmi_event; 2479 wmi_debug_mesg_fw_data_stall_param *data_stall_event; 2480 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 2481 uint8_t *buf_ptr; 2482 uint32_t reason_code; 2483 2484 param_buf = (WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID_param_tlvs *) event; 2485 if (!param_buf) { 2486 wma_err("Invalid log flush complete event buffer"); 2487 return QDF_STATUS_E_FAILURE; 2488 } 2489 2490 wmi_event = param_buf->fixed_param; 2491 reason_code = wmi_event->reserved0; 2492 wma_debug("Received reason code %d from FW", reason_code); 2493 2494 if (reason_code == WMI_DIAG_TRIGGER_DATA_STALL) { 2495 buf_ptr = (uint8_t *)wmi_event; 2496 buf_ptr = buf_ptr + 2497 sizeof(wmi_debug_mesg_flush_complete_fixed_param) + 2498 WMI_TLV_HDR_SIZE; 2499 data_stall_event = 2500 (wmi_debug_mesg_fw_data_stall_param *)buf_ptr; 2501 } 2502 2503 if (reason_code == WMI_DIAG_TRIGGER_DATA_STALL && 2504 ((data_stall_event->tlv_header & 0xFFFF0000) >> 16 == 2505 WMITLV_TAG_STRUC_wmi_debug_mesg_fw_data_stall_param)) { 2506 /** 2507 * Log data stall info received from FW: 2508 * 2509 * Possible data stall recovery types: 2510 * WLAN_DBG_DATA_STALL_RECOVERY_CONNECT_DISCONNECT 2511 * WLAN_DBG_DATA_STALL_RECOVERY_CONNECT_MAC_PHY_RESET 2512 * WLAN_DBG_DATA_STALL_RECOVERY_CONNECT_PDR 2513 * 2514 * Possible data stall event types: 2515 * WLAN_DBG_DATA_STALL_VDEV_PAUSE 2516 * WLAN_DBG_DATA_STALL_HWSCHED_CMD_FILTER 2517 * WLAN_DBG_DATA_STALL_HWSCHED_CMD_FLUSH 2518 * WLAN_DBG_DATA_STALL_RX_REFILL_FAILED 2519 * WLAN_DBG_DATA_STALL_RX_FCS_LEN_ERROR 2520 * 2521 * reason_code1: 2522 * The information stored in reason_code1 varies based on the 2523 * data stall type values: 2524 * 2525 * data_stall_type | reason_code1 2526 * ----------------------------------------------------- 2527 * HWSCHED_CMD_FLUSH | flush req reason (0-40) 2528 * RX_REFILL_FAILED | ring_id (0-7) 2529 * RX_FCS_LEN_ERROR | exact error type 2530 
* 2531 * reasone_code2: 2532 * on which tid/hwq stall happened 2533 * 2534 */ 2535 QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_DEBUG, 2536 "Data Stall event:"); 2537 QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_DEBUG, 2538 "data_stall_type: %x vdev_id_bitmap: %x reason_code1: %x reason_code2: %x recovery_type: %x ", 2539 data_stall_event->data_stall_type, 2540 data_stall_event->vdev_id_bitmap, 2541 data_stall_event->reason_code1, 2542 data_stall_event->reason_code2, 2543 data_stall_event->recovery_type); 2544 2545 cdp_post_data_stall_event(soc, 2546 DATA_STALL_LOG_INDICATOR_FIRMWARE, 2547 data_stall_event->data_stall_type, 2548 OL_TXRX_PDEV_ID, 2549 data_stall_event->vdev_id_bitmap, 2550 data_stall_event->recovery_type); 2551 } 2552 2553 if (reason_code == WMI_DIAG_TRIGGER_CAL_FAILURE) { 2554 wma_process_cal_fail_info((uint8_t *)wmi_event); 2555 return QDF_STATUS_SUCCESS; 2556 } 2557 2558 /* 2559 * reason_code = 0; Flush event in response to flush command 2560 * reason_code = other value; Asynchronous flush event for fatal events 2561 */ 2562 if (!reason_code && (cds_is_log_report_in_progress() == false)) { 2563 wma_debug("Received WMI flush event without sending CMD"); 2564 return -EINVAL; 2565 } else if (!reason_code && cds_is_log_report_in_progress() == true) { 2566 /* Flush event in response to flush command */ 2567 wma_debug("Received WMI flush event in response to flush CMD"); 2568 status = qdf_mc_timer_stop(&wma->log_completion_timer); 2569 if (status != QDF_STATUS_SUCCESS) 2570 wma_err("Failed to stop the log completion timeout"); 2571 cds_logging_set_fw_flush_complete(); 2572 return QDF_STATUS_SUCCESS; 2573 } else if (reason_code && cds_is_log_report_in_progress() == false) { 2574 /* Asynchronous flush event for fatal events */ 2575 status = cds_set_log_completion(WLAN_LOG_TYPE_FATAL, 2576 WLAN_LOG_INDICATOR_FIRMWARE, 2577 reason_code, false); 2578 if (QDF_STATUS_SUCCESS != status) { 2579 wma_err("Failed to set log trigger params"); 2580 return 
QDF_STATUS_E_FAILURE; 2581 } 2582 cds_logging_set_fw_flush_complete(); 2583 return status; 2584 } else { 2585 /* Asynchronous flush event for fatal event, 2586 * but, report in progress already 2587 */ 2588 wma_debug("Bug report already in progress - dropping! type:%d, indicator=%d reason_code=%d", 2589 WLAN_LOG_TYPE_FATAL, 2590 WLAN_LOG_INDICATOR_FIRMWARE, reason_code); 2591 return QDF_STATUS_E_FAILURE; 2592 } 2593 /* Asynchronous flush event for fatal event, 2594 * but, report in progress already 2595 */ 2596 wma_warn("Bug report already in progress - dropping! type:%d, indicator=%d reason_code=%d", 2597 WLAN_LOG_TYPE_FATAL, 2598 WLAN_LOG_INDICATOR_FIRMWARE, reason_code); 2599 return QDF_STATUS_E_FAILURE; 2600 } 2601 2602 #ifdef WLAN_CONV_SPECTRAL_ENABLE 2603 /** 2604 * wma_extract_single_phyerr_spectral() - extract single phy error from event 2605 * @handle: wma handle 2606 * @param evt_buf: pointer to event buffer 2607 * @param datalen: data length of event buffer 2608 * @param buf_offset: Pointer to hold value of current event buffer offset 2609 * post extraction 2610 * @param phyerr: Pointer to hold phyerr 2611 * 2612 * Return: QDF_STATUS 2613 */ 2614 static QDF_STATUS wma_extract_single_phyerr_spectral(void *handle, 2615 void *evt_buf, 2616 uint16_t datalen, uint16_t *buf_offset, 2617 wmi_host_phyerr_t *phyerr) 2618 { 2619 wmi_single_phyerr_rx_event *ev; 2620 int n = *buf_offset; 2621 2622 ev = (wmi_single_phyerr_rx_event *)((uint8_t *)evt_buf + n); 2623 2624 if (n < datalen) { 2625 /* ensure there's at least space for the header */ 2626 if ((datalen - n) < sizeof(ev->hdr)) { 2627 wma_err("not enough space? (datalen=%d, n=%d, hdr=%zu bytes", 2628 datalen, n, sizeof(ev->hdr)); 2629 return QDF_STATUS_E_FAILURE; 2630 } 2631 2632 phyerr->bufp = ev->bufp; 2633 phyerr->buf_len = ev->hdr.buf_len; 2634 2635 /* 2636 * Sanity check the buffer length of the event against 2637 * what we currently have. 
2638 * 2639 * Since buf_len is 32 bits, we check if it overflows 2640 * a large 32 bit value. It's not 0x7fffffff because 2641 * we increase n by (buf_len + sizeof(hdr)), which would 2642 * in itself cause n to overflow. 2643 * 2644 * If "int" is 64 bits then this becomes a moot point. 2645 */ 2646 if (ev->hdr.buf_len > 0x7f000000) { 2647 wma_err("buf_len is garbage? (0x%x)", ev->hdr.buf_len); 2648 return QDF_STATUS_E_FAILURE; 2649 } 2650 if (n + ev->hdr.buf_len > datalen) { 2651 wma_err("buf_len exceeds available space n=%d, buf_len=%d, datalen=%d", 2652 n, ev->hdr.buf_len, datalen); 2653 return QDF_STATUS_E_FAILURE; 2654 } 2655 2656 phyerr->phy_err_code = WMI_UNIFIED_PHYERRCODE_GET(&ev->hdr); 2657 phyerr->tsf_timestamp = ev->hdr.tsf_timestamp; 2658 2659 #ifdef DEBUG_SPECTRAL_SCAN 2660 wma_debug("len=%d, tsf=0x%08x, rssi = 0x%x/0x%x/0x%x/0x%x, comb rssi = 0x%x, phycode=%d", 2661 ev->hdr.buf_len, 2662 ev->hdr.tsf_timestamp, 2663 ev->hdr.rssi_chain0, 2664 ev->hdr.rssi_chain1, 2665 ev->hdr.rssi_chain2, 2666 ev->hdr.rssi_chain3, 2667 WMI_UNIFIED_RSSI_COMB_GET(&ev->hdr), 2668 phyerr->phy_err_code); 2669 2670 /* 2671 * For now, unroll this loop - the chain 'value' field isn't 2672 * a variable but glued together into a macro field definition. 2673 * Grr. 
:-) 2674 */ 2675 wma_debug("chain 0: raw=0x%08x; pri20=%d sec20=%d sec40=%d sec80=%d", 2676 ev->hdr.rssi_chain0, 2677 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, PRI20), 2678 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC20), 2679 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC40), 2680 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC80)); 2681 2682 wma_debug("chain 1: raw=0x%08x: pri20=%d sec20=%d sec40=%d sec80=%d", 2683 ev->hdr.rssi_chain1, 2684 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, PRI20), 2685 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC20), 2686 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC40), 2687 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC80)); 2688 2689 wma_debug("chain 2: raw=0x%08x: pri20=%d sec20=%d sec40=%d sec80=%d", 2690 ev->hdr.rssi_chain2, 2691 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, PRI20), 2692 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC20), 2693 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC40), 2694 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC80)); 2695 2696 wma_debug("chain 3: raw=0x%08x: pri20=%d sec20=%d sec40=%d sec80=%d", 2697 ev->hdr.rssi_chain3, 2698 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, PRI20), 2699 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC20), 2700 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC40), 2701 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC80)); 2702 2703 2704 wma_debug("freq_info_1=0x%08x, freq_info_2=0x%08x", 2705 ev->hdr.freq_info_1, ev->hdr.freq_info_2); 2706 2707 /* 2708 * The NF chain values are signed and are negative - hence 2709 * the cast evilness. 
2710 */ 2711 wma_debug("nfval[1]=0x%08x, nfval[2]=0x%08x, nf=%d/%d/%d/%d, freq1=%d, freq2=%d, cw=%d", 2712 ev->hdr.nf_list_1, 2713 ev->hdr.nf_list_2, 2714 (int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 0), 2715 (int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 1), 2716 (int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 2), 2717 (int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 3), 2718 WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 1), 2719 WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 2), 2720 WMI_UNIFIED_CHWIDTH_GET(&ev->hdr)); 2721 #endif 2722 2723 /* 2724 * If required, pass spectral events to the spectral module 2725 */ 2726 if (ev->hdr.buf_len > 0) { 2727 2728 /* Initialize the NF values to Zero. */ 2729 phyerr->rf_info.noise_floor[0] = 2730 WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 0); 2731 phyerr->rf_info.noise_floor[1] = 2732 WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 1); 2733 phyerr->rf_info.noise_floor[2] = 2734 WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 2); 2735 phyerr->rf_info.noise_floor[3] = 2736 WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 3); 2737 2738 /* populate the rf info */ 2739 phyerr->rf_info.rssi_comb = 2740 WMI_UNIFIED_RSSI_COMB_GET(&ev->hdr); 2741 2742 /* Need to unroll loop due to macro 2743 * constraints chain 0 2744 */ 2745 phyerr->rf_info.pc_rssi_info[0].rssi_pri20 = 2746 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, PRI20); 2747 phyerr->rf_info.pc_rssi_info[0].rssi_sec20 = 2748 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC20); 2749 phyerr->rf_info.pc_rssi_info[0].rssi_sec40 = 2750 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC40); 2751 phyerr->rf_info.pc_rssi_info[0].rssi_sec80 = 2752 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC80); 2753 2754 /* chain 1 */ 2755 phyerr->rf_info.pc_rssi_info[1].rssi_pri20 = 2756 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, PRI20); 2757 phyerr->rf_info.pc_rssi_info[1].rssi_sec20 = 2758 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC20); 2759 phyerr->rf_info.pc_rssi_info[1].rssi_sec40 = 2760 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC40); 2761 phyerr->rf_info.pc_rssi_info[1].rssi_sec80 = 2762 
WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC80); 2763 2764 /* chain 2 */ 2765 phyerr->rf_info.pc_rssi_info[2].rssi_pri20 = 2766 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, PRI20); 2767 phyerr->rf_info.pc_rssi_info[2].rssi_sec20 = 2768 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC20); 2769 phyerr->rf_info.pc_rssi_info[2].rssi_sec40 = 2770 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC40); 2771 phyerr->rf_info.pc_rssi_info[2].rssi_sec80 = 2772 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC80); 2773 2774 /* chain 3 */ 2775 phyerr->rf_info.pc_rssi_info[3].rssi_pri20 = 2776 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, PRI20); 2777 phyerr->rf_info.pc_rssi_info[3].rssi_sec20 = 2778 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC20); 2779 phyerr->rf_info.pc_rssi_info[3].rssi_sec40 = 2780 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC40); 2781 phyerr->rf_info.pc_rssi_info[3].rssi_sec80 = 2782 WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC80); 2783 2784 phyerr->chan_info.center_freq1 = 2785 WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 1); 2786 phyerr->chan_info.center_freq2 = 2787 WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 2); 2788 2789 } 2790 2791 /* 2792 * Advance the buffer pointer to the next PHY error. 2793 * buflen is the length of this payload, so we need to 2794 * advance past the current header _AND_ the payload. 
2795 */ 2796 n += sizeof(*ev) + ev->hdr.buf_len; 2797 } 2798 *buf_offset += n; 2799 2800 return QDF_STATUS_SUCCESS; 2801 } 2802 2803 /** 2804 * spectral_phyerr_event_handler() - spectral phyerr event handler 2805 * @handle: wma handle 2806 * @data: data buffer 2807 * @datalen: buffer length 2808 * 2809 * Return: QDF_STATUS 2810 */ 2811 static QDF_STATUS spectral_phyerr_event_handler(void *handle, 2812 uint8_t *data, 2813 uint32_t datalen) 2814 { 2815 tp_wma_handle wma = (tp_wma_handle) handle; 2816 QDF_STATUS status = QDF_STATUS_SUCCESS; 2817 uint16_t buf_offset, event_buf_len = 0; 2818 wmi_single_phyerr_rx_event *ev; 2819 wmi_host_phyerr_t phyerr; 2820 struct target_if_spectral_rfqual_info rfqual_info; 2821 struct target_if_spectral_chan_info chan_info; 2822 struct target_if_spectral_acs_stats acs_stats; 2823 2824 if (wma_validate_handle(wma)) 2825 return QDF_STATUS_E_FAILURE; 2826 2827 memset(&phyerr, 0, sizeof(wmi_host_phyerr_t)); 2828 status = wmi_extract_comb_phyerr(wma->wmi_handle, data, datalen, 2829 &buf_offset, &phyerr); 2830 if (QDF_IS_STATUS_ERROR(status)) { 2831 wma_err("extract comb phyerr failed"); 2832 return QDF_STATUS_E_FAILURE; 2833 } 2834 2835 ev = (wmi_single_phyerr_rx_event *)phyerr.bufp; 2836 event_buf_len = phyerr.buf_len; 2837 /* Loop over the bufp, extracting out phyerrors */ 2838 buf_offset = 0; 2839 while (buf_offset < event_buf_len) { 2840 if (wma_extract_single_phyerr_spectral(handle, ev, 2841 event_buf_len, &buf_offset, &phyerr)) { 2842 wma_err("extract single phy err failed"); 2843 return QDF_STATUS_E_FAILURE; 2844 } 2845 2846 if (phyerr.buf_len > 0) { 2847 if (sizeof(phyerr.rf_info) > sizeof(rfqual_info)) 2848 qdf_mem_copy(&rfqual_info, &phyerr.rf_info, 2849 sizeof(rfqual_info)); 2850 else 2851 qdf_mem_copy(&rfqual_info, &phyerr.rf_info, 2852 sizeof(phyerr.rf_info)); 2853 2854 if (sizeof(phyerr.chan_info) > sizeof(chan_info)) 2855 qdf_mem_copy(&chan_info, &phyerr.chan_info, 2856 sizeof(chan_info)); 2857 else 2858 
qdf_mem_copy(&chan_info, &phyerr.chan_info, 2859 sizeof(phyerr.chan_info)); 2860 2861 target_if_spectral_process_phyerr(wma->pdev, phyerr.bufp, 2862 phyerr.buf_len, 2863 &rfqual_info, 2864 &chan_info, 2865 phyerr.tsf64, 2866 &acs_stats); 2867 } 2868 } 2869 2870 return status; 2871 } 2872 #else 2873 static QDF_STATUS 2874 wma_extract_single_phyerr_spectral(void *handle, void *evt_buf, 2875 uint16_t datalen, 2876 uint16_t *buf_offset, 2877 wmi_host_phyerr_t *phyerr) 2878 { 2879 return QDF_STATUS_SUCCESS; 2880 } 2881 2882 static QDF_STATUS spectral_phyerr_event_handler(void *handle, 2883 uint8_t *data, uint32_t datalen) 2884 { 2885 return QDF_STATUS_SUCCESS; 2886 } 2887 #endif 2888 2889 /** 2890 * dfs_phyerr_event_handler() - dfs phyerr event handler 2891 * @handle: wma handle 2892 * @data: data buffer 2893 * @datalen: buffer length 2894 * @fulltsf: 64 bit event TSF 2895 * 2896 * Function to process DFS phy errors. 2897 * 2898 * Return: QDF_STATUS 2899 */ 2900 static QDF_STATUS dfs_phyerr_event_handler(tp_wma_handle handle, 2901 uint8_t *data, 2902 uint32_t datalen, 2903 uint64_t fulltsf) 2904 { 2905 QDF_STATUS status = QDF_STATUS_SUCCESS; 2906 struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; 2907 wmi_host_phyerr_t phyerr; 2908 int8_t rssi_comb; 2909 uint16_t buf_offset; 2910 2911 if (!handle->psoc) { 2912 wma_err("psoc is null"); 2913 return QDF_STATUS_E_INVAL; 2914 } 2915 2916 dfs_rx_ops = wlan_lmac_if_get_dfs_rx_ops(handle->psoc); 2917 if (!dfs_rx_ops) { 2918 wma_err("dfs_rx_ops is null"); 2919 return QDF_STATUS_E_INVAL; 2920 } 2921 2922 if (!dfs_rx_ops->dfs_process_phyerr) { 2923 wma_err("dfs_process_phyerr handler is null"); 2924 return QDF_STATUS_E_INVAL; 2925 } 2926 2927 if (!handle->pdev) { 2928 wma_err("pdev is null"); 2929 return -EINVAL; 2930 } 2931 2932 buf_offset = 0; 2933 while (buf_offset < datalen) { 2934 status = wmi_extract_single_phyerr(handle->wmi_handle, data, datalen, 2935 &buf_offset, &phyerr); 2936 if (QDF_IS_STATUS_ERROR(status)) { 2937 /* 
wmi_extract_single_phyerr has logs */ 2938 return status; 2939 } 2940 2941 rssi_comb = phyerr.rf_info.rssi_comb & 0xFF; 2942 if (phyerr.buf_len > 0) 2943 dfs_rx_ops->dfs_process_phyerr(handle->pdev, 2944 &phyerr.bufp[0], 2945 phyerr.buf_len, 2946 rssi_comb, 2947 rssi_comb, 2948 phyerr.tsf_timestamp, 2949 fulltsf); 2950 } 2951 2952 return QDF_STATUS_SUCCESS; 2953 } 2954 2955 /** 2956 * wma_unified_phyerr_rx_event_handler() - phyerr event handler 2957 * @handle: wma handle 2958 * @data: data buffer 2959 * @datalen: buffer length 2960 * 2961 * WMI Handler for WMI_PHYERR_EVENTID event from firmware. 2962 * This handler is currently handling DFS and spectral scan 2963 * phy errors. 2964 * 2965 * Return: 0 for success, other value for failure 2966 */ 2967 static int wma_unified_phyerr_rx_event_handler(void *handle, 2968 uint8_t *data, 2969 uint32_t datalen) 2970 { 2971 /* phyerr handling is moved to cmn project 2972 * As WIN still uses handler registration in non-cmn code. 2973 * need complete testing of non offloaded DFS code before we enable 2974 * it in cmn code. 
2975 **/ 2976 tp_wma_handle wma = (tp_wma_handle) handle; 2977 QDF_STATUS status = QDF_STATUS_SUCCESS; 2978 wmi_host_phyerr_t phyerr; 2979 uint16_t buf_offset = 0; 2980 wmi_single_phyerr_rx_event *ev; 2981 uint16_t event_buf_len = 0; 2982 wmi_host_phyerr_t phyerr2; 2983 bool spectralscan = false; 2984 2985 if (wma_validate_handle(wma)) 2986 return -EINVAL; 2987 2988 /* sanity check on data length */ 2989 status = wmi_extract_comb_phyerr(wma->wmi_handle, data, datalen, 2990 &buf_offset, &phyerr); 2991 if (QDF_IS_STATUS_ERROR(status)) { 2992 wma_err("extract phyerr failed: %d", status); 2993 return qdf_status_to_os_return(status); 2994 } 2995 ev = (wmi_single_phyerr_rx_event *)phyerr.bufp; 2996 event_buf_len = phyerr.buf_len; 2997 /* Loop over the bufp, extracting out phyerrors */ 2998 buf_offset = 0; 2999 while (ev && (buf_offset < event_buf_len)) { 3000 if (wma_extract_single_phyerr_spectral(handle, ev, 3001 event_buf_len, 3002 &buf_offset, 3003 &phyerr2)) { 3004 wma_err("extract single phy err failed"); 3005 return qdf_status_to_os_return(QDF_STATUS_E_FAILURE); 3006 } 3007 if ((buf_offset != 0) && (phyerr2.phy_err_code == 0x26 || 3008 phyerr2.phy_err_code == 0x24)) { 3009 spectralscan = true; 3010 } else { 3011 break; 3012 } 3013 } 3014 if (spectralscan) { 3015 status = spectral_phyerr_event_handler(wma, data, datalen); 3016 return qdf_status_to_os_return(status); 3017 } 3018 /* handle different PHY Error conditions */ 3019 if (((phyerr.phy_err_mask0 & (WMI_PHY_ERROR_MASK0_RADAR | 3020 WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT | 3021 WMI_PHY_ERROR_MASK0_SPECTRAL_SCAN)) == 0)) { 3022 wma_debug("Unknown phy error event"); 3023 return -EINVAL; 3024 } 3025 3026 /* Handle Spectral or DFS PHY Error */ 3027 if (phyerr.phy_err_mask0 & (WMI_PHY_ERROR_MASK0_RADAR | 3028 WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT)) { 3029 if (wma->is_dfs_offloaded) { 3030 wma_debug("Unexpected phy error, dfs offloaded"); 3031 return -EINVAL; 3032 } 3033 status = dfs_phyerr_event_handler(wma, 3034 
				    phyerr.bufp, phyerr.buf_len,
				    phyerr.tsf64);
	} else if (phyerr.phy_err_mask0 & (WMI_PHY_ERROR_MASK0_SPECTRAL_SCAN |
		   WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT)) {
		/* Spectral-scan / false-radar PHY errors are routed to the
		 * spectral module rather than the DFS path.
		 */
		status = spectral_phyerr_event_handler(wma, data, datalen);
	}

	return qdf_status_to_os_return(status);
}

/**
 * wma_vdev_init() - initialize a wma per-vdev tracking node
 * @vdev: wma vdev node to initialize
 *
 * Only clears the key-wait flag; the caller is expected to have
 * zero-initialized the node's remaining members.
 *
 * Return: None
 */
void wma_vdev_init(struct wma_txrx_node *vdev)
{
	vdev->is_waiting_for_key = false;
}

/**
 * wma_vdev_deinit() - release all per-vdev state owned by a wma node
 * @vdev: wma vdev node to tear down
 *
 * Frees the cached beacon buffer (unmapping its DMA mapping first if it
 * was mapped), all pending request buffers, and the buffered roam synch
 * frames, NULLing each pointer after free so the node can be reused.
 *
 * Return: None
 */
void wma_vdev_deinit(struct wma_txrx_node *vdev)
{
	struct beacon_info *bcn;
	tp_wma_handle wma_handle;

	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma_handle)
		return;

	bcn = vdev->beacon;
	if (bcn) {
		/* Unmap before freeing the nbuf if a DMA mapping exists */
		if (bcn->dma_mapped)
			qdf_nbuf_unmap_single(wma_handle->qdf_dev,
					      bcn->buf, QDF_DMA_TO_DEVICE);
		qdf_nbuf_free(bcn->buf);
		qdf_mem_free(bcn);
		vdev->beacon = NULL;
	}

	if (vdev->vdev_active == true)
		vdev->vdev_active = false;

	if (vdev->addBssStaContext) {
		qdf_mem_free(vdev->addBssStaContext);
		vdev->addBssStaContext = NULL;
	}

	if (vdev->psnr_req) {
		qdf_mem_free(vdev->psnr_req);
		vdev->psnr_req = NULL;
	}

	if (vdev->rcpi_req) {
		qdf_mem_free(vdev->rcpi_req);
		vdev->rcpi_req = NULL;
	}

	if (vdev->roam_scan_stats_req) {
		struct sir_roam_scan_stats *req;

		/* Clear the pointer before freeing so no stale reference
		 * remains visible while the free runs.
		 */
		req = vdev->roam_scan_stats_req;
		vdev->roam_scan_stats_req = NULL;
		qdf_mem_free(req);
	}

	if (vdev->roam_synch_frame_ind.bcn_probe_rsp) {
		qdf_mem_free(vdev->roam_synch_frame_ind.bcn_probe_rsp);
		vdev->roam_synch_frame_ind.bcn_probe_rsp = NULL;
	}

	if (vdev->roam_synch_frame_ind.reassoc_req) {
		qdf_mem_free(vdev->roam_synch_frame_ind.reassoc_req);
		vdev->roam_synch_frame_ind.reassoc_req = NULL;
	}

	if (vdev->roam_synch_frame_ind.reassoc_rsp) {
		qdf_mem_free(vdev->roam_synch_frame_ind.reassoc_rsp);
		vdev->roam_synch_frame_ind.reassoc_rsp = NULL;
	}

	if (vdev->plink_status_req) {
		qdf_mem_free(vdev->plink_status_req);
		vdev->plink_status_req = NULL;
	}

	vdev->is_waiting_for_key = false;
}

/**
 * wma_wmi_stop() - generic function to block WMI commands
 *
 * Return: None
 */
void wma_wmi_stop(void)
{
	tp_wma_handle wma_handle;

	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma_handle)
		return;

	if (wmi_validate_handle(wma_handle->wmi_handle))
		return;

	wmi_stop(wma_handle->wmi_handle);
}

#ifdef WLAN_WMI_BCN
/**
 * wma_register_swba_events() - register the SWBA (beacon) event handler
 * @wmi_handle: wmi handle
 *
 * Return: QDF_STATUS from the event-handler registration
 */
static QDF_STATUS
wma_register_swba_events(wmi_unified_t wmi_handle)
{
	QDF_STATUS status;

	status = wmi_unified_register_event_handler(wmi_handle,
						    wmi_host_swba_event_id,
						    wma_beacon_swba_handler,
						    WMA_RX_SERIALIZER_CTX);

	return status;
}
#else
/* Stub when host-managed beaconing (WLAN_WMI_BCN) is not compiled in */
static QDF_STATUS wma_register_swba_events(wmi_unified_t wmi_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef FEATURE_WLAN_APF
/**
 * wma_register_apf_events() - register APF capability/work-memory handlers
 * @wma_handle: wma handle
 *
 * Return: None
 */
static void wma_register_apf_events(tp_wma_handle wma_handle)
{
	if (wma_validate_handle(wma_handle))
		return;

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_apf_capability_info_event_id,
					   wma_get_apf_caps_event_handler,
					   WMA_RX_SERIALIZER_CTX);
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
				wmi_apf_get_vdev_work_memory_resp_event_id,
				wma_apf_read_work_memory_event_handler,
				WMA_RX_SERIALIZER_CTX);
}
#else /* FEATURE_WLAN_APF */
static void wma_register_apf_events(tp_wma_handle wma_handle)
{
}
#endif /* FEATURE_WLAN_APF */

#ifdef WLAN_FEATURE_MOTION_DETECTION
/**
 * wma_register_md_events - Register motion detection event handlers
 * @wma_handle: wma handle
 * Return: None
 */
static void wma_register_md_events(tp_wma_handle wma_handle)
{
	if (wma_validate_handle(wma_handle))
		return;

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_motion_det_host_eventid,
					   wma_motion_det_host_event_handler,
					   WMA_RX_SERIALIZER_CTX);

	wmi_unified_register_event_handler(
			wma_handle->wmi_handle,
			wmi_motion_det_base_line_host_eventid,
			wma_motion_det_base_line_host_event_handler,
			WMA_RX_SERIALIZER_CTX);
}
#else /* WLAN_FEATURE_MOTION_DETECTION */
/**
 * wma_register_md_events - Register motion detection event handlers
 * @wma_handle: wma handle
 * Return: None
 */
static void wma_register_md_events(tp_wma_handle wma_handle)
{
}
#endif /* WLAN_FEATURE_MOTION_DETECTION */

#ifdef FEATURE_WLM_STATS
/**
 * wma_register_wlm_stats_events() - register the WLM stats response handler
 * @wma_handle: wma handle
 *
 * Return: None
 */
static void wma_register_wlm_stats_events(tp_wma_handle wma_handle)
{
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_wlm_stats_event_id,
					   wma_wlm_stats_rsp,
					   WMA_RX_SERIALIZER_CTX);
}
#else /* FEATURE_WLM_STATS */
static void wma_register_wlm_stats_events(tp_wma_handle wma_handle)
{
}
#endif /* FEATURE_WLM_STATS */

#ifdef MULTI_CLIENT_LL_SUPPORT
/**
 * wma_register_wlm_latency_level_event() - register vdev latency handler
 * @wma_handle: wma handle
 *
 * Registered in the work context (not the serializer) — NOTE(review):
 * presumably because the handler does not need to be serialized with
 * other WMA rx processing; confirm before changing the context.
 *
 * Return: None
 */
static void wma_register_wlm_latency_level_event(tp_wma_handle wma_handle)
{
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_vdev_latency_event_id,
					   wma_latency_level_event_handler,
					   WMA_RX_WORK_CTX);
}
#else
static void wma_register_wlm_latency_level_event(tp_wma_handle wma_handle)
{
}
#endif

/**
 * wma_get_psoc_from_scn_handle() - map an scn handle back to its psoc
 * @scn_handle: opaque handle that is actually a tp_wma_handle
 *
 * Return: the psoc stored in the wma handle, or NULL if @scn_handle is NULL
 */
struct wlan_objmgr_psoc *wma_get_psoc_from_scn_handle(void *scn_handle)
{
	tp_wma_handle wma_handle;

	if (!scn_handle) {
		wma_err("invalid scn handle");
		return NULL;
	}
	wma_handle = (tp_wma_handle)scn_handle;

	return wma_handle->psoc;
}

/**
 * wma_get_fw_phy_mode_for_freq_cb() - compute the FW phymode for a channel
 * @freq: channel frequency
 * @chan_width: channel width
 * @phy_mode: out parameter; receives the FW-encoded phymode
 *
 * Derives the host phymode from the configured dot11 mode (after
 * correcting band-invalid dot11 modes) and converts it to the FW encoding.
 *
 * Return: None
 */
void wma_get_fw_phy_mode_for_freq_cb(uint32_t freq, uint32_t chan_width,
				     uint32_t *phy_mode)
{
	uint32_t dot11_mode;
	enum wlan_phymode host_phy_mode;
	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);

	if (!mac) {
		wma_err("MAC context is NULL");
		*phy_mode = WLAN_PHYMODE_AUTO;
		return;
	}

	dot11_mode = mac->mlme_cfg->dot11_mode.dot11_mode;

	/* Update invalid dot11 modes to valid dot11 modes */
	if (WLAN_REG_IS_24GHZ_CH_FREQ(freq) &&
	    dot11_mode == MLME_DOT11_MODE_11A)
		dot11_mode = MLME_DOT11_MODE_11G;

	if (WLAN_REG_IS_5GHZ_CH_FREQ(freq) &&
	    (dot11_mode == MLME_DOT11_MODE_11B ||
	     dot11_mode == MLME_DOT11_MODE_11G ||
	     dot11_mode == MLME_DOT11_MODE_11G_ONLY))
		dot11_mode = MLME_DOT11_MODE_11A;

	host_phy_mode = wma_chan_phy_mode(freq, chan_width, dot11_mode);
	*phy_mode = wmi_host_to_fw_phymode(host_phy_mode);
}

/**
 * wma_get_phy_mode_cb() - compute the host phymode for a channel
 * @freq: channel frequency
 * @chan_width: channel width
 * @phy_mode: out parameter; receives the host phymode
 *
 * Unlike wma_get_fw_phy_mode_for_freq_cb(), this variant does not
 * correct band-invalid dot11 modes and returns the host encoding.
 *
 * Return: None
 */
void wma_get_phy_mode_cb(qdf_freq_t freq, uint32_t chan_width,
			 enum wlan_phymode *phy_mode)
{
	uint32_t dot11_mode;
	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);

	if (!mac) {
		wma_err("MAC context is NULL");
		*phy_mode = WLAN_PHYMODE_AUTO;
		return;
	}

	dot11_mode = mac->mlme_cfg->dot11_mode.dot11_mode;
	*phy_mode = wma_chan_phy_mode(freq, chan_width, dot11_mode);
}

#ifdef WLAN_FEATURE_NAN
/**
 * wma_register_nan_callbacks() - register WMA callbacks with the NAN module
 * @wma_handle: wma handle
 *
 * Return: None
 */
static void
wma_register_nan_callbacks(tp_wma_handle wma_handle)
{
	struct nan_callbacks cb_obj = {0};

	cb_obj.update_ndi_conn = wma_ndi_update_connection_info;

	ucfg_nan_register_wma_callbacks(wma_handle->psoc, &cb_obj);
}
#else
static void wma_register_nan_callbacks(tp_wma_handle wma_handle)
{
}
#endif

#ifdef WLAN_FEATURE_PKT_CAPTURE
/**
 * wma_register_pkt_capture_callbacks() - register WMA callbacks with the
 * packet-capture module
 * @wma_handle: wma handle
 *
 * Return: None
 */
static void
wma_register_pkt_capture_callbacks(tp_wma_handle wma_handle)
{
	struct pkt_capture_callbacks cb_obj = {0};

	cb_obj.get_rmf_status = wma_get_rmf_status;

	ucfg_pkt_capture_register_wma_callbacks(wma_handle->psoc, &cb_obj);
}
#else
static inline void
wma_register_pkt_capture_callbacks(tp_wma_handle wma_handle)
{
}
#endif

#ifdef TRACE_RECORD
static void wma_trace_dump(void *mac_ctx, tp_qdf_trace_record record,
			   uint16_t rec_index)
{
	/*
	 * This is dummy handler registered to qdf_trace as wma module wants to
	 * insert trace records in qdf trace global record table but qdf_trace
	 * does not allow to insert the trace records in the global record
	 * table if a module is not registered with the qdf trace.
	 */
}

/* Register the dummy dump handler so WMA may insert qdf trace records */
static void wma_trace_init(void)
{
	qdf_trace_register(QDF_MODULE_ID_WMA, &wma_trace_dump);
}
#else
static inline void wma_trace_init(void)
{
}
#endif

#ifdef FEATURE_CLUB_LL_STATS_AND_GET_STATION
/**
 * wma_get_service_cap_club_get_sta_in_ll_stats_req() - query whether FW
 * supports clubbing get-station into the LL stats request
 * @wmi_handle: wmi handle
 * @cfg: target services config to update
 *
 * Return: None
 */
static void wma_get_service_cap_club_get_sta_in_ll_stats_req(
					struct wmi_unified *wmi_handle,
					struct wma_tgt_services *cfg)
{
	cfg->is_get_station_clubbed_in_ll_stats_req =
		wmi_service_enabled(wmi_handle,
				    wmi_service_get_station_in_ll_stats_req);
}
#else
static void wma_get_service_cap_club_get_sta_in_ll_stats_req(
					struct wmi_unified *wmi_handle,
					struct wma_tgt_services *cfg)
{
}
#endif /* FEATURE_CLUB_LL_STATS_AND_GET_STATION */

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * wma_update_num_tdls_vdevs_if_11be_mlo() - bump the TDLS vdev count when
 * the FW is 11be-MLO capable
 * @psoc: psoc object
 * @wlan_res_cfg: target resource config to update
 *
 * Return: None
 */
static void
wma_update_num_tdls_vdevs_if_11be_mlo(struct wlan_objmgr_psoc *psoc,
				      target_resource_config *wlan_res_cfg)
{
	if (!wlan_tdls_is_fw_11be_mlo_capable(psoc))
		return;

	wlan_res_cfg->num_tdls_vdevs = WLAN_UMAC_MLO_MAX_VDEVS;
	wma_debug("update tdls num vdevs %d", wlan_res_cfg->num_tdls_vdevs);
}

/**
 * wma_get_service_cap_per_link_mlo_stats() - query per-link MLO stats
 * support from the FW service bitmap
 * @wmi_handle: wmi handle
 * @cfg: target services config to update
 *
 * Return: None
 */
static void
wma_get_service_cap_per_link_mlo_stats(struct wmi_unified *wmi_handle,
				       struct wma_tgt_services *cfg)
{
	cfg->is_mlo_per_link_stats_supported =
		wmi_service_enabled(wmi_handle,
				    wmi_service_per_link_stats_support);
	wma_debug("mlo_per_link stats is %s supported by FW",
		  cfg->is_mlo_per_link_stats_supported ? "" : "NOT");
}
#else
static void
wma_update_num_tdls_vdevs_if_11be_mlo(struct wlan_objmgr_psoc *psoc,
				      target_resource_config *wlan_res_cfg)
{
}

static void
wma_get_service_cap_per_link_mlo_stats(struct wmi_unified *wmi_handle,
				       struct wma_tgt_services *cfg)
{
}
#endif

/**
 * wma_set_exclude_selftx_from_cca_busy_time() - Set exclude self tx time from
 * cca busy time bool
 * @exclude_selftx_from_cca_busy: Bool to update in in wma ini config
 * @wma_handle: WMA handle
 *
 * Return: None
 */
static void
wma_set_exclude_selftx_from_cca_busy_time(bool exclude_selftx_from_cca_busy,
					  tp_wma_handle wma_handle)
{
	struct wma_ini_config *cfg = wma_get_ini_handle(wma_handle);

	if (!cfg) {
		wma_err("NULL WMA ini handle");
		return;
	}

	cfg->exclude_selftx_from_cca_busy = exclude_selftx_from_cca_busy;
}

/**
 * wma_open() - Allocate wma context and initialize it.
 * @psoc: psoc object to attach WMA to
 * @tgt_cfg_cb: tgt config callback fun
 * @cds_cfg: mac parameters
 * @target_type: target chip type
 *
 * Return: 0 on success, errno on failure
 */
QDF_STATUS wma_open(struct wlan_objmgr_psoc *psoc,
		    wma_tgt_cfg_cb tgt_cfg_cb,
		    struct cds_config_info *cds_cfg,
		    uint32_t target_type)
{
	tp_wma_handle wma_handle;
	HTC_HANDLE htc_handle;
	qdf_device_t qdf_dev;
	void *wmi_handle;
	QDF_STATUS qdf_status;
	struct wmi_unified_attach_params *params;
	struct policy_mgr_wma_cbacks wma_cbacks;
	struct target_psoc_info *tgt_psoc_info;
	int i;
	bool val = 0;
	void *cds_context;
	target_resource_config *wlan_res_cfg;
	uint32_t self_gen_frm_pwr = 0;
	uint32_t device_mode = cds_get_conparam();

	wma_debug("Enter");

	cds_context = cds_get_global_context();
	if (!cds_context) {
		wma_err("Invalid CDS context");
		return QDF_STATUS_E_INVAL;
	}

	g_wmi_version_info.major = __WMI_VER_MAJOR_;
	g_wmi_version_info.minor = __WMI_VER_MINOR_;
	g_wmi_version_info.revision = __WMI_REVISION_;

	qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
	htc_handle = cds_get_context(QDF_MODULE_ID_HTC);

	if (!htc_handle) {
		wma_err("Invalid HTC handle");
		return QDF_STATUS_E_INVAL;
	}

	/* Alloc memory for WMA Context */
	qdf_status = cds_alloc_context(QDF_MODULE_ID_WMA,
				       (void **)&wma_handle,
				       sizeof(*wma_handle));

	if (qdf_status != QDF_STATUS_SUCCESS) {
		wma_err("Memory allocation failed for wma_handle");
		return qdf_status;
	}

	qdf_mem_zero(wma_handle, sizeof(t_wma_handle));

	if (target_if_alloc_psoc_tgt_info(psoc)) {
		wma_err("target psoc info allocation failed");
		qdf_status = QDF_STATUS_E_NOMEM;
		goto err_free_wma_handle;
	}

	/* Wake locks are only needed for mission mode, not FTM */
	if (device_mode != QDF_GLOBAL_FTM_MODE) {
#ifdef FEATURE_WLAN_EXTSCAN
		qdf_wake_lock_create(&wma_handle->extscan_wake_lock,
				     "wlan_extscan_wl");
#endif /* FEATURE_WLAN_EXTSCAN */
		qdf_wake_lock_create(&wma_handle->wow_wake_lock,
				     "wlan_wow_wl");
		qdf_wake_lock_create(&wma_handle->wow_auth_req_wl,
				     "wlan_auth_req_wl");
		qdf_wake_lock_create(&wma_handle->wow_assoc_req_wl,
				     "wlan_assoc_req_wl");
		qdf_wake_lock_create(&wma_handle->wow_deauth_rec_wl,
				     "wlan_deauth_rec_wl");
		qdf_wake_lock_create(&wma_handle->wow_disassoc_rec_wl,
				     "wlan_disassoc_rec_wl");
		qdf_wake_lock_create(&wma_handle->wow_ap_assoc_lost_wl,
				     "wlan_ap_assoc_lost_wl");
		qdf_wake_lock_create(&wma_handle->wow_auto_shutdown_wl,
				     "wlan_auto_shutdown_wl");
		qdf_wake_lock_create(&wma_handle->roam_ho_wl,
				     "wlan_roam_ho_wl");
		qdf_wake_lock_create(&wma_handle->roam_preauth_wl,
				     "wlan_roam_preauth_wl");
		qdf_wake_lock_create(&wma_handle->probe_req_wps_wl,
				     "wlan_probe_req_wps_wl");
		qdf_wake_lock_create(&wma_handle->sap_d3_wow_wake_lock,
				     "wlan_sap_d3_wow_wake_lock");
		qdf_wake_lock_create(&wma_handle->go_d3_wow_wake_lock,
				     "wlan_go_d3_wow_wake_lock");
	}

	qdf_status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_LEGACY_WMA_ID);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("PSOC get_ref fails");
		goto err_get_psoc_ref;
	}
	wma_handle->psoc = psoc;

	if (wlan_pmo_enable_ssr_on_page_fault(psoc)) {
		wma_handle->pagefault_wakeups_ts =
			qdf_mem_malloc(
			wlan_pmo_get_max_pagefault_wakeups_for_ssr(psoc) *
			sizeof(qdf_time_t));
		/* NOTE(review): on this failure path qdf_status still holds
		 * QDF_STATUS_SUCCESS from try_get_ref above, so wma_open()
		 * would report success — verify and set E_NOMEM if intended.
		 */
		if (!wma_handle->pagefault_wakeups_ts)
			goto err_wma_handle;
	}

	wma_target_if_open(wma_handle);

	/*
	 * Allocate locally used params with its rx_ops member,
	 * and free it immediately after used.
	 */
	params = qdf_mem_malloc(sizeof(*params));
	if (!params) {
		qdf_status = QDF_STATUS_E_NOMEM;
		goto err_wma_handle;
	}

	params->osdev = NULL;
	params->target_type = WMI_TLV_TARGET;
	params->use_cookie = false;
	params->psoc = psoc;
	params->max_commands = WMI_MAX_CMDS;

	/* initialize tlv attach */
	wmi_tlv_init();

	/* attach the wmi */
	wmi_handle = wmi_unified_attach(wma_handle, params);
	qdf_mem_free(params);
	if (!wmi_handle) {
		wma_err("failed to attach WMI");
		qdf_status = QDF_STATUS_E_NOMEM;
		goto err_wma_handle;
	}

	target_if_register_legacy_service_ready_cb(
					wma_legacy_service_ready_event_handler);

	wma_nofl_alert("WMA --> wmi_unified_attach - success");

	/* store the wmi handle in tgt_if_handle */
	tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc);

	target_psoc_set_target_type(tgt_psoc_info, target_type);
	target_psoc_set_device_mode(tgt_psoc_info, device_mode);
	/* Save the WMI & HTC handle */
	target_psoc_set_wmi_hdl(tgt_psoc_info, wmi_handle);
	wma_handle->wmi_handle = wmi_handle;
	target_psoc_set_htc_hdl(tgt_psoc_info, htc_handle);
	wma_handle->cds_context = cds_context;
	wma_handle->qdf_dev = qdf_dev;
	wma_handle->enable_tx_compl_tsf64 =
		cds_cfg->enable_tx_compl_tsf64;

	/* Register Converged Event handlers */
	init_deinit_register_tgt_psoc_ev_handlers(psoc);

	/* Register LFR2/3 common Roam Event handler */
	target_if_roam_register_common_events(psoc);

	/* Register Roam offload Event handlers */
	target_if_roam_offload_register_events(psoc);

	/* Initialize max_no_of_peers for wma_get_number_of_peers_supported() */
	cds_cfg->max_station = wma_init_max_no_of_peers(wma_handle,
							cds_cfg->max_station);

	wlan_mlme_set_assoc_sta_limit(psoc, cds_cfg->max_station);

	wlan_mlme_register_common_events(psoc);

	/* initialize default target config */
	wlan_res_cfg = target_psoc_get_wlan_res_cfg(tgt_psoc_info);
	if (!wlan_res_cfg) {
		wma_err("wlan_res_cfg is null");
		qdf_status = QDF_STATUS_E_NOMEM;
		/* NOTE(review): err_wma_handle does not detach the wmi
		 * handle that was attached above — confirm whether this
		 * path should unwind via err_scn_context instead.
		 */
		goto err_wma_handle;
	}

	wma_set_default_tgt_config(wma_handle, wlan_res_cfg, cds_cfg);
	wma_update_num_tdls_vdevs_if_11be_mlo(psoc, wlan_res_cfg);

	qdf_status = wlan_mlme_get_tx_chainmask_cck(psoc, &val);
	if (qdf_status != QDF_STATUS_SUCCESS) {
		wma_err("Failed to get tx_chainmask_cck");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto err_wma_handle;
	}
	wma_handle->tx_chain_mask_cck = val;

	/* Self-gen frame power is best-effort; failure is not fatal */
	qdf_status = wlan_mlme_get_self_gen_frm_pwr(psoc, &self_gen_frm_pwr);
	if (qdf_status != QDF_STATUS_SUCCESS)
		wma_err("Failed to get self_gen_frm_pwr");
	wma_handle->self_gen_frm_pwr = self_gen_frm_pwr;

	cds_cfg->max_bssid = WLAN_MAX_VDEVS;

	wma_handle->max_station = cds_cfg->max_station;
	wma_handle->max_bssid = cds_cfg->max_bssid;
	wma_handle->enable_mc_list =
		ucfg_pmo_is_mc_addr_list_enabled(wma_handle->psoc);
	wma_handle->active_uc_apf_mode =
		ucfg_pmo_get_active_uc_apf_mode(wma_handle->psoc);
	wma_handle->active_mc_bc_apf_mode =
		ucfg_pmo_get_active_mc_bc_apf_mode(wma_handle->psoc);
	wma_handle->link_stats_results = NULL;
#ifdef WLAN_FEATURE_LPSS
	wma_handle->is_lpass_enabled = cds_cfg->is_lpass_enabled;
#endif
	wma_handle->interfaces = qdf_mem_malloc(sizeof(struct wma_txrx_node) *
						wma_handle->max_bssid);
	if (!wma_handle->interfaces) {
		qdf_status = QDF_STATUS_E_NOMEM;
		goto err_scn_context;
	}

	for (i = 0; i < wma_handle->max_bssid; ++i)
		wma_vdev_init(&wma_handle->interfaces[i]);

	/* Register the debug print event handler */
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					wmi_debug_print_event_id,
					wma_unified_debug_print_event_handler,
					WMA_RX_SERIALIZER_CTX);
	/* Register profiling event Handler */
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					wmi_wlan_profile_data_event_id,
					wma_profile_data_report_event_handler,
					WMA_RX_SERIALIZER_CTX);

	wma_handle->tgt_cfg_update_cb = tgt_cfg_cb;
	wma_handle->old_hw_mode_index = WMA_DEFAULT_HW_MODE_INDEX;
	wma_handle->new_hw_mode_index = WMA_DEFAULT_HW_MODE_INDEX;
	wma_handle->saved_chan.num_channels = 0;
	wma_handle->fw_timeout_crash = cds_cfg->fw_timeout_crash;

	qdf_status = qdf_mc_timer_init(&wma_handle->service_ready_ext_timer,
				       QDF_TIMER_TYPE_SW,
				       wma_service_ready_ext_evt_timeout,
				       wma_handle);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		wma_err("Failed to initialize service ready ext timeout");
		goto err_event_init;
	}

	qdf_status = qdf_event_create(&wma_handle->target_suspend);
	if (qdf_status != QDF_STATUS_SUCCESS) {
		wma_err("target suspend event initialization failed");
		goto err_event_init;
	}

	/* Init Tx Frame Complete event */
	qdf_status = qdf_event_create(&wma_handle->tx_frm_download_comp_event);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		wma_err("failed to init tx_frm_download_comp_event");
		goto err_event_init;
	}

	/* Init tx queue empty check event */
	qdf_status = qdf_event_create(&wma_handle->tx_queue_empty_event);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		wma_err("failed to init tx_queue_empty_event");
		goto err_event_init;
	}

	qdf_status = cds_shutdown_notifier_register(wma_shutdown_notifier_cb,
						    wma_handle);
	if (qdf_status != QDF_STATUS_SUCCESS) {
		wma_err("Shutdown notifier register failed: %d", qdf_status);
		goto err_event_init;
	}

	qdf_status = qdf_event_create(&wma_handle->runtime_suspend);
	if (qdf_status != QDF_STATUS_SUCCESS) {
		wma_err("runtime_suspend event initialization failed");
		goto err_event_init;
	}

	qdf_status = qdf_event_create(&wma_handle->recovery_event);
	if (qdf_status != QDF_STATUS_SUCCESS) {
		wma_err("recovery event initialization failed");
		goto err_event_init;
	}

	qdf_status = qdf_mutex_create(&wma_handle->radio_stats_lock);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to create radio stats mutex");
		goto err_event_init;
	}

	qdf_list_create(&wma_handle->wma_hold_req_queue,
			MAX_ENTRY_HOLD_REQ_QUEUE);
	qdf_spinlock_create(&wma_handle->wma_hold_req_q_lock);
	qdf_atomic_init(&wma_handle->is_wow_bus_suspended);
	qdf_atomic_init(&wma_handle->sap_num_clients_connected);
	qdf_atomic_init(&wma_handle->go_num_clients_connected);

	/* register for STA kickout function */
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_peer_sta_kickout_event_id,
					   wma_peer_sta_kickout_event_handler,
					   WMA_RX_SERIALIZER_CTX);
	/* register for fw state response event */
	wma_register_fw_state_events(wma_handle->wmi_handle);

#ifdef WLAN_POWER_DEBUG
	/* register for Chip Power stats event */
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
				wmi_pdev_chip_power_stats_event_id,
				wma_unified_power_debug_stats_event_handler,
				WMA_RX_SERIALIZER_CTX);
#endif
#ifdef WLAN_FEATURE_BEACON_RECEPTION_STATS
	/* register for beacon stats event */
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
				wmi_vdev_bcn_reception_stats_event_id,
				wma_unified_beacon_debug_stats_event_handler,
				WMA_RX_SERIALIZER_CTX);
#endif

#if defined(CLD_PM_QOS) && defined(WLAN_FEATURE_LL_MODE)
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_vdev_bcn_latency_event_id,
					   wma_vdev_bcn_latency_event_handler,
					   WMA_RX_SERIALIZER_CTX);
#endif
	/* register for linkspeed response event */
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					wmi_peer_estimated_linkspeed_event_id,
					wma_link_speed_event_handler,
					WMA_RX_SERIALIZER_CTX);

#ifdef FEATURE_OEM_DATA_SUPPORT
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_oem_response_event_id,
					   wma_oem_data_response_handler,
					   WMA_RX_SERIALIZER_CTX);
#endif /* FEATURE_OEM_DATA_SUPPORT */

	/* Register beacon tx complete event id. The event is required
	 * for sending channel switch announcement frames
	 */
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					wmi_offload_bcn_tx_status_event_id,
					wma_unified_bcntx_status_event_handler,
					WMA_RX_SERIALIZER_CTX);

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					wmi_update_vdev_rate_stats_event_id,
					wma_link_status_event_handler,
					WMA_RX_SERIALIZER_CTX);

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_roam_scan_stats_event_id,
					   wma_roam_scan_stats_event_handler,
					   WMA_RX_SERIALIZER_CTX);

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_pdev_cold_boot_cal_event_id,
					   wma_cold_boot_cal_event_handler,
					   WMA_RX_WORK_CTX);

#ifdef FEATURE_OEM_DATA
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_oem_data_event_id,
					   wma_oem_event_handler,
					   WMA_RX_WORK_CTX);
#endif

#ifdef WLAN_FEATURE_LINK_LAYER_STATS
	/* Register event handler for processing Link Layer Stats
	 * response from the FW
	 */
	wma_register_ll_stats_event_handler(wma_handle);

#endif /* WLAN_FEATURE_LINK_LAYER_STATS */

	wmi_set_tgt_assert(wma_handle->wmi_handle,
			   cds_cfg->force_target_assert_enabled);
	/* Firmware debug log */
	qdf_status = dbglog_init(wma_handle->wmi_handle);
	if (qdf_status != QDF_STATUS_SUCCESS) {
		wma_err("Firmware Dbglog initialization failed");
		goto err_dbglog_init;
	}

	wma_handle->staMaxLIModDtim = cds_cfg->sta_maxlimod_dtim;
	wma_handle->sta_max_li_mod_dtim_ms = cds_cfg->sta_maxlimod_dtim_ms;
	wma_handle->staModDtim = ucfg_pmo_get_sta_mod_dtim(wma_handle->psoc);
	wma_handle->staDynamicDtim =
		ucfg_pmo_get_sta_dynamic_dtim(wma_handle->psoc);

#ifdef WLAN_FEATURE_STATS_EXT
	/* register for extended stats event */
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_stats_ext_event_id,
					   wma_stats_ext_event_handler,
					   WMA_RX_SERIALIZER_CTX);
#endif /* WLAN_FEATURE_STATS_EXT */
#ifdef FEATURE_WLAN_EXTSCAN
	wma_register_extscan_event_handler(wma_handle);
#endif /* FEATURE_WLAN_EXTSCAN */

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_rssi_breach_event_id,
					   wma_rssi_breached_event_handler,
					   WMA_RX_SERIALIZER_CTX);

	qdf_wake_lock_create(&wma_handle->wmi_cmd_rsp_wake_lock,
			     "wlan_fw_rsp_wakelock");
	qdf_runtime_lock_init(&wma_handle->wmi_cmd_rsp_runtime_lock);
	qdf_runtime_lock_init(&wma_handle->sap_prevent_runtime_pm_lock);
	qdf_runtime_lock_init(&wma_handle->ndp_prevent_runtime_pm_lock);

	/* Register peer assoc conf event handler */
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_peer_assoc_conf_event_id,
					   wma_peer_assoc_conf_handler,
					   WMA_RX_SERIALIZER_CTX);
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_peer_create_conf_event_id,
					   wma_peer_create_confirm_handler,
					   WMA_RX_SERIALIZER_CTX);
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_peer_delete_response_event_id,
					   wma_peer_delete_handler,
					   WMA_RX_SERIALIZER_CTX);
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_chan_info_event_id,
					   wma_chan_info_event_handler,
					   WMA_RX_SERIALIZER_CTX);
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					wmi_dbg_mesg_flush_complete_event_id,
					wma_flush_complete_evt_handler,
					WMA_RX_WORK_CTX);
	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					wmi_report_rx_aggr_failure_event_id,
					wma_rx_aggr_failure_event_handler,
					WMA_RX_SERIALIZER_CTX);

	wmi_unified_register_event_handler(
				wma_handle->wmi_handle,
				wmi_coex_report_antenna_isolation_event_id,
				wma_antenna_isolation_event_handler,
				WMA_RX_SERIALIZER_CTX);

	wma_handle->ito_repeat_count = cds_cfg->ito_repeat_count;
	wma_handle->bandcapability = cds_cfg->bandcapability;

	/* Register PWR_SAVE_FAIL event only in case of recovery(1) */
	if (ucfg_pmo_get_auto_power_fail_mode(wma_handle->psoc) ==
	    PMO_FW_TO_SEND_WOW_IND_ON_PWR_FAILURE) {
		wmi_unified_register_event_handler(wma_handle->wmi_handle,
				wmi_pdev_chip_pwr_save_failure_detect_event_id,
				wma_chip_power_save_failure_detected_handler,
				WMA_RX_WORK_CTX);
	}

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_pdev_div_rssi_antid_event_id,
					   wma_pdev_div_info_evt_handler,
					   WMA_RX_WORK_CTX);

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_get_ani_level_event_id,
					   wma_get_ani_level_evt_handler,
					   WMA_RX_WORK_CTX);

	wma_register_debug_callback();
	wifi_pos_register_get_phy_mode_cb(wma_handle->psoc,
					  wma_get_phy_mode_cb);
	wifi_pos_register_get_fw_phy_mode_for_freq_cb(
					wma_handle->psoc,
					wma_get_fw_phy_mode_for_freq_cb);

	/* Register callback with PMO so PMO can update the vdev pause bitmap*/
	pmo_register_pause_bitmap_notifier(wma_handle->psoc,
					   wma_vdev_update_pause_bitmap);
	pmo_register_get_pause_bitmap(wma_handle->psoc,
				      wma_vdev_get_pause_bitmap);
	pmo_register_is_device_in_low_pwr_mode(wma_handle->psoc,
					wma_vdev_is_device_in_low_pwr_mode);
	pmo_register_get_dtim_period_callback(wma_handle->psoc,
					      wma_vdev_get_dtim_period);
	pmo_register_get_beacon_interval_callback(wma_handle->psoc,
						  wma_vdev_get_beacon_interval);
	wma_cbacks.wma_get_connection_info = wma_get_connection_info;
	wma_register_nan_callbacks(wma_handle);
	wma_register_pkt_capture_callbacks(wma_handle);
	/* Policy-manager registration failure is logged but not fatal */
	qdf_status = policy_mgr_register_wma_cb(wma_handle->psoc, &wma_cbacks);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		wma_err("Failed to register wma cb with Policy Manager");
	}

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					   wmi_phyerr_event_id,
					   wma_unified_phyerr_rx_event_handler,
					   WMA_RX_WORK_CTX);

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
					wmi_sap_obss_detection_report_event_id,
					wma_vdev_obss_detection_info_handler,
					WMA_RX_SERIALIZER_CTX);

	wmi_unified_register_event_handler(wma_handle->wmi_handle,
				wmi_obss_color_collision_report_event_id,
				wma_vdev_bss_color_collision_info_handler,
				WMA_RX_WORK_CTX);

	wma_register_twt_events(wma_handle);

	wma_register_apf_events(wma_handle);
	wma_register_md_events(wma_handle);
	wma_register_wlm_stats_events(wma_handle);
	wma_register_wlm_latency_level_event(wma_handle);
	wma_register_mws_coex_events(wma_handle);
	wma_trace_init();
	wma_set_exclude_selftx_from_cca_busy_time(
			cds_cfg->exclude_selftx_from_cca_busy,
			wma_handle);
	return QDF_STATUS_SUCCESS;

	/* Error unwinding: labels run in reverse order of acquisition;
	 * each label falls through to the next.
	 */
err_dbglog_init:
	qdf_status = qdf_mutex_destroy(&wma_handle->radio_stats_lock);
	if (QDF_IS_STATUS_ERROR(qdf_status))
		wma_err("Failed to destroy radio stats mutex");

	qdf_wake_lock_destroy(&wma_handle->wmi_cmd_rsp_wake_lock);
	qdf_runtime_lock_deinit(&wma_handle->ndp_prevent_runtime_pm_lock);
	qdf_runtime_lock_deinit(&wma_handle->sap_prevent_runtime_pm_lock);
	qdf_runtime_lock_deinit(&wma_handle->wmi_cmd_rsp_runtime_lock);
	qdf_spinlock_destroy(&wma_handle->wma_hold_req_q_lock);
err_event_init:
	wmi_unified_unregister_event_handler(wma_handle->wmi_handle,
					     wmi_debug_print_event_id);

	for (i = 0; i < wma_handle->max_bssid; ++i)
		wma_vdev_deinit(&wma_handle->interfaces[i]);

	qdf_mem_free(wma_handle->interfaces);

err_scn_context:
	qdf_mem_free(((struct cds_context *) cds_context)->cfg_ctx);
	((struct cds_context *)cds_context)->cfg_ctx = NULL;
	qdf_mem_free(wmi_handle);

err_wma_handle:
	wlan_objmgr_psoc_release_ref(psoc, WLAN_LEGACY_WMA_ID);
err_get_psoc_ref:
	target_if_free_psoc_tgt_info(psoc);
	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE) {
		qdf_wake_lock_destroy(&wma_handle->go_d3_wow_wake_lock);
		qdf_wake_lock_destroy(&wma_handle->sap_d3_wow_wake_lock);
#ifdef FEATURE_WLAN_EXTSCAN
		qdf_wake_lock_destroy(&wma_handle->extscan_wake_lock);
#endif /* FEATURE_WLAN_EXTSCAN */
		qdf_wake_lock_destroy(&wma_handle->wow_wake_lock);
		qdf_wake_lock_destroy(&wma_handle->wow_auth_req_wl);
		qdf_wake_lock_destroy(&wma_handle->wow_assoc_req_wl);
		qdf_wake_lock_destroy(&wma_handle->wow_deauth_rec_wl);
		qdf_wake_lock_destroy(&wma_handle->wow_disassoc_rec_wl);
		qdf_wake_lock_destroy(&wma_handle->wow_ap_assoc_lost_wl);
		qdf_wake_lock_destroy(&wma_handle->wow_auto_shutdown_wl);
		qdf_wake_lock_destroy(&wma_handle->roam_ho_wl);
		qdf_wake_lock_destroy(&wma_handle->roam_preauth_wl);
		qdf_wake_lock_destroy(&wma_handle->probe_req_wps_wl);
	}
err_free_wma_handle:
	cds_free_context(QDF_MODULE_ID_WMA, wma_handle);

	wma_debug("Exit");

	return qdf_status;
}

/**
 * wma_pre_start() - wma pre start
 *
 * Connects the WMI control and diag endpoints over HTC before the
 * target is started.
 *
 * Return: 0 on success, errno on failure
 */
QDF_STATUS wma_pre_start(void)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	tp_wma_handle wma_handle;
	void *htc_handle;

	wma_debug("Enter");

	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);

	/* Validate the wma_handle */
	if (!wma_handle) {
		qdf_status = QDF_STATUS_E_INVAL;
		goto end;
	}

	htc_handle = lmac_get_htc_hdl(wma_handle->psoc);
	if (!htc_handle) {
		wma_err("invalid htc handle");
		qdf_status = QDF_STATUS_E_INVAL;
		goto end;
	}

	/* Open endpoint for ctrl path - WMI <--> HTC */
	qdf_status = wmi_unified_connect_htc_service(wma_handle->wmi_handle,
						     htc_handle);
	if (qdf_status != QDF_STATUS_SUCCESS) {
		wma_err("wmi_unified_connect_htc_service");
		if (!cds_is_fw_down())
			QDF_BUG(0);

		qdf_status = QDF_STATUS_E_FAULT;
		goto end;
	}

	/* Open endpoint for wmi diag path */
	qdf_status = wmi_diag_connect_pdev_htc_service(wma_handle->wmi_handle,
						       htc_handle);
	if (qdf_status != QDF_STATUS_SUCCESS) {
		wma_err("wmi_diag_connect_pdev_htc_service");
		if (!cds_is_fw_down())
			QDF_BUG(0);

		qdf_status = QDF_STATUS_E_FAULT;
		goto end;
	}

	wma_debug("WMA --> wmi_unified_connect_htc_service - success");

end:
	wma_debug("Exit");
	return qdf_status;
}

/*
 * wma_send_msg_by_priority() - post a message to PE via the scheduler.
 * On post failure the body pointer is freed here, so ownership of
 * body_ptr always transfers to this function.
 */
void wma_send_msg_by_priority(tp_wma_handle wma_handle, uint16_t msg_type,
			      void *body_ptr, uint32_t body_val,
			      bool is_high_priority)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	msg.type = msg_type;
	msg.bodyval = body_val;
	msg.bodyptr = body_ptr;
	msg.flush_callback = wma_discard_fw_event;

	status = scheduler_post_msg_by_priority(QDF_MODULE_ID_PE,
						&msg, is_high_priority);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		if (body_ptr)
			qdf_mem_free(body_ptr);
	}
}

/* Normal-priority wrapper around wma_send_msg_by_priority() */
void wma_send_msg(tp_wma_handle wma_handle, uint16_t msg_type,
		  void *body_ptr, uint32_t body_val)
{
	wma_send_msg_by_priority(wma_handle, msg_type,
				 body_ptr, body_val, false);
}

/* High-priority wrapper around wma_send_msg_by_priority() */
void wma_send_msg_high_priority(tp_wma_handle wma_handle, uint16_t msg_type,
				void *body_ptr, uint32_t body_val)
{
	wma_send_msg_by_priority(wma_handle, msg_type,
				 body_ptr, body_val, true);
}

/**
 * wma_set_base_macaddr_indicate() - set base mac address in fw
 * @wma_handle: wma handle
 * @customAddr: base mac address
 *
 * Return: 0 for success, -EIO if the WMI command fails
 */
static int wma_set_base_macaddr_indicate(tp_wma_handle wma_handle,
					 tSirMacAddr *customAddr)
{
	int err;

	err = wmi_unified_set_base_macaddr_indicate_cmd(wma_handle->wmi_handle,
							(uint8_t *)customAddr);
	if (err)
		return -EIO;
	wma_debug("Base MAC Addr: " QDF_MAC_ADDR_FMT,
		  QDF_MAC_ADDR_REF((*customAddr)));

	return 0;
}

/**
 * wma_log_supported_evt_handler() - Enable/Disable FW diag/log events
 * @handle: WMA handle
 * @event: Event received from FW
 * @len: Length of the event
 *
 * Enables the low frequency events and disables the high frequency
 * events. Bit 17 indicates if the event is low/high frequency.
 * 1 - high frequency, 0 - low frequency
 *
 * Return: 0 on successfully enabling/disabling the events,
 * -EINVAL on WMI failure
 */
static int wma_log_supported_evt_handler(void *handle,
					 uint8_t *event,
					 uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;

	if (wmi_unified_log_supported_evt_cmd(wma->wmi_handle,
					      event, len))
		return -EINVAL;

	return 0;
}

/**
 * wma_pdev_set_hw_mode_resp_evt_handler() - Set HW mode resp evt handler
 * @handle: WMI handle
 * @event: Event received from FW
 * @len: Length of the event
 *
 * Event handler for WMI_PDEV_SET_HW_MODE_RESP_EVENTID that is sent to host
 * driver in response to a WMI_PDEV_SET_HW_MODE_CMDID being sent to WLAN
 * firmware
 *
 * Return: QDF_STATUS
 */
static int wma_pdev_set_hw_mode_resp_evt_handler(void *handle,
						 uint8_t *event,
						 uint32_t len)
{
	WMI_PDEV_SET_HW_MODE_RESP_EVENTID_param_tlvs *param_buf;
	wmi_pdev_set_hw_mode_response_event_fixed_param *wmi_event;
	wmi_pdev_set_hw_mode_response_vdev_mac_entry *vdev_mac_entry;
	uint32_t i;
	struct sir_set_hw_mode_resp *hw_mode_resp;
	tp_wma_handle wma = (tp_wma_handle) handle;

	if (wma_validate_handle(wma)) {
		/* Since WMA handle itself is NULL, we cannot send fail
		 * response back to LIM here
		 */
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* The response has arrived: release the wakelock taken when the
	 * command was sent and drop the pending-request bookkeeping.
	 */
	wma_release_wakelock(&wma->wmi_cmd_rsp_wake_lock);
	wma_remove_req(wma, 0, WMA_PDEV_SET_HW_MODE_RESP);

	hw_mode_resp = qdf_mem_malloc(sizeof(*hw_mode_resp));
	if (!hw_mode_resp) {
		/* Since this memory allocation itself failed, we cannot
		 * send fail response back to LIM here
		 */
		return QDF_STATUS_E_NULL_VALUE;
	}

	param_buf = (WMI_PDEV_SET_HW_MODE_RESP_EVENTID_param_tlvs *) event;
	if (!param_buf) {
		wma_err("Invalid WMI_PDEV_SET_HW_MODE_RESP_EVENTID event");
		/* Need to send response back to upper layer to free
		 * active command list
		 */
		goto fail;
	}
	/* NOTE(review): fixed_param is dereferenced here without a NULL
	 * check; presumably the TLV framework guarantees it is non-NULL
	 * when param_buf is valid — confirm against the WMI TLV parser.
	 */
	if (param_buf->fixed_param->num_vdev_mac_entries >=
			MAX_VDEV_SUPPORTED) {
		wma_err("num_vdev_mac_entries crossed max value");
		goto fail;
	}

	wmi_event = param_buf->fixed_param;
	/* The entry count must not exceed the number of TLVs actually
	 * present in the event buffer.
	 */
	if (wmi_event->num_vdev_mac_entries >
	    param_buf->num_wmi_pdev_set_hw_mode_response_vdev_mac_mapping) {
		wma_err("Invalid num_vdev_mac_entries: %d",
			wmi_event->num_vdev_mac_entries);
		goto fail;
	}
	hw_mode_resp->status = wmi_event->status;
	hw_mode_resp->cfgd_hw_mode_index = wmi_event->cfgd_hw_mode_index;
	hw_mode_resp->num_vdev_mac_entries = wmi_event->num_vdev_mac_entries;

	wma->set_hw_mode_resp_status = wmi_event->status;
	wma_debug("status:%d cfgd_hw_mode_index:%d num_vdev_mac_entries:%d",
		  wmi_event->status,
		  wmi_event->cfgd_hw_mode_index,
		  wmi_event->num_vdev_mac_entries);
	vdev_mac_entry =
		param_buf->wmi_pdev_set_hw_mode_response_vdev_mac_mapping;

	/* Store the vdev-mac map in WMA and prepare to send to PE */
	for (i = 0; i < wmi_event->num_vdev_mac_entries; i++) {
		uint32_t vdev_id, mac_id, pdev_id;

		vdev_id = vdev_mac_entry[i].vdev_id;
		pdev_id = vdev_mac_entry[i].pdev_id;
		/* A soc-level pdev id cannot be mapped to a mac id */
		if (pdev_id == OL_TXRX_PDEV_ID) {
			wma_err("soc level id received for mac id");
			goto fail;
		}
		if (vdev_id >= wma->max_bssid) {
			wma_err("vdev_id: %d is invalid, max_bssid: %d",
				vdev_id, wma->max_bssid);
			goto fail;
		}

		mac_id = WMA_PDEV_TO_MAC_MAP(vdev_mac_entry[i].pdev_id);

		wma_debug("vdev_id:%d mac_id:%d", vdev_id, mac_id);

		hw_mode_resp->vdev_mac_map[i].vdev_id = vdev_id;
		hw_mode_resp->vdev_mac_map[i].mac_id = mac_id;
		wma_update_intf_hw_mode_params(vdev_id, mac_id,
					       wmi_event->cfgd_hw_mode_index);
	}

	if (hw_mode_resp->status == SET_HW_MODE_STATUS_OK) {
		if (WMA_DEFAULT_HW_MODE_INDEX ==
				wma->new_hw_mode_index) {
			/* First successful HW mode set: just record it */
			wma->new_hw_mode_index = wmi_event->cfgd_hw_mode_index;
		} else {
			/* Subsequent set: rotate new -> old */
			wma->old_hw_mode_index = wma->new_hw_mode_index;
			wma->new_hw_mode_index = wmi_event->cfgd_hw_mode_index;
		}
		policy_mgr_update_hw_mode_index(wma->psoc,
						wmi_event->cfgd_hw_mode_index);
	}

	wma_debug("Updated: old_hw_mode_index:%d new_hw_mode_index:%d",
		  wma->old_hw_mode_index, wma->new_hw_mode_index);

	wma_send_msg(wma, SIR_HAL_PDEV_SET_HW_MODE_RESP,
		     (void *) hw_mode_resp, 0);

	return QDF_STATUS_SUCCESS;

fail:
	/* Send a cancelled response so LIM can free its active command */
	wma_err("Sending fail response to LIM");
	hw_mode_resp->status = SET_HW_MODE_STATUS_ECANCELED;
	hw_mode_resp->cfgd_hw_mode_index = 0;
	hw_mode_resp->num_vdev_mac_entries = 0;
	wma_send_msg(wma, SIR_HAL_PDEV_SET_HW_MODE_RESP,
		     (void *) hw_mode_resp, 0);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wma_process_pdev_hw_mode_trans_ind() - Process HW mode transition info
 * @handle: WMA handle
 * @fixed_param: Event fixed parameters
 * @vdev_mac_entry: vdev mac entry
 * @hw_mode_trans_ind: Buffer to store parsed information
 *
 * Parses fixed_param, vdev_mac_entry and fills in the information into
 * hw_mode_trans_ind and wma
 *
 * Return: None
 */
void wma_process_pdev_hw_mode_trans_ind(void *handle,
	wmi_pdev_hw_mode_transition_event_fixed_param *fixed_param,
	wmi_pdev_set_hw_mode_response_vdev_mac_entry *vdev_mac_entry,
	struct cm_hw_mode_trans_ind *hw_mode_trans_ind)
{
	uint32_t i;
	tp_wma_handle wma = (tp_wma_handle) handle;

	if (fixed_param->num_vdev_mac_entries > MAX_VDEV_SUPPORTED) {
		wma_err("Number of Vdev mac entries %d exceeded max vdev supported %d",
			fixed_param->num_vdev_mac_entries,
			MAX_VDEV_SUPPORTED);
		return;
	}
	hw_mode_trans_ind->old_hw_mode_index = fixed_param->old_hw_mode_index;
	hw_mode_trans_ind->new_hw_mode_index = fixed_param->new_hw_mode_index;
	hw_mode_trans_ind->num_vdev_mac_entries =
					fixed_param->num_vdev_mac_entries;
	wma_debug("old_hw_mode_index:%d new_hw_mode_index:%d entries=%d",
		  fixed_param->old_hw_mode_index,
		  fixed_param->new_hw_mode_index,
		  fixed_param->num_vdev_mac_entries);

	/* A missing map is not fatal; still record the new mode indices */
	if (!vdev_mac_entry) {
		wma_debug("null vdev_mac_entry");
		goto update_hw_mode;
	}

	/* Store the vdev-mac map in WMA and send to policy manager */
	for (i = 0; i < fixed_param->num_vdev_mac_entries; i++) {
		uint32_t vdev_id, mac_id, pdev_id;

		vdev_id = vdev_mac_entry[i].vdev_id;
		pdev_id = vdev_mac_entry[i].pdev_id;

		/* A soc-level pdev id cannot be mapped to a mac id */
		if (pdev_id == OL_TXRX_PDEV_ID) {
			wma_err("soc level id received for mac id");
			return;
		}
		if (vdev_id >= wma->max_bssid) {
			wma_err("vdev_id: %d is invalid, max_bssid: %d",
				vdev_id, wma->max_bssid);
			return;
		}

		mac_id = WMA_PDEV_TO_MAC_MAP(vdev_mac_entry[i].pdev_id);

		wma_debug("vdev_id:%d mac_id:%d", vdev_id, mac_id);

		hw_mode_trans_ind->vdev_mac_map[i].vdev_id = vdev_id;
		hw_mode_trans_ind->vdev_mac_map[i].mac_id = mac_id;
		wma_update_intf_hw_mode_params(vdev_id, mac_id,
					       fixed_param->new_hw_mode_index);
	}
update_hw_mode:
	wma->old_hw_mode_index = fixed_param->old_hw_mode_index;
	wma->new_hw_mode_index = fixed_param->new_hw_mode_index;
	policy_mgr_update_new_hw_mode_index(wma->psoc,
					    fixed_param->new_hw_mode_index);
	policy_mgr_update_old_hw_mode_index(wma->psoc,
					    fixed_param->old_hw_mode_index);

	wma_debug("Updated: old_hw_mode_index:%d new_hw_mode_index:%d",
		  wma->old_hw_mode_index, wma->new_hw_mode_index);
}

/**
 * wma_process_mac_freq_mapping() - fill mac<->freq-range map from event TLVs
 * @hw_mode_trans_ind: Buffer to store the parsed mac/frequency map
 * @param_buf: HW mode transition event TLVs
 *
 * Return: None
 */
static void
wma_process_mac_freq_mapping(struct cm_hw_mode_trans_ind *hw_mode_trans_ind,
		WMI_PDEV_HW_MODE_TRANSITION_EVENTID_param_tlvs *param_buf)
{
	uint32_t i, num_mac_freq;
	wmi_pdev_band_to_mac *mac_freq;

	mac_freq = param_buf->mac_freq_mapping;
	num_mac_freq = param_buf->num_mac_freq_mapping;

	/* The mapping TLV is optional; silently skip if absent */
	if (!mac_freq) {
		wma_debug("mac_freq Null");
		return;
	}

	if (!num_mac_freq || num_mac_freq > MAX_FREQ_RANGE_NUM) {
		wma_debug("num mac freq invalid %d", num_mac_freq);
		return;
	}

	hw_mode_trans_ind->num_freq_map = num_mac_freq;
	for (i = 0; i < num_mac_freq; i++) {
		hw_mode_trans_ind->mac_freq_map[i].mac_id =
			WMA_PDEV_TO_MAC_MAP(mac_freq[i].pdev_id);
		hw_mode_trans_ind->mac_freq_map[i].start_freq =
			mac_freq[i].start_freq;
		hw_mode_trans_ind->mac_freq_map[i].end_freq =
			mac_freq[i].end_freq;
	}
}

/**
 * wma_pdev_hw_mode_transition_evt_handler() - HW mode transition evt handler
 * @handle: WMI handle
 * @event: Event received from FW
 * @len: Length of the event
 *
 * Event handler for WMI_PDEV_HW_MODE_TRANSITION_EVENTID that indicates an
 * asynchronous hardware mode transition. This event notifies the host driver
 * that firmware independently changed the hardware mode for some reason, such
 * as Coex, LFR 3.0, etc
 *
 * Return: Success on receiving valid params from FW
 */
static int wma_pdev_hw_mode_transition_evt_handler(void *handle,
						   uint8_t *event,
						   uint32_t len)
{
	WMI_PDEV_HW_MODE_TRANSITION_EVENTID_param_tlvs *param_buf;
	wmi_pdev_hw_mode_transition_event_fixed_param *wmi_event;
	wmi_pdev_set_hw_mode_response_vdev_mac_entry *vdev_mac_entry;
	struct cm_hw_mode_trans_ind *hw_mode_trans_ind;
	tp_wma_handle wma = (tp_wma_handle) handle;

	if (wma_validate_handle(wma)) {
		/* This is an async event. So, not sending any event to LIM */
		return QDF_STATUS_E_NULL_VALUE;
	}

	param_buf = (WMI_PDEV_HW_MODE_TRANSITION_EVENTID_param_tlvs *) event;
	if (!param_buf) {
		/* This is an async event. So, not sending any event to LIM */
		wma_err("Invalid WMI_PDEV_HW_MODE_TRANSITION_EVENTID event");
		return QDF_STATUS_E_FAILURE;
	}

	if (param_buf->fixed_param->num_vdev_mac_entries > MAX_VDEV_SUPPORTED) {
		wma_err("num_vdev_mac_entries: %d crossed max value: %d",
			param_buf->fixed_param->num_vdev_mac_entries,
			MAX_VDEV_SUPPORTED);
		return QDF_STATUS_E_FAILURE;
	}

	hw_mode_trans_ind = qdf_mem_malloc(sizeof(*hw_mode_trans_ind));
	if (!hw_mode_trans_ind)
		return QDF_STATUS_E_NOMEM;

	wmi_event = param_buf->fixed_param;
	vdev_mac_entry =
		param_buf->wmi_pdev_set_hw_mode_response_vdev_mac_mapping;
	/* The entry count must not exceed the number of TLVs present */
	if (wmi_event->num_vdev_mac_entries >
	    param_buf->num_wmi_pdev_set_hw_mode_response_vdev_mac_mapping) {
		wma_err("Invalid num_vdev_mac_entries: %d",
			wmi_event->num_vdev_mac_entries);
		qdf_mem_free(hw_mode_trans_ind);
		/* NOTE(review): -EINVAL mixes errno with the QDF_STATUS
		 * values returned on the other paths of this handler;
		 * confirm what the WMI event dispatcher expects.
		 */
		return -EINVAL;
	}

	wma_process_pdev_hw_mode_trans_ind(wma, wmi_event, vdev_mac_entry,
					   hw_mode_trans_ind);
	wma_process_mac_freq_mapping(hw_mode_trans_ind, param_buf);

	if (policy_mgr_is_hwmode_offload_enabled(wma->psoc)) {
		/* Offload case: deliver directly to policy manager and
		 * free the indication here.
		 */
		policy_mgr_hw_mode_transition_cb(
			hw_mode_trans_ind->old_hw_mode_index,
			hw_mode_trans_ind->new_hw_mode_index,
			hw_mode_trans_ind->num_vdev_mac_entries,
			hw_mode_trans_ind->vdev_mac_map,
			hw_mode_trans_ind->num_freq_map,
			hw_mode_trans_ind->mac_freq_map,
			wma->psoc);
		qdf_mem_free(hw_mode_trans_ind);
	} else {
		/* Non-offload case: post to SME; ownership of
		 * hw_mode_trans_ind transfers on a successful post.
		 */
		struct scheduler_msg sme_msg = {0};
		QDF_STATUS status;

		wma_debug("post eWNI_SME_HW_MODE_TRANS_IND");
		sme_msg.type = eWNI_SME_HW_MODE_TRANS_IND;
		sme_msg.bodyptr = hw_mode_trans_ind;
		sme_msg.flush_callback = wma_discard_fw_event;

		status = scheduler_post_message(QDF_MODULE_ID_WMA,
						QDF_MODULE_ID_SME,
						QDF_MODULE_ID_SME, &sme_msg);
		if (QDF_IS_STATUS_ERROR(status))
			qdf_mem_free(hw_mode_trans_ind);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_pdev_set_dual_mode_config_resp_evt_handler() - Dual mode evt handler
 * @handle: WMI handle
 * @event: Event received from FW
 * @len: Length of the event
 *
 * Notifies the host driver of the completion or failure of a
 * WMI_PDEV_SET_MAC_CONFIG_CMDID command. This event would be returned to
 * the host driver once the firmware has completed a reconfiguration of the
 * Scan and FW mode configuration. These changes could include entering or
 * leaving a dual mac configuration for either scan and/or more permanent
 * firmware mode.
 *
 * Return: Success on receiving valid params from FW
 */
static int wma_pdev_set_dual_mode_config_resp_evt_handler(void *handle,
							  uint8_t *event,
							  uint32_t len)
{
	WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID_param_tlvs *param_buf;
	wmi_pdev_set_mac_config_response_event_fixed_param *wmi_event;
	tp_wma_handle wma = (tp_wma_handle) handle;
	struct sir_dual_mac_config_resp *dual_mac_cfg_resp;

	if (wma_validate_handle(wma)) {
		/* Since the WMA handle is NULL, we cannot send resp to LIM.
		 * So, returning from here.
		 */
		return QDF_STATUS_E_NULL_VALUE;
	}
	/* Response arrived: release the command wakelock and drop the
	 * pending-request bookkeeping.
	 */
	wma_release_wakelock(&wma->wmi_cmd_rsp_wake_lock);
	wma_remove_req(wma, 0, WMA_PDEV_MAC_CFG_RESP);

	dual_mac_cfg_resp = qdf_mem_malloc(sizeof(*dual_mac_cfg_resp));
	if (!dual_mac_cfg_resp)
		/* Since the mem alloc failed, we cannot send resp to LIM.
		 * So, returning from here.
		 */
		return QDF_STATUS_E_NULL_VALUE;

	param_buf = (WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID_param_tlvs *)
		event;
	if (!param_buf) {
		wma_err("Invalid event");
		goto fail;
	}

	/* NOTE(review): fixed_param is used without a NULL check here;
	 * presumably guaranteed non-NULL by the TLV parser — confirm.
	 */
	wmi_event = param_buf->fixed_param;
	wma_debug("status: %d", wmi_event->status);
	dual_mac_cfg_resp->status = wmi_event->status;

	if (SET_HW_MODE_STATUS_OK == dual_mac_cfg_resp->status) {
		policy_mgr_update_dbs_scan_config(wma->psoc);
		policy_mgr_update_dbs_fw_config(wma->psoc);
	}

	/* Pass the message to PE */
	wma_send_msg(wma, SIR_HAL_PDEV_MAC_CFG_RESP,
		     (void *) dual_mac_cfg_resp, 0);

	return QDF_STATUS_SUCCESS;

fail:
	/* Send a cancelled response so LIM can free its active command */
	wma_err("Sending fail response to LIM");
	dual_mac_cfg_resp->status = SET_HW_MODE_STATUS_ECANCELED;
	wma_send_msg(wma, SIR_HAL_PDEV_MAC_CFG_RESP,
		     (void *) dual_mac_cfg_resp, 0);

	return QDF_STATUS_E_FAILURE;

}

#ifdef WLAN_CONV_SPECTRAL_ENABLE
/**
 * wma_register_spectral_cmds() - register spectral WMI ops with the
 * spectral component
 * @wma_handle: wma handle
 *
 * Return: none
 */
static void wma_register_spectral_cmds(tp_wma_handle wma_handle)
{
	struct spectral_wmi_ops cmd_ops;

	cmd_ops.wmi_spectral_configure_cmd_send =
			wmi_unified_vdev_spectral_configure_cmd_send;
	cmd_ops.wmi_spectral_enable_cmd_send =
			wmi_unified_vdev_spectral_enable_cmd_send;
	wlan_register_spectral_wmi_ops(wma_handle->psoc, &cmd_ops);
}
#else
/* Spectral disabled at build time: no-op stub */
static void wma_register_spectral_cmds(tp_wma_handle wma_handle)
{
}
#endif
/**
 * wma_start() - wma start function.
 * Initialize event handlers and timers.
 *
 * Return: 0 on success, QDF Error on failure
 */
QDF_STATUS wma_start(void)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	tp_wma_handle wma_handle;
	struct wmi_unified *wmi_handle;
	struct mac_context *mac = NULL;

	wma_debug("Enter");

	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma_handle) {
		qdf_status = QDF_STATUS_E_INVAL;
		goto end;
	}

	wmi_handle = get_wmi_unified_hdl_from_psoc(wma_handle->psoc);
	if (wmi_validate_handle(wmi_handle)) {
		qdf_status = QDF_STATUS_E_INVAL;
		goto end;
	}

	mac = cds_get_context(QDF_MODULE_ID_PE);
	if (!mac) {
		qdf_status = QDF_STATUS_E_INVAL;
		goto end;
	}

	qdf_status = wmi_unified_register_event_handler(wmi_handle,
					wmi_wow_wakeup_host_event_id,
					wma_wow_wakeup_host_event,
					WMA_RX_TASKLET_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register wow wakeup host event handler");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

	if (wma_d0_wow_is_supported()) {
		qdf_status = wmi_unified_register_event_handler(
				wmi_handle,
				wmi_d0_wow_disable_ack_event_id,
				wma_d0_wow_disable_ack_event,
				WMA_RX_TASKLET_CTX);
		if (QDF_IS_STATUS_ERROR(qdf_status)) {
			wma_err("Failed to register d0wow disable ack event handler");
			qdf_status = QDF_STATUS_E_FAILURE;
			goto end;
		}
	}

	qdf_status = wmi_unified_register_event_handler(wmi_handle,
					wmi_pdev_resume_event_id,
					wma_pdev_resume_event_handler,
					WMA_RX_TASKLET_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register PDEV resume event handler");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
	defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
	/* NOTE(review): unlike the other registrations, this status is
	 * not checked before the next call overwrites it.
	 */
	wma_debug("MCC TX Pause Event Handler register");
	qdf_status =
		wmi_unified_register_event_handler(wmi_handle,
					wmi_tx_pause_event_id,
					wma_mcc_vdev_tx_pause_evt_handler,
					WMA_RX_TASKLET_CTX);
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

	wma_debug("Registering SAR2 response handler");
	qdf_status = wmi_unified_register_event_handler(wma_handle->wmi_handle,
					wmi_wlan_sar2_result_event_id,
					wma_sar_rsp_evt_handler,
					WMA_RX_SERIALIZER_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register sar response event cb");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

#ifdef FEATURE_WLAN_AUTO_SHUTDOWN
	wma_debug("Registering auto shutdown handler");
	qdf_status = wmi_unified_register_event_handler(wmi_handle,
					wmi_host_auto_shutdown_event_id,
					wma_auto_shutdown_event_handler,
					WMA_RX_SERIALIZER_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register WMI Auto shutdown event handler");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}
#endif /* FEATURE_WLAN_AUTO_SHUTDOWN */
	qdf_status = wmi_unified_register_event_handler(wmi_handle,
					wmi_thermal_mgmt_event_id,
					wma_thermal_mgmt_evt_handler,
					WMA_RX_SERIALIZER_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register thermal mitigation event cb");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

	qdf_status = wma_ocb_register_callbacks(wma_handle);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register OCB callbacks");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

	qdf_status = QDF_STATUS_SUCCESS;

#ifdef QCA_WIFI_FTM
	/*
	 * Tx mgmt attach requires TXRX context which is not created
	 * in FTM mode. So skip the TX mgmt attach.
	 */
	if (cds_get_conparam() == QDF_GLOBAL_FTM_MODE)
		goto end;
#endif /* QCA_WIFI_FTM */

	qdf_status = wma_tx_attach(wma_handle);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register tx management");
		goto end;
	}

	/* Initialize log completion timeout */
	qdf_status = qdf_mc_timer_init(&wma_handle->log_completion_timer,
				       QDF_TIMER_TYPE_SW,
				       wma_log_completion_timeout,
				       wma_handle);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to initialize log completion timeout");
		goto end;
	}

	qdf_status = wma_fips_register_event_handlers(wma_handle);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register FIPS event handler");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

	qdf_status = wma_sar_register_event_handlers(wma_handle);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register SAR event handlers");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

	/* Initialize the get temperature event handler */
	qdf_status = wmi_unified_register_event_handler(wmi_handle,
					wmi_pdev_temperature_event_id,
					wma_pdev_temperature_evt_handler,
					WMA_RX_SERIALIZER_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register get_temperature event cb");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

	qdf_status = wmi_unified_register_event_handler(wmi_handle,
					wmi_vdev_tsf_report_event_id,
					wma_vdev_tsf_handler,
					WMA_RX_SERIALIZER_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register tsf callback");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

	/* Initialize the wma_pdev_set_hw_mode_resp_evt_handler event handler */
	qdf_status = wmi_unified_register_event_handler(wmi_handle,
					wmi_pdev_set_hw_mode_rsp_event_id,
					wma_pdev_set_hw_mode_resp_evt_handler,
					WMA_RX_SERIALIZER_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register set hw mode resp event cb");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

	/* Initialize the WMI_SOC_HW_MODE_TRANSITION_EVENTID event handler */
	qdf_status = wmi_unified_register_event_handler(wmi_handle,
					wmi_pdev_hw_mode_transition_event_id,
					wma_pdev_hw_mode_transition_evt_handler,
					WMA_RX_SERIALIZER_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register hw mode transition event cb");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

	/* Initialize the set dual mac configuration event handler */
	qdf_status = wmi_unified_register_event_handler(wmi_handle,
					wmi_pdev_set_mac_config_resp_event_id,
					wma_pdev_set_dual_mode_config_resp_evt_handler,
					WMA_RX_SERIALIZER_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		/* NOTE(review): this error string looks copy-pasted from
		 * the hw mode transition registration above; it should
		 * mention the dual mac config resp event instead.
		 */
		wma_err("Failed to register hw mode transition event cb");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}

	qdf_status = wmi_unified_register_event_handler(wmi_handle,
					wmi_coex_bt_activity_event_id,
					wma_wlan_bt_activity_evt_handler,
					WMA_RX_SERIALIZER_CTX);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		wma_err("Failed to register coex bt activity event handler");
		qdf_status = QDF_STATUS_E_FAILURE;
		goto end;
	}
	wma_register_spectral_cmds(wma_handle);

end:
	wma_debug("Exit");
	return qdf_status;
}

/**
 * wma_stop() - wma stop function.
 * Flush pending work, destroy timers and detach TX mgmt.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF error status on failure
 */
QDF_STATUS wma_stop(void)
{
	tp_wma_handle wma_handle;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	int i;
	struct mac_context *mac = NULL;
	struct wlan_objmgr_vdev *vdev;

	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
	wma_debug("Enter");
	if (!wma_handle) {
		qdf_status = QDF_STATUS_E_INVAL;
		goto end;
	}
	mac = cds_get_context(QDF_MODULE_ID_PE);
	if (!mac) {
		/* NOTE(review): qdf_status is still SUCCESS on this path,
		 * unlike the wma_handle check above — confirm intentional.
		 */
		goto end;
	}
#ifdef QCA_WIFI_FTM
	/*
	 * Tx mgmt detach requires TXRX context which is not created
	 * in FTM mode. So skip the TX mgmt detach.
	 */
	if (cds_get_conparam() == QDF_GLOBAL_FTM_MODE) {
		qdf_status = QDF_STATUS_SUCCESS;
		goto end;
	}
#endif /* QCA_WIFI_FTM */

	if (wma_handle->ack_work_ctx) {
		cds_flush_work(&wma_handle->ack_work_ctx->ack_cmp_work);
		qdf_mem_free(wma_handle->ack_work_ctx);
		wma_handle->ack_work_ctx = NULL;
	}

	/* Destroy the timer for log completion */
	qdf_status = qdf_mc_timer_destroy(&wma_handle->log_completion_timer);
	if (qdf_status != QDF_STATUS_SUCCESS)
		wma_err("Failed to destroy the log completion timer");
	/* clean up ll-queue for all vdev */
	for (i = 0; i < wma_handle->max_bssid; i++) {
		vdev = wma_handle->interfaces[i].vdev;
		if (!vdev)
			continue;

		if (wma_is_vdev_up(i))
			cdp_fc_vdev_flush(cds_get_context(QDF_MODULE_ID_SOC),
					  i);
	}

	qdf_status = wma_tx_detach(wma_handle);
	if (qdf_status != QDF_STATUS_SUCCESS) {
		wma_err("Failed to deregister tx management");
		goto end;
	}

end:
	wma_debug("Exit");
	return qdf_status;
}

/**
 * wma_wmi_service_close() - close wma wmi service interface.
 *
 * Return: 0 on success, QDF Error on failure
 */
QDF_STATUS wma_wmi_service_close(void)
{
	void *cds_ctx;
	tp_wma_handle wma_handle;
	uint8_t i;
	struct wmi_unified *wmi_handle;

	wma_debug("Enter");

	cds_ctx = cds_get_global_context();
	if (!cds_ctx) {
		wma_err("Invalid CDS context");
		return QDF_STATUS_E_INVAL;
	}

	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma_handle)
		return QDF_STATUS_E_INVAL;

	wmi_handle = wma_handle->wmi_handle;
	if (wmi_validate_handle(wmi_handle))
		return QDF_STATUS_E_INVAL;

	/* detach the wmi service */
	wma_debug("calling wmi_unified_detach");
	wmi_unified_detach(wmi_handle);
	wma_handle->wmi_handle = NULL;

	for (i = 0; i < wma_handle->max_bssid; i++)
		wma_vdev_deinit(&wma_handle->interfaces[i]);

	qdf_mem_free(wma_handle->interfaces);

	/* free the wma_handle */
	cds_free_context(QDF_MODULE_ID_WMA, wma_handle);

	/* NOTE(review): the NULL guard is redundant — qdf_mem_free(NULL)
	 * follows free(NULL) semantics and is a no-op.
	 */
	if (((struct cds_context *)cds_ctx)->cfg_ctx)
		qdf_mem_free(((struct cds_context *)cds_ctx)->cfg_ctx);
	((struct cds_context *)cds_ctx)->cfg_ctx = NULL;
	wma_debug("Exit");
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_wmi_work_close() - close the work queue items associated with WMI
 *
 * This function closes work queue items associated with WMI, but not fully
 * closes WMI service.
 *
 * Return: QDF_STATUS_SUCCESS if work close is successful. Otherwise
 *	proper error codes.
 */
QDF_STATUS wma_wmi_work_close(void)
{
	tp_wma_handle wma_handle;
	struct wmi_unified *wmi_handle;

	wma_debug("Enter");

	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma_handle)
		return QDF_STATUS_E_INVAL;

	wmi_handle = wma_handle->wmi_handle;
	if (wmi_validate_handle(wmi_handle))
		return QDF_STATUS_E_INVAL;

	/* remove the wmi work */
	wma_debug("calling wmi_unified_remove_work");
	wmi_unified_remove_work(wmi_handle);

	wma_debug("Exit");
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_close() - wma close function.
 * cleanup resources attached with wma.
 *
 * Return: 0 on success, QDF Error on failure
 */
QDF_STATUS wma_close(void)
{
	tp_wma_handle wma_handle;
	struct target_psoc_info *tgt_psoc_info;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	struct wmi_unified *wmi_handle;

	wma_debug("Enter");

	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma_handle)
		return QDF_STATUS_E_INVAL;

	wmi_handle = wma_handle->wmi_handle;
	if (wmi_validate_handle(wmi_handle))
		return QDF_STATUS_E_INVAL;

	/* pagefault_wakeups_ts is only allocated when SSR-on-page-fault
	 * is enabled, so only free it in that case.
	 */
	if (wlan_pmo_enable_ssr_on_page_fault(wma_handle->psoc))
		qdf_mem_free(wma_handle->pagefault_wakeups_ts);

	qdf_atomic_set(&wma_handle->sap_num_clients_connected, 0);
	qdf_atomic_set(&wma_handle->go_num_clients_connected, 0);

	/* Wake locks are only created outside FTM mode (see wma_open) */
	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE) {
		qdf_wake_lock_destroy(&wma_handle->go_d3_wow_wake_lock);
		qdf_wake_lock_destroy(&wma_handle->sap_d3_wow_wake_lock);
#ifdef FEATURE_WLAN_EXTSCAN
		qdf_wake_lock_destroy(&wma_handle->extscan_wake_lock);
#endif /* FEATURE_WLAN_EXTSCAN */
		qdf_wake_lock_destroy(&wma_handle->wow_wake_lock);
		qdf_wake_lock_destroy(&wma_handle->wow_auth_req_wl);
		qdf_wake_lock_destroy(&wma_handle->wow_assoc_req_wl);
		qdf_wake_lock_destroy(&wma_handle->wow_deauth_rec_wl);
		qdf_wake_lock_destroy(&wma_handle->wow_disassoc_rec_wl);
		qdf_wake_lock_destroy(&wma_handle->wow_ap_assoc_lost_wl);
		qdf_wake_lock_destroy(&wma_handle->wow_auto_shutdown_wl);
		qdf_wake_lock_destroy(&wma_handle->roam_ho_wl);
		qdf_wake_lock_destroy(&wma_handle->roam_preauth_wl);
		qdf_wake_lock_destroy(&wma_handle->probe_req_wps_wl);
	}

	/* unregister Firmware debug log */
	qdf_status = dbglog_deinit(wmi_handle);
	if (qdf_status != QDF_STATUS_SUCCESS)
		wma_err("dbglog_deinit failed");

	qdf_status = qdf_mc_timer_destroy(&wma_handle->service_ready_ext_timer);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		wma_err("Failed to destroy service ready ext event timer");

	qdf_event_destroy(&wma_handle->target_suspend);
	qdf_event_destroy(&wma_handle->runtime_suspend);
	qdf_event_destroy(&wma_handle->recovery_event);
	qdf_event_destroy(&wma_handle->tx_frm_download_comp_event);
	qdf_event_destroy(&wma_handle->tx_queue_empty_event);
	wma_cleanup_hold_req(wma_handle);
	qdf_wake_lock_destroy(&wma_handle->wmi_cmd_rsp_wake_lock);
	qdf_runtime_lock_deinit(&wma_handle->ndp_prevent_runtime_pm_lock);
	qdf_runtime_lock_deinit(&wma_handle->sap_prevent_runtime_pm_lock);
	qdf_runtime_lock_deinit(&wma_handle->wmi_cmd_rsp_runtime_lock);
	qdf_spinlock_destroy(&wma_handle->wma_hold_req_q_lock);

	if (wma_handle->pGetRssiReq) {
		qdf_mem_free(wma_handle->pGetRssiReq);
		wma_handle->pGetRssiReq = NULL;
	}

	wma_unified_radio_tx_mem_free(wma_handle);

	qdf_status = qdf_mutex_destroy(&wma_handle->radio_stats_lock);
	if (QDF_IS_STATUS_ERROR(qdf_status))
		wma_err("Failed to destroy radio stats mutex");

	/* Release the pdev reference taken at attach time */
	if (wma_handle->pdev) {
		wlan_objmgr_pdev_release_ref(wma_handle->pdev,
					     WLAN_LEGACY_WMA_ID);
		wma_handle->pdev = NULL;
	}

	/* Unhook all PMO callbacks before the psoc reference is dropped */
	pmo_unregister_get_beacon_interval_callback(wma_handle->psoc);
	pmo_unregister_get_dtim_period_callback(wma_handle->psoc);
	pmo_unregister_is_device_in_low_pwr_mode(wma_handle->psoc);
	pmo_unregister_get_pause_bitmap(wma_handle->psoc);
	pmo_unregister_pause_bitmap_notifier(wma_handle->psoc);

	tgt_psoc_info = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
	init_deinit_free_num_units(wma_handle->psoc, tgt_psoc_info);
	target_if_free_psoc_tgt_info(wma_handle->psoc);

	wlan_objmgr_psoc_release_ref(wma_handle->psoc, WLAN_LEGACY_WMA_ID);
	wma_handle->psoc = NULL;

	wma_debug("Exit");
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_update_fw_config() - update fw configuration
 * @psoc: psoc to query configuration from
 * @tgt_hdl: target capability info
 *
 * Return: none
 */
static void wma_update_fw_config(struct wlan_objmgr_psoc *psoc,
				 struct target_psoc_info *tgt_hdl)
{
	target_resource_config *cfg = &tgt_hdl->info.wlan_res_cfg;

	/* Override the no.
 of max fragments as per platform configuration */
	cfg->max_frag_entries = QDF_MIN(QCA_OL_11AC_TX_MAX_FRAGS,
					target_if_get_max_frag_entry(tgt_hdl));
	target_if_set_max_frag_entry(tgt_hdl, cfg->max_frag_entries);

	/* WOW filters are not used in FTM mode */
	if (cds_get_conparam() == QDF_GLOBAL_FTM_MODE)
		cfg->num_wow_filters = 0;
	else
		cfg->num_wow_filters = ucfg_pmo_get_num_wow_filters(psoc);

	cfg->apf_instruction_size = ucfg_pmo_get_apf_instruction_size(psoc);
	cfg->num_packet_filters = ucfg_pmo_get_num_packet_filters(psoc);
}

/**
 * wma_set_tx_partition_base() - set TX MSDU ID partition base for IPA
 * @value: TX MSDU ID partition base
 *
 * Return: none
 */
#ifdef IPA_OFFLOAD
static void wma_set_tx_partition_base(uint32_t value)
{
	cdp_ipa_set_uc_tx_partition_base(
			cds_get_context(QDF_MODULE_ID_SOC),
			(struct cdp_cfg *)cds_get_context(QDF_MODULE_ID_CFG),
			value);
	wma_debug("TX_MSDU_ID_PARTITION=%d", value);
}
#else
/* IPA offload disabled at build time: no-op stub */
static void wma_set_tx_partition_base(uint32_t value)
{
}
#endif

#ifdef WLAN_FEATURE_IGMP_OFFLOAD
/**
 * wma_get_igmp_offload_enable() - update tgt service with igmp offload support
 * @wmi_handle: Unified wmi handle
 * @cfg: target services
 *
 * Return: none
 */
static inline void
wma_get_igmp_offload_enable(struct wmi_unified *wmi_handle,
			    struct wma_tgt_services *cfg)
{
	cfg->igmp_offload_enable = wmi_service_enabled(
					wmi_handle,
					wmi_service_igmp_offload_support);
}
#else
static inline void
wma_get_igmp_offload_enable(struct wmi_unified *wmi_handle,
			    struct wma_tgt_services *cfg)
{}
#endif

#ifdef FEATURE_WLAN_TDLS
/**
 * wma_get_tdls_wideband_support() - update tgt service with service tdls
 * wideband support
 * @wmi_handle: Unified wmi handle
 * @cfg: target services
 *
 * Return: none
 */
static inline void
wma_get_tdls_wideband_support(struct wmi_unified *wmi_handle,
			      struct wma_tgt_services *cfg)
{
	cfg->en_tdls_wideband_support = wmi_service_enabled(
					wmi_handle,
					wmi_service_tdls_wideband_support);
}

#ifdef WLAN_FEATURE_11BE
/**
 * wma_get_tdls_mlo_support() - update tgt service with service tdls
 * be support
 * @wmi_handle: Unified wmi handle
 * @cfg: target services
 *
 * Return: none
 */
static inline void
wma_get_tdls_mlo_support(struct wmi_unified *wmi_handle,
			 struct wma_tgt_services *cfg)
{
	cfg->en_tdls_mlo_support =
		wmi_service_enabled(wmi_handle,
				    wmi_service_tdls_mlo_support);
}

/**
 * wma_get_n_link_mlo_support() - update tgt service with n-link mlo support
 * @wmi_handle: Unified wmi handle
 * @cfg: target services
 *
 * Return: none
 */
static inline void
wma_get_n_link_mlo_support(struct wmi_unified *wmi_handle,
			   struct wma_tgt_services *cfg)
{
	cfg->en_n_link_mlo_support =
		wmi_service_enabled(wmi_handle,
				    wmi_service_n_link_mlo_support);
}

#else
static inline void
wma_get_tdls_mlo_support(struct wmi_unified *wmi_handle,
			 struct wma_tgt_services *cfg)
{
}

static inline void
wma_get_n_link_mlo_support(struct wmi_unified *wmi_handle,
			   struct wma_tgt_services *cfg)
{
}
#endif /* WLAN_FEATURE_11BE */

#ifdef WLAN_FEATURE_11AX
/**
 * wma_get_tdls_ax_support() - update tgt service with service tdls ax support
 * @wmi_handle: Unified wmi handle
 * @cfg: target services
 *
 * Return: none
 */
static inline void
wma_get_tdls_ax_support(struct wmi_unified *wmi_handle,
			struct wma_tgt_services *cfg)
{
	cfg->en_tdls_11ax_support = wmi_service_enabled(
					wmi_handle,
					wmi_service_tdls_ax_support);
}

/**
 * wma_get_tdls_6g_support() - update tgt service with tdls 6 GHz support
 * @wmi_handle: Unified wmi handle
 * @cfg: target services
 *
 * Return: none
 */
static inline void
wma_get_tdls_6g_support(struct wmi_unified *wmi_handle,
			struct wma_tgt_services *cfg)
{
	cfg->en_tdls_6g_support = wmi_service_enabled(
					wmi_handle,
					wmi_service_tdls_6g_support);
}

#else
static inline void
wma_get_tdls_ax_support(struct wmi_unified *wmi_handle,
			struct wma_tgt_services *cfg)
{}

static inline void
wma_get_tdls_6g_support(struct wmi_unified *wmi_handle,
			struct wma_tgt_services *cfg)
{}

#endif
#else
/* FEATURE_WLAN_TDLS disabled: all TDLS capability getters are no-ops */
static inline void
wma_get_tdls_mlo_support(struct wmi_unified *wmi_handle,
			 struct wma_tgt_services *cfg)
{
}

static inline void
wma_get_n_link_mlo_support(struct wmi_unified *wmi_handle,
			   struct wma_tgt_services *cfg)
{}

static inline void
wma_get_tdls_ax_support(struct wmi_unified *wmi_handle,
			struct wma_tgt_services *cfg)
{}

static inline void
wma_get_tdls_6g_support(struct wmi_unified *wmi_handle,
			struct wma_tgt_services *cfg)
{}

static inline void
wma_get_tdls_wideband_support(struct wmi_unified *wmi_handle,
			      struct wma_tgt_services *cfg)
{}
#endif

#ifdef WLAN_FEATURE_DYNAMIC_MAC_ADDR_UPDATE
/**
 * wma_get_dynamic_vdev_macaddr_support() - update tgt service with dynamic
 * vdev mac address update support
 * @wmi_handle: Unified wmi handle
 * @cfg: target services
 *
 * Return: none
 */
static inline void wma_get_dynamic_vdev_macaddr_support(
		struct wmi_unified *wmi_handle, struct wma_tgt_services *cfg)
{
	cfg->dynamic_vdev_macaddr_support =
		wmi_service_enabled(
			wmi_handle,
			wmi_service_dynamic_update_vdev_macaddr_support);
}
#else
static inline void wma_get_dynamic_vdev_macaddr_support(
		struct wmi_unified *wmi_handle, struct wma_tgt_services *cfg)
{
}
#endif

#ifdef WLAN_FEATURE_11BE
/**
 * wma_get_mlo_tid_to_link_mapping_support() - update tgt service with
 * service tid to link mapping support
 * @wmi_handle: Unified wmi handle
 * @cfg: target services
 *
 * Return: none
 */
static inline void
wma_get_mlo_tid_to_link_mapping_support(struct wmi_unified *wmi_handle,
					struct wma_tgt_services *cfg)
{
	cfg->en_mlo_tid_to_link_support =
wmi_service_enabled(wmi_handle, 5262 wmi_service_mlo_tid_to_link_mapping_support); 5263 } 5264 5265 #else 5266 static inline void 5267 wma_get_mlo_tid_to_link_mapping_support(struct wmi_unified *wmi_handle, 5268 struct wma_tgt_services *cfg) 5269 { 5270 } 5271 #endif 5272 5273 #ifdef WLAN_FEATURE_NAN 5274 /** 5275 * wma_nan_set_pairing_feature() - set feature bit for Secure NAN if max 5276 * pairing session has non-zero value. 5277 * 5278 * Return: none 5279 */ 5280 static void wma_nan_set_pairing_feature(void) 5281 { 5282 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 5283 struct target_psoc_info *tgt_hdl; 5284 struct wlan_objmgr_psoc *psoc; 5285 5286 if (!wma_handle) { 5287 wma_err("wma handle is null"); 5288 return; 5289 } 5290 5291 psoc = wma_handle->psoc; 5292 tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); 5293 if (!tgt_hdl) { 5294 wma_err("tgt_hdl is null"); 5295 return; 5296 } 5297 5298 if (tgt_hdl->info.service_ext2_param.max_nan_pairing_sessions) { 5299 wma_set_fw_wlan_feat_caps(SECURE_NAN); 5300 wma_debug("Secure NAN is enabled"); 5301 } 5302 } 5303 #endif /* WLAN_FEATURE_NAN */ 5304 5305 /** 5306 * wma_update_target_services() - update target services from wma handle 5307 * @wmi_handle: Unified wmi handle 5308 * @cfg: target services 5309 * 5310 * Return: none 5311 */ 5312 static inline void wma_update_target_services(struct wmi_unified *wmi_handle, 5313 struct wma_tgt_services *cfg) 5314 { 5315 /* STA power save */ 5316 cfg->sta_power_save = wmi_service_enabled(wmi_handle, 5317 wmi_service_sta_pwrsave); 5318 5319 /* Enable UAPSD */ 5320 cfg->uapsd = wmi_service_enabled(wmi_handle, 5321 wmi_service_ap_uapsd); 5322 5323 /* Update AP DFS service */ 5324 cfg->ap_dfs = wmi_service_enabled(wmi_handle, 5325 wmi_service_ap_dfs); 5326 5327 /* Enable 11AC */ 5328 cfg->en_11ac = wmi_service_enabled(wmi_handle, 5329 wmi_service_11ac); 5330 if (cfg->en_11ac) 5331 g_fw_wlan_feat_caps |= (1 << DOT11AC); 5332 5333 /* Proactive ARP response */ 5334 
g_fw_wlan_feat_caps |= (1 << WLAN_PERIODIC_TX_PTRN); 5335 5336 /* Enable WOW */ 5337 g_fw_wlan_feat_caps |= (1 << WOW); 5338 5339 /* ARP offload */ 5340 cfg->arp_offload = wmi_service_enabled(wmi_handle, 5341 wmi_service_arpns_offload); 5342 5343 /* Adaptive early-rx */ 5344 cfg->early_rx = wmi_service_enabled(wmi_handle, 5345 wmi_service_early_rx); 5346 5347 cfg->is_fw_therm_throt_supp = wmi_service_enabled(wmi_handle, 5348 wmi_service_tt); 5349 5350 #ifdef FEATURE_WLAN_SCAN_PNO 5351 /* PNO offload */ 5352 if (wmi_service_enabled(wmi_handle, wmi_service_nlo)) { 5353 cfg->pno_offload = true; 5354 g_fw_wlan_feat_caps |= (1 << PNO); 5355 } 5356 #endif /* FEATURE_WLAN_SCAN_PNO */ 5357 5358 #ifdef FEATURE_WLAN_EXTSCAN 5359 if (wmi_service_enabled(wmi_handle, wmi_service_extscan)) 5360 g_fw_wlan_feat_caps |= (1 << EXTENDED_SCAN); 5361 #endif /* FEATURE_WLAN_EXTSCAN */ 5362 cfg->lte_coex_ant_share = wmi_service_enabled(wmi_handle, 5363 wmi_service_lte_ant_share_support); 5364 #ifdef FEATURE_WLAN_TDLS 5365 /* Enable TDLS */ 5366 if (wmi_service_enabled(wmi_handle, wmi_service_tdls)) { 5367 cfg->en_tdls = 1; 5368 g_fw_wlan_feat_caps |= (1 << TDLS); 5369 } 5370 /* Enable advanced TDLS features */ 5371 if (wmi_service_enabled(wmi_handle, wmi_service_tdls_offchan)) { 5372 cfg->en_tdls_offchan = 1; 5373 g_fw_wlan_feat_caps |= (1 << TDLS_OFF_CHANNEL); 5374 } 5375 5376 cfg->en_tdls_uapsd_buf_sta = 5377 wmi_service_enabled(wmi_handle, 5378 wmi_service_tdls_uapsd_buffer_sta); 5379 cfg->en_tdls_uapsd_sleep_sta = 5380 wmi_service_enabled(wmi_handle, 5381 wmi_service_tdls_uapsd_sleep_sta); 5382 #endif /* FEATURE_WLAN_TDLS */ 5383 if (wmi_service_enabled 5384 (wmi_handle, wmi_service_beacon_offload)) 5385 cfg->beacon_offload = true; 5386 if (wmi_service_enabled 5387 (wmi_handle, wmi_service_sta_pmf_offload)) 5388 cfg->pmf_offload = true; 5389 #ifdef WLAN_FEATURE_ROAM_OFFLOAD 5390 /* Enable Roam Offload */ 5391 cfg->en_roam_offload = wmi_service_enabled(wmi_handle, 5392 
wmi_service_roam_ho_offload); 5393 #endif /* WLAN_FEATURE_ROAM_OFFLOAD */ 5394 #ifdef WLAN_FEATURE_NAN 5395 if (wmi_service_enabled(wmi_handle, wmi_service_nan)) 5396 g_fw_wlan_feat_caps |= (1 << NAN); 5397 wma_nan_set_pairing_feature(); 5398 #endif /* WLAN_FEATURE_NAN */ 5399 5400 if (wmi_service_enabled(wmi_handle, wmi_service_rtt)) 5401 g_fw_wlan_feat_caps |= (1 << RTT); 5402 5403 if (wmi_service_enabled(wmi_handle, 5404 wmi_service_tx_msdu_id_new_partition_support)) { 5405 wma_set_tx_partition_base(HTT_TX_IPA_NEW_MSDU_ID_SPACE_BEGIN); 5406 } else { 5407 wma_set_tx_partition_base(HTT_TX_IPA_MSDU_ID_SPACE_BEGIN); 5408 } 5409 5410 wma_he_update_tgt_services(wmi_handle, cfg); 5411 wma_eht_update_tgt_services(wmi_handle, cfg); 5412 5413 cfg->get_peer_info_enabled = 5414 wmi_service_enabled(wmi_handle, 5415 wmi_service_peer_stats_info); 5416 if (wmi_service_enabled(wmi_handle, wmi_service_fils_support)) 5417 cfg->is_fils_roaming_supported = true; 5418 5419 if (wmi_service_enabled(wmi_handle, wmi_service_mawc_support)) 5420 cfg->is_fw_mawc_capable = true; 5421 5422 if (wmi_service_enabled(wmi_handle, 5423 wmi_service_11k_neighbour_report_support)) 5424 cfg->is_11k_offload_supported = true; 5425 5426 if (wmi_service_enabled(wmi_handle, wmi_service_twt_requestor)) 5427 cfg->twt_requestor = true; 5428 if (wmi_service_enabled(wmi_handle, wmi_service_twt_responder)) 5429 cfg->twt_responder = true; 5430 if (wmi_service_enabled(wmi_handle, wmi_service_obss_scan)) 5431 cfg->obss_scan_offload = true; 5432 if (wmi_service_enabled(wmi_handle, wmi_service_beacon_reception_stats)) 5433 cfg->bcn_reception_stats = true; 5434 5435 if (wmi_service_enabled(wmi_handle, wmi_service_vdev_latency_config)) 5436 g_fw_wlan_feat_caps |= (1 << VDEV_LATENCY_CONFIG); 5437 if (wmi_service_enabled(wmi_handle, 5438 wmi_roam_scan_chan_list_to_host_support)) 5439 cfg->is_roam_scan_ch_to_host = true; 5440 5441 cfg->ll_stats_per_chan_rx_tx_time = 5442 wmi_service_enabled(wmi_handle, 5443 
wmi_service_ll_stats_per_chan_rx_tx_time); 5444 5445 wma_get_service_cap_club_get_sta_in_ll_stats_req(wmi_handle, cfg); 5446 5447 wma_get_igmp_offload_enable(wmi_handle, cfg); 5448 wma_get_tdls_ax_support(wmi_handle, cfg); 5449 wma_get_tdls_mlo_support(wmi_handle, cfg); 5450 wma_get_tdls_6g_support(wmi_handle, cfg); 5451 wma_get_tdls_wideband_support(wmi_handle, cfg); 5452 wma_get_dynamic_vdev_macaddr_support(wmi_handle, cfg); 5453 wma_get_service_cap_per_link_mlo_stats(wmi_handle, cfg); 5454 wma_get_n_link_mlo_support(wmi_handle, cfg); 5455 wma_get_mlo_tid_to_link_mapping_support(wmi_handle, cfg); 5456 } 5457 5458 /** 5459 * wma_update_target_ht_cap() - update ht capabality from wma handle 5460 * @tgt_hdl: pointer to structure target_psoc_info 5461 * @cfg: ht capability 5462 * 5463 * Return: none 5464 */ 5465 static inline void 5466 wma_update_target_ht_cap(struct target_psoc_info *tgt_hdl, 5467 struct wma_tgt_ht_cap *cfg) 5468 { 5469 int ht_cap_info; 5470 5471 ht_cap_info = target_if_get_ht_cap_info(tgt_hdl); 5472 /* RX STBC */ 5473 cfg->ht_rx_stbc = !!(ht_cap_info & WMI_HT_CAP_RX_STBC); 5474 5475 /* TX STBC */ 5476 cfg->ht_tx_stbc = !!(ht_cap_info & WMI_HT_CAP_TX_STBC); 5477 5478 /* MPDU density */ 5479 cfg->mpdu_density = ht_cap_info & WMI_HT_CAP_MPDU_DENSITY; 5480 5481 /* HT RX LDPC */ 5482 cfg->ht_rx_ldpc = !!(ht_cap_info & WMI_HT_CAP_LDPC); 5483 5484 /* HT SGI */ 5485 cfg->ht_sgi_20 = !!(ht_cap_info & WMI_HT_CAP_HT20_SGI); 5486 5487 cfg->ht_sgi_40 = !!(ht_cap_info & WMI_HT_CAP_HT40_SGI); 5488 5489 /* RF chains */ 5490 cfg->num_rf_chains = target_if_get_num_rf_chains(tgt_hdl); 5491 5492 wma_nofl_debug("ht_cap_info - %x ht_rx_stbc - %d, ht_tx_stbc - %d\n" 5493 "mpdu_density - %d ht_rx_ldpc - %d ht_sgi_20 - %d\n" 5494 "ht_sgi_40 - %d num_rf_chains - %d", 5495 ht_cap_info, 5496 cfg->ht_rx_stbc, cfg->ht_tx_stbc, cfg->mpdu_density, 5497 cfg->ht_rx_ldpc, cfg->ht_sgi_20, cfg->ht_sgi_40, 5498 cfg->num_rf_chains); 5499 5500 } 5501 5502 /** 5503 * 
wma_update_target_vht_cap() - update vht capabality from wma handle 5504 * @tgt_hdl: pointer to structure target_psoc_info 5505 * @cfg: vht capabality 5506 * 5507 * Return: none 5508 */ 5509 static inline void 5510 wma_update_target_vht_cap(struct target_psoc_info *tgt_hdl, 5511 struct wma_tgt_vht_cap *cfg) 5512 { 5513 int vht_cap_info = target_if_get_vht_cap_info(tgt_hdl); 5514 5515 if (vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_11454) 5516 cfg->vht_max_mpdu = WMI_VHT_CAP_MAX_MPDU_LEN_11454; 5517 else if (vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_7935) 5518 cfg->vht_max_mpdu = WMI_VHT_CAP_MAX_MPDU_LEN_7935; 5519 else 5520 cfg->vht_max_mpdu = 0; 5521 5522 5523 if (vht_cap_info & WMI_VHT_CAP_CH_WIDTH_80P80_160MHZ) { 5524 cfg->supp_chan_width = 1 << eHT_CHANNEL_WIDTH_80P80MHZ; 5525 cfg->supp_chan_width |= 1 << eHT_CHANNEL_WIDTH_160MHZ; 5526 } else if (vht_cap_info & WMI_VHT_CAP_CH_WIDTH_160MHZ) { 5527 cfg->supp_chan_width = 1 << eHT_CHANNEL_WIDTH_160MHZ; 5528 } else { 5529 cfg->supp_chan_width = 1 << eHT_CHANNEL_WIDTH_80MHZ; 5530 } 5531 5532 cfg->vht_rx_ldpc = vht_cap_info & WMI_VHT_CAP_RX_LDPC; 5533 5534 cfg->vht_short_gi_80 = vht_cap_info & WMI_VHT_CAP_SGI_80MHZ; 5535 cfg->vht_short_gi_160 = vht_cap_info & WMI_VHT_CAP_SGI_160MHZ; 5536 5537 cfg->vht_tx_stbc = vht_cap_info & WMI_VHT_CAP_TX_STBC; 5538 5539 cfg->vht_rx_stbc = 5540 (vht_cap_info & WMI_VHT_CAP_RX_STBC_1SS) | 5541 (vht_cap_info & WMI_VHT_CAP_RX_STBC_2SS) | 5542 (vht_cap_info & WMI_VHT_CAP_RX_STBC_3SS); 5543 5544 cfg->vht_max_ampdu_len_exp = (vht_cap_info & 5545 WMI_VHT_CAP_MAX_AMPDU_LEN_EXP) 5546 >> WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT; 5547 5548 cfg->vht_su_bformer = vht_cap_info & WMI_VHT_CAP_SU_BFORMER; 5549 5550 cfg->vht_su_bformee = vht_cap_info & WMI_VHT_CAP_SU_BFORMEE; 5551 5552 cfg->vht_mu_bformer = vht_cap_info & WMI_VHT_CAP_MU_BFORMER; 5553 5554 cfg->vht_mu_bformee = vht_cap_info & WMI_VHT_CAP_MU_BFORMEE; 5555 5556 cfg->vht_txop_ps = vht_cap_info & WMI_VHT_CAP_TXOP_PS; 5557 5558 
wma_nofl_debug("max_mpdu %d supp_chan_width %x rx_ldpc %x\n" 5559 "short_gi_80 %x tx_stbc %x rx_stbc %x txop_ps %x\n" 5560 "su_bformee %x mu_bformee %x max_ampdu_len_exp %d", 5561 cfg->vht_max_mpdu, cfg->supp_chan_width, cfg->vht_rx_ldpc, 5562 cfg->vht_short_gi_80, cfg->vht_tx_stbc, cfg->vht_rx_stbc, 5563 cfg->vht_txop_ps, cfg->vht_su_bformee, cfg->vht_mu_bformee, 5564 cfg->vht_max_ampdu_len_exp); 5565 } 5566 5567 /** 5568 * wma_update_supported_bands() - update supported bands from service ready ext 5569 * @supported_bands: Supported band given by FW through service ready ext params 5570 * @new_supported_bands: New supported band which needs to be updated by 5571 * this API which WMA layer understands 5572 * 5573 * This API will convert FW given supported band to enum which WMA layer 5574 * understands 5575 * 5576 * Return: QDF_STATUS 5577 */ 5578 static QDF_STATUS wma_update_supported_bands( 5579 WLAN_BAND_CAPABILITY supported_bands, 5580 WMI_PHY_CAPABILITY *new_supported_bands) 5581 { 5582 QDF_STATUS status = QDF_STATUS_SUCCESS; 5583 5584 if (!new_supported_bands) { 5585 wma_err("NULL new supported band variable"); 5586 return QDF_STATUS_E_FAILURE; 5587 } 5588 switch (supported_bands) { 5589 case WLAN_2G_CAPABILITY: 5590 *new_supported_bands |= WMI_11G_CAPABILITY; 5591 break; 5592 case WLAN_5G_CAPABILITY: 5593 *new_supported_bands |= WMI_11A_CAPABILITY; 5594 break; 5595 default: 5596 wma_err("wrong supported band"); 5597 status = QDF_STATUS_E_FAILURE; 5598 break; 5599 } 5600 return status; 5601 } 5602 5603 /** 5604 * wma_derive_ext_ht_cap() - Derive HT caps based on given value 5605 * @ht_cap: given pointer to HT caps which needs to be updated 5606 * @tx_chain: given tx chainmask value 5607 * @rx_chain: given rx chainmask value 5608 * @value: new HT cap info provided in form of bitmask 5609 * 5610 * This function takes the value provided in form of bitmask and decodes 5611 * it. 
After decoding, what ever value it gets, it takes the union(max) or 5612 * intersection(min) with previously derived values. 5613 * 5614 * Return: none 5615 * 5616 */ 5617 static void wma_derive_ext_ht_cap( 5618 struct wma_tgt_ht_cap *ht_cap, uint32_t value, 5619 uint32_t tx_chain, uint32_t rx_chain) 5620 { 5621 struct wma_tgt_ht_cap tmp = {0}; 5622 5623 if (!ht_cap) 5624 return; 5625 5626 if (!qdf_mem_cmp(ht_cap, &tmp, sizeof(struct wma_tgt_ht_cap))) { 5627 ht_cap->ht_rx_stbc = (!!(value & WMI_HT_CAP_RX_STBC)); 5628 ht_cap->ht_tx_stbc = (!!(value & WMI_HT_CAP_TX_STBC)); 5629 ht_cap->mpdu_density = (!!(value & WMI_HT_CAP_MPDU_DENSITY)); 5630 ht_cap->ht_rx_ldpc = (!!(value & WMI_HT_CAP_RX_LDPC)); 5631 ht_cap->ht_sgi_20 = (!!(value & WMI_HT_CAP_HT20_SGI)); 5632 ht_cap->ht_sgi_40 = (!!(value & WMI_HT_CAP_HT40_SGI)); 5633 ht_cap->num_rf_chains = 5634 QDF_MAX(wma_get_num_of_setbits_from_bitmask(tx_chain), 5635 wma_get_num_of_setbits_from_bitmask(rx_chain)); 5636 } else { 5637 ht_cap->ht_rx_stbc = QDF_MIN(ht_cap->ht_rx_stbc, 5638 (!!(value & WMI_HT_CAP_RX_STBC))); 5639 ht_cap->ht_tx_stbc = QDF_MAX(ht_cap->ht_tx_stbc, 5640 (!!(value & WMI_HT_CAP_TX_STBC))); 5641 ht_cap->mpdu_density = QDF_MIN(ht_cap->mpdu_density, 5642 (!!(value & WMI_HT_CAP_MPDU_DENSITY))); 5643 ht_cap->ht_rx_ldpc = QDF_MIN(ht_cap->ht_rx_ldpc, 5644 (!!(value & WMI_HT_CAP_RX_LDPC))); 5645 ht_cap->ht_sgi_20 = QDF_MIN(ht_cap->ht_sgi_20, 5646 (!!(value & WMI_HT_CAP_HT20_SGI))); 5647 ht_cap->ht_sgi_40 = QDF_MIN(ht_cap->ht_sgi_40, 5648 (!!(value & WMI_HT_CAP_HT40_SGI))); 5649 ht_cap->num_rf_chains = 5650 QDF_MAX(ht_cap->num_rf_chains, 5651 QDF_MAX(wma_get_num_of_setbits_from_bitmask( 5652 tx_chain), 5653 wma_get_num_of_setbits_from_bitmask( 5654 rx_chain))); 5655 } 5656 } 5657 5658 /** 5659 * wma_update_target_ext_ht_cap() - Update HT caps with given extended cap 5660 * @tgt_hdl - target psoc information 5661 * @ht_cap: HT cap structure to be filled 5662 * 5663 * This function loop through each hardware mode 
and for each hardware mode 5664 * again it loop through each MAC/PHY and pull the caps 2G and 5G specific 5665 * HT caps and derives the final cap. 5666 * 5667 * Return: none 5668 * 5669 */ 5670 static void wma_update_target_ext_ht_cap(struct target_psoc_info *tgt_hdl, 5671 struct wma_tgt_ht_cap *ht_cap) 5672 { 5673 int i, total_mac_phy_cnt; 5674 uint32_t ht_2g, ht_5g; 5675 struct wma_tgt_ht_cap tmp_ht_cap = {0}, tmp_cap = {0}; 5676 struct wlan_psoc_host_mac_phy_caps *mac_phy_cap; 5677 int num_hw_modes; 5678 5679 total_mac_phy_cnt = target_psoc_get_total_mac_phy_cnt(tgt_hdl); 5680 num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl); 5681 mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl); 5682 5683 if (!mac_phy_cap) { 5684 wma_err("Invalid MAC PHY capabilities handle"); 5685 return; 5686 } 5687 5688 /* 5689 * for legacy device extended cap might not even come, so in that case 5690 * don't overwrite legacy values 5691 */ 5692 if (!num_hw_modes) { 5693 wma_debug("No extended HT cap for current SOC"); 5694 return; 5695 } 5696 5697 for (i = 0; i < total_mac_phy_cnt; i++) { 5698 ht_2g = mac_phy_cap[i].ht_cap_info_2G; 5699 ht_5g = mac_phy_cap[i].ht_cap_info_5G; 5700 if (ht_2g) 5701 wma_derive_ext_ht_cap(&tmp_ht_cap, 5702 ht_2g, 5703 mac_phy_cap[i].tx_chain_mask_2G, 5704 mac_phy_cap[i].rx_chain_mask_2G); 5705 if (ht_5g) 5706 wma_derive_ext_ht_cap(&tmp_ht_cap, 5707 ht_5g, 5708 mac_phy_cap[i].tx_chain_mask_5G, 5709 mac_phy_cap[i].rx_chain_mask_5G); 5710 } 5711 5712 if (qdf_mem_cmp(&tmp_cap, &tmp_ht_cap, 5713 sizeof(struct wma_tgt_ht_cap))) { 5714 qdf_mem_copy(ht_cap, &tmp_ht_cap, 5715 sizeof(struct wma_tgt_ht_cap)); 5716 } 5717 5718 wma_nofl_debug("[ext ht cap] ht_rx_stbc - %d, ht_tx_stbc - %d\n" 5719 "mpdu_density - %d ht_rx_ldpc - %d ht_sgi_20 - %d\n" 5720 "ht_sgi_40 - %d num_rf_chains - %d", 5721 ht_cap->ht_rx_stbc, ht_cap->ht_tx_stbc, 5722 ht_cap->mpdu_density, ht_cap->ht_rx_ldpc, 5723 ht_cap->ht_sgi_20, ht_cap->ht_sgi_40, 5724 ht_cap->num_rf_chains); 5725 } 5726 5727 
/** 5728 * wma_derive_ext_vht_cap() - Derive VHT caps based on given value 5729 * @vht_cap: pointer to given VHT caps to be filled 5730 * @value: new VHT cap info provided in form of bitmask 5731 * 5732 * This function takes the value provided in form of bitmask and decodes 5733 * it. After decoding, what ever value it gets, it takes the union(max) or 5734 * intersection(min) with previously derived values. 5735 * 5736 * Return: none 5737 * 5738 */ 5739 static void wma_derive_ext_vht_cap( 5740 struct wma_tgt_vht_cap *vht_cap, uint32_t value) 5741 { 5742 struct wma_tgt_vht_cap tmp_cap = {0}; 5743 uint32_t tmp = 0; 5744 5745 if (!vht_cap) 5746 return; 5747 5748 if (!qdf_mem_cmp(vht_cap, &tmp_cap, 5749 sizeof(struct wma_tgt_vht_cap))) { 5750 if (value & WMI_VHT_CAP_MAX_MPDU_LEN_11454) 5751 vht_cap->vht_max_mpdu = WMI_VHT_CAP_MAX_MPDU_LEN_11454; 5752 else if (value & WMI_VHT_CAP_MAX_MPDU_LEN_7935) 5753 vht_cap->vht_max_mpdu = WMI_VHT_CAP_MAX_MPDU_LEN_7935; 5754 else 5755 vht_cap->vht_max_mpdu = 0; 5756 5757 if (value & WMI_VHT_CAP_CH_WIDTH_80P80_160MHZ) { 5758 vht_cap->supp_chan_width = 5759 1 << eHT_CHANNEL_WIDTH_80P80MHZ; 5760 vht_cap->supp_chan_width |= 5761 1 << eHT_CHANNEL_WIDTH_160MHZ; 5762 } else if (value & WMI_VHT_CAP_CH_WIDTH_160MHZ) { 5763 vht_cap->supp_chan_width = 5764 1 << eHT_CHANNEL_WIDTH_160MHZ; 5765 } else { 5766 vht_cap->supp_chan_width = 1 << eHT_CHANNEL_WIDTH_80MHZ; 5767 } 5768 vht_cap->vht_rx_ldpc = value & WMI_VHT_CAP_RX_LDPC; 5769 vht_cap->vht_short_gi_80 = value & WMI_VHT_CAP_SGI_80MHZ; 5770 vht_cap->vht_short_gi_160 = value & WMI_VHT_CAP_SGI_160MHZ; 5771 vht_cap->vht_tx_stbc = value & WMI_VHT_CAP_TX_STBC; 5772 vht_cap->vht_rx_stbc = 5773 (value & WMI_VHT_CAP_RX_STBC_1SS) | 5774 (value & WMI_VHT_CAP_RX_STBC_2SS) | 5775 (value & WMI_VHT_CAP_RX_STBC_3SS); 5776 vht_cap->vht_max_ampdu_len_exp = 5777 (value & WMI_VHT_CAP_MAX_AMPDU_LEN_EXP) >> 5778 WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT; 5779 vht_cap->vht_su_bformer = value & WMI_VHT_CAP_SU_BFORMER; 
5780 vht_cap->vht_su_bformee = value & WMI_VHT_CAP_SU_BFORMEE; 5781 vht_cap->vht_mu_bformer = value & WMI_VHT_CAP_MU_BFORMER; 5782 vht_cap->vht_mu_bformee = value & WMI_VHT_CAP_MU_BFORMEE; 5783 vht_cap->vht_txop_ps = value & WMI_VHT_CAP_TXOP_PS; 5784 } else { 5785 if (value & WMI_VHT_CAP_MAX_MPDU_LEN_11454) 5786 tmp = WMI_VHT_CAP_MAX_MPDU_LEN_11454; 5787 else if (value & WMI_VHT_CAP_MAX_MPDU_LEN_7935) 5788 tmp = WMI_VHT_CAP_MAX_MPDU_LEN_7935; 5789 else 5790 tmp = 0; 5791 vht_cap->vht_max_mpdu = QDF_MIN(vht_cap->vht_max_mpdu, tmp); 5792 5793 if ((value & WMI_VHT_CAP_CH_WIDTH_80P80_160MHZ)) { 5794 tmp = (1 << eHT_CHANNEL_WIDTH_80P80MHZ) | 5795 (1 << eHT_CHANNEL_WIDTH_160MHZ); 5796 } else if (value & WMI_VHT_CAP_CH_WIDTH_160MHZ) { 5797 tmp = 1 << eHT_CHANNEL_WIDTH_160MHZ; 5798 } else { 5799 tmp = 1 << eHT_CHANNEL_WIDTH_80MHZ; 5800 } 5801 vht_cap->supp_chan_width = 5802 QDF_MAX(vht_cap->supp_chan_width, tmp); 5803 vht_cap->vht_rx_ldpc = QDF_MIN(vht_cap->vht_rx_ldpc, 5804 value & WMI_VHT_CAP_RX_LDPC); 5805 vht_cap->vht_short_gi_80 = QDF_MAX(vht_cap->vht_short_gi_80, 5806 value & WMI_VHT_CAP_SGI_80MHZ); 5807 vht_cap->vht_short_gi_160 = QDF_MAX(vht_cap->vht_short_gi_160, 5808 value & WMI_VHT_CAP_SGI_160MHZ); 5809 vht_cap->vht_tx_stbc = QDF_MAX(vht_cap->vht_tx_stbc, 5810 value & WMI_VHT_CAP_TX_STBC); 5811 vht_cap->vht_rx_stbc = QDF_MIN(vht_cap->vht_rx_stbc, 5812 (value & WMI_VHT_CAP_RX_STBC_1SS) | 5813 (value & WMI_VHT_CAP_RX_STBC_2SS) | 5814 (value & WMI_VHT_CAP_RX_STBC_3SS)); 5815 vht_cap->vht_max_ampdu_len_exp = 5816 QDF_MIN(vht_cap->vht_max_ampdu_len_exp, 5817 (value & WMI_VHT_CAP_MAX_AMPDU_LEN_EXP) >> 5818 WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT); 5819 vht_cap->vht_su_bformer = QDF_MAX(vht_cap->vht_su_bformer, 5820 value & WMI_VHT_CAP_SU_BFORMER); 5821 vht_cap->vht_su_bformee = QDF_MAX(vht_cap->vht_su_bformee, 5822 value & WMI_VHT_CAP_SU_BFORMEE); 5823 vht_cap->vht_mu_bformer = QDF_MAX(vht_cap->vht_mu_bformer, 5824 value & WMI_VHT_CAP_MU_BFORMER); 5825 
vht_cap->vht_mu_bformee = QDF_MAX(vht_cap->vht_mu_bformee, 5826 value & WMI_VHT_CAP_MU_BFORMEE); 5827 vht_cap->vht_txop_ps = QDF_MIN(vht_cap->vht_txop_ps, 5828 value & WMI_VHT_CAP_TXOP_PS); 5829 } 5830 } 5831 5832 /** 5833 * wma_update_target_ext_vht_cap() - Update VHT caps with given extended cap 5834 * @tgt_hdl - target psoc information 5835 * @vht_cap: VHT cap structure to be filled 5836 * 5837 * This function loop through each hardware mode and for each hardware mode 5838 * again it loop through each MAC/PHY and pull the caps 2G and 5G specific 5839 * VHT caps and derives the final cap. 5840 * 5841 * Return: none 5842 * 5843 */ 5844 static void wma_update_target_ext_vht_cap(struct target_psoc_info *tgt_hdl, 5845 struct wma_tgt_vht_cap *vht_cap) 5846 { 5847 int i, num_hw_modes, total_mac_phy_cnt; 5848 uint32_t vht_cap_info_2g, vht_cap_info_5g; 5849 struct wma_tgt_vht_cap tmp_vht_cap = {0}, tmp_cap = {0}; 5850 struct wlan_psoc_host_mac_phy_caps *mac_phy_cap; 5851 uint32_t vht_mcs_10_11_supp = 0; 5852 5853 total_mac_phy_cnt = target_psoc_get_total_mac_phy_cnt(tgt_hdl); 5854 num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl); 5855 5856 mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl); 5857 if (!mac_phy_cap) { 5858 wma_err("Invalid MAC PHY capabilities handle"); 5859 return; 5860 } 5861 5862 /* 5863 * for legacy device extended cap might not even come, so in that case 5864 * don't overwrite legacy values 5865 */ 5866 if (!num_hw_modes) { 5867 wma_debug("No extended VHT cap for current SOC"); 5868 return; 5869 } 5870 5871 for (i = 0; i < total_mac_phy_cnt; i++) { 5872 vht_cap_info_2g = mac_phy_cap[i].vht_cap_info_2G; 5873 vht_cap_info_5g = mac_phy_cap[i].vht_cap_info_5G; 5874 if (vht_cap_info_2g) 5875 wma_derive_ext_vht_cap(&tmp_vht_cap, 5876 vht_cap_info_2g); 5877 if (vht_cap_info_5g) 5878 wma_derive_ext_vht_cap(&tmp_vht_cap, 5879 vht_cap_info_5g); 5880 if (WMI_GET_BITS(mac_phy_cap[i].vht_supp_mcs_5G, 16, 2) && 5881 WMI_VHT_MCS_NOTIFY_EXT_SS_GET(mac_phy_cap[i]. 
5882 vht_supp_mcs_5G)) 5883 vht_mcs_10_11_supp = 1; 5884 if (WMI_GET_BITS(mac_phy_cap[i].vht_supp_mcs_2G, 16, 2) && 5885 WMI_VHT_MCS_NOTIFY_EXT_SS_GET(mac_phy_cap[i]. 5886 vht_supp_mcs_2G)) 5887 vht_mcs_10_11_supp = 1; 5888 } 5889 5890 if (qdf_mem_cmp(&tmp_cap, &tmp_vht_cap, 5891 sizeof(struct wma_tgt_vht_cap))) { 5892 qdf_mem_copy(vht_cap, &tmp_vht_cap, 5893 sizeof(struct wma_tgt_vht_cap)); 5894 } 5895 vht_cap->vht_mcs_10_11_supp = vht_mcs_10_11_supp; 5896 wma_nofl_debug("[ext vhtcap] max_mpdu %d supp_chan_width %x rx_ldpc %x\n" 5897 "short_gi_80 %x tx_stbc %x rx_stbc %x txop_ps %x\n" 5898 "su_bformee %x mu_bformee %x max_ampdu_len_exp %d\n" 5899 "vht_mcs_10_11_supp %d", 5900 vht_cap->vht_max_mpdu, vht_cap->supp_chan_width, 5901 vht_cap->vht_rx_ldpc, vht_cap->vht_short_gi_80, 5902 vht_cap->vht_tx_stbc, vht_cap->vht_rx_stbc, 5903 vht_cap->vht_txop_ps, vht_cap->vht_su_bformee, 5904 vht_cap->vht_mu_bformee, vht_cap->vht_max_ampdu_len_exp, 5905 vht_cap->vht_mcs_10_11_supp); 5906 } 5907 5908 static void 5909 wma_update_sar_version(struct wlan_psoc_host_service_ext_param *param, 5910 struct wma_tgt_cfg *cfg) 5911 { 5912 cfg->sar_version = param ? 
param->sar_version : SAR_VERSION_1; 5913 } 5914 5915 /** 5916 * wma_update_hdd_band_cap() - update band cap which hdd understands 5917 * @supported_band: supported band which has been given by FW 5918 * @tgt_cfg: target configuration to be updated 5919 * @psoc: psoc ptr 5920 * 5921 * Convert WMA given supported band to enum which HDD understands 5922 * 5923 * Return: None 5924 */ 5925 static void wma_update_hdd_band_cap(WMI_PHY_CAPABILITY supported_band, 5926 struct wma_tgt_cfg *tgt_cfg, 5927 struct wlan_objmgr_psoc *psoc) 5928 { 5929 switch (supported_band) { 5930 case WMI_11G_CAPABILITY: 5931 case WMI_11NG_CAPABILITY: 5932 tgt_cfg->band_cap = BIT(REG_BAND_2G); 5933 break; 5934 case WMI_11A_CAPABILITY: 5935 case WMI_11NA_CAPABILITY: 5936 case WMI_11AC_CAPABILITY: 5937 tgt_cfg->band_cap = BIT(REG_BAND_5G); 5938 break; 5939 case WMI_11AG_CAPABILITY: 5940 case WMI_11NAG_CAPABILITY: 5941 case WMI_11AX_CAPABILITY: 5942 tgt_cfg->band_cap = (BIT(REG_BAND_2G) | BIT(REG_BAND_5G)); 5943 if (wlan_reg_is_6ghz_supported(psoc)) 5944 tgt_cfg->band_cap |= BIT(REG_BAND_6G); 5945 break; 5946 default: 5947 tgt_cfg->band_cap = (BIT(REG_BAND_2G) | 5948 BIT(REG_BAND_5G) | 5949 BIT(REG_BAND_6G)); 5950 } 5951 } 5952 5953 /** 5954 * wma_update_obss_detection_support() - update obss detection offload support 5955 * @wh: wma handle 5956 * @tgt_cfg: target configuration to be updated 5957 * 5958 * Update obss detection offload support based on service bit. 
5959 * 5960 * Return: None 5961 */ 5962 static void wma_update_obss_detection_support(tp_wma_handle wh, 5963 struct wma_tgt_cfg *tgt_cfg) 5964 { 5965 if (wmi_service_enabled(wh->wmi_handle, 5966 wmi_service_ap_obss_detection_offload)) 5967 tgt_cfg->obss_detection_offloaded = true; 5968 else 5969 tgt_cfg->obss_detection_offloaded = false; 5970 } 5971 5972 /** 5973 * wma_update_obss_color_collision_support() - update obss color collision 5974 * offload support 5975 * @wh: wma handle 5976 * @tgt_cfg: target configuration to be updated 5977 * 5978 * Update obss color collision offload support based on service bit. 5979 * 5980 * Return: None 5981 */ 5982 static void wma_update_obss_color_collision_support(tp_wma_handle wh, 5983 struct wma_tgt_cfg *tgt_cfg) 5984 { 5985 if (wmi_service_enabled(wh->wmi_handle, wmi_service_bss_color_offload)) 5986 tgt_cfg->obss_color_collision_offloaded = true; 5987 else 5988 tgt_cfg->obss_color_collision_offloaded = false; 5989 } 5990 5991 /** 5992 * wma_update_restricted_80p80_bw_support() - update restricted 80+80 support 5993 * @wh: wma handle 5994 * @tgt_cfg: target configuration to be updated 5995 * 5996 * Update restricted 80+80MHz (165MHz) BW support based on service bit. 5997 * 5998 * Return: None 5999 */ 6000 static void wma_update_restricted_80p80_bw_support(tp_wma_handle wh, 6001 struct wma_tgt_cfg *tgt_cfg) 6002 { 6003 if (wmi_service_enabled(wh->wmi_handle, 6004 wmi_service_bw_165mhz_support)) 6005 tgt_cfg->restricted_80p80_bw_supp = true; 6006 else 6007 tgt_cfg->restricted_80p80_bw_supp = false; 6008 } 6009 6010 /** 6011 * wma_update_aux_dev_caps() - update aux device capability 6012 * @tgt_hdl: target psoc information 6013 * @tgt_cfg: target configuration to be updated 6014 * 6015 * Update aux device capability to wma_tgt_cfg. 
 *
 * Return: None
 */
static void wma_update_aux_dev_caps(struct target_psoc_info *tgt_hdl,
				    struct wma_tgt_cfg *tgt_cfg)
{
	uint8_t cap_idx;
	uint32_t num_aux_dev_caps;
	struct wlan_psoc_host_aux_dev_caps *aux_dev_caps;
	enum wmi_host_hw_mode_config_type hw_mode_id;

	num_aux_dev_caps = tgt_hdl->info.service_ext2_param.num_aux_dev_caps;
	aux_dev_caps = tgt_hdl->info.aux_dev_caps;

	for (cap_idx = 0; cap_idx < num_aux_dev_caps; cap_idx++) {
		/*current only support AUX0*/
		if (aux_dev_caps[cap_idx].aux_index != 0)
			continue;

		/* One capability entry per hw mode; drop entries whose hw
		 * mode id is outside the table bounds.
		 */
		hw_mode_id = aux_dev_caps[cap_idx].hw_mode_id;
		if (hw_mode_id >= WMI_HOST_HW_MODE_MAX) {
			wma_err("invalid hw mode id %d.", hw_mode_id);
			continue;
		}
		tgt_cfg->wma_aux0_dev_caps[hw_mode_id].supported_modes_bitmap =
			aux_dev_caps[cap_idx].supported_modes_bitmap;
		tgt_cfg->wma_aux0_dev_caps[hw_mode_id].listen_pdev_id_map =
			aux_dev_caps[cap_idx].listen_pdev_id_map;
		tgt_cfg->wma_aux0_dev_caps[hw_mode_id].emlsr_pdev_id_map =
			aux_dev_caps[cap_idx].emlsr_pdev_id_map;
	}
}

#ifdef WLAN_SUPPORT_GREEN_AP
/**
 * wma_green_ap_register_handlers() - register Green AP WMI event handlers
 * @wma_handle: wma handle
 *
 * Registers the EGAP event handler only when WMI_SERVICE_EGAP is set in
 * the service bitmap; the ll_ps event handler is registered
 * unconditionally.
 *
 * Return: None
 */
static void wma_green_ap_register_handlers(tp_wma_handle wma_handle)
{
	if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
				   WMI_SERVICE_EGAP))
		target_if_green_ap_register_egap_event_handler(
					wma_handle->pdev);

	target_if_green_ap_register_ll_ps_event_handler(wma_handle->pdev);

}
#else
/* Stub when Green AP support is compiled out */
static inline void wma_green_ap_register_handlers(tp_wma_handle wma_handle)
{
}
#endif

#ifdef WLAN_FEATURE_NAN
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * wma_update_mlo_sta_nan_ndi_target_caps() - update MLO STA + NAN NDI
 * concurrency capability from the WMI service bitmap
 * @wma_handle: wma handle
 * @tgt_cfg: target configuration to be filled
 *
 * Return: None
 */
static void wma_update_mlo_sta_nan_ndi_target_caps(tp_wma_handle wma_handle,
						   struct wma_tgt_cfg *tgt_cfg)
{
	if (wmi_service_enabled(wma_handle->wmi_handle,
				wmi_service_mlo_sta_nan_ndi_support))
		tgt_cfg->nan_caps.mlo_sta_nan_ndi_allowed = 1;
}
#else
/* Stub when 11be MLO support is compiled out */
static
void wma_update_mlo_sta_nan_ndi_target_caps(tp_wma_handle wma_handle,
					    struct wma_tgt_cfg *tgt_cfg)
{
}
#endif /* WLAN_FEATURE_11BE_MLO */

/**
 * wma_update_nan_target_caps() - fill NAN capability flags from the WMI
 * service bitmap into the target config passed up to HDD
 * @wma_handle: wma handle
 * @tgt_cfg: target configuration to be filled
 *
 * Return: None
 */
static void wma_update_nan_target_caps(tp_wma_handle wma_handle,
				       struct wma_tgt_cfg *tgt_cfg)
{
	if (wmi_service_enabled(wma_handle->wmi_handle,
				wmi_service_nan_disable_support))
		tgt_cfg->nan_caps.nan_conc_control = 1;

	if (wmi_service_enabled(wma_handle->wmi_handle,
				wmi_service_nan_dbs_support))
		tgt_cfg->nan_caps.nan_dbs_supported = 1;

	if (wmi_service_enabled(wma_handle->wmi_handle,
				wmi_service_ndi_dbs_support))
		tgt_cfg->nan_caps.ndi_dbs_supported = 1;

	if (wmi_service_enabled(wma_handle->wmi_handle,
				wmi_service_nan_sap_support))
		tgt_cfg->nan_caps.nan_sap_supported = 1;

	if (wmi_service_enabled(wma_handle->wmi_handle,
				wmi_service_ndi_sap_support))
		tgt_cfg->nan_caps.ndi_sap_supported = 1;

	if (wmi_service_enabled(wma_handle->wmi_handle, wmi_service_nan_vdev))
		tgt_cfg->nan_caps.nan_vdev_allowed = 1;

	if (wmi_service_enabled(wma_handle->wmi_handle,
				wmi_service_sta_nan_ndi_four_port))
		tgt_cfg->nan_caps.sta_nan_ndi_ndi_allowed = 1;

	if (wmi_service_enabled(wma_handle->wmi_handle,
				wmi_service_ndi_txbf_support))
		tgt_cfg->nan_caps.ndi_txbf_supported = 1;

	wma_update_mlo_sta_nan_ndi_target_caps(wma_handle, tgt_cfg);
}
#else
/* Stub when NAN support is compiled out */
static void wma_update_nan_target_caps(tp_wma_handle wma_handle,
				       struct wma_tgt_cfg *tgt_cfg)
{
}
#endif

/**
 * wma_convert_chainmask_to_chain() - count the number of chains in a mask
 * @chainmask: chain mask bitmap
 *
 * Population count via the Kernighan bit-clearing loop (each iteration
 * clears the lowest set bit).
 *
 * Return: number of set bits in @chainmask
 */
static uint8_t
wma_convert_chainmask_to_chain(uint8_t chainmask)
{
	uint8_t num_chains = 0;

	while (chainmask) {
		chainmask &= (chainmask - 1);
		num_chains++;
	}

	return num_chains;
}

/**
 * wma_fill_chain_cfg() - fold one PHY's tx/rx chain counts into the MAC
 * context maxima
 * @tgt_hdl: target psoc information
 * @phy: index into the target's mac_phy_cap array
 *
 * Raises mac_ctx->fw_chain_cfg.max_{tx,rx}_chains_{2g,5g} to the chain
 * counts derived from this PHY's chain masks; never lowers them, so the
 * caller can iterate over all PHYs to take the max.
 *
 * Return: None
 */
static void
wma_fill_chain_cfg(struct target_psoc_info *tgt_hdl,
		   uint8_t phy)
{
	struct mac_context *mac_ctx;
	uint8_t num_chain;
	struct wlan_psoc_host_mac_phy_caps *mac_phy_cap =
			tgt_hdl->info.mac_phy_cap;

	mac_ctx = cds_get_context(QDF_MODULE_ID_PE);
	if (!mac_ctx) {
		wma_err("fill chain cfg failed as mac_ctx is NULL");
		return;
	}

	num_chain = wma_convert_chainmask_to_chain(mac_phy_cap[phy].
						   tx_chain_mask_2G);

	if (num_chain > mac_ctx->fw_chain_cfg.max_tx_chains_2g)
		mac_ctx->fw_chain_cfg.max_tx_chains_2g = num_chain;

	num_chain = wma_convert_chainmask_to_chain(mac_phy_cap[phy].
						   tx_chain_mask_5G);

	if (num_chain > mac_ctx->fw_chain_cfg.max_tx_chains_5g)
		mac_ctx->fw_chain_cfg.max_tx_chains_5g = num_chain;

	num_chain = wma_convert_chainmask_to_chain(mac_phy_cap[phy].
						   rx_chain_mask_2G);

	if (num_chain > mac_ctx->fw_chain_cfg.max_rx_chains_2g)
		mac_ctx->fw_chain_cfg.max_rx_chains_2g = num_chain;

	num_chain = wma_convert_chainmask_to_chain(mac_phy_cap[phy].
						   rx_chain_mask_5G);

	if (num_chain > mac_ctx->fw_chain_cfg.max_rx_chains_5g)
		mac_ctx->fw_chain_cfg.max_rx_chains_5g = num_chain;
}

/**
 * wma_update_mlme_related_tgt_caps() - push MLME-relevant firmware
 * capabilities to the mlme component
 * @psoc: psoc object
 * @wmi_handle: wmi handle used to query the service bitmap
 *
 * Return: None
 */
static void wma_update_mlme_related_tgt_caps(struct wlan_objmgr_psoc *psoc,
					     struct wmi_unified *wmi_handle)
{
	struct mlme_tgt_caps mlme_tgt_cfg;

	mlme_tgt_cfg.data_stall_recovery_fw_support =
		wmi_service_enabled(wmi_handle,
				    wmi_service_data_stall_recovery_support);

	mlme_tgt_cfg.bigtk_support =
		wmi_service_enabled(wmi_handle, wmi_beacon_protection_support);

	mlme_tgt_cfg.stop_all_host_scan_support =
		wmi_service_enabled(wmi_handle,
				    wmi_service_host_scan_stop_vdev_all);
	mlme_tgt_cfg.dual_sta_roam_fw_support =
		wmi_service_enabled(wmi_handle,
				    wmi_service_dual_sta_roam_support);

	mlme_tgt_cfg.ocv_support =
		wmi_service_enabled(wmi_handle,
				    wmi_service_ocv_support);

	wma_debug("beacon protection support %d, ocv support %d",
		  mlme_tgt_cfg.bigtk_support, mlme_tgt_cfg.ocv_support);

	/* Call this at last only after filling all the tgt caps */
	wlan_mlme_update_cfg_with_tgt_caps(psoc, &mlme_tgt_cfg);
}

/**
 * wma_update_mlme_aux_dev_caps() - update aux device capability to mlme
 * @psoc: psoc handle
 * @tgt_hdl: target psoc information
 *
 * Update aux device capability to mlme.
 *
 * Return: None
 */
static void wma_update_mlme_aux_dev_caps(struct wlan_objmgr_psoc *psoc,
					 struct target_psoc_info *tgt_hdl)
{
	uint8_t cap_idx;
	uint32_t num_aux_dev_caps;
	struct wlan_psoc_host_aux_dev_caps *aux_dev_caps;
	enum wmi_host_hw_mode_config_type hw_mode_id;
	struct wlan_mlme_aux_dev_caps
		wlan_mlme_aux0_dev_caps[WLAN_MLME_HW_MODE_MAX] = {0};

	/* The local table is indexed by WMI hw mode id; warn if the two
	 * enums ever diverge in size.
	 */
	if (WMI_HOST_HW_MODE_MAX != WLAN_MLME_HW_MODE_MAX)
		wma_err("struct define mismatch, pls fix it.");

	num_aux_dev_caps =
		tgt_hdl->info.service_ext2_param.num_aux_dev_caps;
	aux_dev_caps = tgt_hdl->info.aux_dev_caps;

	for (cap_idx = 0; cap_idx < num_aux_dev_caps; cap_idx++) {
		/*current only support AUX0*/
		if (aux_dev_caps[cap_idx].aux_index != 0)
			continue;

		hw_mode_id = aux_dev_caps[cap_idx].hw_mode_id;
		if (hw_mode_id >= WMI_HOST_HW_MODE_MAX) {
			wma_err("invalid hw mode id %d.", hw_mode_id);
			continue;
		}
		wlan_mlme_aux0_dev_caps[hw_mode_id].supported_modes_bitmap =
			aux_dev_caps[cap_idx].supported_modes_bitmap;
		wlan_mlme_aux0_dev_caps[hw_mode_id].listen_pdev_id_map =
			aux_dev_caps[cap_idx].listen_pdev_id_map;
		wlan_mlme_aux0_dev_caps[hw_mode_id].emlsr_pdev_id_map =
			aux_dev_caps[cap_idx].emlsr_pdev_id_map;
	}

	wlan_mlme_update_aux_dev_caps(psoc, wlan_mlme_aux0_dev_caps);
}

/**
 * wma_is_dbs_mandatory() - check whether DBS is mandatory for this target
 * @psoc: psoc object
 * @tgt_hdl: target psoc information
 *
 * DBS is mandatory when the hw mode list advertises DBS but MAC0 (phy 0)
 * cannot serve both 2.4 GHz and 5 GHz on its own.
 *
 * Return: true if DBS is mandatory, false otherwise
 */
static bool
wma_is_dbs_mandatory(struct wlan_objmgr_psoc *psoc,
		     struct target_psoc_info *tgt_hdl)
{
	uint8_t i, total_mac_phy_cnt;
	struct
 wlan_psoc_host_mac_phy_caps *mac_cap, *mac_phy_cap;
	uint8_t supported_band = 0;

	if (!policy_mgr_find_if_fw_supports_dbs(psoc) ||
	    !policy_mgr_find_if_hwlist_has_dbs(psoc)) {
		wma_debug("DBS is not mandatory");
		return false;
	}

	total_mac_phy_cnt = target_psoc_get_total_mac_phy_cnt(tgt_hdl);
	mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl);
	if (!mac_phy_cap) {
		wma_err("Invalid MAC PHY capabilities handle");
		return false;
	}

	/* Union of the bands phy 0 supports across every hw mode entry */
	for (i = 0; i < total_mac_phy_cnt; i++) {
		mac_cap = &mac_phy_cap[i];
		if (mac_cap && (mac_cap->phy_id == 0))
			supported_band |= mac_cap->supported_bands;
	}

	/* If Mac0 supports both the bands then DBS is not mandatory */
	if (supported_band & WLAN_2G_CAPABILITY &&
	    supported_band & WLAN_5G_CAPABILITY) {
		wma_debug("Mac0 supports both bands DBS is optional");
		return false;
	}

	wma_info("MAC0 does not support both bands %d DBS is mandatory",
		 supported_band);

	return true;
}

/**
 * wma_update_hdd_cfg() - update HDD config
 * @wma_handle: wma handle
 *
 * Aggregates the firmware/target capabilities (regulatory, HT/VHT/HE/EHT,
 * board info, NAN, TWT, chain config, ...) into a wma_tgt_cfg and hands it
 * to HDD through the registered tgt_cfg_update_cb callback.
 *
 * Return: Zero on success err number on failure
 */
static int wma_update_hdd_cfg(tp_wma_handle wma_handle)
{
	struct wma_tgt_cfg tgt_cfg;
	void *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
	target_resource_config *wlan_res_cfg;
	struct wlan_psoc_host_service_ext_param *service_ext_param;
	struct target_psoc_info *tgt_hdl;
	struct wmi_unified *wmi_handle;
	uint8_t i;
	int ret;

	wma_debug("Enter");

	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
	if (!tgt_hdl) {
		wma_err("target psoc info is NULL");
		return -EINVAL;
	}

	wlan_res_cfg = target_psoc_get_wlan_res_cfg(tgt_hdl);
	if (!wlan_res_cfg) {
		wma_err("wlan_res_cfg is null");
		return -EINVAL;
	}

	/* May legitimately be NULL; guarded where it is dereferenced */
	service_ext_param =
			target_psoc_get_service_ext_param(tgt_hdl);
	wmi_handle = get_wmi_unified_hdl_from_psoc(wma_handle->psoc);
	if (wmi_validate_handle(wmi_handle))
		return -EINVAL;

	wma_update_mlme_related_tgt_caps(wma_handle->psoc, wmi_handle);
	wma_update_mlme_aux_dev_caps(wma_handle->psoc, tgt_hdl);

	if (wmi_service_enabled(wmi_handle, wmi_service_peer_create_conf))
		wlan_psoc_nif_fw_ext_cap_set(wma_handle->psoc,
					     WLAN_SOC_F_PEER_CREATE_RESP);

	qdf_mem_zero(&tgt_cfg, sizeof(struct wma_tgt_cfg));

	tgt_cfg.sub_20_support = wma_handle->sub_20_support;
	tgt_cfg.reg_domain = wma_handle->reg_cap.eeprom_rd;
	tgt_cfg.eeprom_rd_ext = wma_handle->reg_cap.eeprom_rd_ext;

	tgt_cfg.max_intf_count = wlan_res_cfg->num_vdevs;
	policy_mgr_set_max_conc_cxns(wma_handle->psoc,
				     wlan_res_cfg->num_max_active_vdevs);

	qdf_mem_copy(tgt_cfg.hw_macaddr.bytes, wma_handle->hwaddr,
		     ATH_MAC_LEN);

	wma_update_target_services(wmi_handle, &tgt_cfg.services);
	wma_update_target_ht_cap(tgt_hdl, &tgt_cfg.ht_cap);
	wma_update_target_vht_cap(tgt_hdl, &tgt_cfg.vht_cap);
	/*
	 * This will overwrite the structure filled by wma_update_target_ht_cap
	 * and wma_update_target_vht_cap APIs.
	 */
	wma_update_target_ext_ht_cap(tgt_hdl, &tgt_cfg.ht_cap);
	wma_update_target_ext_vht_cap(tgt_hdl, &tgt_cfg.vht_cap);

	wma_update_target_ext_he_cap(tgt_hdl, &tgt_cfg);
	wma_update_target_ext_eht_cap(tgt_hdl, &tgt_cfg);

	tgt_cfg.target_fw_version = target_if_get_fw_version(tgt_hdl);
	if (service_ext_param)
		tgt_cfg.target_fw_vers_ext =
				service_ext_param->fw_build_vers_ext;

	tgt_cfg.hw_bd_id = wma_handle->hw_bd_id;
	tgt_cfg.hw_bd_info.bdf_version = wma_handle->hw_bd_info[BDF_VERSION];
	tgt_cfg.hw_bd_info.ref_design_id =
		wma_handle->hw_bd_info[REF_DESIGN_ID];
	tgt_cfg.hw_bd_info.customer_id = wma_handle->hw_bd_info[CUSTOMER_ID];
	tgt_cfg.hw_bd_info.project_id = wma_handle->hw_bd_info[PROJECT_ID];
	tgt_cfg.hw_bd_info.board_data_rev =
		wma_handle->hw_bd_info[BOARD_DATA_REV];

#ifdef WLAN_FEATURE_LPSS
	tgt_cfg.lpss_support = wma_handle->lpss_support;
#endif /* WLAN_FEATURE_LPSS */
	tgt_cfg.ap_arpns_support = wma_handle->ap_arpns_support;
	tgt_cfg.dfs_cac_offload = wma_handle->is_dfs_offloaded;
	tgt_cfg.rcpi_enabled = wma_handle->rcpi_enabled;
	wma_update_hdd_band_cap(target_if_get_phy_capability(tgt_hdl),
				&tgt_cfg, wma_handle->psoc);
	wma_update_sar_version(service_ext_param, &tgt_cfg);
	tgt_cfg.fine_time_measurement_cap =
		target_if_get_wmi_fw_sub_feat_caps(tgt_hdl);
	/* Reserve TLV headroom out of the max WMI message length */
	tgt_cfg.wmi_max_len = wmi_get_max_msg_len(wma_handle->wmi_handle)
			      - WMI_TLV_HEADROOM;
	tgt_cfg.tx_bfee_8ss_enabled = wma_handle->tx_bfee_8ss_enabled;
	tgt_cfg.dynamic_nss_chains_support =
				wma_handle->dynamic_nss_chains_support;
	wma_update_obss_detection_support(wma_handle, &tgt_cfg);
	wma_update_obss_color_collision_support(wma_handle, &tgt_cfg);
	wma_update_hdd_cfg_ndp(wma_handle, &tgt_cfg);
	wma_update_nan_target_caps(wma_handle, &tgt_cfg);
	wma_update_bcast_twt_support(wma_handle, &tgt_cfg);
	wma_update_twt_tgt_cap(wma_handle, &tgt_cfg);
	wma_update_restricted_80p80_bw_support(wma_handle, &tgt_cfg);
	wma_update_aux_dev_caps(tgt_hdl, &tgt_cfg);
	/* Take the max of chains supported by FW, which will limit nss */
	for (i = 0; i < tgt_hdl->info.total_mac_phy_cnt; i++)
		wma_fill_chain_cfg(tgt_hdl, i);

	ret = wma_handle->tgt_cfg_update_cb(hdd_ctx, &tgt_cfg);
	if (ret)
		return -EINVAL;

	wma_green_ap_register_handlers(wma_handle);

	return ret;
}

/**
 * wma_init_scan_fw_mode_config() - Initialize scan/fw mode config
 * @psoc: Object manager psoc
 * @scan_config: Scam mode configuration
 * @fw_config: FW mode configuration
 *
 * Enables all the valid bits of concurrent_scan_config_bits and
 * fw_mode_config_bits.
 *
 * Return: None
 */
static void wma_init_scan_fw_mode_config(struct wlan_objmgr_psoc *psoc,
					 uint32_t scan_config,
					 uint32_t fw_config)
{
	wma_debug("Enter");

	if (!psoc) {
		wma_err("obj psoc is NULL");
		return;
	}

	policy_mgr_init_dbs_config(psoc, scan_config, fw_config);
	policy_mgr_init_sbs_fw_config(psoc, fw_config);

	wma_debug("Exit");
}

/**
 * wma_set_pmo_caps() - populate PMO offload capabilities from the WMI
 * service bitmap and push them to the pmo component
 * @psoc: psoc object
 *
 * Return: None
 */
static void wma_set_pmo_caps(struct wlan_objmgr_psoc *psoc)
{
	QDF_STATUS status;
	tp_wma_handle wma;
	struct pmo_device_caps caps;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma)
		return;

	caps.arp_ns_offload =
		wmi_service_enabled(wma->wmi_handle, wmi_service_arpns_offload);
	caps.apf =
		wmi_service_enabled(wma->wmi_handle, wmi_service_apf_offload);
	caps.packet_filter =
		wmi_service_enabled(wma->wmi_handle,
				    wmi_service_packet_filter_offload);
	caps.unified_wow =
		wmi_service_enabled(wma->wmi_handle,
				    wmi_service_unified_wow_capability);
	caps.li_offload =
		wmi_service_enabled(wma->wmi_handle,
				    wmi_service_listen_interval_offload_support
				    );

	status = ucfg_pmo_psoc_set_caps(psoc, &caps);
	if (QDF_IS_STATUS_ERROR(status))
		wma_err("Failed to set PMO capabilities; status:%d", status);
}

/**
 * wma_set_mlme_caps() - Populate the MLME related target capabilities to the
 * mlme component
 * @psoc: Pointer to psoc object
 *
 * Return: None
 */
static void wma_set_mlme_caps(struct wlan_objmgr_psoc *psoc)
{
	tp_wma_handle wma;
	bool tgt_cap;
	uint32_t akm_bitmap = 0;
	QDF_STATUS status;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma)
		return;

	tgt_cap = wmi_service_enabled(wma->wmi_handle,
				      wmi_service_adaptive_11r_support);

	status = ucfg_mlme_set_tgt_adaptive_11r_cap(psoc, tgt_cap);
	if (QDF_IS_STATUS_ERROR(status))
		wma_err("Failed to set adaptive 11r cap");

	/* Build the roam-capable AKM bitmap, one WMI service per AKM */
	tgt_cap = wmi_service_enabled(wma->wmi_handle,
				      wmi_service_wpa3_ft_sae_support);
	if (tgt_cap)
		akm_bitmap |= (1 << AKM_FT_SAE);

	tgt_cap = wmi_service_enabled(wma->wmi_handle,
				      wmi_service_wpa3_ft_suite_b_support);
	if (tgt_cap)
		akm_bitmap |= (1 << AKM_FT_SUITEB_SHA384);

	tgt_cap = wmi_service_enabled(wma->wmi_handle,
				      wmi_service_ft_fils);
	if (tgt_cap)
		akm_bitmap |= (1 << AKM_FT_FILS);

	tgt_cap = wmi_service_enabled(wma->wmi_handle,
				      wmi_service_owe_roam_support);
	if (tgt_cap)
		akm_bitmap |= (1 << AKM_OWE);

	tgt_cap = wmi_service_enabled(wma->wmi_handle,
				      wmi_service_sae_roam_support);
	if (tgt_cap)
		akm_bitmap |= (1 << AKM_SAE);

	tgt_cap = wmi_service_enabled(wma->wmi_handle,
				      wmi_service_suiteb_roam_support);
	if (tgt_cap)
		akm_bitmap |= (1 << AKM_SUITEB);

	tgt_cap = wmi_service_enabled(wma->wmi_handle,
				      wmi_service_wpa3_sha384_roam_support);
	if (tgt_cap)
		akm_bitmap |= (1 << AKM_SAE_EXT);

	status = mlme_set_tgt_wpa3_roam_cap(psoc, akm_bitmap);
	if (QDF_IS_STATUS_ERROR(status))
		wma_err("Failed to set sae roam support");
}

#ifdef WLAN_FEATURE_BIG_DATA_STATS
/**
 * wma_is_big_data_support_enable() - whether FW advertises big data stats
 * @wmi_handle: wmi handle
 *
 * Return: true if wmi_service_big_data_support is enabled
 */
static bool wma_is_big_data_support_enable(struct wmi_unified *wmi_handle)
{
	return wmi_service_enabled(wmi_handle, wmi_service_big_data_support);
}
#else
/* Stub when big data stats support is compiled out */
static bool wma_is_big_data_support_enable(struct wmi_unified *wmi_handle)
{
	return false;
}
#endif

/**
 * wma_set_mc_cp_caps() - Populate mc cp component related capabilities
 * to the mc cp component
 *
 * @psoc: Pointer to psoc object
 *
 * Return: None
 */
static void wma_set_mc_cp_caps(struct wlan_objmgr_psoc *psoc)
{
	tp_wma_handle wma;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma)
		return;

	if (wma_is_big_data_support_enable(wma->wmi_handle))
		ucfg_mc_cp_set_big_data_fw_support(psoc, true);
	else
		ucfg_mc_cp_set_big_data_fw_support(psoc, false);
}

#ifdef THERMAL_STATS_SUPPORT
/**
 * wma_set_thermal_stats_fw_cap() - record FW thermal stats capability
 * @wma: wma handle
 * @cap: fwol capability info to be filled
 *
 * Return: None
 */
static void wma_set_thermal_stats_fw_cap(tp_wma_handle wma,
					 struct wlan_fwol_capability_info *cap)
{
	cap->fw_thermal_stats_cap = wmi_service_enabled(wma->wmi_handle,
				wmi_service_thermal_stats_temp_range_supported);
}
#else
/* Stub when thermal stats support is compiled out */
static void wma_set_thermal_stats_fw_cap(tp_wma_handle wma,
					 struct wlan_fwol_capability_info *cap)
{
}
#endif

/**
 * wma_set_fwol_caps() - Populate fwol component related capabilities
 * to the fwol component
 *
 * @psoc: Pointer to psoc object
 *
 * NOTE(review): cap_info is stack-allocated and only the thermal stats
 * field is filled before it is pushed to fwol; if
 * wlan_fwol_capability_info ever grows more members, they would be passed
 * uninitialized here — confirm and consider zero-initializing.
 *
 * Return: None
 */
static void wma_set_fwol_caps(struct wlan_objmgr_psoc *psoc)
{
	tp_wma_handle wma;
	struct wlan_fwol_capability_info cap_info;
	wma = cds_get_context(QDF_MODULE_ID_WMA);

	if (!wma) {
		wma_err_rl("wma Null");
		return;
	}
	if (!psoc) {
		wma_err_rl("psoc Null");
		return;
	}

	wma_set_thermal_stats_fw_cap(wma, &cap_info);
	ucfg_fwol_update_fw_cap_info(psoc, &cap_info);
}

/**
 * wma_set_component_caps() - push FW capabilities to each host component
 * @psoc: psoc object
 *
 * Return: None
 */
static void wma_set_component_caps(struct wlan_objmgr_psoc *psoc)
{
	wma_set_pmo_caps(psoc);
	wma_set_mlme_caps(psoc);
	wma_set_mc_cp_caps(psoc);
	wma_set_fwol_caps(psoc);
}

#if defined(WLAN_FEATURE_GTK_OFFLOAD) && defined(WLAN_POWER_MANAGEMENT_OFFLOAD)
/**
 * wma_register_gtk_offload_event() - register the GTK offload status
 * WMI event handler when the service is offered by FW
 * @wma_handle: wma handle
 *
 * Return: QDF_STATUS of the registration; QDF_STATUS_E_FAILURE when the
 *         handle is invalid or the GTK offload service is not enabled
 */
static QDF_STATUS wma_register_gtk_offload_event(tp_wma_handle wma_handle)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	if (wma_validate_handle(wma_handle))
		return QDF_STATUS_E_FAILURE;

	if (wmi_service_enabled(wma_handle->wmi_handle,
				wmi_service_gtk_offload)) {
		status = wmi_unified_register_event_handler(
					wma_handle->wmi_handle,
					wmi_gtk_offload_status_event_id,
					target_if_pmo_gtk_offload_status_event,
					WMA_RX_WORK_CTX);
	}
	return status;
}
#else
/* Stub when GTK offload / power management offload is compiled out */
static QDF_STATUS wma_register_gtk_offload_event(tp_wma_handle wma_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_FEATURE_GTK_OFFLOAD && WLAN_POWER_MANAGEMENT_OFFLOAD */

/**
 * wma_rx_service_ready_event() - event handler to process
 * wmi rx service ready event.
6650 * @handle: wma handle 6651 * @cmd_param_info: command params info 6652 * 6653 * Return: none 6654 */ 6655 int wma_rx_service_ready_event(void *handle, uint8_t *cmd_param_info, 6656 uint32_t length) 6657 { 6658 tp_wma_handle wma_handle = (tp_wma_handle) handle; 6659 WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; 6660 wmi_service_ready_event_fixed_param *ev; 6661 QDF_STATUS status; 6662 uint32_t *ev_wlan_dbs_hw_mode_list; 6663 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 6664 struct target_psoc_info *tgt_hdl; 6665 struct wlan_psoc_target_capability_info *tgt_cap_info; 6666 target_resource_config *wlan_res_cfg; 6667 struct wmi_unified *wmi_handle; 6668 uint32_t *service_bitmap; 6669 6670 wma_debug("Enter"); 6671 6672 if (wma_validate_handle(wma_handle)) 6673 return -EINVAL; 6674 6675 tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc); 6676 if (!tgt_hdl) { 6677 wma_err("target psoc info is NULL"); 6678 return -EINVAL; 6679 } 6680 6681 wlan_res_cfg = target_psoc_get_wlan_res_cfg(tgt_hdl); 6682 tgt_cap_info = target_psoc_get_target_caps(tgt_hdl); 6683 service_bitmap = target_psoc_get_service_bitmap(tgt_hdl); 6684 6685 param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) cmd_param_info; 6686 if (!param_buf) { 6687 wma_err("Invalid arguments"); 6688 return -EINVAL; 6689 } 6690 6691 ev = param_buf->fixed_param; 6692 if (!ev) { 6693 wma_err("Invalid buffer"); 6694 return -EINVAL; 6695 } 6696 6697 wmi_handle = get_wmi_unified_hdl_from_psoc(wma_handle->psoc); 6698 if (wmi_validate_handle(wmi_handle)) 6699 return -EINVAL; 6700 6701 wma_debug("WMA <-- WMI_SERVICE_READY_EVENTID"); 6702 6703 if (ev->num_dbs_hw_modes > param_buf->num_wlan_dbs_hw_mode_list) { 6704 wma_err("FW dbs_hw_mode entry %d more than value %d in TLV hdr", 6705 ev->num_dbs_hw_modes, 6706 param_buf->num_wlan_dbs_hw_mode_list); 6707 return -EINVAL; 6708 } 6709 6710 ev_wlan_dbs_hw_mode_list = param_buf->wlan_dbs_hw_mode_list; 6711 6712 /* Continuing with the rest of the processing, 6713 * even if 
memory allocation fails 6714 */ 6715 policy_mgr_init_dbs_hw_mode(wma_handle->psoc, ev->num_dbs_hw_modes, 6716 ev_wlan_dbs_hw_mode_list); 6717 6718 /* Initializes the fw_mode and scan_config to zero. 6719 * If ext service ready event is present it will set 6720 * the actual values of these two params. 6721 * This is to ensure that no garbage values would be 6722 * present in the absence of ext service ready event. 6723 */ 6724 wma_init_scan_fw_mode_config(wma_handle->psoc, 0, 0); 6725 6726 qdf_mem_copy(&wma_handle->reg_cap, param_buf->hal_reg_capabilities, 6727 sizeof(HAL_REG_CAPABILITIES)); 6728 6729 wma_handle->vht_supp_mcs = ev->vht_supp_mcs; 6730 6731 wma_handle->new_hw_mode_index = tgt_cap_info->default_dbs_hw_mode_index; 6732 policy_mgr_update_new_hw_mode_index(wma_handle->psoc, 6733 tgt_cap_info->default_dbs_hw_mode_index); 6734 6735 wma_debug("Firmware default hw mode index : %d", 6736 tgt_cap_info->default_dbs_hw_mode_index); 6737 wma_info("Firmware build version : %08x", 6738 ev->fw_build_vers); 6739 wma_debug("FW fine time meas cap: 0x%x", 6740 tgt_cap_info->wmi_fw_sub_feat_caps); 6741 6742 wma_handle->hw_bd_id = ev->hw_bd_id; 6743 6744 wma_handle->hw_bd_info[BDF_VERSION] = 6745 WMI_GET_BDF_VERSION(ev->hw_bd_info); 6746 wma_handle->hw_bd_info[REF_DESIGN_ID] = 6747 WMI_GET_REF_DESIGN(ev->hw_bd_info); 6748 wma_handle->hw_bd_info[CUSTOMER_ID] = 6749 WMI_GET_CUSTOMER_ID(ev->hw_bd_info); 6750 wma_handle->hw_bd_info[PROJECT_ID] = 6751 WMI_GET_PROJECT_ID(ev->hw_bd_info); 6752 wma_handle->hw_bd_info[BOARD_DATA_REV] = 6753 WMI_GET_BOARD_DATA_REV(ev->hw_bd_info); 6754 6755 wma_info("Board id: %x, Board version: %x %x %x %x %x", 6756 wma_handle->hw_bd_id, 6757 wma_handle->hw_bd_info[BDF_VERSION], 6758 wma_handle->hw_bd_info[REF_DESIGN_ID], 6759 wma_handle->hw_bd_info[CUSTOMER_ID], 6760 wma_handle->hw_bd_info[PROJECT_ID], 6761 wma_handle->hw_bd_info[BOARD_DATA_REV]); 6762 6763 /* wmi service is ready */ 6764 qdf_mem_copy(wma_handle->wmi_service_bitmap, 6765 
service_bitmap, 6766 sizeof(wma_handle->wmi_service_bitmap)); 6767 6768 cdp_cfg_tx_set_is_mgmt_over_wmi_enabled(soc, 6769 wmi_service_enabled(wmi_handle, wmi_service_mgmt_tx_wmi)); 6770 cdp_set_desc_global_pool_size(soc, ev->num_msdu_desc); 6771 /* SWBA event handler for beacon transmission */ 6772 status = wma_register_swba_events(wma_handle->wmi_handle); 6773 6774 if (QDF_IS_STATUS_ERROR(status)) { 6775 wma_err("Failed to register swba beacon event cb"); 6776 goto failure; 6777 } 6778 #ifdef WLAN_FEATURE_LPSS 6779 wma_handle->lpss_support = 6780 wmi_service_enabled(wmi_handle, wmi_service_lpass); 6781 #endif /* WLAN_FEATURE_LPSS */ 6782 6783 if (wmi_service_enabled(wmi_handle, wmi_service_fse_cmem_alloc_support)) 6784 wlan_dp_set_fst_in_cmem(true); 6785 6786 if (wmi_service_enabled(wmi_handle, 6787 wmi_service_fisa_dynamic_msdu_aggr_size_support)) 6788 wlan_dp_set_fisa_dynamic_aggr_size_support(true); 6789 /* 6790 * This Service bit is added to check for ARP/NS Offload 6791 * support for LL/HL targets 6792 */ 6793 wma_handle->ap_arpns_support = 6794 wmi_service_enabled(wmi_handle, wmi_service_ap_arpns_offload); 6795 6796 if (wmi_service_enabled(wmi_handle, wmi_service_csa_offload)) { 6797 wma_debug("FW support CSA offload capability"); 6798 status = wmi_unified_register_event_handler( 6799 wmi_handle, 6800 wmi_csa_handling_event_id, 6801 wma_csa_offload_handler, 6802 WMA_RX_SERIALIZER_CTX); 6803 if (QDF_IS_STATUS_ERROR(status)) { 6804 wma_err("Failed to register CSA offload event cb"); 6805 goto failure; 6806 } 6807 } 6808 6809 if (wmi_service_enabled(wmi_handle, wmi_service_mgmt_tx_wmi)) { 6810 wma_debug("Firmware supports management TX over WMI,use WMI interface instead of HTT for management Tx"); 6811 /* 6812 * Register Tx completion event handler for MGMT Tx over WMI 6813 * case 6814 */ 6815 status = wmi_unified_register_event_handler( 6816 wmi_handle, 6817 wmi_mgmt_tx_completion_event_id, 6818 wma_mgmt_tx_completion_handler, 6819 WMA_RX_SERIALIZER_CTX); 6820 
if (QDF_IS_STATUS_ERROR(status)) { 6821 wma_err("Failed to register MGMT over WMI completion handler"); 6822 goto failure; 6823 } 6824 6825 status = wmi_unified_register_event_handler( 6826 wmi_handle, 6827 wmi_mgmt_tx_bundle_completion_event_id, 6828 wma_mgmt_tx_bundle_completion_handler, 6829 WMA_RX_SERIALIZER_CTX); 6830 if (QDF_IS_STATUS_ERROR(status)) { 6831 wma_err("Failed to register MGMT over WMI completion handler"); 6832 goto failure; 6833 } 6834 6835 } else { 6836 wma_err("FW does not support WMI_SERVICE_MGMT_TX_WMI, Use HTT interface for Management Tx"); 6837 } 6838 6839 status = wma_register_gtk_offload_event(wma_handle); 6840 if (QDF_IS_STATUS_ERROR(status)) { 6841 wma_err("Failed to register GTK offload event cb"); 6842 goto failure; 6843 } 6844 6845 status = wmi_unified_register_event_handler(wmi_handle, 6846 wmi_tbttoffset_update_event_id, 6847 wma_tbttoffset_update_event_handler, 6848 WMA_RX_SERIALIZER_CTX); 6849 if (QDF_IS_STATUS_ERROR(status)) { 6850 wma_err("Failed to register WMI_TBTTOFFSET_UPDATE_EVENTID callback"); 6851 goto failure; 6852 } 6853 6854 if (wmi_service_enabled(wma_handle->wmi_handle, 6855 wmi_service_rcpi_support)) { 6856 /* register for rcpi response event */ 6857 status = wmi_unified_register_event_handler( 6858 wmi_handle, 6859 wmi_update_rcpi_event_id, 6860 wma_rcpi_event_handler, 6861 WMA_RX_SERIALIZER_CTX); 6862 if (QDF_IS_STATUS_ERROR(status)) { 6863 wma_err("Failed to register RCPI event handler"); 6864 goto failure; 6865 } 6866 wma_handle->rcpi_enabled = true; 6867 } 6868 6869 /* mac_id is replaced with pdev_id in converged firmware to have 6870 * multi-radio support. In order to maintain backward compatibility 6871 * with old fw, host needs to check WMI_SERVICE_DEPRECATED_REPLACE 6872 * in service bitmap from FW and host needs to set use_pdev_id in 6873 * wmi_resource_config to true. 
If WMI_SERVICE_DEPRECATED_REPLACE 6874 * service is not set, then host shall not expect MAC ID from FW in 6875 * VDEV START RESPONSE event and host shall use PDEV ID. 6876 */ 6877 if (wmi_service_enabled(wmi_handle, wmi_service_deprecated_replace)) 6878 wlan_res_cfg->use_pdev_id = true; 6879 else 6880 wlan_res_cfg->use_pdev_id = false; 6881 6882 wlan_res_cfg->max_num_dbs_scan_duty_cycle = CDS_DBS_SCAN_CLIENTS_MAX; 6883 6884 /* Initialize the log supported event handler */ 6885 status = wmi_unified_register_event_handler(wmi_handle, 6886 wmi_diag_event_id_log_supported_event_id, 6887 wma_log_supported_evt_handler, 6888 WMA_RX_SERIALIZER_CTX); 6889 if (QDF_IS_STATUS_ERROR(status)) { 6890 wma_err("Failed to register log supported event cb"); 6891 goto failure; 6892 } 6893 6894 cdp_mark_first_wakeup_packet( 6895 soc, OL_TXRX_PDEV_ID, 6896 wmi_service_enabled(wmi_handle, 6897 wmi_service_mark_first_wakeup_packet)); 6898 wma_handle->is_dfs_offloaded = 6899 wmi_service_enabled(wmi_handle, 6900 wmi_service_dfs_phyerr_offload); 6901 6902 wma_handle->nan_datapath_enabled = 6903 wmi_service_enabled(wma_handle->wmi_handle, 6904 wmi_service_nan_data); 6905 6906 wma_handle->fw_therm_throt_support = 6907 wmi_service_enabled(wma_handle->wmi_handle, 6908 wmi_service_tt); 6909 6910 wma_set_component_caps(wma_handle->psoc); 6911 6912 wma_update_fw_config(wma_handle->psoc, tgt_hdl); 6913 6914 status = wmi_unified_save_fw_version_cmd(wmi_handle, param_buf); 6915 if (QDF_IS_STATUS_ERROR(status)) { 6916 wma_err("Failed to send WMI_INIT_CMDID command"); 6917 goto failure; 6918 } 6919 6920 if (wmi_service_enabled(wmi_handle, wmi_service_ext_msg)) { 6921 status = qdf_mc_timer_start( 6922 &wma_handle->service_ready_ext_timer, 6923 WMA_SERVICE_READY_EXT_TIMEOUT); 6924 if (QDF_IS_STATUS_ERROR(status)) 6925 wma_err("Failed to start the service ready ext timer"); 6926 } 6927 wma_handle->tx_bfee_8ss_enabled = 6928 wmi_service_enabled(wmi_handle, wmi_service_8ss_tx_bfee); 6929 6930 
wma_handle->dynamic_nss_chains_support = wmi_service_enabled(wmi_handle, 6931 wmi_service_per_vdev_chain_support); 6932 target_psoc_set_num_radios(tgt_hdl, 1); 6933 6934 return 0; 6935 6936 failure: 6937 return -EINVAL; 6938 6939 } 6940 6941 /** 6942 * wma_get_caps_for_phyidx_hwmode() - to fetch caps for given hw mode and band 6943 * @caps_per_phy: Pointer to capabilities structure which needs to be filled 6944 * @hw_mode: Provided hardware mode 6945 * @band: Provide band i.e. 2G or 5G 6946 * 6947 * This API finds cap which suitable for provided hw mode and band. If user 6948 * is provides some invalid hw mode then it will automatically falls back to 6949 * default hw mode 6950 * 6951 * Return: QDF_STATUS 6952 */ 6953 QDF_STATUS wma_get_caps_for_phyidx_hwmode(struct wma_caps_per_phy *caps_per_phy, 6954 enum hw_mode_dbs_capab hw_mode, enum cds_band_type band) 6955 { 6956 t_wma_handle *wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 6957 struct target_psoc_info *tgt_hdl; 6958 int ht_cap_info, vht_cap_info; 6959 uint8_t our_hw_mode = hw_mode, num_hw_modes, hw_mode_config_type; 6960 struct wlan_psoc_host_mac_phy_caps *mac_phy_cap; 6961 struct wlan_psoc_target_capability_info *tgt_cap_info; 6962 uint8_t total_mac_phy_cnt, i; 6963 6964 if (!wma_handle) 6965 return QDF_STATUS_E_FAILURE; 6966 6967 tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc); 6968 if (!tgt_hdl) { 6969 wma_err("target psoc info is NULL"); 6970 return -EINVAL; 6971 } 6972 if (!caps_per_phy) { 6973 wma_err("Invalid caps pointer"); 6974 return QDF_STATUS_E_FAILURE; 6975 } 6976 6977 ht_cap_info = target_if_get_ht_cap_info(tgt_hdl); 6978 vht_cap_info = target_if_get_vht_cap_info(tgt_hdl); 6979 num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl); 6980 mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl); 6981 tgt_cap_info = target_psoc_get_target_caps(tgt_hdl); 6982 6983 if (!mac_phy_cap) { 6984 wma_err("Invalid MAC PHY capabilities handle"); 6985 return QDF_STATUS_E_FAILURE; 6986 } 6987 6988 if 
(!tgt_cap_info) { 6989 wma_err("Invalid target capabilities handle"); 6990 return QDF_STATUS_E_FAILURE; 6991 } 6992 6993 if (!num_hw_modes) { 6994 wma_debug("Invalid number of hw modes, use legacy HT/VHT caps"); 6995 caps_per_phy->ht_2g = ht_cap_info; 6996 caps_per_phy->ht_5g = ht_cap_info; 6997 caps_per_phy->vht_2g = vht_cap_info; 6998 caps_per_phy->vht_5g = vht_cap_info; 6999 /* legacy platform doesn't support HE IE */ 7000 caps_per_phy->he_2g[0] = 0; 7001 caps_per_phy->he_2g[1] = 0; 7002 caps_per_phy->he_5g[0] = 0; 7003 caps_per_phy->he_5g[1] = 0; 7004 caps_per_phy->tx_chain_mask_2G = 7005 EXTRACT_TX_CHAIN_MASK_2G(tgt_cap_info->txrx_chainmask); 7006 caps_per_phy->rx_chain_mask_2G = 7007 EXTRACT_RX_CHAIN_MASK_2G(tgt_cap_info->txrx_chainmask); 7008 caps_per_phy->tx_chain_mask_5G = 7009 EXTRACT_TX_CHAIN_MASK_5G(tgt_cap_info->txrx_chainmask); 7010 caps_per_phy->rx_chain_mask_5G = 7011 EXTRACT_RX_CHAIN_MASK_5G(tgt_cap_info->txrx_chainmask); 7012 7013 return QDF_STATUS_SUCCESS; 7014 } 7015 7016 if (!policy_mgr_is_dbs_enable(wma_handle->psoc)) 7017 our_hw_mode = HW_MODE_DBS_NONE; 7018 7019 total_mac_phy_cnt = target_psoc_get_total_mac_phy_cnt(tgt_hdl); 7020 for (i = 0; i < total_mac_phy_cnt; i++) { 7021 hw_mode_config_type = mac_phy_cap[i].hw_mode_config_type; 7022 if (our_hw_mode == HW_MODE_DBS && 7023 !(hw_mode_config_type == WMI_HW_MODE_DBS || 7024 hw_mode_config_type == WMI_HW_MODE_DBS_OR_SBS)) 7025 continue; 7026 7027 if ((band == CDS_BAND_2GHZ || band == CDS_BAND_ALL) && 7028 (WLAN_2G_CAPABILITY & mac_phy_cap[i].supported_bands) && 7029 !caps_per_phy->tx_chain_mask_2G) { 7030 caps_per_phy->ht_2g = mac_phy_cap[i].ht_cap_info_2G; 7031 caps_per_phy->vht_2g = mac_phy_cap[i].vht_cap_info_2G; 7032 qdf_mem_copy(caps_per_phy->he_2g, 7033 mac_phy_cap[i].he_cap_info_2G, 7034 sizeof(caps_per_phy->he_2g)); 7035 7036 caps_per_phy->tx_chain_mask_2G = 7037 mac_phy_cap[i].tx_chain_mask_2G; 7038 caps_per_phy->rx_chain_mask_2G = 7039 mac_phy_cap[i].rx_chain_mask_2G; 7040 7041 
wma_debug("Select 2G capable phyid[%d] chain %d %d ht 0x%x vht 0x%x", 7042 i, 7043 caps_per_phy->tx_chain_mask_2G, 7044 caps_per_phy->rx_chain_mask_2G, 7045 caps_per_phy->ht_2g, 7046 caps_per_phy->vht_2g); 7047 } 7048 if ((band == CDS_BAND_5GHZ || band == CDS_BAND_ALL) && 7049 (WLAN_5G_CAPABILITY & mac_phy_cap[i].supported_bands) && 7050 !caps_per_phy->tx_chain_mask_5G) { 7051 caps_per_phy->ht_5g = mac_phy_cap[i].ht_cap_info_5G; 7052 caps_per_phy->vht_5g = mac_phy_cap[i].vht_cap_info_5G; 7053 qdf_mem_copy(caps_per_phy->he_5g, 7054 mac_phy_cap[i].he_cap_info_5G, 7055 sizeof(caps_per_phy->he_5g)); 7056 7057 caps_per_phy->tx_chain_mask_5G = 7058 mac_phy_cap[i].tx_chain_mask_5G; 7059 caps_per_phy->rx_chain_mask_5G = 7060 mac_phy_cap[i].rx_chain_mask_5G; 7061 7062 wma_debug("Select 5G capable phyid[%d] chain %d %d ht 0x%x vht 0x%x", 7063 i, 7064 caps_per_phy->tx_chain_mask_5G, 7065 caps_per_phy->rx_chain_mask_5G, 7066 caps_per_phy->ht_5g, 7067 caps_per_phy->vht_5g); 7068 } 7069 } 7070 7071 return QDF_STATUS_SUCCESS; 7072 } 7073 7074 /** 7075 * wma_is_rx_ldpc_supported_for_channel() - to find out if ldpc is supported 7076 * 7077 * @ch_freq: Channel freq for which it needs to check if rx ldpc is enabled 7078 * 7079 * This API takes channel number as argument and takes default hw mode as DBS 7080 * to check if rx LDPC support is enabled for that channel or no 7081 */ 7082 bool wma_is_rx_ldpc_supported_for_channel(uint32_t ch_freq) 7083 { 7084 t_wma_handle *wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 7085 struct target_psoc_info *tgt_hdl; 7086 struct wma_caps_per_phy caps_per_phy = {0}; 7087 enum cds_band_type band; 7088 bool status; 7089 uint8_t num_hw_modes; 7090 7091 if (!wma_handle) 7092 return false; 7093 7094 tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc); 7095 if (!tgt_hdl) { 7096 wma_err("Target handle is NULL"); 7097 return QDF_STATUS_E_FAILURE; 7098 } 7099 7100 num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl); 7101 7102 if 
(!WLAN_REG_IS_24GHZ_CH_FREQ(ch_freq)) 7103 band = CDS_BAND_5GHZ; 7104 else 7105 band = CDS_BAND_2GHZ; 7106 7107 if (QDF_STATUS_SUCCESS != wma_get_caps_for_phyidx_hwmode( 7108 &caps_per_phy, 7109 HW_MODE_DBS, band)) { 7110 return false; 7111 } 7112 7113 /* 7114 * Legacy platforms like Rome set WMI_HT_CAP_LDPC to specify RX LDPC 7115 * capability. But new platforms like Helium set WMI_HT_CAP_RX_LDPC 7116 * instead. 7117 */ 7118 if (0 == num_hw_modes) { 7119 status = (!!(caps_per_phy.ht_2g & WMI_HT_CAP_LDPC)); 7120 } else { 7121 if (WLAN_REG_IS_24GHZ_CH_FREQ(ch_freq)) 7122 status = (!!(caps_per_phy.ht_2g & WMI_HT_CAP_RX_LDPC)); 7123 else 7124 status = (!!(caps_per_phy.ht_5g & WMI_HT_CAP_RX_LDPC)); 7125 } 7126 7127 return status; 7128 } 7129 7130 /** 7131 * wma_print_mac_phy_capabilities() - Prints MAC PHY capabilities 7132 * @cap: pointer to WMI_MAC_PHY_CAPABILITIES 7133 * @index: MAC_PHY index 7134 * 7135 * Return: none 7136 */ 7137 static void wma_print_mac_phy_capabilities(struct wlan_psoc_host_mac_phy_caps 7138 *cap, int index) 7139 { 7140 uint32_t mac_2G[PSOC_HOST_MAX_MAC_SIZE]; 7141 uint32_t mac_5G[PSOC_HOST_MAX_MAC_SIZE]; 7142 uint32_t phy_2G[WMI_MAX_HECAP_PHY_SIZE]; 7143 uint32_t phy_5G[WMI_MAX_HECAP_PHY_SIZE]; 7144 struct wlan_psoc_host_ppe_threshold ppet_2G, ppet_5G; 7145 7146 wma_nofl_debug("\t: index [%d]", index); 7147 wma_nofl_debug("\t: cap for hw_mode_id[%d]", cap->hw_mode_id); 7148 wma_nofl_debug("\t: pdev_id[%d]", cap->pdev_id); 7149 wma_nofl_debug("\t: phy_id[%d]", cap->phy_id); 7150 wma_nofl_debug("\t: hw_mode_config_type[%d]", cap->hw_mode_config_type); 7151 wma_nofl_debug("\t: supports_11b[%d]", cap->supports_11b); 7152 wma_nofl_debug("\t: supports_11g[%d]", cap->supports_11g); 7153 wma_nofl_debug("\t: supports_11a[%d]", cap->supports_11a); 7154 wma_nofl_debug("\t: supports_11n[%d]", cap->supports_11n); 7155 wma_nofl_debug("\t: supports_11ac[%d]", cap->supports_11ac); 7156 wma_nofl_debug("\t: supports_11ax[%d]", cap->supports_11ax); 7157 
wma_nofl_debug("\t: supported_bands[%d]", cap->supported_bands); 7158 wma_nofl_debug("\t: ampdu_density[%d]", cap->ampdu_density); 7159 wma_nofl_debug("\t: max_bw_supported_2G[%d]", cap->max_bw_supported_2G); 7160 wma_nofl_debug("\t: ht_cap_info_2G[%d]", cap->ht_cap_info_2G); 7161 wma_nofl_debug("\t: vht_cap_info_2G[0x%0X]", cap->vht_cap_info_2G); 7162 wma_nofl_debug("\t: vht_supp_mcs_2G[0x%0X]", cap->vht_supp_mcs_2G); 7163 wma_nofl_debug("\t: tx_chain_mask_2G[%d]", cap->tx_chain_mask_2G); 7164 wma_nofl_debug("\t: rx_chain_mask_2G[%d]", cap->rx_chain_mask_2G); 7165 wma_nofl_debug("\t: max_bw_supported_5G[%d]", cap->max_bw_supported_5G); 7166 wma_nofl_debug("\t: ht_cap_info_5G[%d]", cap->ht_cap_info_5G); 7167 wma_nofl_debug("\t: vht_cap_info_5G[0x%0X]", cap->vht_cap_info_5G); 7168 wma_nofl_debug("\t: vht_supp_mcs_5G[0x%0X]", cap->vht_supp_mcs_5G); 7169 wma_nofl_debug("\t: tx_chain_mask_5G[%d]", cap->tx_chain_mask_5G); 7170 wma_nofl_debug("\t: rx_chain_mask_5G[%d]", cap->rx_chain_mask_5G); 7171 wma_nofl_debug("\t: he_cap_info_2G[0][%08x]", cap->he_cap_info_2G[0]); 7172 wma_nofl_debug("\t: he_cap_info_2G[1][%08x]", cap->he_cap_info_2G[1]); 7173 wma_nofl_debug("\t: he_supp_mcs_2G[%08x]", cap->he_supp_mcs_2G); 7174 wma_nofl_debug("\t: he_cap_info_5G[0][%08x]", cap->he_cap_info_5G[0]); 7175 wma_nofl_debug("\t: he_cap_info_5G[1][%08x]", cap->he_cap_info_5G[1]); 7176 wma_nofl_debug("\t: he_supp_mcs_5G[%08x]", cap->he_supp_mcs_5G); 7177 qdf_mem_copy(mac_2G, cap->he_cap_info_2G, sizeof(mac_2G)); 7178 qdf_mem_copy(mac_5G, cap->he_cap_info_5G, sizeof(mac_5G)); 7179 qdf_mem_copy(phy_2G, cap->he_cap_phy_info_2G, 7180 WMI_MAX_HECAP_PHY_SIZE * 4); 7181 qdf_mem_copy(phy_5G, cap->he_cap_phy_info_5G, 7182 WMI_MAX_HECAP_PHY_SIZE * 4); 7183 ppet_2G = cap->he_ppet2G; 7184 ppet_5G = cap->he_ppet5G; 7185 7186 wma_print_he_mac_cap_w1(mac_2G[0]); 7187 wma_print_he_mac_cap_w2(mac_2G[1]); 7188 wma_print_he_phy_cap(phy_2G); 7189 wma_print_he_ppet(&ppet_2G); 7190 
wma_print_he_mac_cap_w1(mac_5G[0]); 7191 wma_print_he_mac_cap_w1(mac_5G[1]); 7192 wma_print_he_phy_cap(phy_5G); 7193 wma_print_he_ppet(&ppet_5G); 7194 } 7195 7196 /** 7197 * wma_print_populate_soc_caps() - Prints all the caps populated per hw mode 7198 * @tgt_info: target related info 7199 * 7200 * This function prints all the caps populater per hw mode and per PHY 7201 * 7202 * Return: none 7203 */ 7204 static void wma_print_populate_soc_caps(struct target_psoc_info *tgt_hdl) 7205 { 7206 int i, num_hw_modes, total_mac_phy_cnt; 7207 struct wlan_psoc_host_mac_phy_caps *mac_phy_cap, *tmp; 7208 7209 num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl); 7210 total_mac_phy_cnt = target_psoc_get_total_mac_phy_cnt(tgt_hdl); 7211 7212 /* print number of hw modes */ 7213 wma_debug("num of hw modes [%d]", num_hw_modes); 7214 wma_debug("num mac_phy_cnt [%d]", total_mac_phy_cnt); 7215 7216 mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl); 7217 if (!mac_phy_cap) { 7218 wma_err("Invalid MAC PHY capabilities handle"); 7219 return; 7220 } 7221 7222 wma_debug("<====== HW mode cap printing starts ======>"); 7223 /* print cap of each hw mode */ 7224 for (i = 0; i < total_mac_phy_cnt; i++) { 7225 if (&mac_phy_cap[i]) { 7226 wma_nofl_debug("====>: hw mode id[%d], phy id[%d]", 7227 mac_phy_cap[i].hw_mode_id, 7228 mac_phy_cap[i].phy_id); 7229 tmp = &mac_phy_cap[i]; 7230 wma_print_mac_phy_capabilities(tmp, i); 7231 } 7232 } 7233 wma_debug("<====== HW mode cap printing ends ======>\n"); 7234 } 7235 7236 /** 7237 * wma_update_hw_mode_list() - updates hw_mode_list 7238 * @wma_handle: pointer to wma global structure 7239 * @tgt_hdl - target psoc information 7240 * 7241 * This function updates hw_mode_list with tx_streams, rx_streams, 7242 * bandwidth, dbs and agile dfs for each hw_mode. 7243 * 7244 * Returns: 0 for success else failure. 
7245 */ 7246 static QDF_STATUS wma_update_hw_mode_list(t_wma_handle *wma_handle, 7247 struct target_psoc_info *tgt_hdl) 7248 { 7249 struct wlan_psoc_host_mac_phy_caps *tmp, *mac_phy_cap; 7250 uint32_t i, hw_config_type, j = 0; 7251 WMI_PHY_CAPABILITY new_supported_band = 0; 7252 bool supported_band_update_failure = false; 7253 struct wlan_psoc_target_capability_info *tgt_cap_info; 7254 int num_hw_modes; 7255 7256 if (wma_validate_handle(wma_handle)) 7257 return QDF_STATUS_E_FAILURE; 7258 7259 num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl); 7260 mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl); 7261 tgt_cap_info = target_psoc_get_target_caps(tgt_hdl); 7262 7263 if (!mac_phy_cap) { 7264 wma_err("mac_phy_cap Null"); 7265 return QDF_STATUS_E_FAILURE; 7266 } 7267 7268 wma_debug("Num modes:%d", num_hw_modes); 7269 for (i = 0; i < num_hw_modes; i++) { 7270 /* Update for MAC0 */ 7271 tmp = &mac_phy_cap[j++]; 7272 hw_config_type = tmp->hw_mode_config_type; 7273 if (wma_update_supported_bands(tmp->supported_bands, 7274 &new_supported_band) 7275 != QDF_STATUS_SUCCESS) 7276 supported_band_update_failure = true; 7277 7278 /* SBS and DBS have dual MAC. Upto 2 MACs are considered. 
*/ 7279 if ((hw_config_type == WMI_HW_MODE_DBS) || 7280 (hw_config_type == WMI_HW_MODE_SBS_PASSIVE) || 7281 (hw_config_type == WMI_HW_MODE_SBS) || 7282 (hw_config_type == WMI_HW_MODE_DBS_OR_SBS)) { 7283 /* Update for MAC1 */ 7284 tmp = &mac_phy_cap[j++]; 7285 if (QDF_STATUS_SUCCESS != 7286 wma_update_supported_bands(tmp->supported_bands, 7287 &new_supported_band)) 7288 supported_band_update_failure = true; 7289 } 7290 } 7291 7292 /* overwrite phy_capability which we got from service ready event */ 7293 if (!supported_band_update_failure) { 7294 wma_debug("updating supported band from old[%d] to new[%d]", 7295 target_if_get_phy_capability(tgt_hdl), 7296 new_supported_band); 7297 target_if_set_phy_capability(tgt_hdl, new_supported_band); 7298 } 7299 7300 if (QDF_STATUS_SUCCESS != 7301 policy_mgr_update_hw_mode_list(wma_handle->psoc, 7302 tgt_hdl)) 7303 wma_err("failed to update policy manager"); 7304 7305 return QDF_STATUS_SUCCESS; 7306 } 7307 7308 static void wma_init_wifi_pos_dma_rings(t_wma_handle *wma_handle, 7309 uint8_t num_mac, void *buf) 7310 { 7311 struct hif_opaque_softc *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF); 7312 void *hal_soc; 7313 7314 if (!hif_ctx) { 7315 wma_err("invalid hif context"); 7316 return; 7317 } 7318 7319 hal_soc = hif_get_hal_handle(hif_ctx); 7320 7321 wifi_pos_init_cir_cfr_rings(wma_handle->psoc, hal_soc, num_mac, buf); 7322 } 7323 7324 /** 7325 * wma_populate_soc_caps() - populate entire SOC's capabilities 7326 * @wma_handle: pointer to wma global structure 7327 * @tgt_hdl: target psoc information 7328 * @param_buf: pointer to param of service ready extension event from fw 7329 * 7330 * This API populates all capabilities of entire SOC. For example, 7331 * how many number of hw modes are supported by this SOC, what are the 7332 * capabilities of each phy per hw mode, what are HAL reg capabilities per 7333 * phy. 
7334 * 7335 * Return: none 7336 */ 7337 static void wma_populate_soc_caps(t_wma_handle *wma_handle, 7338 struct target_psoc_info *tgt_hdl, 7339 WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf) 7340 { 7341 7342 wma_debug("Enter"); 7343 7344 wma_init_wifi_pos_dma_rings(wma_handle, 7345 param_buf->num_oem_dma_ring_caps, 7346 param_buf->oem_dma_ring_caps); 7347 7348 wma_print_populate_soc_caps(tgt_hdl); 7349 wma_debug("Exit"); 7350 } 7351 7352 /** 7353 * wma_init_dbr_params() - init dbr params 7354 * @wma_handle: pointer to wma global structure 7355 * 7356 * This API initializes params of direct buffer rx component. 7357 * 7358 * Return: none 7359 */ 7360 #ifdef DIRECT_BUF_RX_ENABLE 7361 static void wma_init_dbr_params(t_wma_handle *wma_handle) 7362 { 7363 struct hif_opaque_softc *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF); 7364 void *hal_soc; 7365 7366 if (!hif_ctx) { 7367 wma_err("invalid hif context"); 7368 return; 7369 } 7370 7371 hal_soc = hif_get_hal_handle(hif_ctx); 7372 direct_buf_rx_target_attach(wma_handle->psoc, hal_soc, 7373 wma_handle->qdf_dev); 7374 } 7375 #else 7376 static inline void wma_init_dbr_params(t_wma_handle *wma_handle) 7377 { 7378 } 7379 #endif 7380 7381 /** 7382 * wma_set_coex_res_cfg() - Set target COEX resource configuration. 
7383 * @wma_handle: pointer to wma global structure 7384 * @wlan_res_cfg: Pointer to target resource configuration 7385 * 7386 * Return: none 7387 */ 7388 #ifdef FEATURE_COEX_CONFIG 7389 static void wma_set_coex_res_cfg(t_wma_handle *wma_handle, 7390 struct wmi_unified *wmi_handle, 7391 target_resource_config *wlan_res_cfg) 7392 { 7393 if (cfg_get(wma_handle->psoc, CFG_THREE_WAY_COEX_CONFIG_LEGACY) && 7394 wmi_service_enabled(wmi_handle, 7395 wmi_service_three_way_coex_config_legacy)) { 7396 wlan_res_cfg->three_way_coex_config_legacy_en = true; 7397 } else { 7398 wlan_res_cfg->three_way_coex_config_legacy_en = false; 7399 } 7400 } 7401 #else 7402 static void wma_set_coex_res_cfg(t_wma_handle *wma_handle, 7403 struct wmi_unified *wmi_handle, 7404 target_resource_config *wlan_res_cfg) 7405 { 7406 } 7407 #endif 7408 7409 static void wma_update_hw_mode_config(tp_wma_handle wma_handle, 7410 struct target_psoc_info *tgt_hdl) 7411 { 7412 uint32_t conc_scan_config_bits, fw_config_bits; 7413 7414 fw_config_bits = target_if_get_fw_config_bits(tgt_hdl); 7415 conc_scan_config_bits = target_if_get_conc_scan_config_bits(tgt_hdl); 7416 7417 wma_debug("Defaults: scan config:%x FW mode config:%x", 7418 conc_scan_config_bits, fw_config_bits); 7419 7420 if (wma_is_dbs_mandatory(wma_handle->psoc, tgt_hdl) && 7421 (policy_mgr_is_dual_mac_disabled_in_ini(wma_handle->psoc))) { 7422 policy_mgr_set_dual_mac_feature(wma_handle->psoc, 7423 ENABLE_DBS_CXN_AND_DISABLE_SIMULTANEOUS_SCAN); 7424 policy_mgr_set_ch_select_plcy(wma_handle->psoc, 7425 POLICY_MGR_CH_SELECT_POLICY_DEF); 7426 } 7427 wma_init_scan_fw_mode_config(wma_handle->psoc, conc_scan_config_bits, 7428 fw_config_bits); 7429 } 7430 7431 #define MAX_GRP_KEY 16 7432 7433 int wma_rx_service_ready_ext2_event(void *handle, uint8_t *ev, uint32_t len) 7434 { 7435 tp_wma_handle wma_handle = (tp_wma_handle)handle; 7436 struct target_psoc_info *tgt_hdl; 7437 target_resource_config *wlan_res_cfg; 7438 QDF_STATUS status; 7439 7440 
	wma_debug("Enter");

	if (wma_validate_handle(wma_handle))
		return -EINVAL;

	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
	if (!tgt_hdl) {
		wma_err("target psoc info is NULL");
		return -EINVAL;
	}

	wlan_res_cfg = target_psoc_get_wlan_res_cfg(tgt_hdl);

	/* multipass SAP needs the full group-key pool */
	if (wlan_mlme_is_multipass_sap(wma_handle->psoc))
		wlan_res_cfg->max_num_group_keys = MAX_GRP_KEY;

	status = policy_mgr_update_sbs_freq(wma_handle->psoc, tgt_hdl);
	if (QDF_IS_STATUS_ERROR(status))
		return -EINVAL;

	wma_update_hw_mode_config(wma_handle, tgt_hdl);

	return 0;
}

/**
 * wma_rx_service_ready_ext_event() - evt handler for service ready ext event.
 * @handle: wma handle
 * @event: params of the service ready extended event
 * @length: param length
 *
 * Return: 0 on success, negative errno on failure
 */
int wma_rx_service_ready_ext_event(void *handle, uint8_t *event,
				   uint32_t length)
{
	tp_wma_handle wma_handle = (tp_wma_handle) handle;
	WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf;
	wmi_service_ready_ext_event_fixed_param *ev;
	QDF_STATUS ret;
	struct target_psoc_info *tgt_hdl;
	struct wlan_psoc_target_capability_info *tgt_cap_info;
	struct wmi_unified *wmi_handle;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	target_resource_config *wlan_res_cfg;

	wma_debug("Enter");

	if (wma_validate_handle(wma_handle))
		return -EINVAL;

	wmi_handle = get_wmi_unified_hdl_from_psoc(wma_handle->psoc);
	if (wmi_validate_handle(wmi_handle))
		return -EINVAL;

	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
	if (!tgt_hdl) {
		wma_err("target psoc info is NULL");
		return -EINVAL;
	}

	wlan_res_cfg = target_psoc_get_wlan_res_cfg(tgt_hdl);
	param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event;
	if (!param_buf) {
		wma_err("Invalid event");
		return -EINVAL;
	}

	ev = param_buf->fixed_param;
	if (!ev) {
		wma_err("Invalid buffer");
		return -EINVAL;
	}

	wma_debug("WMA <-- WMI_SERVICE_READY_EXT_EVENTID");

	tgt_cap_info = target_psoc_get_target_caps(tgt_hdl);
	/* the ext event arrived, so the watchdog timer can be cancelled */
	ret = qdf_mc_timer_stop(&wma_handle->service_ready_ext_timer);
	if (!QDF_IS_STATUS_SUCCESS(ret)) {
		wma_err("Failed to stop the service ready ext timer");
		return -EINVAL;
	}
	wma_populate_soc_caps(wma_handle, tgt_hdl, param_buf);

	ret = wma_update_hw_mode_list(wma_handle, tgt_hdl);
	if (QDF_IS_STATUS_ERROR(ret)) {
		wma_err("Failed to update hw mode list");
		return -EINVAL;
	}

	wma_debug("WMA --> WMI_INIT_CMDID");

	wma_update_hw_mode_config(wma_handle, tgt_hdl);

	target_psoc_set_num_radios(tgt_hdl, 1);

	wlan_dp_update_peer_map_unmap_version(&wlan_res_cfg->peer_map_unmap_version);

	/*
	 * Each block below mirrors a FW service capability into the target
	 * resource config (sent with WMI_INIT) and into the datapath config.
	 * NOTE(review): some checks use wma_handle->wmi_handle while others
	 * use the local wmi_handle — presumably the same handle; confirm
	 * before unifying.
	 */
	if (wmi_service_enabled(wmi_handle,
				wmi_service_new_htt_msg_format)) {
		cdp_cfg_set_new_htt_msg_format(soc, 1);
		wlan_res_cfg->new_htt_msg_format = true;
	} else {
		cdp_cfg_set_new_htt_msg_format(soc, 0);
		wlan_res_cfg->new_htt_msg_format = false;
	}

	if (QDF_GLOBAL_FTM_MODE != cds_get_conparam() &&
	    ucfg_mlme_get_peer_unmap_conf(wma_handle->psoc) &&
	    wmi_service_enabled(wmi_handle,
				wmi_service_peer_unmap_cnf_support)) {
		wlan_res_cfg->peer_unmap_conf_support = true;
		cdp_cfg_set_peer_unmap_conf_support(soc, true);
	} else {
		wlan_res_cfg->peer_unmap_conf_support = false;
		cdp_cfg_set_peer_unmap_conf_support(soc, false);
	}

	if (wma_handle->enable_tx_compl_tsf64 &&
	    wmi_service_enabled(wmi_handle,
				wmi_service_tx_compl_tsf64)) {
		wlan_res_cfg->tstamp64_en = true;
		cdp_cfg_set_tx_compl_tsf64(soc, true);
	} else {
		wlan_res_cfg->tstamp64_en = false;
		cdp_cfg_set_tx_compl_tsf64(soc, false);
	}

	if (ucfg_is_ftm_time_sync_enable(wma_handle->psoc) &&
	    wmi_service_enabled(wmi_handle, wmi_service_time_sync_ftm)) {
		wlan_res_cfg->time_sync_ftm = true;
		ucfg_ftm_time_sync_set_enable(wma_handle->psoc, true);
	} else {
		wlan_res_cfg->time_sync_ftm = false;
		ucfg_ftm_time_sync_set_enable(wma_handle->psoc, false);
	}

	if (wmi_service_enabled(wma_handle->wmi_handle, wmi_service_nan_vdev))
		ucfg_nan_set_vdev_creation_supp_by_fw(wma_handle->psoc, true);

	/* Change default hw mode as below kind of target will only be
	 * sending single HW mode
	 */
	if (!wmi_service_enabled(wmi_handle,
				 wmi_service_dual_band_simultaneous_support))
		wma_handle->new_hw_mode_index =
				tgt_cap_info->default_dbs_hw_mode_index;

	/*
	 * Firmware can accommodate maximum 4 vdevs and the ini gNumVdevs
	 * indicates the same.
	 * If host driver is going to create vdev for NAN, it indicates
	 * the total no.of vdevs supported to firmware which includes the
	 * NAN vdev.
	 * If firmware is going to create NAN discovery vdev, host should
	 * indicate 3 vdevs and firmware shall add 1 vdev for NAN. So decrement
	 * the num_vdevs by 1.
	 * If NAN is not supported on some target(disabled through ini
	 * param gEnableNanSupport), there is no use of reserving one vdev for
	 * it in firmware though firmware advertises wmi_service_nan. Indicate
	 * firmware that host is going to take care of the NAN vdev. Host can
	 * use the vdev either for NAN or other operations on need basis.
	 */

	if (wmi_service_enabled(wma_handle->wmi_handle, wmi_service_nan)) {
		if (ucfg_nan_is_vdev_creation_allowed(wma_handle->psoc) ||
		    QDF_GLOBAL_FTM_MODE == cds_get_conparam() ||
		    !cfg_nan_get_enable(wma_handle->psoc)) {
			wlan_res_cfg->nan_separate_iface_support = true;
		} else {
			wlan_res_cfg->num_vdevs--;
			wma_update_num_peers_tids(wma_handle, wlan_res_cfg);
		}
	}

	if ((ucfg_pkt_capture_get_mode(wma_handle->psoc) !=
	     PACKET_CAPTURE_MODE_DISABLE) &&
	    wmi_service_enabled(wmi_handle,
				wmi_service_packet_capture_support))
		wlan_res_cfg->pktcapture_support = true;
	else
		wlan_res_cfg->pktcapture_support = false;
	wlan_res_cfg->max_peer_ext_stats = WMA_SON_MAX_PEER_EXT_STATS;

	if (wmi_service_enabled(wmi_handle,
				wmi_service_sae_eapol_offload_support))
		wlan_res_cfg->sae_eapol_offload = true;
	else
		wlan_res_cfg->sae_eapol_offload = false;

	wma_debug("num_vdevs: %u", wlan_res_cfg->num_vdevs);

	wma_init_dbr_params(wma_handle);

	wma_set_coex_res_cfg(wma_handle, wmi_handle, wlan_res_cfg);

	return 0;
}

/**
 * wma_rx_ready_event() - event handler to process
 * wmi rx ready event.
7642 * @handle: wma handle 7643 * @cmd_param_info: command params info 7644 * @length: param length 7645 * 7646 * Return: none 7647 */ 7648 int wma_rx_ready_event(void *handle, uint8_t *cmd_param_info, 7649 uint32_t length) 7650 { 7651 tp_wma_handle wma_handle = (tp_wma_handle) handle; 7652 WMI_READY_EVENTID_param_tlvs *param_buf = NULL; 7653 wmi_ready_event_fixed_param *ev = NULL; 7654 int ret; 7655 7656 wma_debug("Enter"); 7657 7658 param_buf = (WMI_READY_EVENTID_param_tlvs *) cmd_param_info; 7659 if (!(wma_handle && param_buf)) { 7660 wma_err("Invalid arguments"); 7661 QDF_ASSERT(0); 7662 return -EINVAL; 7663 } 7664 7665 wma_debug("WMA <-- WMI_READY_EVENTID"); 7666 7667 if (wma_is_feature_set_supported(wma_handle)) 7668 wma_send_feature_set_cmd(wma_handle); 7669 7670 ev = param_buf->fixed_param; 7671 /* Indicate to the waiting thread that the ready 7672 * event was received 7673 */ 7674 wma_handle->sub_20_support = 7675 wmi_service_enabled(wma_handle->wmi_handle, 7676 wmi_service_half_rate_quarter_rate_support); 7677 wma_handle->wmi_ready = true; 7678 wma_handle->wlan_init_status = ev->status; 7679 7680 if (wma_handle->is_dfs_offloaded) 7681 wmi_unified_dfs_phyerr_offload_en_cmd( 7682 wma_handle->wmi_handle, 0); 7683 /* copy the mac addr */ 7684 WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->mac_addr, wma_handle->myaddr); 7685 WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->mac_addr, wma_handle->hwaddr); 7686 ret = wma_update_hdd_cfg(wma_handle); 7687 if (ret) 7688 return ret; 7689 7690 wma_debug("Exit"); 7691 7692 return 0; 7693 } 7694 7695 /** 7696 * wma_wait_for_ready_event() - wait for wma ready event 7697 * @handle: wma handle 7698 * 7699 * Return: 0 for success or QDF error 7700 */ 7701 QDF_STATUS wma_wait_for_ready_event(WMA_HANDLE handle) 7702 { 7703 tp_wma_handle wma_handle = (tp_wma_handle)handle; 7704 QDF_STATUS status; 7705 struct target_psoc_info *tgt_hdl; 7706 7707 tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc); 7708 if (!tgt_hdl) { 7709 wma_err("target psoc info is 
NULL"); 7710 return QDF_STATUS_E_INVAL; 7711 } 7712 7713 status = qdf_wait_for_event_completion(&tgt_hdl->info.event, 7714 WMA_READY_EVENTID_TIMEOUT); 7715 if (!tgt_hdl->info.wmi_ready) { 7716 wma_err("Error in pdev creation"); 7717 if (!cds_is_driver_recovering() || !cds_is_fw_down()) 7718 QDF_DEBUG_PANIC("FW ready event timed out"); 7719 return QDF_STATUS_E_INVAL; 7720 } 7721 7722 if (status == QDF_STATUS_E_TIMEOUT) 7723 wma_err("Timeout waiting for FW ready event"); 7724 else if (QDF_IS_STATUS_ERROR(status)) 7725 wma_err("Failed to wait for FW ready event; status:%u", status); 7726 else 7727 wma_info("FW ready event received"); 7728 7729 return status; 7730 } 7731 7732 /** 7733 * wma_set_ppsconfig() - set pps config in fw 7734 * @vdev_id: vdev id 7735 * @pps_param: pps params 7736 * @val : param value 7737 * 7738 * Return: 0 for success or QDF error 7739 */ 7740 QDF_STATUS wma_set_ppsconfig(uint8_t vdev_id, uint16_t pps_param, 7741 int val) 7742 { 7743 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); 7744 int ret = -EIO; 7745 uint32_t pps_val; 7746 7747 if (!wma) 7748 return QDF_STATUS_E_INVAL; 7749 7750 switch (pps_param) { 7751 case WMA_VHT_PPS_PAID_MATCH: 7752 pps_val = ((val << 31) & 0xffff0000) | 7753 (PKT_PWR_SAVE_PAID_MATCH & 0xffff); 7754 goto pkt_pwr_save_config; 7755 case WMA_VHT_PPS_GID_MATCH: 7756 pps_val = ((val << 31) & 0xffff0000) | 7757 (PKT_PWR_SAVE_GID_MATCH & 0xffff); 7758 goto pkt_pwr_save_config; 7759 case WMA_VHT_PPS_DELIM_CRC_FAIL: 7760 pps_val = ((val << 31) & 0xffff0000) | 7761 (PKT_PWR_SAVE_DELIM_CRC_FAIL & 0xffff); 7762 goto pkt_pwr_save_config; 7763 7764 /* Enable the code below as and when the functionality 7765 * is supported/added in host. 
7766 */ 7767 #ifdef NOT_YET 7768 case WMA_VHT_PPS_EARLY_TIM_CLEAR: 7769 pps_val = ((val << 31) & 0xffff0000) | 7770 (PKT_PWR_SAVE_EARLY_TIM_CLEAR & 0xffff); 7771 goto pkt_pwr_save_config; 7772 case WMA_VHT_PPS_EARLY_DTIM_CLEAR: 7773 pps_val = ((val << 31) & 0xffff0000) | 7774 (PKT_PWR_SAVE_EARLY_DTIM_CLEAR & 0xffff); 7775 goto pkt_pwr_save_config; 7776 case WMA_VHT_PPS_EOF_PAD_DELIM: 7777 pps_val = ((val << 31) & 0xffff0000) | 7778 (PKT_PWR_SAVE_EOF_PAD_DELIM & 0xffff); 7779 goto pkt_pwr_save_config; 7780 case WMA_VHT_PPS_MACADDR_MISMATCH: 7781 pps_val = ((val << 31) & 0xffff0000) | 7782 (PKT_PWR_SAVE_MACADDR_MISMATCH & 0xffff); 7783 goto pkt_pwr_save_config; 7784 case WMA_VHT_PPS_GID_NSTS_ZERO: 7785 pps_val = ((val << 31) & 0xffff0000) | 7786 (PKT_PWR_SAVE_GID_NSTS_ZERO & 0xffff); 7787 goto pkt_pwr_save_config; 7788 case WMA_VHT_PPS_RSSI_CHECK: 7789 pps_val = ((val << 31) & 0xffff0000) | 7790 (PKT_PWR_SAVE_RSSI_CHECK & 0xffff); 7791 goto pkt_pwr_save_config; 7792 #endif /* NOT_YET */ 7793 pkt_pwr_save_config: 7794 wma_debug("vdev_id:%d val:0x%x pps_val:0x%x", vdev_id, 7795 val, pps_val); 7796 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id, 7797 wmi_vdev_param_packet_powersave, 7798 pps_val); 7799 break; 7800 default: 7801 wma_err("INVALID PPS CONFIG"); 7802 } 7803 7804 return (ret) ? 
QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS; 7805 } 7806 7807 /** 7808 * wma_process_set_mas() - Function to enable/disable MAS 7809 * @wma: Pointer to WMA handle 7810 * @mas_val: 1-Enable MAS, 0-Disable MAS 7811 * 7812 * This function enables/disables the MAS value 7813 * 7814 * Return: QDF_SUCCESS for success otherwise failure 7815 */ 7816 static QDF_STATUS wma_process_set_mas(tp_wma_handle wma, 7817 uint32_t *mas_val) 7818 { 7819 uint32_t val; 7820 7821 if (!wma || !mas_val) { 7822 wma_err("Invalid input to enable/disable MAS"); 7823 return QDF_STATUS_E_FAILURE; 7824 } 7825 7826 val = (*mas_val); 7827 7828 if (QDF_STATUS_SUCCESS != 7829 wma_set_enable_disable_mcc_adaptive_scheduler(val)) { 7830 wma_err("Unable to enable/disable MAS"); 7831 return QDF_STATUS_E_FAILURE; 7832 } 7833 wma_debug("Value is %d", val); 7834 return QDF_STATUS_SUCCESS; 7835 } 7836 7837 /** 7838 * wma_process_set_miracast() - Function to set miracast value in WMA 7839 * @wma: Pointer to WMA handle 7840 * @miracast_val: 0-Disabled,1-Source,2-Sink 7841 * 7842 * This function stores the miracast value in WMA 7843 * 7844 * Return: QDF_SUCCESS for success otherwise failure 7845 * 7846 */ 7847 static QDF_STATUS wma_process_set_miracast(tp_wma_handle wma, 7848 uint32_t *miracast_val) 7849 { 7850 if (!wma || !miracast_val) { 7851 wma_err("Invalid input to store miracast value"); 7852 return QDF_STATUS_E_FAILURE; 7853 } 7854 7855 wma->miracast_value = *miracast_val; 7856 wma_debug("Miracast value is %d", wma->miracast_value); 7857 7858 return QDF_STATUS_SUCCESS; 7859 } 7860 7861 /** 7862 * wma_config_stats_factor() - Function to configure stats avg. factor 7863 * @wma: pointer to WMA handle 7864 * @avg_factor: stats. avg. factor passed down by userspace 7865 * 7866 * This function configures the avg. 
stats value in firmware 7867 * 7868 * Return: QDF_STATUS_SUCCESS for success otherwise failure 7869 * 7870 */ 7871 static QDF_STATUS wma_config_stats_factor(tp_wma_handle wma, 7872 struct sir_stats_avg_factor *avg_factor) 7873 { 7874 QDF_STATUS ret; 7875 7876 if (!wma || !avg_factor) { 7877 wma_err("Invalid input of stats avg factor"); 7878 return QDF_STATUS_E_FAILURE; 7879 } 7880 7881 ret = wma_vdev_set_param(wma->wmi_handle, 7882 avg_factor->vdev_id, 7883 wmi_vdev_param_stats_avg_factor, 7884 avg_factor->stats_avg_factor); 7885 if (QDF_IS_STATUS_ERROR(ret)) { 7886 wma_err("failed to set avg_factor for vdev_id %d", 7887 avg_factor->vdev_id); 7888 } 7889 7890 wma_debug("Set stats_avg_factor %d for vdev_id %d", 7891 avg_factor->stats_avg_factor, avg_factor->vdev_id); 7892 7893 return ret; 7894 } 7895 7896 /** 7897 * wma_config_guard_time() - Function to set guard time in firmware 7898 * @wma: pointer to WMA handle 7899 * @guard_time: guard time passed down by userspace 7900 * 7901 * This function configures the guard time in firmware 7902 * 7903 * Return: QDF_STATUS_SUCCESS for success otherwise failure 7904 * 7905 */ 7906 static QDF_STATUS wma_config_guard_time(tp_wma_handle wma, 7907 struct sir_guard_time_request *guard_time) 7908 { 7909 QDF_STATUS ret; 7910 7911 if (!wma || !guard_time) { 7912 wma_err("Invalid input of guard time"); 7913 return QDF_STATUS_E_FAILURE; 7914 } 7915 7916 ret = wma_vdev_set_param(wma->wmi_handle, 7917 guard_time->vdev_id, 7918 wmi_vdev_param_rx_leak_window, 7919 guard_time->guard_time); 7920 if (QDF_IS_STATUS_ERROR(ret)) { 7921 wma_err("failed to set guard time for vdev_id %d", 7922 guard_time->vdev_id); 7923 } 7924 7925 wma_debug("Set guard time %d for vdev_id %d", 7926 guard_time->guard_time, guard_time->vdev_id); 7927 7928 return ret; 7929 } 7930 7931 /** 7932 * wma_enable_specific_fw_logs() - Start/Stop logging of diag event/log id 7933 * @wma_handle: WMA handle 7934 * @start_log: Start logging related parameters 7935 * 7936 * Send 
the command to the FW based on which specific logging of diag 7937 * event/log id can be started/stopped 7938 * 7939 * Return: None 7940 */ 7941 static void wma_enable_specific_fw_logs(tp_wma_handle wma_handle, 7942 struct sir_wifi_start_log *start_log) 7943 { 7944 7945 if (!start_log) { 7946 wma_err("start_log pointer is NULL"); 7947 return; 7948 } 7949 if (wma_validate_handle(wma_handle)) 7950 return; 7951 7952 if (!((start_log->ring_id == RING_ID_CONNECTIVITY) || 7953 (start_log->ring_id == RING_ID_FIRMWARE_DEBUG))) { 7954 wma_debug("Not connectivity or fw debug ring: %d", 7955 start_log->ring_id); 7956 return; 7957 } 7958 7959 wmi_unified_enable_specific_fw_logs_cmd(wma_handle->wmi_handle, 7960 (struct wmi_wifi_start_log *)start_log); 7961 } 7962 7963 #define MEGABYTE (1024 * 1024) 7964 /** 7965 * wma_set_wifi_start_packet_stats() - Start/stop packet stats 7966 * @wma_handle: WMA handle 7967 * @start_log: Structure containing the start wifi logger params 7968 * 7969 * This function is used to send the WMA commands to start/stop logging 7970 * of per packet statistics 7971 * 7972 * Return: None 7973 * 7974 */ 7975 #if !defined(FEATURE_PKTLOG) || defined(REMOVE_PKT_LOG) 7976 static void wma_set_wifi_start_packet_stats(void *wma_handle, 7977 struct sir_wifi_start_log *start_log) 7978 { 7979 } 7980 7981 #else 7982 static void wma_set_wifi_start_packet_stats(void *wma_handle, 7983 struct sir_wifi_start_log *start_log) 7984 { 7985 struct hif_opaque_softc *scn; 7986 uint32_t log_state; 7987 7988 if (!start_log) { 7989 wma_err("start_log pointer is NULL"); 7990 return; 7991 } 7992 if (wma_validate_handle(wma_handle)) 7993 return; 7994 7995 /* No need to register for ring IDs other than packet stats */ 7996 if (start_log->ring_id != RING_ID_PER_PACKET_STATS) { 7997 wma_debug("Ring id is not for per packet stats: %d", 7998 start_log->ring_id); 7999 return; 8000 } 8001 8002 scn = cds_get_context(QDF_MODULE_ID_HIF); 8003 if (!scn) { 8004 wma_err("Invalid HIF handle"); 8005 
return; 8006 } 8007 8008 #ifdef PKTLOG_LEGACY 8009 log_state = ATH_PKTLOG_ANI | ATH_PKTLOG_RCUPDATE | ATH_PKTLOG_RCFIND | 8010 ATH_PKTLOG_RX | ATH_PKTLOG_TX | 8011 ATH_PKTLOG_TEXT | ATH_PKTLOG_SW_EVENT; 8012 #elif defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ 8013 defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI) || \ 8014 defines(QCA_WIFI_WCN6450) 8015 log_state = ATH_PKTLOG_RCFIND | ATH_PKTLOG_RCUPDATE | 8016 ATH_PKTLOG_TX | ATH_PKTLOG_LITE_T2H | 8017 ATH_PKTLOG_SW_EVENT | ATH_PKTLOG_RX; 8018 #elif defined(QCA_WIFI_QCA6290) 8019 log_state = ATH_PKTLOG_LITE_RX | ATH_PKTLOG_LITE_T2H; 8020 #else 8021 wma_debug("Packet log Not supported"); 8022 log_state = 0; 8023 #endif 8024 if (start_log->size != 0) { 8025 pktlog_setsize(scn, start_log->size * MEGABYTE); 8026 return; 8027 } else if (start_log->is_pktlog_buff_clear == true) { 8028 pktlog_clearbuff(scn, start_log->is_pktlog_buff_clear); 8029 return; 8030 } 8031 8032 if (start_log->verbose_level == WLAN_LOG_LEVEL_ACTIVE) { 8033 pktlog_enable(scn, log_state, start_log->ini_triggered, 8034 start_log->user_triggered, 8035 start_log->is_iwpriv_command); 8036 wma_debug("Enabling per packet stats"); 8037 } else { 8038 pktlog_enable(scn, 0, start_log->ini_triggered, 8039 start_log->user_triggered, 8040 start_log->is_iwpriv_command); 8041 wma_debug("Disabling per packet stats"); 8042 } 8043 } 8044 #endif 8045 8046 /** 8047 * wma_send_flush_logs_to_fw() - Send log flush command to FW 8048 * @wma_handle: WMI handle 8049 * 8050 * This function is used to send the flush command to the FW, 8051 * that will flush the fw logs that are residue in the FW 8052 * 8053 * Return: None 8054 */ 8055 void wma_send_flush_logs_to_fw(tp_wma_handle wma_handle) 8056 { 8057 QDF_STATUS status; 8058 8059 status = wmi_unified_flush_logs_to_fw_cmd(wma_handle->wmi_handle); 8060 if (QDF_IS_STATUS_ERROR(status)) 8061 return; 8062 8063 status = qdf_mc_timer_start(&wma_handle->log_completion_timer, 8064 WMA_LOG_COMPLETION_TIMER); 8065 if 
(QDF_IS_STATUS_ERROR(status)) 8066 wma_err("Failed to start the log completion timer"); 8067 } 8068 8069 /** 8070 * wma_update_tx_fail_cnt_th() - Set threshold for TX pkt fail 8071 * @wma_handle: WMA handle 8072 * @tx_fail_cnt_th: sme_tx_fail_cnt_threshold parameter 8073 * 8074 * This function is used to set Tx pkt fail count threshold, 8075 * FW will do disconnect with station once this threshold is reached. 8076 * 8077 * Return: QDF_STATUS 8078 */ 8079 static QDF_STATUS wma_update_tx_fail_cnt_th(tp_wma_handle wma, 8080 struct sme_tx_fail_cnt_threshold *tx_fail_cnt_th) 8081 { 8082 u_int8_t vdev_id; 8083 u_int32_t tx_fail_disconn_th; 8084 int ret = -EIO; 8085 struct wmi_unified *wmi_handle; 8086 8087 if (wma_validate_handle(wma)) 8088 return QDF_STATUS_E_INVAL; 8089 8090 wmi_handle = wma->wmi_handle; 8091 if (wmi_validate_handle(wmi_handle)) 8092 return QDF_STATUS_E_INVAL; 8093 8094 vdev_id = tx_fail_cnt_th->session_id; 8095 tx_fail_disconn_th = tx_fail_cnt_th->tx_fail_cnt_threshold; 8096 wma_debug("Set TX pkt fail count threshold vdevId %d count %d", 8097 vdev_id, tx_fail_disconn_th); 8098 8099 ret = wma_vdev_set_param(wmi_handle, vdev_id, 8100 wmi_vdev_param_disconnect_th, 8101 tx_fail_disconn_th); 8102 8103 if (ret) { 8104 wma_err("Failed to send TX pkt fail count threshold command"); 8105 return QDF_STATUS_E_FAILURE; 8106 } 8107 8108 return QDF_STATUS_SUCCESS; 8109 } 8110 8111 /** 8112 * wma_update_short_retry_limit() - Set retry limit for short frames 8113 * @wma_handle: WMA handle 8114 * @short_retry_limit_th: retry limir count for Short frames. 8115 * 8116 * This function is used to configure the transmission retry limit at which 8117 * short frames needs to be retry. 
8118 * 8119 * Return: QDF_STATUS 8120 */ 8121 static QDF_STATUS wma_update_short_retry_limit(tp_wma_handle wma, 8122 struct sme_short_retry_limit *short_retry_limit_th) 8123 { 8124 uint8_t vdev_id; 8125 uint32_t short_retry_limit; 8126 int ret; 8127 struct wmi_unified *wmi_handle; 8128 8129 if (wma_validate_handle(wma)) 8130 return QDF_STATUS_E_INVAL; 8131 8132 wmi_handle = wma->wmi_handle; 8133 if (wmi_validate_handle(wmi_handle)) 8134 return QDF_STATUS_E_INVAL; 8135 8136 vdev_id = short_retry_limit_th->session_id; 8137 short_retry_limit = short_retry_limit_th->short_retry_limit; 8138 wma_debug("Set short retry limit threshold vdevId %d count %d", 8139 vdev_id, short_retry_limit); 8140 8141 ret = wma_vdev_set_param(wmi_handle, vdev_id, 8142 wmi_vdev_param_non_agg_sw_retry_th, 8143 short_retry_limit); 8144 8145 if (ret) { 8146 wma_err("Failed to send short limit threshold command"); 8147 return QDF_STATUS_E_FAILURE; 8148 } 8149 return QDF_STATUS_SUCCESS; 8150 } 8151 8152 /** 8153 * wma_update_long_retry_limit() - Set retry limit for long frames 8154 * @wma_handle: WMA handle 8155 * @long_retry_limit_th: retry limir count for long frames 8156 * 8157 * This function is used to configure the transmission retry limit at which 8158 * long frames needs to be retry 8159 * 8160 * Return: QDF_STATUS 8161 */ 8162 static QDF_STATUS wma_update_long_retry_limit(tp_wma_handle wma, 8163 struct sme_long_retry_limit *long_retry_limit_th) 8164 { 8165 uint8_t vdev_id; 8166 uint32_t long_retry_limit; 8167 int ret; 8168 struct wmi_unified *wmi_handle; 8169 8170 if (wma_validate_handle(wma)) 8171 return QDF_STATUS_E_INVAL; 8172 8173 wmi_handle = wma->wmi_handle; 8174 if (wmi_validate_handle(wmi_handle)) 8175 return QDF_STATUS_E_INVAL; 8176 8177 vdev_id = long_retry_limit_th->session_id; 8178 long_retry_limit = long_retry_limit_th->long_retry_limit; 8179 wma_debug("Set TX pkt fail count threshold vdevId %d count %d", 8180 vdev_id, long_retry_limit); 8181 8182 ret = 
wma_vdev_set_param(wmi_handle, vdev_id, 8183 wmi_vdev_param_agg_sw_retry_th, 8184 long_retry_limit); 8185 8186 if (ret) { 8187 wma_err("Failed to send long limit threshold command"); 8188 return QDF_STATUS_E_FAILURE; 8189 } 8190 8191 return QDF_STATUS_SUCCESS; 8192 } 8193 8194 #define MAX_VDEV_AP_ALIVE_PARAMS 4 8195 /* params being sent: 8196 * wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs 8197 * wmi_vdev_param_ap_keepalive_max_idle_inactive_secs 8198 * wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs 8199 * wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs 8200 */ 8201 8202 /* 8203 * wma_update_sta_inactivity_timeout() - Set sta_inactivity_timeout to fw 8204 * @wma_handle: WMA handle 8205 * @sta_inactivity_timer: sme_sta_inactivity_timeout 8206 * 8207 * This function is used to set sta_inactivity_timeout. 8208 * If a station does not send anything in sta_inactivity_timeout seconds, an 8209 * empty data frame is sent to it in order to verify whether it is 8210 * still in range. If this frame is not ACKed, the station will be 8211 * disassociated and then deauthenticated. 
8212 * 8213 * Return: None 8214 */ 8215 void wma_update_sta_inactivity_timeout(tp_wma_handle wma, 8216 struct sme_sta_inactivity_timeout *sta_inactivity_timer) 8217 { 8218 uint8_t vdev_id; 8219 uint32_t max_unresponsive_time; 8220 uint32_t min_inactive_time, max_inactive_time; 8221 struct wmi_unified *wmi_handle; 8222 struct dev_set_param setparam[MAX_VDEV_AP_ALIVE_PARAMS] = {}; 8223 uint8_t index = 0; 8224 QDF_STATUS status = QDF_STATUS_E_FAILURE; 8225 8226 if (wma_validate_handle(wma)) 8227 return; 8228 8229 wmi_handle = wma->wmi_handle; 8230 if (wmi_validate_handle(wmi_handle)) 8231 return; 8232 8233 vdev_id = sta_inactivity_timer->session_id; 8234 max_unresponsive_time = sta_inactivity_timer->sta_inactivity_timeout; 8235 max_inactive_time = max_unresponsive_time * TWO_THIRD; 8236 min_inactive_time = max_unresponsive_time - max_inactive_time; 8237 status = mlme_check_index_setparam( 8238 setparam, 8239 wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs, 8240 min_inactive_time, index++, 8241 MAX_VDEV_AP_ALIVE_PARAMS); 8242 if (QDF_IS_STATUS_ERROR(status)) { 8243 wma_err("failed to set wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs"); 8244 goto error; 8245 } 8246 status = mlme_check_index_setparam( 8247 setparam, 8248 wmi_vdev_param_ap_keepalive_max_idle_inactive_secs, 8249 min_inactive_time, index++, MAX_VDEV_AP_ALIVE_PARAMS); 8250 if (QDF_IS_STATUS_ERROR(status)) { 8251 wma_err("failed to set wmi_vdev_param_ap_keepalive_max_idle_inactive_secs"); 8252 goto error; 8253 } 8254 status = mlme_check_index_setparam( 8255 setparam, 8256 wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs, 8257 max_inactive_time, index++, MAX_VDEV_AP_ALIVE_PARAMS); 8258 if (QDF_IS_STATUS_ERROR(status)) { 8259 wma_err("failed to set wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs"); 8260 goto error; 8261 } 8262 status = mlme_check_index_setparam( 8263 setparam, 8264 wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs, 8265 max_unresponsive_time, index++, 8266 
MAX_VDEV_AP_ALIVE_PARAMS); 8267 if (QDF_IS_STATUS_ERROR(status)) { 8268 wma_err("failed to set wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs"); 8269 goto error; 8270 } 8271 8272 status = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM, 8273 vdev_id, setparam, index); 8274 if (QDF_IS_STATUS_ERROR(status)) 8275 wma_err("Failed to send idle_inactive,unresponsive time vdev set params"); 8276 8277 error: 8278 return; 8279 } 8280 8281 #ifdef WLAN_FEATURE_WOW_PULSE 8282 8283 8284 #define WMI_WOW_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM \ 8285 WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD_fixed_param 8286 8287 8288 #define WMITLV_TAG_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM \ 8289 WMITLV_TAG_STRUC_wmi_wow_hostwakeup_gpio_pin_pattern_config_cmd_fixed_param 8290 8291 /** 8292 * wma_send_wow_pulse_cmd() - send wmi cmd of wow pulse cmd 8293 * information to fw. 8294 * @wma_handle: wma handler 8295 * @udp_response: wow_pulse_mode pointer 8296 * 8297 * Return: Return QDF_STATUS 8298 */ 8299 static QDF_STATUS wma_send_wow_pulse_cmd(tp_wma_handle wma_handle, 8300 struct wow_pulse_mode *wow_pulse_cmd) 8301 { 8302 QDF_STATUS status = QDF_STATUS_SUCCESS; 8303 wmi_buf_t buf; 8304 WMI_WOW_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM *cmd; 8305 u_int16_t len; 8306 8307 len = sizeof(*cmd); 8308 buf = wmi_buf_alloc(wma_handle->wmi_handle, len); 8309 if (!buf) 8310 return QDF_STATUS_E_NOMEM; 8311 8312 cmd = (WMI_WOW_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM *)wmi_buf_data(buf); 8313 qdf_mem_zero(cmd, len); 8314 8315 WMITLV_SET_HDR(&cmd->tlv_header, 8316 WMITLV_TAG_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM, 8317 WMITLV_GET_STRUCT_TLVLEN( 8318 WMI_WOW_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM)); 8319 8320 cmd->enable = wow_pulse_cmd->wow_pulse_enable; 8321 cmd->pin = wow_pulse_cmd->wow_pulse_pin; 8322 cmd->interval_low = wow_pulse_cmd->wow_pulse_interval_low; 8323 cmd->interval_high = wow_pulse_cmd->wow_pulse_interval_high; 8324 cmd->repeat_cnt = wow_pulse_cmd->wow_pulse_repeat_count; 8325 cmd->init_state = 
wow_pulse_cmd->wow_pulse_init_state; 8326 8327 if (wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len, 8328 WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID)) { 8329 wmi_buf_free(buf); 8330 status = QDF_STATUS_E_FAILURE; 8331 } 8332 8333 wma_debug("Exit"); 8334 return status; 8335 } 8336 8337 #undef WMI_WOW_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM 8338 #undef WMITLV_TAG_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM 8339 #undef WMI_WOW_PULSE_REPEAT_CNT 8340 8341 #else 8342 static inline QDF_STATUS wma_send_wow_pulse_cmd(tp_wma_handle wma_handle, 8343 struct wow_pulse_mode *wow_pulse_cmd) 8344 { 8345 return QDF_STATUS_E_FAILURE; 8346 } 8347 #endif 8348 8349 8350 /** 8351 * wma_process_power_debug_stats_req() - Process the Chip Power stats collect 8352 * request and pass the Power stats request to Fw 8353 * @wma_handle: WMA handle 8354 * 8355 * Return: QDF_STATUS 8356 */ 8357 #ifdef WLAN_POWER_DEBUG 8358 static QDF_STATUS wma_process_power_debug_stats_req(tp_wma_handle wma_handle) 8359 { 8360 wmi_pdev_get_chip_power_stats_cmd_fixed_param *cmd; 8361 int32_t len; 8362 wmi_buf_t buf; 8363 uint8_t *buf_ptr; 8364 int ret; 8365 8366 if (wma_validate_handle(wma_handle)) 8367 return QDF_STATUS_E_FAILURE; 8368 8369 len = sizeof(*cmd); 8370 buf = wmi_buf_alloc(wma_handle->wmi_handle, len); 8371 if (!buf) 8372 return QDF_STATUS_E_NOMEM; 8373 8374 buf_ptr = (u_int8_t *) wmi_buf_data(buf); 8375 cmd = (wmi_pdev_get_chip_power_stats_cmd_fixed_param *) buf_ptr; 8376 8377 WMITLV_SET_HDR(&cmd->tlv_header, 8378 WMITLV_TAG_STRUC_wmi_get_chip_power_stats_cmd_fixed_param, 8379 WMITLV_GET_STRUCT_TLVLEN( 8380 wmi_pdev_get_chip_power_stats_cmd_fixed_param)); 8381 cmd->pdev_id = 0; 8382 8383 wma_debug("POWER_DEBUG_STATS - Get Request Params; Pdev id - %d", 8384 cmd->pdev_id); 8385 ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len, 8386 WMI_PDEV_GET_CHIP_POWER_STATS_CMDID); 8387 if (ret) { 8388 wmi_buf_free(buf); 8389 return QDF_STATUS_E_FAILURE; 8390 } 8391 return QDF_STATUS_SUCCESS; 8392 } 8393 #else 
static QDF_STATUS wma_process_power_debug_stats_req(tp_wma_handle wma_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#ifdef WLAN_FEATURE_BEACON_RECEPTION_STATS
/**
 * wma_process_beacon_debug_stats_req() - Request beacon-reception stats
 * for a vdev from firmware
 * @wma_handle: WMA handle
 * @vdev_id: pointer to the vdev id to query
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS wma_process_beacon_debug_stats_req(tp_wma_handle wma_handle,
						     uint32_t *vdev_id)
{
	wmi_vdev_get_bcn_recv_stats_cmd_fixed_param *cmd;
	int32_t len;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	int ret;

	wma_debug("Enter");
	if (wma_validate_handle(wma_handle))
		return QDF_STATUS_E_FAILURE;

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
	if (!buf)
		return QDF_STATUS_E_NOMEM;

	buf_ptr = (u_int8_t *)wmi_buf_data(buf);
	cmd = (wmi_vdev_get_bcn_recv_stats_cmd_fixed_param *)buf_ptr;

	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_get_bcn_recv_stats_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_vdev_get_bcn_recv_stats_cmd_fixed_param));
	cmd->vdev_id = *vdev_id;

	wma_debug("BEACON_DEBUG_STATS - Get Request Params; vdev id - %d",
		  cmd->vdev_id);
	ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
				   WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID);
	if (ret) {
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	wma_debug("Exit");
	return QDF_STATUS_SUCCESS;
}
#else
/* Stub when beacon-reception stats are compiled out */
static QDF_STATUS wma_process_beacon_debug_stats_req(tp_wma_handle wma_handle,
						     uint32_t *vdev_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * wma_set_arp_req_stats() - process set arp stats request command to fw
 * @handle: WMA handle
 * @req_buf: set arp stats request buffer
 *
 * Return: None
 */
static void wma_set_arp_req_stats(WMA_HANDLE handle,
				  struct set_arp_stats_params *req_buf)
{
	QDF_STATUS status;
	struct set_arp_stats *arp_stats;
	tp_wma_handle wma_handle = (tp_wma_handle) handle;
	struct wlan_objmgr_vdev *vdev;
	struct wmi_unified *wmi_handle;

	if (wma_validate_handle(wma_handle))
		return;

	wmi_handle = wma_handle->wmi_handle;
	if (wmi_validate_handle(wmi_handle))
		return;

	if (!wma_is_vdev_valid(req_buf->vdev_id)) {
		wma_err("vdev id:%d is not active", req_buf->vdev_id);
		return;
	}

	/* Hold a reference so the vdev cannot be deleted while in use */
	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(wma_handle->psoc,
						    req_buf->vdev_id,
						    WLAN_LEGACY_WMA_ID);
	if (!vdev) {
		wma_err("Can't get vdev by vdev_id:%d", req_buf->vdev_id);
		return;
	}

	if (!wma_is_vdev_up(req_buf->vdev_id)) {
		wma_debug("vdev id:%d is not started", req_buf->vdev_id);
		goto release_ref;
	}

	/* req_buf leads with the set_arp_stats fields, so the cast is the
	 * established convention here.
	 */
	arp_stats = (struct set_arp_stats *)req_buf;
	status = wmi_unified_set_arp_stats_req(wmi_handle, arp_stats);
	if (QDF_IS_STATUS_ERROR(status))
		wma_err("failed to set arp stats to FW");

release_ref:
	wlan_objmgr_vdev_release_ref(vdev, WLAN_LEGACY_WMA_ID);
}

/**
 * wma_get_arp_req_stats() - process get arp stats request command to fw
 * @handle: WMA handle
 * @req_buf: get arp stats request buffer
 *
 * Return: None
 */
static void wma_get_arp_req_stats(WMA_HANDLE handle,
				  struct get_arp_stats_params *req_buf)
{
	QDF_STATUS status;
	struct get_arp_stats *arp_stats;
	tp_wma_handle wma_handle = (tp_wma_handle) handle;
	struct wmi_unified *wmi_handle;

	if (wma_validate_handle(wma_handle))
		return;

	wmi_handle = wma_handle->wmi_handle;
	if (wmi_validate_handle(wmi_handle))
		return;

	if (!wma_is_vdev_valid(req_buf->vdev_id)) {
		wma_err("vdev id:%d is not active", req_buf->vdev_id);
		return;
	}

	arp_stats = (struct get_arp_stats *)req_buf;
	status = wmi_unified_get_arp_stats_req(wmi_handle, arp_stats);
	if (QDF_IS_STATUS_ERROR(status))
		wma_err("failed to send get arp stats to FW");
}

/** 8531 * wma_set_del_pmkid_cache() - API to set/delete PMKID cache entry in fw 8532 * @handle: WMA handle 8533 * @pmk_cache: PMK cache entry 8534 * 8535 * Return: None 8536 */ 8537 static void wma_set_del_pmkid_cache(WMA_HANDLE handle, 8538 struct wmi_unified_pmk_cache *pmk_cache) 8539 { 8540 QDF_STATUS status; 8541 tp_wma_handle wma_handle = (tp_wma_handle) handle; 8542 struct wmi_unified *wmi_handle; 8543 8544 if (wma_validate_handle(wma_handle)) 8545 return; 8546 8547 wmi_handle = wma_handle->wmi_handle; 8548 if (wmi_validate_handle(wmi_handle)) 8549 return; 8550 8551 status = wmi_unified_set_del_pmkid_cache(wmi_handle, pmk_cache); 8552 if (QDF_IS_STATUS_ERROR(status)) 8553 wma_err("failed to send set/del pmkid cmd to fw"); 8554 } 8555 8556 /** 8557 * wma_send_invoke_neighbor_report() - API to send invoke neighbor report 8558 * command to fw 8559 * 8560 * @handle: WMA handle 8561 * @params: Pointer to invoke neighbor report params 8562 * 8563 * Return: None 8564 */ 8565 static 8566 void wma_send_invoke_neighbor_report(WMA_HANDLE handle, 8567 struct wmi_invoke_neighbor_report_params *params) 8568 { 8569 QDF_STATUS status; 8570 tp_wma_handle wma_handle = (tp_wma_handle) handle; 8571 struct wmi_unified *wmi_handle; 8572 8573 if (wma_validate_handle(wma_handle)) 8574 return; 8575 8576 wmi_handle = wma_handle->wmi_handle; 8577 if (wmi_validate_handle(wmi_handle)) 8578 return; 8579 8580 status = wmi_unified_invoke_neighbor_report_cmd(wmi_handle, params); 8581 8582 if (status != QDF_STATUS_SUCCESS) 8583 wma_err("failed to send invoke neighbor report command"); 8584 } 8585 8586 QDF_STATUS wma_set_rx_reorder_timeout_val(tp_wma_handle wma_handle, 8587 struct sir_set_rx_reorder_timeout_val *reorder_timeout) 8588 { 8589 wmi_pdev_set_reorder_timeout_val_cmd_fixed_param *cmd; 8590 uint32_t len; 8591 wmi_buf_t buf; 8592 int ret; 8593 8594 if (!reorder_timeout) { 8595 wma_err("invalid pointer"); 8596 return QDF_STATUS_E_INVAL; 8597 } 8598 8599 if 
(wma_validate_handle(wma_handle)) 8600 return QDF_STATUS_E_INVAL; 8601 8602 len = sizeof(*cmd); 8603 buf = wmi_buf_alloc(wma_handle->wmi_handle, len); 8604 if (!buf) 8605 return QDF_STATUS_E_NOMEM; 8606 8607 cmd = (wmi_pdev_set_reorder_timeout_val_cmd_fixed_param *) 8608 wmi_buf_data(buf); 8609 8610 WMITLV_SET_HDR(&cmd->tlv_header, 8611 WMITLV_TAG_STRUC_wmi_pdev_set_reorder_timeout_val_cmd_fixed_param, 8612 WMITLV_GET_STRUCT_TLVLEN(wmi_pdev_set_reorder_timeout_val_cmd_fixed_param)); 8613 8614 memcpy(cmd->rx_timeout_pri, reorder_timeout->rx_timeout_pri, 8615 sizeof(reorder_timeout->rx_timeout_pri)); 8616 8617 wma_debug("rx aggr record timeout: VO: %d, VI: %d, BE: %d, BK: %d", 8618 cmd->rx_timeout_pri[0], cmd->rx_timeout_pri[1], 8619 cmd->rx_timeout_pri[2], cmd->rx_timeout_pri[3]); 8620 8621 ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len, 8622 WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID); 8623 if (ret) { 8624 wmi_buf_free(buf); 8625 return QDF_STATUS_E_FAILURE; 8626 } 8627 8628 return QDF_STATUS_SUCCESS; 8629 } 8630 8631 QDF_STATUS wma_set_rx_blocksize(tp_wma_handle wma_handle, 8632 struct sir_peer_set_rx_blocksize *peer_rx_blocksize) 8633 { 8634 wmi_peer_set_rx_blocksize_cmd_fixed_param *cmd; 8635 int32_t len; 8636 wmi_buf_t buf; 8637 u_int8_t *buf_ptr; 8638 int ret; 8639 8640 if (!peer_rx_blocksize) { 8641 wma_err("invalid pointer"); 8642 return QDF_STATUS_E_INVAL; 8643 } 8644 8645 if (wma_validate_handle(wma_handle)) 8646 return QDF_STATUS_E_INVAL; 8647 8648 len = sizeof(*cmd); 8649 buf = wmi_buf_alloc(wma_handle->wmi_handle, len); 8650 if (!buf) 8651 return QDF_STATUS_E_NOMEM; 8652 8653 buf_ptr = (u_int8_t *) wmi_buf_data(buf); 8654 cmd = (wmi_peer_set_rx_blocksize_cmd_fixed_param *) buf_ptr; 8655 8656 WMITLV_SET_HDR(&cmd->tlv_header, 8657 WMITLV_TAG_STRUC_wmi_peer_set_rx_blocksize_cmd_fixed_param, 8658 WMITLV_GET_STRUCT_TLVLEN(wmi_peer_set_rx_blocksize_cmd_fixed_param)); 8659 8660 cmd->vdev_id = peer_rx_blocksize->vdev_id; 8661 
cmd->rx_block_ack_win_limit = 8662 peer_rx_blocksize->rx_block_ack_win_limit; 8663 WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_rx_blocksize->peer_macaddr.bytes, 8664 &cmd->peer_macaddr); 8665 8666 wma_debug("rx aggr blocksize: %d", cmd->rx_block_ack_win_limit); 8667 8668 ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len, 8669 WMI_PEER_SET_RX_BLOCKSIZE_CMDID); 8670 if (ret) { 8671 wmi_buf_free(buf); 8672 return QDF_STATUS_E_FAILURE; 8673 } 8674 8675 return QDF_STATUS_SUCCESS; 8676 } 8677 8678 QDF_STATUS wma_get_chain_rssi(tp_wma_handle wma_handle, 8679 struct get_chain_rssi_req_params *req_params) 8680 { 8681 wmi_pdev_div_get_rssi_antid_fixed_param *cmd; 8682 wmi_buf_t wmi_buf; 8683 uint32_t len = sizeof(wmi_pdev_div_get_rssi_antid_fixed_param); 8684 u_int8_t *buf_ptr; 8685 8686 if (wma_validate_handle(wma_handle)) 8687 return QDF_STATUS_E_INVAL; 8688 8689 wmi_buf = wmi_buf_alloc(wma_handle->wmi_handle, len); 8690 if (!wmi_buf) 8691 return QDF_STATUS_E_NOMEM; 8692 8693 buf_ptr = (u_int8_t *)wmi_buf_data(wmi_buf); 8694 8695 cmd = (wmi_pdev_div_get_rssi_antid_fixed_param *)buf_ptr; 8696 WMITLV_SET_HDR(&cmd->tlv_header, 8697 WMITLV_TAG_STRUC_wmi_pdev_div_get_rssi_antid_fixed_param, 8698 WMITLV_GET_STRUCT_TLVLEN( 8699 wmi_pdev_div_get_rssi_antid_fixed_param)); 8700 cmd->pdev_id = 0; 8701 WMI_CHAR_ARRAY_TO_MAC_ADDR(req_params->peer_macaddr.bytes, 8702 &cmd->macaddr); 8703 8704 if (wmi_unified_cmd_send(wma_handle->wmi_handle, wmi_buf, len, 8705 WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID)) { 8706 wmi_buf_free(wmi_buf); 8707 return QDF_STATUS_E_FAILURE; 8708 } 8709 8710 return QDF_STATUS_SUCCESS; 8711 } 8712 8713 #if defined(WLAN_FEATURE_FILS_SK) 8714 /** 8715 * wma_roam_scan_send_hlp() - API to send HLP IE info to fw 8716 * @wma_handle: WMA handle 8717 * @req: HLP params 8718 * 8719 * Return: QDF_STATUS 8720 */ 8721 static QDF_STATUS wma_roam_scan_send_hlp(tp_wma_handle wma_handle, 8722 struct hlp_params *req) 8723 { 8724 struct hlp_params *params; 8725 QDF_STATUS status; 8726 8727 
	params = qdf_mem_malloc(sizeof(*params));
	if (!params)
		return QDF_STATUS_E_NOMEM;

	/* Copy into a heap buffer; the WMI layer expects its own copy */
	params->vdev_id = req->vdev_id;
	params->hlp_ie_len = req->hlp_ie_len;
	qdf_mem_copy(params->hlp_ie, req->hlp_ie, req->hlp_ie_len);
	status = wmi_unified_roam_send_hlp_cmd(wma_handle->wmi_handle, params);

	wma_debug("Send HLP status %d vdev id %d", status, params->vdev_id);
	/* Dump only the first 10 bytes of the HLP IE for debugging */
	qdf_trace_hex_dump(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
			   params->hlp_ie, 10);

	qdf_mem_free(params);
	return status;
}
#else
/* Stub when FILS shared-key roaming is compiled out */
static QDF_STATUS wma_roam_scan_send_hlp(tp_wma_handle wma_handle,
					 struct hlp_params *req)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * wma_process_limit_off_chan() - set limit off channel parameters
 * @wma_handle: pointer to wma handle
 * @param: pointer to sir_limit_off_chan
 *
 * Return: QDF_STATUS_SUCCESS for success or error code.
 */
static QDF_STATUS wma_process_limit_off_chan(tp_wma_handle wma_handle,
					     struct sir_limit_off_chan *param)
{
	int32_t err;
	struct wmi_limit_off_chan_param limit_off_chan_param;

	if (param->vdev_id >= wma_handle->max_bssid) {
		wma_err("Invalid vdev_id: %d", param->vdev_id);
		return QDF_STATUS_E_INVAL;
	}
	if (!wma_is_vdev_up(param->vdev_id)) {
		wma_debug("vdev %d is not up skipping limit_off_chan_param",
			  param->vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	limit_off_chan_param.vdev_id = param->vdev_id;
	limit_off_chan_param.status = param->is_tos_active;
	limit_off_chan_param.max_offchan_time = param->max_off_chan_time;
	limit_off_chan_param.rest_time = param->rest_time;
	limit_off_chan_param.skip_dfs_chans = param->skip_dfs_chans;

	err = wmi_unified_send_limit_off_chan_cmd(wma_handle->wmi_handle,
						  &limit_off_chan_param);
	if (err) {
		wma_err("failed to set limit off chan cmd");
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_process_obss_color_collision_req() - pass an OBSS color collision
 * detection configuration to firmware for an up vdev
 * @wma_handle: pointer to wma handle
 * @cfg: OBSS color collision configuration
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS wma_process_obss_color_collision_req(tp_wma_handle wma_handle,
		struct wmi_obss_color_collision_cfg_param *cfg)
{
	QDF_STATUS status;

	if (cfg->vdev_id >= wma_handle->max_bssid) {
		wma_err("Invalid vdev_id: %d", cfg->vdev_id);
		return QDF_STATUS_E_INVAL;
	}
	if (!wma_is_vdev_up(cfg->vdev_id)) {
		wma_err("vdev %d is not up skipping obss color collision req",
			cfg->vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	status = wmi_unified_send_obss_color_collision_cfg_cmd(wma_handle->
							       wmi_handle, cfg);
	if (QDF_IS_STATUS_ERROR(status))
		wma_err("Failed to send obss color collision cfg");

	return status;
}

/**
 * wma_send_obss_detection_cfg() - send obss detection cfg to firmware
 * @wma_handle: pointer to wma handle
 * @cfg: obss detection configuration
 *
 * Send obss detection configuration to firmware.
8819 * 8820 * Return: None 8821 */ 8822 static void wma_send_obss_detection_cfg(tp_wma_handle wma_handle, 8823 struct wmi_obss_detection_cfg_param 8824 *cfg) 8825 { 8826 QDF_STATUS status; 8827 8828 if (cfg->vdev_id >= wma_handle->max_bssid) { 8829 wma_err("Invalid vdev_id: %d", cfg->vdev_id); 8830 return; 8831 } 8832 if (!wma_is_vdev_up(cfg->vdev_id)) { 8833 wma_err("vdev %d is not up skipping obss detection req", 8834 cfg->vdev_id); 8835 return; 8836 } 8837 8838 status = wmi_unified_send_obss_detection_cfg_cmd(wma_handle->wmi_handle, 8839 cfg); 8840 if (QDF_IS_STATUS_ERROR(status)) 8841 wma_err("Failed to send obss detection cfg"); 8842 8843 return; 8844 } 8845 8846 #ifdef WLAN_FEATURE_MOTION_DETECTION 8847 /** 8848 * wma_motion_det_host_event_handler - motion detection event handler 8849 * @handle: WMA global handle 8850 * @event: motion detection event 8851 * @len: Length of cmd 8852 * 8853 * Call motion detection event callback handler 8854 * 8855 * Return: 0 on success, else error on failure 8856 */ 8857 int wma_motion_det_host_event_handler(void *handle, uint8_t *event, 8858 uint32_t len) 8859 { 8860 wmi_motion_det_event *motion_det_event_hdr; 8861 WMI_MOTION_DET_HOST_EVENTID_param_tlvs *param_buf = 8862 (WMI_MOTION_DET_HOST_EVENTID_param_tlvs *)event; 8863 struct sir_md_evt *md_event; 8864 struct mac_context *pmac = (struct mac_context *)cds_get_context( 8865 QDF_MODULE_ID_PE); 8866 8867 if (!param_buf) { 8868 wma_err("Invalid motion det host event buffer"); 8869 return -EINVAL; 8870 } 8871 8872 if (!pmac || !pmac->sme.md_host_evt_cb) { 8873 wma_err("Invalid motion detect callback"); 8874 return -EINVAL; 8875 } 8876 8877 motion_det_event_hdr = param_buf->fixed_param; 8878 wma_alert("motion detect host event received, vdev_id=%d, status=%d", 8879 motion_det_event_hdr->vdev_id, motion_det_event_hdr->status); 8880 8881 md_event = qdf_mem_malloc(sizeof(*md_event)); 8882 if (!md_event) 8883 return -ENOMEM; 8884 8885 md_event->vdev_id = 
motion_det_event_hdr->vdev_id; 8886 md_event->status = motion_det_event_hdr->status; 8887 8888 pmac->sme.md_host_evt_cb(pmac->sme.md_ctx, md_event); 8889 8890 qdf_mem_free(md_event); 8891 return 0; 8892 } 8893 8894 /** 8895 * wma_motion_det_base_line_host_event_handler - md baselining event handler 8896 * @handle: WMA global handle 8897 * @event: motion detection baselining event 8898 * @len: Length of cmd 8899 * 8900 * Return: 0 on success, else error on failure 8901 */ 8902 int wma_motion_det_base_line_host_event_handler(void *handle, 8903 uint8_t *event, uint32_t len) 8904 { 8905 wmi_motion_det_base_line_event *motion_det_base_line_event_hdr; 8906 WMI_MOTION_DET_BASE_LINE_HOST_EVENTID_param_tlvs *param_buf = 8907 (WMI_MOTION_DET_BASE_LINE_HOST_EVENTID_param_tlvs *)event; 8908 struct sir_md_bl_evt *md_bl_event; 8909 struct mac_context *pmac = (struct mac_context *)cds_get_context( 8910 QDF_MODULE_ID_PE); 8911 8912 if (!param_buf) { 8913 wma_err("Invalid motion detection base line event buffer"); 8914 return -EINVAL; 8915 } 8916 8917 if (!pmac || !pmac->sme.md_bl_evt_cb) { 8918 wma_err("Invalid motion detection base line callback"); 8919 return -EINVAL; 8920 } 8921 8922 motion_det_base_line_event_hdr = param_buf->fixed_param; 8923 wma_alert("motion detection base line event received, vdev_id=%d", 8924 motion_det_base_line_event_hdr->vdev_id); 8925 wma_alert("baseline_value=%d bl_max_corr_resv=%d bl_min_corr_resv=%d", 8926 motion_det_base_line_event_hdr->bl_baseline_value, 8927 motion_det_base_line_event_hdr->bl_max_corr_reserved, 8928 motion_det_base_line_event_hdr->bl_min_corr_reserved); 8929 8930 md_bl_event = qdf_mem_malloc(sizeof(*md_bl_event)); 8931 if (!md_bl_event) 8932 return -ENOMEM; 8933 8934 md_bl_event->vdev_id = motion_det_base_line_event_hdr->vdev_id; 8935 md_bl_event->bl_baseline_value = 8936 motion_det_base_line_event_hdr->bl_baseline_value; 8937 md_bl_event->bl_max_corr_reserved = 8938 motion_det_base_line_event_hdr->bl_max_corr_reserved; 8939 
	md_bl_event->bl_min_corr_reserved =
		motion_det_base_line_event_hdr->bl_min_corr_reserved;

	pmac->sme.md_bl_evt_cb(pmac->sme.md_ctx, md_bl_event);

	qdf_mem_free(md_bl_event);
	return 0;
}

/**
 * wma_set_motion_det_config - Sends motion detection configuration wmi cmd
 * @wma_handle: WMA global handle
 * @motion_det_cfg: motion detection configuration
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_ERROR on error
 */
static QDF_STATUS wma_set_motion_det_config(
				tp_wma_handle wma_handle,
				struct sme_motion_det_cfg *motion_det_cfg)
{
	wmi_motion_det_config_params_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	int err;

	buf = wmi_buf_alloc(wma_handle->wmi_handle, sizeof(*cmd));
	if (!buf)
		return QDF_STATUS_E_NOMEM;

	cmd = (wmi_motion_det_config_params_cmd_fixed_param *)wmi_buf_data(buf);
	qdf_mem_zero(cmd, sizeof(*cmd));

	WMITLV_SET_HDR(
		&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_motion_det_config_params_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_motion_det_config_params_cmd_fixed_param));
	/* Straight field-for-field copy of the SME config into the TLV */
	cmd->vdev_id = motion_det_cfg->vdev_id;
	cmd->time_t1 = motion_det_cfg->time_t1;
	cmd->time_t2 = motion_det_cfg->time_t2;
	cmd->n1 = motion_det_cfg->n1;
	cmd->n2 = motion_det_cfg->n2;
	cmd->time_t1_gap = motion_det_cfg->time_t1_gap;
	cmd->time_t2_gap = motion_det_cfg->time_t2_gap;
	cmd->coarse_K = motion_det_cfg->coarse_K;
	cmd->fine_K = motion_det_cfg->fine_K;
	cmd->coarse_Q = motion_det_cfg->coarse_Q;
	cmd->fine_Q = motion_det_cfg->fine_Q;
	cmd->md_coarse_thr_high = motion_det_cfg->md_coarse_thr_high;
	cmd->md_fine_thr_high = motion_det_cfg->md_fine_thr_high;
	cmd->md_coarse_thr_low = motion_det_cfg->md_coarse_thr_low;
	cmd->md_fine_thr_low = motion_det_cfg->md_fine_thr_low;

	err = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, sizeof(*cmd),
				   WMI_MOTION_DET_CONFIG_PARAM_CMDID);
	if (err) {
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}
	wma_nofl_alert("Set motion_det_config to vdevId %d\n"
		       "time_t1 %d\n"
		       "time_t2 %d\n"
		       "n1 %d\n"
		       "n2 %d\n"
		       "time_t1_gap %d\n"
		       "time_t2_gap %d\n"
		       "coarse_K %d\n"
		       "fine_K %d\n"
		       "coarse_Q %d\n"
		       "fine_Q %d\n"
		       "md_coarse_thr_high %d\n"
		       "md_fine_thr_high %d\n"
		       "md_coarse_thr_low %d\n"
		       "md_fine_thr_low %d\n",
		       motion_det_cfg->vdev_id,
		       motion_det_cfg->time_t1,
		       motion_det_cfg->time_t2,
		       motion_det_cfg->n1,
		       motion_det_cfg->n2,
		       motion_det_cfg->time_t1_gap,
		       motion_det_cfg->time_t2_gap,
		       motion_det_cfg->coarse_K,
		       motion_det_cfg->fine_K,
		       motion_det_cfg->coarse_Q,
		       motion_det_cfg->fine_Q,
		       motion_det_cfg->md_coarse_thr_high,
		       motion_det_cfg->md_fine_thr_high,
		       motion_det_cfg->md_coarse_thr_low,
		       motion_det_cfg->md_fine_thr_low);
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_set_motion_det_enable - Sends motion detection start/stop wmi cmd
 * @wma_handle: WMA global handle
 * @md_en: motion detection start/stop
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_ERROR on error
 */
static QDF_STATUS wma_set_motion_det_enable(tp_wma_handle wma_handle,
					    struct sme_motion_det_en *md_en)
{
	wmi_motion_det_start_stop_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	int err;

	buf = wmi_buf_alloc(wma_handle->wmi_handle, sizeof(*cmd));
	if (!buf)
		return QDF_STATUS_E_NOMEM;

	cmd = (wmi_motion_det_start_stop_cmd_fixed_param *)wmi_buf_data(buf);
	qdf_mem_zero(cmd, sizeof(*cmd));

	WMITLV_SET_HDR(
		&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_motion_det_start_stop_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_motion_det_start_stop_cmd_fixed_param));
	cmd->vdev_id = md_en->vdev_id;
	cmd->enable = md_en->enable;

	err = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, sizeof(*cmd),
				   WMI_MOTION_DET_START_STOP_CMDID);
	if (err) {
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}
	wma_alert("Set motion_det_enable to vdevId %d %d", md_en->vdev_id,
		  md_en->enable);
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_set_motion_det_base_line_config - Sends md baselining cfg wmi cmd
 * @wma_handle: WMA global handle
 * @md_base_line_cfg: md baselining configuration
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_ERROR on error
 */
static QDF_STATUS wma_set_motion_det_base_line_config(
		tp_wma_handle wma_handle,
		struct sme_motion_det_base_line_cfg *md_base_line_cfg)
{
	wmi_motion_det_base_line_config_params_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	int err;

	buf = wmi_buf_alloc(wma_handle->wmi_handle, sizeof(*cmd));
	if (!buf)
		return QDF_STATUS_E_NOMEM;

	cmd = (wmi_motion_det_base_line_config_params_cmd_fixed_param *)
		wmi_buf_data(buf);
	qdf_mem_zero(cmd, sizeof(*cmd));

	WMITLV_SET_HDR(
		&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_motion_det_base_line_config_params_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_motion_det_base_line_config_params_cmd_fixed_param));

	cmd->vdev_id = md_base_line_cfg->vdev_id;
	cmd->bl_time_t = md_base_line_cfg->bl_time_t;
	cmd->bl_packet_gap = md_base_line_cfg->bl_packet_gap;
	cmd->bl_n = md_base_line_cfg->bl_n;
	cmd->bl_num_meas = md_base_line_cfg->bl_num_meas;

	err = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, sizeof(*cmd),
				   WMI_MOTION_DET_BASE_LINE_CONFIG_PARAM_CMDID);
	if (err) {
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}
	wma_nofl_alert("Set motion_det_baseline_config to vdevId %d\n"
		       "bl_time_t %d\n"
		       "bl_packet_gap %d\n"
		       "bl_n %d\n"
		       "bl_num_meas %d\n",
		       md_base_line_cfg->vdev_id,
		       md_base_line_cfg->bl_time_t,
		       md_base_line_cfg->bl_packet_gap,
		       md_base_line_cfg->bl_n,
		       md_base_line_cfg->bl_num_meas);
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_set_motion_det_base_line_enable - Sends md baselining start/stop wmi cmd
 * @wma_handle: WMA global handle
 * @md_base_line_en: motion detection baselining start/stop
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_ERROR on error
 */
static QDF_STATUS wma_set_motion_det_base_line_enable(
		tp_wma_handle wma_handle,
		struct sme_motion_det_base_line_en *md_base_line_en)
{
	wmi_motion_det_base_line_start_stop_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	int err;

	buf = wmi_buf_alloc(wma_handle->wmi_handle, sizeof(*cmd));
	if (!buf)
		return QDF_STATUS_E_NOMEM;

	cmd = (wmi_motion_det_base_line_start_stop_cmd_fixed_param *)
		wmi_buf_data(buf);
	qdf_mem_zero(cmd, sizeof(*cmd));

	WMITLV_SET_HDR(
		&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_motion_det_base_line_start_stop_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_motion_det_base_line_start_stop_cmd_fixed_param));

	cmd->vdev_id = md_base_line_en->vdev_id;
	cmd->enable = md_base_line_en->enable;

	err = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, sizeof(*cmd),
				   WMI_MOTION_DET_BASE_LINE_START_STOP_CMDID);
	if (err) {
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}
	wma_alert("Set motion_det_base_line_enable to vdevId %d enable %d",
		  md_base_line_en->vdev_id, md_base_line_en->enable);
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_FEATURE_MOTION_DETECTION */

/**
 * wma_mc_process_msg() - process wma messages and call appropriate function.
 * @msg: message
 *
 * Return: QDF_SUCCESS for success otherwise failure
 */
static QDF_STATUS wma_mc_process_msg(struct scheduler_msg *msg)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	tp_wma_handle wma_handle;

	if (!msg) {
		wma_err("msg is NULL");
		QDF_ASSERT(0);
		qdf_status = QDF_STATUS_E_INVAL;
		goto end;
	}

	wma_nofl_debug("Handle msg %s(0x%x)",
		       mac_trace_get_wma_msg_string(msg->type), msg->type);

	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma_handle) {
		QDF_ASSERT(0);
		/* WMA is gone; drop the payload so it does not leak */
		qdf_mem_free(msg->bodyptr);
		qdf_status = QDF_STATUS_E_INVAL;
		goto end;
	}

	/*
	 * NOTE(review): bodyptr ownership is per-message. Most cases free
	 * msg->bodyptr here after the handler returns; cases without a
	 * qdf_mem_free() either carry their payload in msg->bodyval or the
	 * handler appears to take ownership of bodyptr itself — confirm
	 * against the individual handler before changing any case.
	 */
	switch (msg->type) {
#ifdef FEATURE_WLAN_ESE
	case WMA_TSM_STATS_REQ:
		wma_debug("McThread: WMA_TSM_STATS_REQ");
		wma_process_tsm_stats_req(wma_handle, (void *)msg->bodyptr);
		break;
#endif /* FEATURE_WLAN_ESE */
	case WMA_UPDATE_CHAN_LIST_REQ:
		wma_update_channel_list(wma_handle,
					(tSirUpdateChanList *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_ADD_STA_REQ:
		wma_add_sta(wma_handle, (tpAddStaParams) msg->bodyptr);
		break;
	case WMA_SEND_PEER_UNMAP_CONF:
		wma_peer_unmap_conf_send(
			wma_handle,
			(struct send_peer_unmap_conf_params *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_DELETE_STA_REQ:
		wma_delete_sta(wma_handle, (tpDeleteStaParams) msg->bodyptr);
		break;
	case WMA_DELETE_BSS_HO_FAIL_REQ:
		wma_delete_bss_ho_fail(wma_handle, msg->bodyval);
		break;
	case WMA_DELETE_BSS_REQ:
		wma_delete_bss(wma_handle, msg->bodyval);
		break;
	case WMA_UPDATE_EDCA_PROFILE_IND:
		wma_process_update_edca_param_req(wma_handle,
						  (tEdcaParams *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SEND_BEACON_REQ:
		wma_send_beacon(wma_handle, (tpSendbeaconParams) msg->bodyptr);
		break;
	case WMA_SEND_AP_VDEV_UP:
		wma_set_ap_vdev_up(wma_handle, msg->bodyval);
		break;
	case WMA_SEND_PROBE_RSP_TMPL:
		wma_send_probe_rsp_tmpl(wma_handle,
					(tpSendProbeRespParams) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_CLI_SET_CMD:
		wma_process_cli_set_cmd(wma_handle,
					(wma_cli_set_cmd_t *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_PDEV_IE_REQ:
		wma_process_set_pdev_ie_req(wma_handle,
				(struct set_ie_param *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#if !defined(REMOVE_PKT_LOG) && defined(FEATURE_PKTLOG)
	case WMA_PKTLOG_ENABLE_REQ:
		wma_pktlog_wmi_send_cmd(wma_handle,
			(struct ath_pktlog_wmi_params *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* REMOVE_PKT_LOG */
	case WMA_ENABLE_UAPSD_REQ:
		wma_enable_uapsd_mode(wma_handle,
				      (tpEnableUapsdParams) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_DISABLE_UAPSD_REQ:
		wma_disable_uapsd_mode(wma_handle,
				       (tpDisableUapsdParams) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_DTIM_PERIOD:
		wma_set_dtim_period(wma_handle,
				    (struct set_dtim_params *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_TX_POWER_REQ:
		wma_set_tx_power(wma_handle, (tpMaxTxPowerParams) msg->bodyptr);
		break;
	case WMA_SET_MAX_TX_POWER_REQ:
		wma_set_max_tx_power(wma_handle,
				     (tpMaxTxPowerParams) msg->bodyptr);
		break;
	case WMA_SET_KEEP_ALIVE:
		wma_set_keepalive_req(wma_handle, msg->bodyptr);
		break;
#ifdef FEATURE_WLAN_ESE
	case WMA_SET_PLM_REQ:
		wma_config_plm(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif

	case WMA_UPDATE_OP_MODE:
		wma_process_update_opmode(wma_handle,
					  (tUpdateVHTOpMode *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_UPDATE_RX_NSS:
		wma_process_update_rx_nss(wma_handle,
					  (tUpdateRxNss *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_UPDATE_MEMBERSHIP:
		wma_process_update_membership(wma_handle,
			(tUpdateMembership *) msg->bodyptr);
		break;
	case WMA_UPDATE_USERPOS:
		wma_process_update_userpos(wma_handle,
					   (tUpdateUserPos *) msg->bodyptr);
		break;
	case WMA_UPDATE_BEACON_IND:
		wma_process_update_beacon_params(wma_handle,
			(tUpdateBeaconParams *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;

	case WMA_ADD_TS_REQ:
		wma_add_ts_req(wma_handle, msg->bodyptr);
		break;

	case WMA_DEL_TS_REQ:
		wma_del_ts_req(wma_handle, msg->bodyptr);
		break;

	case WMA_AGGR_QOS_REQ:
		wma_aggr_qos_req(wma_handle, msg->bodyptr);
		break;

	case WMA_8023_MULTICAST_LIST_REQ:
		wma_process_mcbc_set_filter_req(wma_handle,
				(tpSirRcvFltMcAddrList) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;

	case WMA_ROAM_PRE_AUTH_STATUS:
		wma_send_roam_preauth_status(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;

	case WMA_ROAM_SYNC_TIMEOUT:
		wma_handle_roam_sync_timeout(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_RATE_UPDATE_IND:
		wma_process_rate_update_indicate(wma_handle,
				(tSirRateUpdateInd *) msg->bodyptr);
		break;

#ifdef FEATURE_WLAN_TDLS
	case WMA_UPDATE_TDLS_PEER_STATE:
		wma_update_tdls_peer_state(wma_handle, msg->bodyptr);
		break;
#endif /* FEATURE_WLAN_TDLS */
	case WMA_ADD_PERIODIC_TX_PTRN_IND:
		wma_process_add_periodic_tx_ptrn_ind(wma_handle,
				(tSirAddPeriodicTxPtrn *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_DEL_PERIODIC_TX_PTRN_IND:
		wma_process_del_periodic_tx_ptrn_ind(wma_handle,
				(tSirDelPeriodicTxPtrn *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_TX_POWER_LIMIT:
		wma_process_tx_power_limits(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SEND_ADDBA_REQ:
		wma_process_send_addba_req(wma_handle,
				(struct send_add_ba_req *)msg->bodyptr);
		break;

#ifdef FEATURE_WLAN_CH_AVOID
	case WMA_CH_AVOID_UPDATE_REQ:
		wma_process_ch_avoid_update_req(wma_handle,
				(tSirChAvoidUpdateReq *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* FEATURE_WLAN_CH_AVOID */
#ifdef FEATURE_WLAN_AUTO_SHUTDOWN
	case WMA_SET_AUTO_SHUTDOWN_TIMER_REQ:
		wma_set_auto_shutdown_timer_req(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* FEATURE_WLAN_AUTO_SHUTDOWN */
	/* intentional fall-through: start and stop share one handler */
	case WMA_DHCP_START_IND:
	case WMA_DHCP_STOP_IND:
		wma_process_dhcp_ind(wma_handle, (tAniDHCPInd *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_INIT_THERMAL_INFO_CMD:
		wma_process_init_thermal_info(wma_handle,
					      (t_thermal_mgmt *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;

	case WMA_SET_THERMAL_LEVEL:
		wma_process_set_thermal_level(wma_handle, msg->bodyval);
		break;
#ifdef CONFIG_HL_SUPPORT
	case WMA_INIT_BAD_PEER_TX_CTL_INFO_CMD:
		wma_process_init_bad_peer_tx_ctl_info(
			wma_handle,
			(struct t_bad_peer_txtcl_config *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif
	case WMA_SET_MIMOPS_REQ:
		wma_process_set_mimops_req(wma_handle,
					   (tSetMIMOPS *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_SAP_INTRABSS_DIS:
		wma_set_vdev_intrabss_fwd(wma_handle,
					  (tDisableIntraBssFwd *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_GET_ISOLATION:
		wma_get_isolation(wma_handle);
		break;
	case WMA_MODEM_POWER_STATE_IND:
		wma_notify_modem_power_state(wma_handle,
				(tSirModemPowerStateInd *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#ifdef WLAN_FEATURE_STATS_EXT
	case WMA_STATS_EXT_REQUEST:
		wma_stats_ext_req(wma_handle,
				  (tpStatsExtRequest) (msg->bodyptr));
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* WLAN_FEATURE_STATS_EXT */
#ifdef WLAN_FEATURE_EXTWOW_SUPPORT
	case WMA_WLAN_EXT_WOW:
		wma_enable_ext_wow(wma_handle,
				   (tSirExtWoWParams *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_WLAN_SET_APP_TYPE1_PARAMS:
		wma_set_app_type1_params_in_fw(wma_handle,
				(tSirAppType1Params *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_WLAN_SET_APP_TYPE2_PARAMS:
		wma_set_app_type2_params_in_fw(wma_handle,
				(tSirAppType2Params *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* WLAN_FEATURE_EXTWOW_SUPPORT */
#ifdef FEATURE_WLAN_EXTSCAN
	case WMA_EXTSCAN_START_REQ:
		wma_start_extscan(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_EXTSCAN_STOP_REQ:
		wma_stop_extscan(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_EXTSCAN_SET_BSSID_HOTLIST_REQ:
		wma_extscan_start_hotlist_monitor(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_EXTSCAN_RESET_BSSID_HOTLIST_REQ:
		wma_extscan_stop_hotlist_monitor(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_EXTSCAN_SET_SIGNF_CHANGE_REQ:
		wma_extscan_start_change_monitor(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_EXTSCAN_RESET_SIGNF_CHANGE_REQ:
		wma_extscan_stop_change_monitor(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_EXTSCAN_GET_CACHED_RESULTS_REQ:
		wma_extscan_get_cached_results(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_EXTSCAN_GET_CAPABILITIES_REQ:
		wma_extscan_get_capabilities(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_EPNO_LIST_REQ:
		wma_set_epno_network_list(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_PASSPOINT_LIST_REQ:
		/* Issue reset passpoint network list first and clear
		 * the entries
		 */
		wma_reset_passpoint_network_list(wma_handle, msg->bodyptr);

		wma_set_passpoint_network_list(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_RESET_PASSPOINT_LIST_REQ:
		wma_reset_passpoint_network_list(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* FEATURE_WLAN_EXTSCAN */
	case WMA_SET_SCAN_MAC_OUI_REQ:
		wma_scan_probe_setoui(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#ifdef WLAN_FEATURE_LINK_LAYER_STATS
	case WMA_LINK_LAYER_STATS_CLEAR_REQ:
		wma_process_ll_stats_clear_req(wma_handle,
			(tpSirLLStatsClearReq) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_LINK_LAYER_STATS_SET_REQ:
		wma_process_ll_stats_set_req(wma_handle,
					     (tpSirLLStatsSetReq) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_LINK_LAYER_STATS_GET_REQ:
		wma_process_ll_stats_get_req(wma_handle,
					     (tpSirLLStatsGetReq) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WDA_LINK_LAYER_STATS_SET_THRESHOLD:
		wma_config_stats_ext_threshold(wma_handle,
			(struct sir_ll_ext_stats_threshold *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* WLAN_FEATURE_LINK_LAYER_STATS */
	case SIR_HAL_SET_BASE_MACADDR_IND:
		wma_set_base_macaddr_indicate(wma_handle,
					      (tSirMacAddr *) msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_LINK_STATUS_GET_REQ:
		wma_process_link_status_req(wma_handle,
					    (tAniGetLinkStatus *) msg->bodyptr);
		break;
	case WMA_GET_TEMPERATURE_REQ:
		wma_get_temperature(wma_handle);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_TSF_GPIO_PIN:
		wma_set_tsf_gpio_pin(wma_handle, msg->bodyval);
		break;

#ifdef DHCP_SERVER_OFFLOAD
	case WMA_SET_DHCP_SERVER_OFFLOAD_CMD:
		wma_process_dhcpserver_offload(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* DHCP_SERVER_OFFLOAD */
#ifdef WLAN_FEATURE_GPIO_LED_FLASHING
	case WMA_LED_FLASHING_REQ:
		wma_set_led_flashing(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* WLAN_FEATURE_GPIO_LED_FLASHING */
	case SIR_HAL_SET_MAS:
		wma_process_set_mas(wma_handle,
				(uint32_t *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_SET_MIRACAST:
		wma_process_set_miracast(wma_handle,
				(uint32_t *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_CONFIG_STATS_FACTOR:
		wma_config_stats_factor(wma_handle,
					(struct sir_stats_avg_factor *)
					msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_CONFIG_GUARD_TIME:
		wma_config_guard_time(wma_handle,
				      (struct sir_guard_time_request *)
				      msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_START_STOP_LOGGING:
		wma_set_wifi_start_packet_stats(wma_handle,
				(struct sir_wifi_start_log *)msg->bodyptr);
		wma_enable_specific_fw_logs(wma_handle,
				(struct sir_wifi_start_log *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_FLUSH_LOG_TO_FW:
		wma_send_flush_logs_to_fw(wma_handle);
		/* Body ptr is NULL here */
		break;
	case WMA_SET_RSSI_MONITOR_REQ:
		wma_set_rssi_monitoring(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_SET_PCL_TO_FW:
		wma_send_set_pcl_cmd(wma_handle,
				     (struct set_pcl_req *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_PDEV_SET_HW_MODE:
		wma_send_pdev_set_hw_mode_cmd(wma_handle,
				(struct policy_mgr_hw_mode *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_WISA_PARAMS:
		wma_set_wisa_params(wma_handle,
				(struct sir_wisa_params *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_PDEV_DUAL_MAC_CFG_REQ:
		wma_send_pdev_set_dual_mac_config(wma_handle,
				(struct policy_mgr_dual_mac_config *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_IE_INFO:
		wma_process_set_ie_info(wma_handle,
					(struct vdev_ie_info *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_CFG_VENDOR_ACTION_TB_PPDU:
		wma_process_cfg_action_frm_tb_ppdu(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_SOC_ANTENNA_MODE_REQ:
		wma_send_pdev_set_antenna_mode(wma_handle,
			(struct sir_antenna_mode_param *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_GW_PARAM_UPDATE_REQ:
		wma_set_gateway_params(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_ADAPT_DWELLTIME_CONF_PARAMS:
		wma_send_adapt_dwelltime_params(wma_handle,
			(struct adaptive_dwelltime_params *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_HT40_OBSS_SCAN_IND:
		wma_send_ht40_obss_scanind(wma_handle,
			(struct obss_ht40_scanind *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_ADD_BCN_FILTER_CMDID:
		wma_add_beacon_filter(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_REMOVE_BCN_FILTER_CMDID:
		wma_remove_beacon_filter(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WDA_APF_GET_CAPABILITIES_REQ:
		wma_get_apf_capabilities(wma_handle);
		break;
	case SIR_HAL_POWER_DBG_CMD:
		wma_process_hal_pwr_dbg_cmd(wma_handle,
					    msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SEND_FREQ_RANGE_CONTROL_IND:
		wma_enable_disable_caevent_ind(wma_handle, msg->bodyval);
		break;
	case SIR_HAL_UPDATE_TX_FAIL_CNT_TH:
		wma_update_tx_fail_cnt_th(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_LONG_RETRY_LIMIT_CNT:
		wma_update_long_retry_limit(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_SHORT_RETRY_LIMIT_CNT:
		wma_update_short_retry_limit(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_POWER_DEBUG_STATS_REQ:
		wma_process_power_debug_stats_req(wma_handle);
		break;
	case WMA_BEACON_DEBUG_STATS_REQ:
		wma_process_beacon_debug_stats_req(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_GET_RCPI_REQ:
		wma_get_rcpi_req(wma_handle,
				 (struct sme_rcpi_req *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_WOW_PULSE_CMD:
		wma_send_wow_pulse_cmd(wma_handle,
			(struct wow_pulse_mode *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_DBS_SCAN_SEL_CONF_PARAMS:
		wma_send_dbs_scan_selection_params(wma_handle,
			(struct wmi_dbs_scan_sel_params *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_ARP_STATS_REQ:
		wma_set_arp_req_stats(wma_handle,
			(struct set_arp_stats_params *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_GET_ARP_STATS_REQ:
		wma_get_arp_req_stats(wma_handle,
			(struct get_arp_stats_params *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case SIR_HAL_SET_DEL_PMKID_CACHE:
		wma_set_del_pmkid_cache(wma_handle, msg->bodyptr);
		if (msg->bodyptr) {
			/* scrub key material before releasing the buffer */
			qdf_mem_zero(msg->bodyptr,
				     sizeof(struct wmi_unified_pmk_cache));
			qdf_mem_free(msg->bodyptr);
		}
		break;
	case SIR_HAL_HLP_IE_INFO:
		wma_roam_scan_send_hlp(wma_handle,
				       (struct hlp_params *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_LIMIT_OFF_CHAN:
		wma_process_limit_off_chan(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_OBSS_DETECTION_REQ:
		wma_send_obss_detection_cfg(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_INVOKE_NEIGHBOR_REPORT:
		wma_send_invoke_neighbor_report(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_OBSS_COLOR_COLLISION_REQ:
		wma_process_obss_color_collision_req(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_GET_ROAM_SCAN_STATS:
		wma_get_roam_scan_stats(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#ifdef WLAN_FEATURE_MOTION_DETECTION
	case WMA_SET_MOTION_DET_CONFIG:
		wma_set_motion_det_config(
			wma_handle,
			(struct sme_motion_det_cfg *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_MOTION_DET_ENABLE:
		wma_set_motion_det_enable(
			wma_handle,
			(struct sme_motion_det_en *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_MOTION_DET_BASE_LINE_CONFIG:
		wma_set_motion_det_base_line_config(
			wma_handle,
			(struct sme_motion_det_base_line_cfg *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_MOTION_DET_BASE_LINE_ENABLE:
		wma_set_motion_det_base_line_enable(
			wma_handle,
			(struct sme_motion_det_base_line_en *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* WLAN_FEATURE_MOTION_DETECTION */
#ifdef FW_THERMAL_THROTTLE_SUPPORT
	case WMA_SET_THERMAL_THROTTLE_CFG:
		/* FW-based throttling is rejected while the legacy
		 * host thermal management is enabled
		 */
		if (!wma_handle->thermal_mgmt_info.thermalMgmtEnabled)
			wmi_unified_thermal_mitigation_param_cmd_send(
					wma_handle->wmi_handle, msg->bodyptr);
		else
			qdf_status = QDF_STATUS_E_INVAL;
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_SET_THERMAL_MGMT:
		if (!wma_handle->thermal_mgmt_info.thermalMgmtEnabled)
			wma_set_thermal_mgmt(
					wma_handle,
					*((t_thermal_cmd_params *)msg->bodyptr));
		else
			qdf_status = QDF_STATUS_E_INVAL;
		qdf_mem_free(msg->bodyptr);
		break;
#endif /* FW_THERMAL_THROTTLE_SUPPORT */
#ifdef WLAN_MWS_INFO_DEBUGFS
	case WMA_GET_MWS_COEX_INFO_REQ:
		wma_get_mws_coex_info_req(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
#endif
	case WMA_ROAM_SCAN_CH_REQ:
		wma_get_roam_scan_ch(wma_handle->wmi_handle, msg->bodyval);
		break;
	case WMA_TWT_ADD_DIALOG_REQUEST:
		wma_twt_process_add_dialog(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_TWT_DEL_DIALOG_REQUEST:
		wma_twt_process_del_dialog(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_TWT_PAUSE_DIALOG_REQUEST:
		wma_twt_process_pause_dialog(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_TWT_RESUME_DIALOG_REQUEST:
		wma_twt_process_resume_dialog(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_TWT_NUDGE_DIALOG_REQUEST:
		wma_twt_process_nudge_dialog(wma_handle, msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	case WMA_UPDATE_EDCA_PIFS_PARAM_IND:
		wma_update_edca_pifs_param(
			wma_handle,
			(struct edca_pifs_vparam *)msg->bodyptr);
		qdf_mem_free(msg->bodyptr);
		break;
	default:
		wma_debug("Unhandled WMA message of type %d", msg->type);
		if (msg->bodyptr)
			qdf_mem_free(msg->bodyptr);
	}
end:
	return qdf_status;
}

QDF_STATUS wma_mc_process_handler(struct scheduler_msg *msg)
{
	return wma_mc_process_msg(msg);
}

/**
 * wma_log_completion_timeout() - Log completion timeout
 * @data: Timeout handler data
 *
 * This
function is called when log completion timer expires 9835 * 9836 * Return: None 9837 */ 9838 void wma_log_completion_timeout(void *data) 9839 { 9840 wma_debug("Timeout occurred for log completion command"); 9841 9842 /* Though we did not receive any event from FW, 9843 * we can flush whatever logs we have with us 9844 */ 9845 cds_logging_set_fw_flush_complete(); 9846 } 9847 9848 /** 9849 * wma_map_pcl_weights() - Map PCL weights 9850 * @pcl_weight: Internal PCL weights 9851 * 9852 * Maps the internal weights of PCL to the weights needed by FW 9853 * 9854 * Return: Mapped channel weight of type wmi_pcl_chan_weight 9855 */ 9856 wmi_pcl_chan_weight wma_map_pcl_weights(uint32_t pcl_weight) 9857 { 9858 switch (pcl_weight) { 9859 case WEIGHT_OF_GROUP1_PCL_CHANNELS: 9860 return WMI_PCL_WEIGHT_VERY_HIGH; 9861 case WEIGHT_OF_GROUP2_PCL_CHANNELS: 9862 return WMI_PCL_WEIGHT_HIGH; 9863 case WEIGHT_OF_GROUP3_PCL_CHANNELS: 9864 return WMI_PCL_WEIGHT_MEDIUM; 9865 case WEIGHT_OF_GROUP4_PCL_CHANNELS: 9866 return WMI_PCL_WEIGHT_MEDIUM; 9867 case WEIGHT_OF_NON_PCL_CHANNELS: 9868 return WMI_PCL_WEIGHT_LOW; 9869 default: 9870 return WMI_PCL_WEIGHT_DISALLOW; 9871 } 9872 } 9873 9874 /** 9875 * wma_send_set_pcl_cmd() - Send WMI_SOC_SET_PCL_CMDID to FW 9876 * @wma_handle: WMA handle 9877 * @msg: PCL structure containing the PCL and the number of channels 9878 * 9879 * WMI_PDEV_SET_PCL_CMDID provides a Preferred Channel List (PCL) to the WLAN 9880 * firmware. The DBS Manager is the consumer of this information in the WLAN 9881 * firmware. The channel list will be used when a Virtual DEVice (VDEV) needs 9882 * to migrate to a new channel without host driver involvement. An example of 9883 * this behavior is Legacy Fast Roaming (LFR 3.0). Generally, the host will 9884 * manage the channel selection without firmware involvement. 9885 * 9886 * WMI_PDEV_SET_PCL_CMDID will carry only the weight list and not the actual 9887 * channel list. 
 * The weights corresponds to the channels sent in
 * WMI_SCAN_CHAN_LIST_CMDID. The channels from PCL would be having a higher
 * weightage compared to the non PCL channels.
 *
 * Return: Success if the cmd is sent successfully to the firmware
 */
QDF_STATUS wma_send_set_pcl_cmd(tp_wma_handle wma_handle,
				struct set_pcl_req *msg)
{
	uint32_t i;
	QDF_STATUS status;
	bool is_channel_allowed;

	if (wma_validate_handle(wma_handle))
		return QDF_STATUS_E_NULL_VALUE;

	/*
	 * if vdev_id is WLAN_UMAC_VDEV_ID_MAX, then roaming is enabled on
	 * only one sta, so PDEV PCL command needs to be sent.
	 * If a valid vdev id is present, then vdev pcl command needs to be
	 * sent.
	 */
	if (msg->vdev_id != WLAN_UMAC_VDEV_ID_MAX)
		return wlan_cm_roam_send_set_vdev_pcl(wma_handle->psoc, msg);


	wma_debug("RSO_CFG: BandCapability:%d, band_mask:%d",
		  wma_handle->bandcapability, msg->band_mask);
	/* seed the weight computation with the channel list saved in WMA */
	for (i = 0; i < wma_handle->saved_chan.num_channels; i++) {
		msg->chan_weights.saved_chan_list[i] =
			wma_handle->saved_chan.ch_freq_list[i];
	}

	msg->chan_weights.saved_num_chan = wma_handle->saved_chan.num_channels;

	status = policy_mgr_get_valid_chan_weights(wma_handle->psoc,
		(struct policy_mgr_pcl_chan_weights *)&msg->chan_weights,
		PM_STA_MODE, NULL);
	if (QDF_IS_STATUS_ERROR(status)) {
		wma_err("Error in creating weighed pcl");
		return status;
	}

	/*
	 * Per channel: translate the host weight to the FW weight, then
	 * force WEIGHT_OF_DISALLOWED_CHANNELS for any channel outside the
	 * allowed set or outside the configured band mask.
	 */
	for (i = 0; i < msg->chan_weights.saved_num_chan; i++) {
		msg->chan_weights.weighed_valid_list[i] =
			wma_map_pcl_weights(
				msg->chan_weights.weighed_valid_list[i]);

		is_channel_allowed =
			policy_mgr_is_sta_chan_valid_for_connect_and_roam(
					wma_handle->pdev,
					msg->chan_weights.saved_chan_list[i]);
		if (!is_channel_allowed) {
			msg->chan_weights.weighed_valid_list[i] =
					WEIGHT_OF_DISALLOWED_CHANNELS;
			continue;
		}

		/* all three bands allowed: no band-mask filtering needed */
		if (msg->band_mask ==
		      (BIT(REG_BAND_2G) | BIT(REG_BAND_5G) | BIT(REG_BAND_6G)))
			continue;

		/*
		 * Dont allow roaming on 5G/6G band if only 2G band configured
		 * as supported roam band mask
		 */
		if (((wma_handle->bandcapability == BAND_2G) ||
		    (msg->band_mask == BIT(REG_BAND_2G))) &&
		    !WLAN_REG_IS_24GHZ_CH_FREQ(
		    msg->chan_weights.saved_chan_list[i])) {
			msg->chan_weights.weighed_valid_list[i] =
					WEIGHT_OF_DISALLOWED_CHANNELS;
			continue;
		}

		/*
		 * Dont allow roaming on 2G/6G band if only 5G band configured
		 * as supported roam band mask
		 */
		if (((wma_handle->bandcapability == BAND_5G) ||
		    (msg->band_mask == BIT(REG_BAND_5G))) &&
		    !WLAN_REG_IS_5GHZ_CH_FREQ(
		    msg->chan_weights.saved_chan_list[i])) {
			msg->chan_weights.weighed_valid_list[i] =
					WEIGHT_OF_DISALLOWED_CHANNELS;
			continue;
		}

		/*
		 * Dont allow roaming on 2G/5G band if only 6G band configured
		 * as supported roam band mask
		 */
		if (msg->band_mask == BIT(REG_BAND_6G) &&
		    !WLAN_REG_IS_6GHZ_CHAN_FREQ(
		    msg->chan_weights.saved_chan_list[i])) {
			msg->chan_weights.weighed_valid_list[i] =
					WEIGHT_OF_DISALLOWED_CHANNELS;
			continue;
		}

		/*
		 * Dont allow roaming on 6G band if only 2G + 5G band configured
		 * as supported roam band mask.
		 */
		if (msg->band_mask == (BIT(REG_BAND_2G) | BIT(REG_BAND_5G)) &&
		    (WLAN_REG_IS_6GHZ_CHAN_FREQ(
		    msg->chan_weights.saved_chan_list[i]))) {
			msg->chan_weights.weighed_valid_list[i] =
					WEIGHT_OF_DISALLOWED_CHANNELS;
			continue;
		}

		/*
		 * Dont allow roaming on 2G band if only 5G + 6G band configured
		 * as supported roam band mask.
		 */
		if (msg->band_mask == (BIT(REG_BAND_5G) | BIT(REG_BAND_6G)) &&
		    (WLAN_REG_IS_24GHZ_CH_FREQ(
		    msg->chan_weights.saved_chan_list[i]))) {
			msg->chan_weights.weighed_valid_list[i] =
					WEIGHT_OF_DISALLOWED_CHANNELS;
			continue;
		}

		/*
		 * Dont allow roaming on 5G band if only 2G + 6G band configured
		 * as supported roam band mask.
		 */
		if (msg->band_mask == (BIT(REG_BAND_2G) | BIT(REG_BAND_6G)) &&
		    (WLAN_REG_IS_5GHZ_CH_FREQ(
		    msg->chan_weights.saved_chan_list[i]))) {
			msg->chan_weights.weighed_valid_list[i] =
					WEIGHT_OF_DISALLOWED_CHANNELS;
			continue;
		}
	}

	wma_debug("RSO_CFG: Dump PDEV PCL weights for vdev[%d]", msg->vdev_id);
	policy_mgr_dump_channel_list(msg->chan_weights.saved_num_chan,
				     msg->chan_weights.saved_chan_list,
				     msg->chan_weights.weighed_valid_list);

	if (wmi_unified_pdev_set_pcl_cmd(wma_handle->wmi_handle,
					 &msg->chan_weights))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_send_pdev_set_hw_mode_cmd() - Send WMI_PDEV_SET_HW_MODE_CMDID to FW
 * @wma_handle: WMA handle
 * @msg: Structure containing the following parameters
 *
 * - hw_mode_index: The HW_Mode field is a enumerated type that is selected
 * from the HW_Mode table, which is returned in the WMI_SERVICE_READY_EVENTID.
 *
 * Provides notification to the WLAN firmware that host driver is requesting a
 * HardWare (HW) Mode change. This command is needed to support iHelium in the
 * configurations that include the Dual Band Simultaneous (DBS) feature.
 *
 * Return: Success if the cmd is sent successfully to the firmware
 */
QDF_STATUS wma_send_pdev_set_hw_mode_cmd(tp_wma_handle wma_handle,
					 struct policy_mgr_hw_mode *msg)
{
	struct sir_set_hw_mode_resp *param;
	struct wma_target_req *timeout_msg;

	if (wma_validate_handle(wma_handle)) {
		/* Handle is NULL. Will not be able to send failure
		 * response as well
		 */
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!msg) {
		wma_err("Set HW mode param is NULL");
		/* Lets try to free the active command list */
		goto fail;
	}

	/* held until the HW mode response arrives (or the request times out) */
	wma_acquire_wakelock(&wma_handle->wmi_cmd_rsp_wake_lock,
			     WMA_VDEV_HW_MODE_REQUEST_TIMEOUT);
	if (wmi_unified_soc_set_hw_mode_cmd(wma_handle->wmi_handle,
					    msg->hw_mode_index)) {
		wma_release_wakelock(&wma_handle->wmi_cmd_rsp_wake_lock);
		goto fail;
	}
	/* arm a hold-req so a missing FW response is detected; timeout is
	 * kept just inside the wakelock duration
	 */
	timeout_msg = wma_fill_hold_req(wma_handle, 0,
					SIR_HAL_PDEV_SET_HW_MODE,
					WMA_PDEV_SET_HW_MODE_RESP, NULL,
					WMA_VDEV_HW_MODE_REQUEST_TIMEOUT - 1);
	if (!timeout_msg) {
		wma_err("Failed to allocate request for SIR_HAL_PDEV_SET_HW_MODE");
		wma_remove_req(wma_handle, 0, WMA_PDEV_SET_HW_MODE_RESP);
	}

	return QDF_STATUS_SUCCESS;
fail:
	/* tell LIM the request was cancelled so it does not wait forever */
	param = qdf_mem_malloc(sizeof(*param));
	if (!param)
		return QDF_STATUS_E_NULL_VALUE;

	param->status = SET_HW_MODE_STATUS_ECANCELED;
	param->cfgd_hw_mode_index = 0;
	param->num_vdev_mac_entries = 0;
	wma_debug("Sending HW mode fail response to LIM");
	wma_send_msg(wma_handle, SIR_HAL_PDEV_SET_HW_MODE_RESP,
		     (void *) param, 0);
	return QDF_STATUS_E_FAILURE;
}

/**
 * wma_send_pdev_set_dual_mac_config() - Set dual mac config to FW
 * @wma_handle: WMA handle
 * @msg: Dual MAC config parameters
 *
 * Configures WLAN firmware with the dual MAC features
 *
 * Return:
QDF_STATUS. 0 on success.
 */
QDF_STATUS wma_send_pdev_set_dual_mac_config(tp_wma_handle wma_handle,
					     struct policy_mgr_dual_mac_config *msg)
{
	QDF_STATUS ret;
	struct wma_target_req *hold_req;
	struct sir_dual_mac_config_resp *fail_resp;

	if (wma_validate_handle(wma_handle))
		return QDF_STATUS_E_NULL_VALUE;

	if (!msg) {
		wma_err("Set dual mode config is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* Track the pending request so a missing FW response times out */
	hold_req = wma_fill_hold_req(wma_handle, 0,
				     SIR_HAL_PDEV_DUAL_MAC_CFG_REQ,
				     WMA_PDEV_MAC_CFG_RESP, NULL,
				     WMA_VDEV_DUAL_MAC_CFG_TIMEOUT);
	if (!hold_req) {
		wma_err("Failed to allocate request for SIR_HAL_PDEV_DUAL_MAC_CFG_REQ");
		return QDF_STATUS_E_FAILURE;
	}

	/*
	 * The wake lock is released by the response handler on success;
	 * on a send failure it is released right away below.
	 */
	wma_acquire_wakelock(&wma_handle->wmi_cmd_rsp_wake_lock,
			     WMA_VDEV_PLCY_MGR_WAKE_LOCK_TIMEOUT);
	ret = wmi_unified_pdev_set_dual_mac_config_cmd(wma_handle->wmi_handle,
						       msg);
	if (QDF_IS_STATUS_ERROR(ret)) {
		wma_err("Failed to send WMI_PDEV_SET_DUAL_MAC_CONFIG_CMDID: %d",
			ret);
		wma_release_wakelock(&wma_handle->wmi_cmd_rsp_wake_lock);
		wma_remove_req(wma_handle, 0, WMA_PDEV_MAC_CFG_RESP);
		goto fail;
	}

	policy_mgr_update_dbs_req_config(wma_handle->psoc,
					 msg->scan_config,
					 msg->fw_mode_config);

	return QDF_STATUS_SUCCESS;

fail:
	fail_resp = qdf_mem_malloc(sizeof(*fail_resp));
	if (!fail_resp)
		return QDF_STATUS_E_NULL_VALUE;

	fail_resp->status = SET_HW_MODE_STATUS_ECANCELED;
	wma_debug("Sending failure response to LIM");
	wma_send_msg(wma_handle, SIR_HAL_PDEV_MAC_CFG_RESP,
		     (void *)fail_resp, 0);

	return QDF_STATUS_E_FAILURE;
}

/**
 *
wma_send_pdev_set_antenna_mode() - Set antenna mode to FW 10168 * @wma_handle: WMA handle 10169 * @msg: Antenna mode parameters 10170 * 10171 * Send WMI_PDEV_SET_ANTENNA_MODE_CMDID to FW requesting to 10172 * modify the number of TX/RX chains from host 10173 * 10174 * Return: QDF_STATUS. 0 on success. 10175 */ 10176 QDF_STATUS wma_send_pdev_set_antenna_mode(tp_wma_handle wma_handle, 10177 struct sir_antenna_mode_param *msg) 10178 { 10179 wmi_pdev_set_antenna_mode_cmd_fixed_param *cmd; 10180 wmi_buf_t buf; 10181 uint32_t len; 10182 QDF_STATUS status = QDF_STATUS_SUCCESS; 10183 struct sir_antenna_mode_resp *param; 10184 10185 if (wma_validate_handle(wma_handle)) 10186 return QDF_STATUS_E_NULL_VALUE; 10187 10188 if (!msg) { 10189 wma_err("Set antenna mode param is NULL"); 10190 return QDF_STATUS_E_NULL_VALUE; 10191 } 10192 10193 len = sizeof(*cmd); 10194 10195 buf = wmi_buf_alloc(wma_handle->wmi_handle, len); 10196 if (!buf) { 10197 status = QDF_STATUS_E_NOMEM; 10198 goto resp; 10199 } 10200 10201 cmd = (wmi_pdev_set_antenna_mode_cmd_fixed_param *) wmi_buf_data(buf); 10202 WMITLV_SET_HDR(&cmd->tlv_header, 10203 WMITLV_TAG_STRUC_wmi_pdev_set_antenna_mode_cmd_fixed_param, 10204 WMITLV_GET_STRUCT_TLVLEN( 10205 wmi_pdev_set_antenna_mode_cmd_fixed_param)); 10206 10207 cmd->pdev_id = OL_TXRX_PDEV_ID; 10208 /* Bits 0-15 is num of RX chains 16-31 is num of TX chains */ 10209 cmd->num_txrx_chains = msg->num_rx_chains; 10210 cmd->num_txrx_chains |= (msg->num_tx_chains << 16); 10211 10212 wma_debug("Num of chains TX: %d RX: %d txrx_chains: 0x%x", 10213 msg->num_tx_chains, 10214 msg->num_rx_chains, cmd->num_txrx_chains); 10215 10216 if (wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len, 10217 WMI_PDEV_SET_ANTENNA_MODE_CMDID)) { 10218 wmi_buf_free(buf); 10219 status = QDF_STATUS_E_FAILURE; 10220 goto resp; 10221 } 10222 status = QDF_STATUS_SUCCESS; 10223 10224 resp: 10225 param = qdf_mem_malloc(sizeof(*param)); 10226 if (!param) 10227 return QDF_STATUS_E_NOMEM; 10228 10229 
param->status = (status) ? 10230 SET_ANTENNA_MODE_STATUS_ECANCELED : 10231 SET_ANTENNA_MODE_STATUS_OK; 10232 wma_debug("Send antenna mode resp to LIM status: %d", 10233 param->status); 10234 wma_send_msg(wma_handle, SIR_HAL_SOC_ANTENNA_MODE_RESP, 10235 (void *) param, 0); 10236 return status; 10237 } 10238 10239 /** 10240 * wma_crash_inject() - sends command to FW to simulate crash 10241 * @wma_handle: pointer of WMA context 10242 * @type: subtype of the command 10243 * @delay_time_ms: time in milliseconds for FW to delay the crash 10244 * 10245 * This function will send a command to FW in order to simulate different 10246 * kinds of FW crashes. 10247 * 10248 * Return: QDF_STATUS_SUCCESS for success or error code 10249 */ 10250 QDF_STATUS wma_crash_inject(WMA_HANDLE wma_handle, uint32_t type, 10251 uint32_t delay_time_ms) 10252 { 10253 struct crash_inject param; 10254 tp_wma_handle wma = (tp_wma_handle)wma_handle; 10255 10256 param.type = type; 10257 param.delay_time_ms = delay_time_ms; 10258 return wmi_crash_inject(wma->wmi_handle, ¶m); 10259 } 10260 10261 QDF_STATUS wma_configure_smps_params(uint32_t vdev_id, uint32_t param_id, 10262 uint32_t param_val) 10263 { 10264 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); 10265 int smps_cmd_value; 10266 int status = QDF_STATUS_E_INVAL; 10267 10268 if (!wma) 10269 return status; 10270 10271 smps_cmd_value = param_id << WMI_SMPS_PARAM_VALUE_S; 10272 smps_cmd_value = smps_cmd_value | param_val; 10273 10274 status = wma_set_smps_params(wma, vdev_id, smps_cmd_value); 10275 if (status) 10276 wma_err("Failed to set SMPS Param"); 10277 10278 return status; 10279 } 10280 10281 10282 /** 10283 * wma_config_bmiss_bcnt_params() - set bmiss config parameters 10284 * @vdev_id: virtual device for the command 10285 * @first_cnt: bmiss first value 10286 * @final_cnt: bmiss final value 10287 * 10288 * Return: QDF_STATUS_SUCCESS or non-zero on failure 10289 */ 10290 QDF_STATUS wma_config_bmiss_bcnt_params(uint32_t vdev_id, uint32_t 
first_cnt, 10291 uint32_t final_cnt) 10292 { 10293 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 10294 int status = QDF_STATUS_E_INVAL; 10295 10296 if (!wma_handle) 10297 return status; 10298 10299 status = wma_roam_scan_bmiss_cnt(wma_handle, first_cnt, final_cnt, 10300 vdev_id); 10301 10302 if (status) 10303 wma_err("Failed to set Bmiss Param"); 10304 10305 return status; 10306 } 10307 10308 QDF_STATUS wma_get_rx_chainmask(uint8_t pdev_id, uint32_t *chainmask_2g, 10309 uint32_t *chainmask_5g) 10310 { 10311 struct wlan_psoc_host_mac_phy_caps *mac_phy_cap; 10312 uint8_t total_mac_phy_cnt, idx; 10313 struct target_psoc_info *tgt_hdl; 10314 uint32_t hw_mode_idx = 0, num_hw_modes = 0; 10315 10316 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 10317 if (!wma_handle) 10318 return QDF_STATUS_E_INVAL; 10319 10320 tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc); 10321 if (!tgt_hdl) { 10322 wma_err("target psoc info is NULL"); 10323 return QDF_STATUS_E_INVAL; 10324 } 10325 10326 total_mac_phy_cnt = target_psoc_get_total_mac_phy_cnt(tgt_hdl); 10327 num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl); 10328 if (total_mac_phy_cnt <= pdev_id) { 10329 wma_err("mac phy cnt %d, pdev id %d", 10330 total_mac_phy_cnt, pdev_id); 10331 return QDF_STATUS_E_FAILURE; 10332 } 10333 10334 if ((wma_handle->new_hw_mode_index != WMA_DEFAULT_HW_MODE_INDEX) && 10335 (wma_handle->new_hw_mode_index <= num_hw_modes)) 10336 hw_mode_idx = wma_handle->new_hw_mode_index; 10337 mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl); 10338 if (!mac_phy_cap) { 10339 wma_err("Invalid MAC PHY capabilities handle"); 10340 return QDF_STATUS_E_FAILURE; 10341 } 10342 for (idx = 0; idx < total_mac_phy_cnt; idx++) { 10343 if (mac_phy_cap[idx].hw_mode_id != hw_mode_idx) 10344 continue; 10345 if (mac_phy_cap[idx].supported_bands & WLAN_2G_CAPABILITY) 10346 *chainmask_2g = mac_phy_cap[idx].rx_chain_mask_2G; 10347 if (mac_phy_cap[idx].supported_bands & WLAN_5G_CAPABILITY) 10348 
*chainmask_5g = mac_phy_cap[idx].rx_chain_mask_5G; 10349 } 10350 wma_debug("pdev id: %d, hw_mode_idx: %d, rx chainmask 2g:%d, 5g:%d", 10351 pdev_id, hw_mode_idx, *chainmask_2g, *chainmask_5g); 10352 10353 return QDF_STATUS_SUCCESS; 10354 } 10355 10356 #ifdef FEATURE_ANI_LEVEL_REQUEST 10357 QDF_STATUS wma_send_ani_level_request(tp_wma_handle wma_handle, 10358 uint32_t *freqs, uint8_t num_freqs) 10359 { 10360 return wmi_unified_ani_level_cmd_send(wma_handle->wmi_handle, freqs, 10361 num_freqs); 10362 } 10363 #endif 10364