1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 4 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 5 */ 6 7 #include <linux/module.h> 8 #include <linux/soc/qcom/qmi.h> 9 10 #include "bus.h" 11 #include "debug.h" 12 #include "main.h" 13 #include "qmi.h" 14 #include "genl.h" 15 16 #define WLFW_SERVICE_INS_ID_V01 1 17 #define WLFW_CLIENT_ID 0x4b4e454c 18 #define BDF_FILE_NAME_PREFIX "bdwlan" 19 #define ELF_BDF_FILE_NAME "bdwlan.elf" 20 #define ELF_BDF_FILE_NAME_GF "bdwlang.elf" 21 #define ELF_BDF_FILE_NAME_PREFIX "bdwlan.e" 22 #define ELF_BDF_FILE_NAME_GF_PREFIX "bdwlang.e" 23 #define BIN_BDF_FILE_NAME "bdwlan.bin" 24 #define BIN_BDF_FILE_NAME_GF "bdwlang.bin" 25 #define BIN_BDF_FILE_NAME_PREFIX "bdwlan.b" 26 #define BIN_BDF_FILE_NAME_GF_PREFIX "bdwlang.b" 27 #define REGDB_FILE_NAME "regdb.bin" 28 #define HDS_FILE_NAME "hds.bin" 29 #define CHIP_ID_GF_MASK 0x10 30 31 #define QDSS_TRACE_CONFIG_FILE "qdss_trace_config" 32 /* 33 * Download QDSS config file based on build type. Add build type string to 34 * file name. Download "qdss_trace_config_debug_v<n>.cfg" for debug build 35 * and "qdss_trace_config_perf_v<n>.cfg" for perf build. 
36 */ 37 #ifdef CONFIG_CNSS2_DEBUG 38 #define QDSS_FILE_BUILD_STR "debug_" 39 #else 40 #define QDSS_FILE_BUILD_STR "perf_" 41 #endif 42 #define HW_V1_NUMBER "v1" 43 #define HW_V2_NUMBER "v2" 44 #define CE_MSI_NAME "CE" 45 46 #define QMI_WLFW_TIMEOUT_MS (plat_priv->ctrl_params.qmi_timeout) 47 #define QMI_WLFW_TIMEOUT_JF msecs_to_jiffies(QMI_WLFW_TIMEOUT_MS) 48 #define COEX_TIMEOUT QMI_WLFW_TIMEOUT_JF 49 #define IMS_TIMEOUT QMI_WLFW_TIMEOUT_JF 50 51 #define QMI_WLFW_MAX_RECV_BUF_SIZE SZ_8K 52 #define IMSPRIVATE_SERVICE_MAX_MSG_LEN SZ_8K 53 #define DMS_QMI_MAX_MSG_LEN SZ_256 54 #define MAX_SHADOW_REG_RESERVED 2 55 #define MAX_NUM_SHADOW_REG_V3 (QMI_WLFW_MAX_NUM_SHADOW_REG_V3_USAGE_V01 - \ 56 MAX_SHADOW_REG_RESERVED) 57 58 #define QMI_WLFW_MAC_READY_TIMEOUT_MS 50 59 #define QMI_WLFW_MAC_READY_MAX_RETRY 200 60 61 enum nm_modem_bit { 62 SLEEP_CLOCK_SELECT_INTERNAL_BIT = BIT(1), 63 HOST_CSTATE_BIT = BIT(2), 64 }; 65 66 #ifdef CONFIG_CNSS2_DEBUG 67 static bool ignore_qmi_failure; 68 #define CNSS_QMI_ASSERT() CNSS_ASSERT(ignore_qmi_failure) 69 void cnss_ignore_qmi_failure(bool ignore) 70 { 71 ignore_qmi_failure = ignore; 72 } 73 #else 74 #define CNSS_QMI_ASSERT() do { } while (0) 75 void cnss_ignore_qmi_failure(bool ignore) { } 76 #endif 77 78 static char *cnss_qmi_mode_to_str(enum cnss_driver_mode mode) 79 { 80 switch (mode) { 81 case CNSS_MISSION: 82 return "MISSION"; 83 case CNSS_FTM: 84 return "FTM"; 85 case CNSS_EPPING: 86 return "EPPING"; 87 case CNSS_WALTEST: 88 return "WALTEST"; 89 case CNSS_OFF: 90 return "OFF"; 91 case CNSS_CCPM: 92 return "CCPM"; 93 case CNSS_QVIT: 94 return "QVIT"; 95 case CNSS_CALIBRATION: 96 return "CALIBRATION"; 97 default: 98 return "UNKNOWN"; 99 } 100 } 101 102 static int qmi_send_wait(struct qmi_handle *qmi, void *req, void *rsp, 103 struct qmi_elem_info *req_ei, 104 struct qmi_elem_info *rsp_ei, 105 int req_id, size_t req_len, 106 unsigned long timeout) 107 { 108 struct qmi_txn txn; 109 int ret; 110 char *err_msg; 111 struct 
qmi_response_type_v01 *resp = rsp; 112 113 ret = qmi_txn_init(qmi, &txn, rsp_ei, rsp); 114 if (ret < 0) { 115 err_msg = "Qmi fail: fail to init txn,"; 116 goto out; 117 } 118 119 ret = qmi_send_request(qmi, NULL, &txn, req_id, 120 req_len, req_ei, req); 121 if (ret < 0) { 122 qmi_txn_cancel(&txn); 123 err_msg = "Qmi fail: fail to send req,"; 124 goto out; 125 } 126 127 ret = qmi_txn_wait(&txn, timeout); 128 if (ret < 0) { 129 err_msg = "Qmi fail: wait timeout,"; 130 goto out; 131 } else if (resp->result != QMI_RESULT_SUCCESS_V01) { 132 err_msg = "Qmi fail: request rejected,"; 133 cnss_pr_err("Qmi fail: respons with error:%d\n", 134 resp->error); 135 ret = -resp->result; 136 goto out; 137 } 138 139 cnss_pr_dbg("req %x success\n", req_id); 140 return 0; 141 out: 142 cnss_pr_err("%s req %x, ret %d\n", err_msg, req_id, ret); 143 return ret; 144 } 145 146 static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv) 147 { 148 struct wlfw_ind_register_req_msg_v01 *req; 149 struct wlfw_ind_register_resp_msg_v01 *resp; 150 struct qmi_txn txn; 151 int ret = 0; 152 153 cnss_pr_dbg("Sending indication register message, state: 0x%lx\n", 154 plat_priv->driver_state); 155 156 req = kzalloc(sizeof(*req), GFP_KERNEL); 157 if (!req) 158 return -ENOMEM; 159 160 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 161 if (!resp) { 162 kfree(req); 163 return -ENOMEM; 164 } 165 166 req->client_id_valid = 1; 167 req->client_id = WLFW_CLIENT_ID; 168 req->request_mem_enable_valid = 1; 169 req->request_mem_enable = 1; 170 req->fw_mem_ready_enable_valid = 1; 171 req->fw_mem_ready_enable = 1; 172 /* fw_ready indication is replaced by fw_init_done in HST/HSP */ 173 req->fw_init_done_enable_valid = 1; 174 req->fw_init_done_enable = 1; 175 req->pin_connect_result_enable_valid = 1; 176 req->pin_connect_result_enable = 1; 177 req->cal_done_enable_valid = 1; 178 req->cal_done_enable = 1; 179 req->qdss_trace_req_mem_enable_valid = 1; 180 req->qdss_trace_req_mem_enable = 1; 181 
req->qdss_trace_save_enable_valid = 1; 182 req->qdss_trace_save_enable = 1; 183 req->qdss_trace_free_enable_valid = 1; 184 req->qdss_trace_free_enable = 1; 185 req->respond_get_info_enable_valid = 1; 186 req->respond_get_info_enable = 1; 187 req->wfc_call_twt_config_enable_valid = 1; 188 req->wfc_call_twt_config_enable = 1; 189 190 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 191 wlfw_ind_register_resp_msg_v01_ei, resp); 192 if (ret < 0) { 193 cnss_pr_err("Failed to initialize txn for indication register request, err: %d\n", 194 ret); 195 goto out; 196 } 197 198 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 199 QMI_WLFW_IND_REGISTER_REQ_V01, 200 WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN, 201 wlfw_ind_register_req_msg_v01_ei, req); 202 if (ret < 0) { 203 qmi_txn_cancel(&txn); 204 cnss_pr_err("Failed to send indication register request, err: %d\n", 205 ret); 206 goto out; 207 } 208 209 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 210 if (ret < 0) { 211 cnss_pr_err("Failed to wait for response of indication register request, err: %d\n", 212 ret); 213 goto out; 214 } 215 216 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 217 cnss_pr_err("Indication register request failed, result: %d, err: %d\n", 218 resp->resp.result, resp->resp.error); 219 ret = -resp->resp.result; 220 goto out; 221 } 222 223 if (resp->fw_status_valid) { 224 if (resp->fw_status & QMI_WLFW_ALREADY_REGISTERED_V01) { 225 ret = -EALREADY; 226 goto qmi_registered; 227 } 228 } 229 230 kfree(req); 231 kfree(resp); 232 return 0; 233 234 out: 235 CNSS_QMI_ASSERT(); 236 237 qmi_registered: 238 kfree(req); 239 kfree(resp); 240 return ret; 241 } 242 243 static void cnss_wlfw_host_cap_parse_mlo(struct cnss_plat_data *plat_priv, 244 struct wlfw_host_cap_req_msg_v01 *req) 245 { 246 if (plat_priv->device_id == KIWI_DEVICE_ID || 247 plat_priv->device_id == MANGO_DEVICE_ID || 248 plat_priv->device_id == PEACH_DEVICE_ID) { 249 req->mlo_capable_valid = 1; 250 req->mlo_capable = 1; 251 
req->mlo_chip_id_valid = 1; 252 req->mlo_chip_id = 0; 253 req->mlo_group_id_valid = 1; 254 req->mlo_group_id = 0; 255 req->max_mlo_peer_valid = 1; 256 /* Max peer number generally won't change for the same device 257 * but needs to be synced with host driver. 258 */ 259 req->max_mlo_peer = 32; 260 req->mlo_num_chips_valid = 1; 261 req->mlo_num_chips = 1; 262 req->mlo_chip_info_valid = 1; 263 req->mlo_chip_info[0].chip_id = 0; 264 req->mlo_chip_info[0].num_local_links = 2; 265 req->mlo_chip_info[0].hw_link_id[0] = 0; 266 req->mlo_chip_info[0].hw_link_id[1] = 1; 267 req->mlo_chip_info[0].valid_mlo_link_id[0] = 1; 268 req->mlo_chip_info[0].valid_mlo_link_id[1] = 1; 269 } 270 } 271 272 static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv) 273 { 274 struct wlfw_host_cap_req_msg_v01 *req; 275 struct wlfw_host_cap_resp_msg_v01 *resp; 276 struct qmi_txn txn; 277 int ret = 0; 278 u64 iova_start = 0, iova_size = 0, 279 iova_ipa_start = 0, iova_ipa_size = 0; 280 u64 feature_list = 0; 281 282 cnss_pr_dbg("Sending host capability message, state: 0x%lx\n", 283 plat_priv->driver_state); 284 285 req = kzalloc(sizeof(*req), GFP_KERNEL); 286 if (!req) 287 return -ENOMEM; 288 289 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 290 if (!resp) { 291 kfree(req); 292 return -ENOMEM; 293 } 294 295 req->num_clients_valid = 1; 296 req->num_clients = 1; 297 cnss_pr_dbg("Number of clients is %d\n", req->num_clients); 298 299 req->wake_msi = cnss_bus_get_wake_irq(plat_priv); 300 if (req->wake_msi) { 301 cnss_pr_dbg("WAKE MSI base data is %d\n", req->wake_msi); 302 req->wake_msi_valid = 1; 303 } 304 305 req->bdf_support_valid = 1; 306 req->bdf_support = 1; 307 308 req->m3_support_valid = 1; 309 req->m3_support = 1; 310 311 req->m3_cache_support_valid = 1; 312 req->m3_cache_support = 1; 313 314 req->cal_done_valid = 1; 315 req->cal_done = plat_priv->cal_done; 316 cnss_pr_dbg("Calibration done is %d\n", plat_priv->cal_done); 317 318 if (plat_priv->sleep_clk) { 319 
req->nm_modem_valid = 1; 320 /* Notify firmware about the sleep clock selection, 321 * nm_modem_bit[1] is used for this purpose. 322 */ 323 req->nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; 324 } 325 326 if (plat_priv->supported_link_speed) { 327 req->pcie_link_info_valid = 1; 328 req->pcie_link_info.pci_link_speed = 329 plat_priv->supported_link_speed; 330 cnss_pr_dbg("Supported link speed in Host Cap %d\n", 331 plat_priv->supported_link_speed); 332 } 333 334 if (cnss_bus_is_smmu_s1_enabled(plat_priv) && 335 !cnss_bus_get_iova(plat_priv, &iova_start, &iova_size) && 336 !cnss_bus_get_iova_ipa(plat_priv, &iova_ipa_start, 337 &iova_ipa_size)) { 338 req->ddr_range_valid = 1; 339 req->ddr_range[0].start = iova_start; 340 req->ddr_range[0].size = iova_size + iova_ipa_size; 341 cnss_pr_dbg("Sending iova starting 0x%llx with size 0x%llx\n", 342 req->ddr_range[0].start, req->ddr_range[0].size); 343 } 344 345 req->host_build_type_valid = 1; 346 req->host_build_type = cnss_get_host_build_type(); 347 348 cnss_wlfw_host_cap_parse_mlo(plat_priv, req); 349 350 ret = cnss_get_feature_list(plat_priv, &feature_list); 351 if (!ret) { 352 req->feature_list_valid = 1; 353 req->feature_list = feature_list; 354 cnss_pr_dbg("Sending feature list 0x%llx\n", 355 req->feature_list); 356 } 357 358 if (cnss_get_platform_name(plat_priv, req->platform_name, 359 QMI_WLFW_MAX_PLATFORM_NAME_LEN_V01)) 360 req->platform_name_valid = 1; 361 362 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 363 wlfw_host_cap_resp_msg_v01_ei, resp); 364 if (ret < 0) { 365 cnss_pr_err("Failed to initialize txn for host capability request, err: %d\n", 366 ret); 367 goto out; 368 } 369 370 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 371 QMI_WLFW_HOST_CAP_REQ_V01, 372 WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN, 373 wlfw_host_cap_req_msg_v01_ei, req); 374 if (ret < 0) { 375 qmi_txn_cancel(&txn); 376 cnss_pr_err("Failed to send host capability request, err: %d\n", 377 ret); 378 goto out; 379 } 380 381 ret = 
qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 382 if (ret < 0) { 383 cnss_pr_err("Failed to wait for response of host capability request, err: %d\n", 384 ret); 385 goto out; 386 } 387 388 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 389 cnss_pr_err("Host capability request failed, result: %d, err: %d\n", 390 resp->resp.result, resp->resp.error); 391 ret = -resp->resp.result; 392 goto out; 393 } 394 395 kfree(req); 396 kfree(resp); 397 return 0; 398 399 out: 400 CNSS_QMI_ASSERT(); 401 kfree(req); 402 kfree(resp); 403 return ret; 404 } 405 406 int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv) 407 { 408 struct wlfw_respond_mem_req_msg_v01 *req; 409 struct wlfw_respond_mem_resp_msg_v01 *resp; 410 struct qmi_txn txn; 411 struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; 412 int ret = 0, i; 413 414 cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n", 415 plat_priv->driver_state); 416 417 req = kzalloc(sizeof(*req), GFP_KERNEL); 418 if (!req) 419 return -ENOMEM; 420 421 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 422 if (!resp) { 423 kfree(req); 424 return -ENOMEM; 425 } 426 427 if (plat_priv->fw_mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) { 428 cnss_pr_err("Invalid seg len %u\n", plat_priv->fw_mem_seg_len); 429 ret = -EINVAL; 430 goto out; 431 } 432 433 req->mem_seg_len = plat_priv->fw_mem_seg_len; 434 for (i = 0; i < req->mem_seg_len; i++) { 435 if (!fw_mem[i].pa || !fw_mem[i].size) { 436 if (fw_mem[i].type == 0) { 437 cnss_pr_err("Invalid memory for FW type, segment = %d\n", 438 i); 439 ret = -EINVAL; 440 goto out; 441 } 442 cnss_pr_err("Memory for FW is not available for type: %u\n", 443 fw_mem[i].type); 444 ret = -ENOMEM; 445 goto out; 446 } 447 448 cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n", 449 fw_mem[i].va, &fw_mem[i].pa, 450 fw_mem[i].size, fw_mem[i].type); 451 452 req->mem_seg[i].addr = fw_mem[i].pa; 453 req->mem_seg[i].size = fw_mem[i].size; 454 req->mem_seg[i].type = fw_mem[i].type; 455 } 456 457 ret = 
qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 458 wlfw_respond_mem_resp_msg_v01_ei, resp); 459 if (ret < 0) { 460 cnss_pr_err("Failed to initialize txn for respond memory request, err: %d\n", 461 ret); 462 goto out; 463 } 464 465 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 466 QMI_WLFW_RESPOND_MEM_REQ_V01, 467 WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN, 468 wlfw_respond_mem_req_msg_v01_ei, req); 469 if (ret < 0) { 470 qmi_txn_cancel(&txn); 471 cnss_pr_err("Failed to send respond memory request, err: %d\n", 472 ret); 473 goto out; 474 } 475 476 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 477 if (ret < 0) { 478 cnss_pr_err("Failed to wait for response of respond memory request, err: %d\n", 479 ret); 480 goto out; 481 } 482 483 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 484 cnss_pr_err("Respond memory request failed, result: %d, err: %d\n", 485 resp->resp.result, resp->resp.error); 486 ret = -resp->resp.result; 487 goto out; 488 } 489 490 kfree(req); 491 kfree(resp); 492 return 0; 493 494 out: 495 CNSS_QMI_ASSERT(); 496 kfree(req); 497 kfree(resp); 498 return ret; 499 } 500 501 int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv) 502 { 503 struct wlfw_cap_req_msg_v01 *req; 504 struct wlfw_cap_resp_msg_v01 *resp; 505 struct qmi_txn txn; 506 char *fw_build_timestamp; 507 int ret = 0, i; 508 509 cnss_pr_dbg("Sending target capability message, state: 0x%lx\n", 510 plat_priv->driver_state); 511 512 req = kzalloc(sizeof(*req), GFP_KERNEL); 513 if (!req) 514 return -ENOMEM; 515 516 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 517 if (!resp) { 518 kfree(req); 519 return -ENOMEM; 520 } 521 522 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 523 wlfw_cap_resp_msg_v01_ei, resp); 524 if (ret < 0) { 525 cnss_pr_err("Failed to initialize txn for target capability request, err: %d\n", 526 ret); 527 goto out; 528 } 529 530 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 531 QMI_WLFW_CAP_REQ_V01, 532 WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN, 533 
wlfw_cap_req_msg_v01_ei, req); 534 if (ret < 0) { 535 qmi_txn_cancel(&txn); 536 cnss_pr_err("Failed to send respond target capability request, err: %d\n", 537 ret); 538 goto out; 539 } 540 541 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 542 if (ret < 0) { 543 cnss_pr_err("Failed to wait for response of target capability request, err: %d\n", 544 ret); 545 goto out; 546 } 547 548 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 549 cnss_pr_err("Target capability request failed, result: %d, err: %d\n", 550 resp->resp.result, resp->resp.error); 551 ret = -resp->resp.result; 552 goto out; 553 } 554 555 if (resp->chip_info_valid) { 556 plat_priv->chip_info.chip_id = resp->chip_info.chip_id; 557 plat_priv->chip_info.chip_family = resp->chip_info.chip_family; 558 } 559 if (resp->board_info_valid) 560 plat_priv->board_info.board_id = resp->board_info.board_id; 561 else 562 plat_priv->board_info.board_id = 0xFF; 563 if (resp->soc_info_valid) 564 plat_priv->soc_info.soc_id = resp->soc_info.soc_id; 565 if (resp->fw_version_info_valid) { 566 plat_priv->fw_version_info.fw_version = 567 resp->fw_version_info.fw_version; 568 fw_build_timestamp = resp->fw_version_info.fw_build_timestamp; 569 fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN] = '\0'; 570 strlcpy(plat_priv->fw_version_info.fw_build_timestamp, 571 resp->fw_version_info.fw_build_timestamp, 572 QMI_WLFW_MAX_TIMESTAMP_LEN + 1); 573 } 574 if (resp->fw_build_id_valid) { 575 resp->fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN] = '\0'; 576 strlcpy(plat_priv->fw_build_id, resp->fw_build_id, 577 QMI_WLFW_MAX_BUILD_ID_LEN + 1); 578 } 579 /* FW will send aop retention volatage for qca6490 */ 580 if (resp->voltage_mv_valid) { 581 plat_priv->cpr_info.voltage = resp->voltage_mv; 582 cnss_pr_dbg("Voltage for CPR: %dmV\n", 583 plat_priv->cpr_info.voltage); 584 cnss_update_cpr_info(plat_priv); 585 } 586 if (resp->time_freq_hz_valid) { 587 plat_priv->device_freq_hz = resp->time_freq_hz; 588 cnss_pr_dbg("Device frequency is %d HZ\n", 589 
plat_priv->device_freq_hz); 590 } 591 if (resp->otp_version_valid) 592 plat_priv->otp_version = resp->otp_version; 593 if (resp->dev_mem_info_valid) { 594 for (i = 0; i < QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) { 595 plat_priv->dev_mem_info[i].start = 596 resp->dev_mem_info[i].start; 597 plat_priv->dev_mem_info[i].size = 598 resp->dev_mem_info[i].size; 599 cnss_pr_buf("Device memory info[%d]: start = 0x%llx, size = 0x%llx\n", 600 i, plat_priv->dev_mem_info[i].start, 601 plat_priv->dev_mem_info[i].size); 602 } 603 } 604 if (resp->fw_caps_valid) { 605 plat_priv->fw_pcie_gen_switch = 606 !!(resp->fw_caps & QMI_WLFW_HOST_PCIE_GEN_SWITCH_V01); 607 plat_priv->fw_aux_uc_support = 608 !!(resp->fw_caps & QMI_WLFW_AUX_UC_SUPPORT_V01); 609 cnss_pr_dbg("FW aux uc support capability: %d\n", 610 plat_priv->fw_aux_uc_support); 611 plat_priv->fw_caps = resp->fw_caps; 612 } 613 614 if (resp->hang_data_length_valid && 615 resp->hang_data_length && 616 resp->hang_data_length <= WLFW_MAX_HANG_EVENT_DATA_SIZE) 617 plat_priv->hang_event_data_len = resp->hang_data_length; 618 else 619 plat_priv->hang_event_data_len = 0; 620 621 if (resp->hang_data_addr_offset_valid) 622 plat_priv->hang_data_addr_offset = resp->hang_data_addr_offset; 623 else 624 plat_priv->hang_data_addr_offset = 0; 625 626 if (resp->hwid_bitmap_valid) 627 plat_priv->hwid_bitmap = resp->hwid_bitmap; 628 629 if (resp->ol_cpr_cfg_valid) 630 cnss_aop_ol_cpr_cfg_setup(plat_priv, &resp->ol_cpr_cfg); 631 632 /* Disable WLAN PDC in AOP firmware for boards which support on chip PMIC 633 * so AOP will ignore SW_CTRL changes and do not update regulator votes. 
634 **/ 635 for (i = 0; i < plat_priv->on_chip_pmic_devices_count; i++) { 636 if (plat_priv->board_info.board_id == 637 plat_priv->on_chip_pmic_board_ids[i]) { 638 cnss_pr_dbg("Disabling WLAN PDC for board_id: %02x\n", 639 plat_priv->board_info.board_id); 640 ret = cnss_aop_send_msg(plat_priv, 641 "{class: wlan_pdc, ss: rf, res: pdc, enable: 0}"); 642 if (ret < 0) 643 cnss_pr_dbg("Failed to Send AOP Msg"); 644 break; 645 } 646 } 647 648 cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, otp_version: 0x%x\n", 649 plat_priv->chip_info.chip_id, 650 plat_priv->chip_info.chip_family, 651 plat_priv->board_info.board_id, plat_priv->soc_info.soc_id, 652 plat_priv->otp_version); 653 cnss_pr_dbg("fw_version: 0x%x, fw_build_timestamp: %s, fw_build_id: %s, hwid_bitmap:0x%x\n", 654 plat_priv->fw_version_info.fw_version, 655 plat_priv->fw_version_info.fw_build_timestamp, 656 plat_priv->fw_build_id, 657 plat_priv->hwid_bitmap); 658 cnss_pr_dbg("Hang event params, Length: 0x%x, Offset Address: 0x%x\n", 659 plat_priv->hang_event_data_len, 660 plat_priv->hang_data_addr_offset); 661 662 kfree(req); 663 kfree(resp); 664 return 0; 665 666 out: 667 CNSS_QMI_ASSERT(); 668 kfree(req); 669 kfree(resp); 670 return ret; 671 } 672 673 static char *cnss_bdf_type_to_str(enum cnss_bdf_type bdf_type) 674 { 675 switch (bdf_type) { 676 case CNSS_BDF_BIN: 677 case CNSS_BDF_ELF: 678 return "BDF"; 679 case CNSS_BDF_REGDB: 680 return "REGDB"; 681 case CNSS_BDF_HDS: 682 return "HDS"; 683 default: 684 return "UNKNOWN"; 685 } 686 } 687 688 static int cnss_get_bdf_file_name(struct cnss_plat_data *plat_priv, 689 u32 bdf_type, char *filename, 690 u32 filename_len) 691 { 692 char filename_tmp[MAX_FIRMWARE_NAME_LEN]; 693 int ret = 0; 694 695 switch (bdf_type) { 696 case CNSS_BDF_ELF: 697 /* Board ID will be equal or less than 0xFF in GF mask case */ 698 if (plat_priv->board_info.board_id == 0xFF) { 699 if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK) 700 
snprintf(filename_tmp, filename_len, 701 ELF_BDF_FILE_NAME_GF); 702 else 703 snprintf(filename_tmp, filename_len, 704 ELF_BDF_FILE_NAME); 705 } else if (plat_priv->board_info.board_id < 0xFF) { 706 if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK) 707 snprintf(filename_tmp, filename_len, 708 ELF_BDF_FILE_NAME_GF_PREFIX "%02x", 709 plat_priv->board_info.board_id); 710 else 711 snprintf(filename_tmp, filename_len, 712 ELF_BDF_FILE_NAME_PREFIX "%02x", 713 plat_priv->board_info.board_id); 714 } else { 715 snprintf(filename_tmp, filename_len, 716 BDF_FILE_NAME_PREFIX "%02x.e%02x", 717 plat_priv->board_info.board_id >> 8 & 0xFF, 718 plat_priv->board_info.board_id & 0xFF); 719 } 720 break; 721 case CNSS_BDF_BIN: 722 if (plat_priv->board_info.board_id == 0xFF) { 723 if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK) 724 snprintf(filename_tmp, filename_len, 725 BIN_BDF_FILE_NAME_GF); 726 else 727 snprintf(filename_tmp, filename_len, 728 BIN_BDF_FILE_NAME); 729 } else if (plat_priv->board_info.board_id < 0xFF) { 730 if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK) 731 snprintf(filename_tmp, filename_len, 732 BIN_BDF_FILE_NAME_GF_PREFIX "%02x", 733 plat_priv->board_info.board_id); 734 else 735 snprintf(filename_tmp, filename_len, 736 BIN_BDF_FILE_NAME_PREFIX "%02x", 737 plat_priv->board_info.board_id); 738 } else { 739 snprintf(filename_tmp, filename_len, 740 BDF_FILE_NAME_PREFIX "%02x.b%02x", 741 plat_priv->board_info.board_id >> 8 & 0xFF, 742 plat_priv->board_info.board_id & 0xFF); 743 } 744 break; 745 case CNSS_BDF_REGDB: 746 snprintf(filename_tmp, filename_len, REGDB_FILE_NAME); 747 break; 748 case CNSS_BDF_HDS: 749 snprintf(filename_tmp, filename_len, HDS_FILE_NAME); 750 break; 751 default: 752 cnss_pr_err("Invalid BDF type: %d\n", 753 plat_priv->ctrl_params.bdf_type); 754 ret = -EINVAL; 755 break; 756 } 757 758 if (!ret) 759 cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp); 760 761 return ret; 762 } 763 764 int cnss_wlfw_bdf_dnld_send_sync(struct 
cnss_plat_data *plat_priv, 765 u32 bdf_type) 766 { 767 struct wlfw_bdf_download_req_msg_v01 *req; 768 struct wlfw_bdf_download_resp_msg_v01 *resp; 769 struct qmi_txn txn; 770 char filename[MAX_FIRMWARE_NAME_LEN]; 771 const struct firmware *fw_entry = NULL; 772 const u8 *temp; 773 unsigned int remaining; 774 int ret = 0; 775 776 cnss_pr_dbg("Sending QMI_WLFW_BDF_DOWNLOAD_REQ_V01 message for bdf_type: %d (%s), state: 0x%lx\n", 777 bdf_type, cnss_bdf_type_to_str(bdf_type), plat_priv->driver_state); 778 779 req = kzalloc(sizeof(*req), GFP_KERNEL); 780 if (!req) 781 return -ENOMEM; 782 783 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 784 if (!resp) { 785 kfree(req); 786 return -ENOMEM; 787 } 788 789 ret = cnss_get_bdf_file_name(plat_priv, bdf_type, 790 filename, sizeof(filename)); 791 if (ret) 792 goto err_req_fw; 793 794 cnss_pr_dbg("Invoke firmware_request_nowarn for %s\n", filename); 795 if (bdf_type == CNSS_BDF_REGDB) 796 ret = cnss_request_firmware_direct(plat_priv, &fw_entry, 797 filename); 798 else 799 ret = firmware_request_nowarn(&fw_entry, filename, 800 &plat_priv->plat_dev->dev); 801 802 if (ret) { 803 cnss_pr_err("Failed to load %s: %s, ret: %d\n", 804 cnss_bdf_type_to_str(bdf_type), filename, ret); 805 goto err_req_fw; 806 } 807 808 temp = fw_entry->data; 809 remaining = fw_entry->size; 810 811 cnss_pr_dbg("Downloading %s: %s, size: %u\n", 812 cnss_bdf_type_to_str(bdf_type), filename, remaining); 813 814 while (remaining) { 815 req->valid = 1; 816 req->file_id_valid = 1; 817 req->file_id = plat_priv->board_info.board_id; 818 req->total_size_valid = 1; 819 req->total_size = remaining; 820 req->seg_id_valid = 1; 821 req->data_valid = 1; 822 req->end_valid = 1; 823 req->bdf_type_valid = 1; 824 req->bdf_type = bdf_type; 825 826 if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) { 827 req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01; 828 } else { 829 req->data_len = remaining; 830 req->end = 1; 831 } 832 833 memcpy(req->data, temp, req->data_len); 834 835 ret = 
qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 836 wlfw_bdf_download_resp_msg_v01_ei, resp); 837 if (ret < 0) { 838 cnss_pr_err("Failed to initialize txn for QMI_WLFW_BDF_DOWNLOAD_REQ_V01 request for %s, error: %d\n", 839 cnss_bdf_type_to_str(bdf_type), ret); 840 goto err_send; 841 } 842 843 ret = qmi_send_request 844 (&plat_priv->qmi_wlfw, NULL, &txn, 845 QMI_WLFW_BDF_DOWNLOAD_REQ_V01, 846 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN, 847 wlfw_bdf_download_req_msg_v01_ei, req); 848 if (ret < 0) { 849 qmi_txn_cancel(&txn); 850 cnss_pr_err("Failed to send QMI_WLFW_BDF_DOWNLOAD_REQ_V01 request for %s, error: %d\n", 851 cnss_bdf_type_to_str(bdf_type), ret); 852 goto err_send; 853 } 854 855 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 856 if (ret < 0) { 857 cnss_pr_err("Timeout while waiting for FW response for QMI_WLFW_BDF_DOWNLOAD_REQ_V01 request for %s, err: %d\n", 858 cnss_bdf_type_to_str(bdf_type), ret); 859 goto err_send; 860 } 861 862 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 863 cnss_pr_err("FW response for QMI_WLFW_BDF_DOWNLOAD_REQ_V01 request for %s failed, result: %d, err: %d\n", 864 cnss_bdf_type_to_str(bdf_type), resp->resp.result, 865 resp->resp.error); 866 ret = -resp->resp.result; 867 goto err_send; 868 } 869 870 remaining -= req->data_len; 871 temp += req->data_len; 872 req->seg_id++; 873 } 874 875 release_firmware(fw_entry); 876 877 if (resp->host_bdf_data_valid) { 878 /* QCA6490 enable S3E regulator for IPA configuration only */ 879 if (!(resp->host_bdf_data & QMI_WLFW_HW_XPA_V01)) 880 cnss_enable_int_pow_amp_vreg(plat_priv); 881 882 plat_priv->cbc_file_download = 883 resp->host_bdf_data & QMI_WLFW_CBC_FILE_DOWNLOAD_V01; 884 cnss_pr_info("Host BDF config: HW_XPA: %d CalDB: %d\n", 885 resp->host_bdf_data & QMI_WLFW_HW_XPA_V01, 886 plat_priv->cbc_file_download); 887 } 888 kfree(req); 889 kfree(resp); 890 return 0; 891 892 err_send: 893 release_firmware(fw_entry); 894 err_req_fw: 895 if (!(bdf_type == CNSS_BDF_REGDB || 896 test_bit(CNSS_IN_REBOOT, 
&plat_priv->driver_state) || 897 ret == -EAGAIN)) 898 CNSS_QMI_ASSERT(); 899 kfree(req); 900 kfree(resp); 901 return ret; 902 } 903 904 int cnss_wlfw_tme_patch_dnld_send_sync(struct cnss_plat_data *plat_priv, 905 enum wlfw_tme_lite_file_type_v01 file) 906 { 907 struct wlfw_tme_lite_info_req_msg_v01 *req; 908 struct wlfw_tme_lite_info_resp_msg_v01 *resp; 909 struct qmi_txn txn; 910 struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem; 911 int ret = 0; 912 913 cnss_pr_dbg("Sending TME patch information message, state: 0x%lx\n", 914 plat_priv->driver_state); 915 916 if (plat_priv->device_id != PEACH_DEVICE_ID) 917 return 0; 918 919 req = kzalloc(sizeof(*req), GFP_KERNEL); 920 if (!req) 921 return -ENOMEM; 922 923 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 924 if (!resp) { 925 kfree(req); 926 return -ENOMEM; 927 } 928 929 if (!tme_lite_mem->pa || !tme_lite_mem->size) { 930 cnss_pr_err("Memory for TME patch is not available\n"); 931 ret = -ENOMEM; 932 goto out; 933 } 934 935 cnss_pr_dbg("TME-L patch memory, va: 0x%pK, pa: %pa, size: 0x%zx\n", 936 tme_lite_mem->va, &tme_lite_mem->pa, tme_lite_mem->size); 937 938 req->tme_file = file; 939 req->addr = plat_priv->tme_lite_mem.pa; 940 req->size = plat_priv->tme_lite_mem.size; 941 942 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 943 wlfw_tme_lite_info_resp_msg_v01_ei, resp); 944 if (ret < 0) { 945 cnss_pr_err("Failed to initialize txn for TME patch information request, err: %d\n", 946 ret); 947 goto out; 948 } 949 950 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 951 QMI_WLFW_TME_LITE_INFO_REQ_V01, 952 WLFW_TME_LITE_INFO_REQ_MSG_V01_MAX_MSG_LEN, 953 wlfw_tme_lite_info_req_msg_v01_ei, req); 954 if (ret < 0) { 955 qmi_txn_cancel(&txn); 956 cnss_pr_err("Failed to send TME patch information request, err: %d\n", 957 ret); 958 goto out; 959 } 960 961 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 962 if (ret < 0) { 963 cnss_pr_err("Failed to wait for response of TME patch information request, err: %d\n", 964 
ret); 965 goto out; 966 } 967 968 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 969 cnss_pr_err("TME patch information request failed, result: %d, err: %d\n", 970 resp->resp.result, resp->resp.error); 971 ret = -resp->resp.result; 972 goto out; 973 } 974 975 kfree(req); 976 kfree(resp); 977 return 0; 978 979 out: 980 kfree(req); 981 kfree(resp); 982 return ret; 983 } 984 985 int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv) 986 { 987 struct wlfw_m3_info_req_msg_v01 *req; 988 struct wlfw_m3_info_resp_msg_v01 *resp; 989 struct qmi_txn txn; 990 struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem; 991 int ret = 0; 992 993 cnss_pr_dbg("Sending M3 information message, state: 0x%lx\n", 994 plat_priv->driver_state); 995 996 req = kzalloc(sizeof(*req), GFP_KERNEL); 997 if (!req) 998 return -ENOMEM; 999 1000 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1001 if (!resp) { 1002 kfree(req); 1003 return -ENOMEM; 1004 } 1005 1006 if (!m3_mem->pa || !m3_mem->size) { 1007 cnss_pr_err("Memory for M3 is not available\n"); 1008 ret = -ENOMEM; 1009 goto out; 1010 } 1011 1012 cnss_pr_dbg("M3 memory, va: 0x%pK, pa: %pa, size: 0x%zx\n", 1013 m3_mem->va, &m3_mem->pa, m3_mem->size); 1014 1015 req->addr = plat_priv->m3_mem.pa; 1016 req->size = plat_priv->m3_mem.size; 1017 1018 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 1019 wlfw_m3_info_resp_msg_v01_ei, resp); 1020 if (ret < 0) { 1021 cnss_pr_err("Failed to initialize txn for M3 information request, err: %d\n", 1022 ret); 1023 goto out; 1024 } 1025 1026 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 1027 QMI_WLFW_M3_INFO_REQ_V01, 1028 WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, 1029 wlfw_m3_info_req_msg_v01_ei, req); 1030 if (ret < 0) { 1031 qmi_txn_cancel(&txn); 1032 cnss_pr_err("Failed to send M3 information request, err: %d\n", 1033 ret); 1034 goto out; 1035 } 1036 1037 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 1038 if (ret < 0) { 1039 cnss_pr_err("Failed to wait for response of M3 information request, err: 
%d\n", 1040 ret); 1041 goto out; 1042 } 1043 1044 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 1045 cnss_pr_err("M3 information request failed, result: %d, err: %d\n", 1046 resp->resp.result, resp->resp.error); 1047 ret = -resp->resp.result; 1048 goto out; 1049 } 1050 1051 kfree(req); 1052 kfree(resp); 1053 return 0; 1054 1055 out: 1056 CNSS_QMI_ASSERT(); 1057 kfree(req); 1058 kfree(resp); 1059 return ret; 1060 } 1061 1062 int cnss_wlfw_aux_dnld_send_sync(struct cnss_plat_data *plat_priv) 1063 { 1064 struct wlfw_aux_uc_info_req_msg_v01 *req; 1065 struct wlfw_aux_uc_info_resp_msg_v01 *resp; 1066 struct qmi_txn txn; 1067 struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem; 1068 int ret = 0; 1069 1070 cnss_pr_dbg("Sending QMI_WLFW_AUX_UC_INFO_REQ_V01 message, state: 0x%lx\n", 1071 plat_priv->driver_state); 1072 1073 req = kzalloc(sizeof(*req), GFP_KERNEL); 1074 if (!req) 1075 return -ENOMEM; 1076 1077 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1078 if (!resp) { 1079 kfree(req); 1080 return -ENOMEM; 1081 } 1082 1083 if (!aux_mem->pa || !aux_mem->size) { 1084 cnss_pr_err("Memory for AUX is not available\n"); 1085 ret = -ENOMEM; 1086 goto out; 1087 } 1088 1089 cnss_pr_dbg("AUX memory, va: 0x%pK, pa: %pa, size: 0x%zx\n", 1090 aux_mem->va, &aux_mem->pa, aux_mem->size); 1091 1092 req->addr = plat_priv->aux_mem.pa; 1093 req->size = plat_priv->aux_mem.size; 1094 1095 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 1096 wlfw_aux_uc_info_resp_msg_v01_ei, resp); 1097 if (ret < 0) { 1098 cnss_pr_err("Failed to initialize txn for QMI_WLFW_AUX_UC_INFO_REQ_V01 request, err: %d\n", 1099 ret); 1100 goto out; 1101 } 1102 1103 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 1104 QMI_WLFW_AUX_UC_INFO_REQ_V01, 1105 WLFW_AUX_UC_INFO_REQ_MSG_V01_MAX_MSG_LEN, 1106 wlfw_aux_uc_info_req_msg_v01_ei, req); 1107 if (ret < 0) { 1108 qmi_txn_cancel(&txn); 1109 cnss_pr_err("Failed to send QMI_WLFW_AUX_UC_INFO_REQ_V01 request, err: %d\n", 1110 ret); 1111 goto out; 1112 } 1113 1114 ret = 
qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 1115 if (ret < 0) { 1116 cnss_pr_err("Failed to wait for response of QMI_WLFW_AUX_UC_INFO_REQ_V01 request, err: %d\n", 1117 ret); 1118 goto out; 1119 } 1120 1121 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 1122 cnss_pr_err("QMI_WLFW_AUX_UC_INFO_REQ_V01 request failed, result: %d, err: %d\n", 1123 resp->resp.result, resp->resp.error); 1124 ret = -resp->resp.result; 1125 goto out; 1126 } 1127 1128 kfree(req); 1129 kfree(resp); 1130 return 0; 1131 1132 out: 1133 CNSS_QMI_ASSERT(); 1134 kfree(req); 1135 kfree(resp); 1136 return ret; 1137 } 1138 1139 int cnss_wlfw_wlan_mac_req_send_sync(struct cnss_plat_data *plat_priv, 1140 u8 *mac, u32 mac_len) 1141 { 1142 struct wlfw_mac_addr_req_msg_v01 req; 1143 struct wlfw_mac_addr_resp_msg_v01 resp = {0}; 1144 struct qmi_txn txn; 1145 int ret; 1146 1147 if (!plat_priv || !mac || mac_len != QMI_WLFW_MAC_ADDR_SIZE_V01) 1148 return -EINVAL; 1149 1150 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 1151 wlfw_mac_addr_resp_msg_v01_ei, &resp); 1152 if (ret < 0) { 1153 cnss_pr_err("Failed to initialize txn for mac req, err: %d\n", 1154 ret); 1155 ret = -EIO; 1156 goto out; 1157 } 1158 1159 cnss_pr_dbg("Sending WLAN mac req [%pM], state: 0x%lx\n", 1160 mac, plat_priv->driver_state); 1161 memcpy(req.mac_addr, mac, mac_len); 1162 req.mac_addr_valid = 1; 1163 1164 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 1165 QMI_WLFW_MAC_ADDR_REQ_V01, 1166 WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN, 1167 wlfw_mac_addr_req_msg_v01_ei, &req); 1168 if (ret < 0) { 1169 qmi_txn_cancel(&txn); 1170 cnss_pr_err("Failed to send mac req, err: %d\n", ret); 1171 ret = -EIO; 1172 goto out; 1173 } 1174 1175 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 1176 if (ret < 0) { 1177 cnss_pr_err("Failed to wait for resp of mac req, err: %d\n", 1178 ret); 1179 ret = -EIO; 1180 goto out; 1181 } 1182 1183 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 1184 cnss_pr_err("WLAN mac req failed, result: %d, err: %d\n", 1185 
resp.resp.result); 1186 ret = -resp.resp.result; 1187 } 1188 out: 1189 return ret; 1190 } 1191 1192 int cnss_wlfw_qdss_data_send_sync(struct cnss_plat_data *plat_priv, char *file_name, 1193 u32 total_size) 1194 { 1195 int ret = 0; 1196 struct wlfw_qdss_trace_data_req_msg_v01 *req; 1197 struct wlfw_qdss_trace_data_resp_msg_v01 *resp; 1198 unsigned char *p_qdss_trace_data_temp, *p_qdss_trace_data = NULL; 1199 unsigned int remaining; 1200 struct qmi_txn txn; 1201 1202 cnss_pr_dbg("%s\n", __func__); 1203 1204 req = kzalloc(sizeof(*req), GFP_KERNEL); 1205 if (!req) 1206 return -ENOMEM; 1207 1208 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1209 if (!resp) { 1210 kfree(req); 1211 return -ENOMEM; 1212 } 1213 1214 p_qdss_trace_data = kzalloc(total_size, GFP_KERNEL); 1215 if (!p_qdss_trace_data) { 1216 ret = ENOMEM; 1217 goto end; 1218 } 1219 1220 remaining = total_size; 1221 p_qdss_trace_data_temp = p_qdss_trace_data; 1222 while (remaining && resp->end == 0) { 1223 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 1224 wlfw_qdss_trace_data_resp_msg_v01_ei, resp); 1225 1226 if (ret < 0) { 1227 cnss_pr_err("Fail to init txn for QDSS trace resp %d\n", 1228 ret); 1229 goto fail; 1230 } 1231 1232 ret = qmi_send_request 1233 (&plat_priv->qmi_wlfw, NULL, &txn, 1234 QMI_WLFW_QDSS_TRACE_DATA_REQ_V01, 1235 WLFW_QDSS_TRACE_DATA_REQ_MSG_V01_MAX_MSG_LEN, 1236 wlfw_qdss_trace_data_req_msg_v01_ei, req); 1237 1238 if (ret < 0) { 1239 qmi_txn_cancel(&txn); 1240 cnss_pr_err("Fail to send QDSS trace data req %d\n", 1241 ret); 1242 goto fail; 1243 } 1244 1245 ret = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout); 1246 1247 if (ret < 0) { 1248 cnss_pr_err("QDSS trace resp wait failed with rc %d\n", 1249 ret); 1250 goto fail; 1251 } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 1252 cnss_pr_err("QMI QDSS trace request rejected, result:%d error:%d\n", 1253 resp->resp.result, resp->resp.error); 1254 ret = -resp->resp.result; 1255 goto fail; 1256 } else { 1257 ret = 0; 1258 } 1259 1260 
cnss_pr_dbg("%s: response total size %d data len %d", 1261 __func__, resp->total_size, resp->data_len); 1262 1263 if ((resp->total_size_valid == 1 && 1264 resp->total_size == total_size) && 1265 (resp->seg_id_valid == 1 && resp->seg_id == req->seg_id) && 1266 (resp->data_valid == 1 && 1267 resp->data_len <= QMI_WLFW_MAX_DATA_SIZE_V01) && 1268 resp->data_len <= remaining) { 1269 memcpy(p_qdss_trace_data_temp, 1270 resp->data, resp->data_len); 1271 } else { 1272 cnss_pr_err("%s: Unmatched qdss trace data, Expect total_size %u, seg_id %u, Recv total_size_valid %u, total_size %u, seg_id_valid %u, seg_id %u, data_len_valid %u, data_len %u", 1273 __func__, 1274 total_size, req->seg_id, 1275 resp->total_size_valid, 1276 resp->total_size, 1277 resp->seg_id_valid, 1278 resp->seg_id, 1279 resp->data_valid, 1280 resp->data_len); 1281 ret = -1; 1282 goto fail; 1283 } 1284 1285 remaining -= resp->data_len; 1286 p_qdss_trace_data_temp += resp->data_len; 1287 req->seg_id++; 1288 } 1289 1290 if (remaining == 0 && (resp->end_valid && resp->end)) { 1291 ret = cnss_genl_send_msg(p_qdss_trace_data, 1292 CNSS_GENL_MSG_TYPE_QDSS, file_name, 1293 total_size); 1294 if (ret < 0) { 1295 cnss_pr_err("Fail to save QDSS trace data: %d\n", 1296 ret); 1297 ret = -1; 1298 goto fail; 1299 } 1300 } else { 1301 cnss_pr_err("%s: QDSS trace file corrupted: remaining %u, end_valid %u, end %u", 1302 __func__, 1303 remaining, resp->end_valid, resp->end); 1304 ret = -1; 1305 goto fail; 1306 } 1307 1308 fail: 1309 kfree(p_qdss_trace_data); 1310 1311 end: 1312 kfree(req); 1313 kfree(resp); 1314 return ret; 1315 } 1316 1317 void cnss_get_qdss_cfg_filename(struct cnss_plat_data *plat_priv, 1318 char *filename, u32 filename_len, 1319 bool fallback_file) 1320 { 1321 char filename_tmp[MAX_FIRMWARE_NAME_LEN]; 1322 char *build_str = QDSS_FILE_BUILD_STR; 1323 1324 if (fallback_file) 1325 build_str = ""; 1326 1327 if (plat_priv->device_version.major_version == FW_V2_NUMBER) 1328 snprintf(filename_tmp, filename_len, 
QDSS_TRACE_CONFIG_FILE 1329 "_%s%s.cfg", build_str, HW_V2_NUMBER); 1330 else 1331 snprintf(filename_tmp, filename_len, QDSS_TRACE_CONFIG_FILE 1332 "_%s%s.cfg", build_str, HW_V1_NUMBER); 1333 1334 cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp); 1335 } 1336 1337 int cnss_wlfw_qdss_dnld_send_sync(struct cnss_plat_data *plat_priv) 1338 { 1339 struct wlfw_qdss_trace_config_download_req_msg_v01 *req; 1340 struct wlfw_qdss_trace_config_download_resp_msg_v01 *resp; 1341 struct qmi_txn txn; 1342 const struct firmware *fw_entry = NULL; 1343 const u8 *temp; 1344 char qdss_cfg_filename[MAX_FIRMWARE_NAME_LEN]; 1345 unsigned int remaining; 1346 int ret = 0; 1347 1348 cnss_pr_dbg("Sending QDSS config download message, state: 0x%lx\n", 1349 plat_priv->driver_state); 1350 1351 req = kzalloc(sizeof(*req), GFP_KERNEL); 1352 if (!req) 1353 return -ENOMEM; 1354 1355 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1356 if (!resp) { 1357 kfree(req); 1358 return -ENOMEM; 1359 } 1360 1361 cnss_get_qdss_cfg_filename(plat_priv, qdss_cfg_filename, 1362 sizeof(qdss_cfg_filename), false); 1363 1364 cnss_pr_dbg("Invoke firmware_request_nowarn for %s\n", 1365 qdss_cfg_filename); 1366 ret = cnss_request_firmware_direct(plat_priv, &fw_entry, 1367 qdss_cfg_filename); 1368 if (ret) { 1369 cnss_pr_dbg("Unable to load %s ret %d, try default file\n", 1370 qdss_cfg_filename, ret); 1371 cnss_get_qdss_cfg_filename(plat_priv, qdss_cfg_filename, 1372 sizeof(qdss_cfg_filename), 1373 true); 1374 cnss_pr_dbg("Invoke firmware_request_nowarn for %s\n", 1375 qdss_cfg_filename); 1376 ret = cnss_request_firmware_direct(plat_priv, &fw_entry, 1377 qdss_cfg_filename); 1378 if (ret) { 1379 cnss_pr_err("Unable to load %s ret %d\n", 1380 qdss_cfg_filename, ret); 1381 goto err_req_fw; 1382 } 1383 } 1384 1385 temp = fw_entry->data; 1386 remaining = fw_entry->size; 1387 1388 cnss_pr_dbg("Downloading QDSS: %s, size: %u\n", 1389 qdss_cfg_filename, remaining); 1390 1391 while (remaining) { 1392 req->total_size_valid 
= 1; 1393 req->total_size = remaining; 1394 req->seg_id_valid = 1; 1395 req->data_valid = 1; 1396 req->end_valid = 1; 1397 1398 if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) { 1399 req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01; 1400 } else { 1401 req->data_len = remaining; 1402 req->end = 1; 1403 } 1404 1405 memcpy(req->data, temp, req->data_len); 1406 1407 ret = qmi_txn_init 1408 (&plat_priv->qmi_wlfw, &txn, 1409 wlfw_qdss_trace_config_download_resp_msg_v01_ei, 1410 resp); 1411 if (ret < 0) { 1412 cnss_pr_err("Failed to initialize txn for QDSS download request, err: %d\n", 1413 ret); 1414 goto err_send; 1415 } 1416 1417 ret = qmi_send_request 1418 (&plat_priv->qmi_wlfw, NULL, &txn, 1419 QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01, 1420 WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN, 1421 wlfw_qdss_trace_config_download_req_msg_v01_ei, req); 1422 if (ret < 0) { 1423 qmi_txn_cancel(&txn); 1424 cnss_pr_err("Failed to send respond QDSS download request, err: %d\n", 1425 ret); 1426 goto err_send; 1427 } 1428 1429 ret = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout); 1430 if (ret < 0) { 1431 cnss_pr_err("Failed to wait for response of QDSS download request, err: %d\n", 1432 ret); 1433 goto err_send; 1434 } 1435 1436 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 1437 cnss_pr_err("QDSS download request failed, result: %d, err: %d\n", 1438 resp->resp.result, resp->resp.error); 1439 ret = -resp->resp.result; 1440 goto err_send; 1441 } 1442 1443 remaining -= req->data_len; 1444 temp += req->data_len; 1445 req->seg_id++; 1446 } 1447 1448 release_firmware(fw_entry); 1449 kfree(req); 1450 kfree(resp); 1451 return 0; 1452 1453 err_send: 1454 release_firmware(fw_entry); 1455 err_req_fw: 1456 1457 kfree(req); 1458 kfree(resp); 1459 return ret; 1460 } 1461 1462 static int wlfw_send_qdss_trace_mode_req 1463 (struct cnss_plat_data *plat_priv, 1464 enum wlfw_qdss_trace_mode_enum_v01 mode, 1465 unsigned long long option) 1466 { 1467 int rc = 0; 1468 int tmp = 0; 1469 
struct wlfw_qdss_trace_mode_req_msg_v01 *req; 1470 struct wlfw_qdss_trace_mode_resp_msg_v01 *resp; 1471 struct qmi_txn txn; 1472 1473 if (!plat_priv) 1474 return -ENODEV; 1475 1476 req = kzalloc(sizeof(*req), GFP_KERNEL); 1477 if (!req) 1478 return -ENOMEM; 1479 1480 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1481 if (!resp) { 1482 kfree(req); 1483 return -ENOMEM; 1484 } 1485 1486 req->mode_valid = 1; 1487 req->mode = mode; 1488 req->option_valid = 1; 1489 req->option = option; 1490 1491 tmp = plat_priv->hw_trc_override; 1492 1493 req->hw_trc_disable_override_valid = 1; 1494 req->hw_trc_disable_override = 1495 (tmp > QMI_PARAM_DISABLE_V01 ? QMI_PARAM_DISABLE_V01 : 1496 (tmp < 0 ? QMI_PARAM_INVALID_V01 : tmp)); 1497 1498 cnss_pr_dbg("%s: mode %u, option %llu, hw_trc_disable_override: %u", 1499 __func__, mode, option, req->hw_trc_disable_override); 1500 1501 rc = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 1502 wlfw_qdss_trace_mode_resp_msg_v01_ei, resp); 1503 if (rc < 0) { 1504 cnss_pr_err("Fail to init txn for QDSS Mode resp %d\n", 1505 rc); 1506 goto out; 1507 } 1508 1509 rc = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 1510 QMI_WLFW_QDSS_TRACE_MODE_REQ_V01, 1511 WLFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_MSG_LEN, 1512 wlfw_qdss_trace_mode_req_msg_v01_ei, req); 1513 if (rc < 0) { 1514 qmi_txn_cancel(&txn); 1515 cnss_pr_err("Fail to send QDSS Mode req %d\n", rc); 1516 goto out; 1517 } 1518 1519 rc = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout); 1520 if (rc < 0) { 1521 cnss_pr_err("QDSS Mode resp wait failed with rc %d\n", 1522 rc); 1523 goto out; 1524 } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 1525 cnss_pr_err("QMI QDSS Mode request rejected, result:%d error:%d\n", 1526 resp->resp.result, resp->resp.error); 1527 rc = -resp->resp.result; 1528 goto out; 1529 } 1530 1531 kfree(resp); 1532 kfree(req); 1533 return rc; 1534 out: 1535 kfree(resp); 1536 kfree(req); 1537 CNSS_QMI_ASSERT(); 1538 return rc; 1539 } 1540 1541 int 
wlfw_qdss_trace_start(struct cnss_plat_data *plat_priv) 1542 { 1543 return wlfw_send_qdss_trace_mode_req(plat_priv, 1544 QMI_WLFW_QDSS_TRACE_ON_V01, 0); 1545 } 1546 1547 int wlfw_qdss_trace_stop(struct cnss_plat_data *plat_priv, unsigned long long option) 1548 { 1549 return wlfw_send_qdss_trace_mode_req(plat_priv, QMI_WLFW_QDSS_TRACE_OFF_V01, 1550 option); 1551 } 1552 1553 int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv, 1554 enum cnss_driver_mode mode) 1555 { 1556 struct wlfw_wlan_mode_req_msg_v01 *req; 1557 struct wlfw_wlan_mode_resp_msg_v01 *resp; 1558 struct qmi_txn txn; 1559 int ret = 0; 1560 1561 if (!plat_priv) 1562 return -ENODEV; 1563 1564 cnss_pr_dbg("Sending mode message, mode: %s(%d), state: 0x%lx\n", 1565 cnss_qmi_mode_to_str(mode), mode, plat_priv->driver_state); 1566 1567 if (mode == CNSS_OFF && 1568 test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) { 1569 cnss_pr_dbg("Recovery is in progress, ignore mode off request\n"); 1570 return 0; 1571 } 1572 1573 req = kzalloc(sizeof(*req), GFP_KERNEL); 1574 if (!req) 1575 return -ENOMEM; 1576 1577 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1578 if (!resp) { 1579 kfree(req); 1580 return -ENOMEM; 1581 } 1582 1583 req->mode = (enum wlfw_driver_mode_enum_v01)mode; 1584 req->hw_debug_valid = 1; 1585 req->hw_debug = 0; 1586 1587 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 1588 wlfw_wlan_mode_resp_msg_v01_ei, resp); 1589 if (ret < 0) { 1590 cnss_pr_err("Failed to initialize txn for mode request, mode: %s(%d), err: %d\n", 1591 cnss_qmi_mode_to_str(mode), mode, ret); 1592 goto out; 1593 } 1594 1595 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 1596 QMI_WLFW_WLAN_MODE_REQ_V01, 1597 WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN, 1598 wlfw_wlan_mode_req_msg_v01_ei, req); 1599 if (ret < 0) { 1600 qmi_txn_cancel(&txn); 1601 cnss_pr_err("Failed to send mode request, mode: %s(%d), err: %d\n", 1602 cnss_qmi_mode_to_str(mode), mode, ret); 1603 goto out; 1604 } 1605 1606 ret = 
qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 1607 if (ret < 0) { 1608 cnss_pr_err("Failed to wait for response of mode request, mode: %s(%d), err: %d\n", 1609 cnss_qmi_mode_to_str(mode), mode, ret); 1610 goto out; 1611 } 1612 1613 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 1614 cnss_pr_err("Mode request failed, mode: %s(%d), result: %d, err: %d\n", 1615 cnss_qmi_mode_to_str(mode), mode, resp->resp.result, 1616 resp->resp.error); 1617 ret = -resp->resp.result; 1618 goto out; 1619 } 1620 1621 kfree(req); 1622 kfree(resp); 1623 return 0; 1624 1625 out: 1626 if (mode == CNSS_OFF) { 1627 cnss_pr_dbg("WLFW service is disconnected while sending mode off request\n"); 1628 ret = 0; 1629 } else { 1630 CNSS_QMI_ASSERT(); 1631 } 1632 kfree(req); 1633 kfree(resp); 1634 return ret; 1635 } 1636 1637 int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv, 1638 struct cnss_wlan_enable_cfg *config, 1639 const char *host_version) 1640 { 1641 struct wlfw_wlan_cfg_req_msg_v01 *req; 1642 struct wlfw_wlan_cfg_resp_msg_v01 *resp; 1643 struct qmi_txn txn; 1644 u32 i, ce_id, num_vectors, user_base_data, base_vector; 1645 int ret = 0; 1646 1647 if (!plat_priv) 1648 return -ENODEV; 1649 1650 cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n", 1651 plat_priv->driver_state); 1652 1653 req = kzalloc(sizeof(*req), GFP_KERNEL); 1654 if (!req) 1655 return -ENOMEM; 1656 1657 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1658 if (!resp) { 1659 kfree(req); 1660 return -ENOMEM; 1661 } 1662 1663 req->host_version_valid = 1; 1664 strlcpy(req->host_version, host_version, 1665 QMI_WLFW_MAX_STR_LEN_V01 + 1); 1666 1667 req->tgt_cfg_valid = 1; 1668 if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01) 1669 req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01; 1670 else 1671 req->tgt_cfg_len = config->num_ce_tgt_cfg; 1672 for (i = 0; i < req->tgt_cfg_len; i++) { 1673 req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num; 1674 req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir; 1675 
req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries; 1676 req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max; 1677 req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags; 1678 } 1679 1680 req->svc_cfg_valid = 1; 1681 if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01) 1682 req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01; 1683 else 1684 req->svc_cfg_len = config->num_ce_svc_pipe_cfg; 1685 for (i = 0; i < req->svc_cfg_len; i++) { 1686 req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id; 1687 req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir; 1688 req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num; 1689 } 1690 1691 if (plat_priv->device_id != KIWI_DEVICE_ID && 1692 plat_priv->device_id != MANGO_DEVICE_ID && 1693 plat_priv->device_id != PEACH_DEVICE_ID) { 1694 if (plat_priv->device_id == QCN7605_DEVICE_ID && 1695 config->num_shadow_reg_cfg) { 1696 req->shadow_reg_valid = 1; 1697 if (config->num_shadow_reg_cfg > 1698 QMI_WLFW_MAX_NUM_SHADOW_REG_V01) 1699 req->shadow_reg_len = 1700 QMI_WLFW_MAX_NUM_SHADOW_REG_V01; 1701 else 1702 req->shadow_reg_len = 1703 config->num_shadow_reg_cfg; 1704 memcpy(req->shadow_reg, config->shadow_reg_cfg, 1705 sizeof(struct wlfw_shadow_reg_cfg_s_v01) * 1706 req->shadow_reg_len); 1707 } else { 1708 req->shadow_reg_v2_valid = 1; 1709 1710 if (config->num_shadow_reg_v2_cfg > 1711 QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01) 1712 req->shadow_reg_v2_len = 1713 QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01; 1714 else 1715 req->shadow_reg_v2_len = 1716 config->num_shadow_reg_v2_cfg; 1717 1718 memcpy(req->shadow_reg_v2, config->shadow_reg_v2_cfg, 1719 sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01) * 1720 req->shadow_reg_v2_len); 1721 } 1722 } else { 1723 req->shadow_reg_v3_valid = 1; 1724 if (config->num_shadow_reg_v3_cfg > 1725 MAX_NUM_SHADOW_REG_V3) 1726 req->shadow_reg_v3_len = MAX_NUM_SHADOW_REG_V3; 1727 else 1728 req->shadow_reg_v3_len = config->num_shadow_reg_v3_cfg; 1729 1730 plat_priv->num_shadow_regs_v3 = 
req->shadow_reg_v3_len; 1731 1732 cnss_pr_dbg("Shadow reg v3 len: %d\n", 1733 plat_priv->num_shadow_regs_v3); 1734 1735 memcpy(req->shadow_reg_v3, config->shadow_reg_v3_cfg, 1736 sizeof(struct wlfw_shadow_reg_v3_cfg_s_v01) * 1737 req->shadow_reg_v3_len); 1738 } 1739 1740 if (config->rri_over_ddr_cfg_valid) { 1741 req->rri_over_ddr_cfg_valid = 1; 1742 req->rri_over_ddr_cfg.base_addr_low = 1743 config->rri_over_ddr_cfg.base_addr_low; 1744 req->rri_over_ddr_cfg.base_addr_high = 1745 config->rri_over_ddr_cfg.base_addr_high; 1746 } 1747 if (config->send_msi_ce) { 1748 ret = cnss_bus_get_msi_assignment(plat_priv, 1749 CE_MSI_NAME, 1750 &num_vectors, 1751 &user_base_data, 1752 &base_vector); 1753 if (!ret) { 1754 req->msi_cfg_valid = 1; 1755 req->msi_cfg_len = QMI_WLFW_MAX_NUM_CE_V01; 1756 for (ce_id = 0; ce_id < QMI_WLFW_MAX_NUM_CE_V01; 1757 ce_id++) { 1758 req->msi_cfg[ce_id].ce_id = ce_id; 1759 req->msi_cfg[ce_id].msi_vector = 1760 (ce_id % num_vectors) + base_vector; 1761 } 1762 } 1763 } 1764 1765 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 1766 wlfw_wlan_cfg_resp_msg_v01_ei, resp); 1767 if (ret < 0) { 1768 cnss_pr_err("Failed to initialize txn for WLAN config request, err: %d\n", 1769 ret); 1770 goto out; 1771 } 1772 1773 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 1774 QMI_WLFW_WLAN_CFG_REQ_V01, 1775 WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN, 1776 wlfw_wlan_cfg_req_msg_v01_ei, req); 1777 if (ret < 0) { 1778 qmi_txn_cancel(&txn); 1779 cnss_pr_err("Failed to send WLAN config request, err: %d\n", 1780 ret); 1781 goto out; 1782 } 1783 1784 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 1785 if (ret < 0) { 1786 cnss_pr_err("Failed to wait for response of WLAN config request, err: %d\n", 1787 ret); 1788 goto out; 1789 } 1790 1791 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 1792 cnss_pr_err("WLAN config request failed, result: %d, err: %d\n", 1793 resp->resp.result, resp->resp.error); 1794 ret = -resp->resp.result; 1795 goto out; 1796 } 1797 1798 
kfree(req); 1799 kfree(resp); 1800 return 0; 1801 1802 out: 1803 CNSS_QMI_ASSERT(); 1804 kfree(req); 1805 kfree(resp); 1806 return ret; 1807 } 1808 1809 int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv, 1810 u32 offset, u32 mem_type, 1811 u32 data_len, u8 *data) 1812 { 1813 struct wlfw_athdiag_read_req_msg_v01 *req; 1814 struct wlfw_athdiag_read_resp_msg_v01 *resp; 1815 struct qmi_txn txn; 1816 int ret = 0; 1817 1818 if (!plat_priv) 1819 return -ENODEV; 1820 1821 if (!data || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) { 1822 cnss_pr_err("Invalid parameters for athdiag read: data %pK, data_len %u\n", 1823 data, data_len); 1824 return -EINVAL; 1825 } 1826 1827 cnss_pr_dbg("athdiag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n", 1828 plat_priv->driver_state, offset, mem_type, data_len); 1829 1830 req = kzalloc(sizeof(*req), GFP_KERNEL); 1831 if (!req) 1832 return -ENOMEM; 1833 1834 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1835 if (!resp) { 1836 kfree(req); 1837 return -ENOMEM; 1838 } 1839 1840 req->offset = offset; 1841 req->mem_type = mem_type; 1842 req->data_len = data_len; 1843 1844 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 1845 wlfw_athdiag_read_resp_msg_v01_ei, resp); 1846 if (ret < 0) { 1847 cnss_pr_err("Failed to initialize txn for athdiag read request, err: %d\n", 1848 ret); 1849 goto out; 1850 } 1851 1852 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 1853 QMI_WLFW_ATHDIAG_READ_REQ_V01, 1854 WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN, 1855 wlfw_athdiag_read_req_msg_v01_ei, req); 1856 if (ret < 0) { 1857 qmi_txn_cancel(&txn); 1858 cnss_pr_err("Failed to send athdiag read request, err: %d\n", 1859 ret); 1860 goto out; 1861 } 1862 1863 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 1864 if (ret < 0) { 1865 cnss_pr_err("Failed to wait for response of athdiag read request, err: %d\n", 1866 ret); 1867 goto out; 1868 } 1869 1870 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 1871 cnss_pr_err("Athdiag 
read request failed, result: %d, err: %d\n", 1872 resp->resp.result, resp->resp.error); 1873 ret = -resp->resp.result; 1874 goto out; 1875 } 1876 1877 if (!resp->data_valid || resp->data_len != data_len) { 1878 cnss_pr_err("athdiag read data is invalid, data_valid = %u, data_len = %u\n", 1879 resp->data_valid, resp->data_len); 1880 ret = -EINVAL; 1881 goto out; 1882 } 1883 1884 memcpy(data, resp->data, resp->data_len); 1885 1886 kfree(req); 1887 kfree(resp); 1888 return 0; 1889 1890 out: 1891 kfree(req); 1892 kfree(resp); 1893 return ret; 1894 } 1895 1896 int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv, 1897 u32 offset, u32 mem_type, 1898 u32 data_len, u8 *data) 1899 { 1900 struct wlfw_athdiag_write_req_msg_v01 *req; 1901 struct wlfw_athdiag_write_resp_msg_v01 *resp; 1902 struct qmi_txn txn; 1903 int ret = 0; 1904 1905 if (!plat_priv) 1906 return -ENODEV; 1907 1908 if (!data || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) { 1909 cnss_pr_err("Invalid parameters for athdiag write: data %pK, data_len %u\n", 1910 data, data_len); 1911 return -EINVAL; 1912 } 1913 1914 cnss_pr_dbg("athdiag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %pK\n", 1915 plat_priv->driver_state, offset, mem_type, data_len, data); 1916 1917 req = kzalloc(sizeof(*req), GFP_KERNEL); 1918 if (!req) 1919 return -ENOMEM; 1920 1921 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1922 if (!resp) { 1923 kfree(req); 1924 return -ENOMEM; 1925 } 1926 1927 req->offset = offset; 1928 req->mem_type = mem_type; 1929 req->data_len = data_len; 1930 memcpy(req->data, data, data_len); 1931 1932 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 1933 wlfw_athdiag_write_resp_msg_v01_ei, resp); 1934 if (ret < 0) { 1935 cnss_pr_err("Failed to initialize txn for athdiag write request, err: %d\n", 1936 ret); 1937 goto out; 1938 } 1939 1940 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 1941 QMI_WLFW_ATHDIAG_WRITE_REQ_V01, 1942 WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN, 
1943 wlfw_athdiag_write_req_msg_v01_ei, req); 1944 if (ret < 0) { 1945 qmi_txn_cancel(&txn); 1946 cnss_pr_err("Failed to send athdiag write request, err: %d\n", 1947 ret); 1948 goto out; 1949 } 1950 1951 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 1952 if (ret < 0) { 1953 cnss_pr_err("Failed to wait for response of athdiag write request, err: %d\n", 1954 ret); 1955 goto out; 1956 } 1957 1958 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 1959 cnss_pr_err("Athdiag write request failed, result: %d, err: %d\n", 1960 resp->resp.result, resp->resp.error); 1961 ret = -resp->resp.result; 1962 goto out; 1963 } 1964 1965 kfree(req); 1966 kfree(resp); 1967 return 0; 1968 1969 out: 1970 kfree(req); 1971 kfree(resp); 1972 return ret; 1973 } 1974 1975 int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv, 1976 u8 fw_log_mode) 1977 { 1978 struct wlfw_ini_req_msg_v01 *req; 1979 struct wlfw_ini_resp_msg_v01 *resp; 1980 struct qmi_txn txn; 1981 int ret = 0; 1982 1983 if (!plat_priv) 1984 return -ENODEV; 1985 1986 cnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n", 1987 plat_priv->driver_state, fw_log_mode); 1988 1989 req = kzalloc(sizeof(*req), GFP_KERNEL); 1990 if (!req) 1991 return -ENOMEM; 1992 1993 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 1994 if (!resp) { 1995 kfree(req); 1996 return -ENOMEM; 1997 } 1998 1999 req->enablefwlog_valid = 1; 2000 req->enablefwlog = fw_log_mode; 2001 2002 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 2003 wlfw_ini_resp_msg_v01_ei, resp); 2004 if (ret < 0) { 2005 cnss_pr_err("Failed to initialize txn for ini request, fw_log_mode: %d, err: %d\n", 2006 fw_log_mode, ret); 2007 goto out; 2008 } 2009 2010 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 2011 QMI_WLFW_INI_REQ_V01, 2012 WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN, 2013 wlfw_ini_req_msg_v01_ei, req); 2014 if (ret < 0) { 2015 qmi_txn_cancel(&txn); 2016 cnss_pr_err("Failed to send ini request, fw_log_mode: %d, err: %d\n", 2017 fw_log_mode, ret); 2018 goto 
out; 2019 } 2020 2021 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 2022 if (ret < 0) { 2023 cnss_pr_err("Failed to wait for response of ini request, fw_log_mode: %d, err: %d\n", 2024 fw_log_mode, ret); 2025 goto out; 2026 } 2027 2028 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 2029 cnss_pr_err("Ini request failed, fw_log_mode: %d, result: %d, err: %d\n", 2030 fw_log_mode, resp->resp.result, resp->resp.error); 2031 ret = -resp->resp.result; 2032 goto out; 2033 } 2034 2035 kfree(req); 2036 kfree(resp); 2037 return 0; 2038 2039 out: 2040 kfree(req); 2041 kfree(resp); 2042 return ret; 2043 } 2044 2045 int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv) 2046 { 2047 struct wlfw_pcie_gen_switch_req_msg_v01 req; 2048 struct wlfw_pcie_gen_switch_resp_msg_v01 resp = {0}; 2049 struct qmi_txn txn; 2050 int ret = 0; 2051 2052 if (!plat_priv) 2053 return -ENODEV; 2054 2055 if (plat_priv->pcie_gen_speed == QMI_PCIE_GEN_SPEED_INVALID_V01 || 2056 !plat_priv->fw_pcie_gen_switch) { 2057 cnss_pr_dbg("PCIE Gen speed not setup\n"); 2058 return 0; 2059 } 2060 2061 cnss_pr_dbg("Sending PCIE Gen speed: %d state: 0x%lx\n", 2062 plat_priv->pcie_gen_speed, plat_priv->driver_state); 2063 req.pcie_speed = (enum wlfw_pcie_gen_speed_v01) 2064 plat_priv->pcie_gen_speed; 2065 2066 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 2067 wlfw_pcie_gen_switch_resp_msg_v01_ei, &resp); 2068 if (ret < 0) { 2069 cnss_pr_err("Failed to initialize txn for PCIE speed switch err: %d\n", 2070 ret); 2071 goto out; 2072 } 2073 2074 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 2075 QMI_WLFW_PCIE_GEN_SWITCH_REQ_V01, 2076 WLFW_PCIE_GEN_SWITCH_REQ_MSG_V01_MAX_MSG_LEN, 2077 wlfw_pcie_gen_switch_req_msg_v01_ei, &req); 2078 if (ret < 0) { 2079 qmi_txn_cancel(&txn); 2080 cnss_pr_err("Failed to send PCIE speed switch, err: %d\n", ret); 2081 goto out; 2082 } 2083 2084 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 2085 if (ret < 0) { 2086 cnss_pr_err("Failed to wait for PCIE Gen 
switch resp, err: %d\n", 2087 ret); 2088 goto out; 2089 } 2090 2091 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 2092 cnss_pr_err("PCIE Gen Switch req failed, Speed: %d, result: %d, err: %d\n", 2093 plat_priv->pcie_gen_speed, resp.resp.result, 2094 resp.resp.error); 2095 ret = -resp.resp.result; 2096 } 2097 out: 2098 /* Reset PCIE Gen speed after one time use */ 2099 plat_priv->pcie_gen_speed = QMI_PCIE_GEN_SPEED_INVALID_V01; 2100 return ret; 2101 } 2102 2103 int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv) 2104 { 2105 struct wlfw_antenna_switch_req_msg_v01 *req; 2106 struct wlfw_antenna_switch_resp_msg_v01 *resp; 2107 struct qmi_txn txn; 2108 int ret = 0; 2109 2110 if (!plat_priv) 2111 return -ENODEV; 2112 2113 cnss_pr_dbg("Sending antenna switch sync request, state: 0x%lx\n", 2114 plat_priv->driver_state); 2115 2116 req = kzalloc(sizeof(*req), GFP_KERNEL); 2117 if (!req) 2118 return -ENOMEM; 2119 2120 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 2121 if (!resp) { 2122 kfree(req); 2123 return -ENOMEM; 2124 } 2125 2126 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, 2127 wlfw_antenna_switch_resp_msg_v01_ei, resp); 2128 if (ret < 0) { 2129 cnss_pr_err("Failed to initialize txn for antenna switch request, err: %d\n", 2130 ret); 2131 goto out; 2132 } 2133 2134 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, 2135 QMI_WLFW_ANTENNA_SWITCH_REQ_V01, 2136 WLFW_ANTENNA_SWITCH_REQ_MSG_V01_MAX_MSG_LEN, 2137 wlfw_antenna_switch_req_msg_v01_ei, req); 2138 if (ret < 0) { 2139 qmi_txn_cancel(&txn); 2140 cnss_pr_err("Failed to send antenna switch request, err: %d\n", 2141 ret); 2142 goto out; 2143 } 2144 2145 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 2146 if (ret < 0) { 2147 cnss_pr_err("Failed to wait for response of antenna switch request, err: %d\n", 2148 ret); 2149 goto out; 2150 } 2151 2152 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 2153 cnss_pr_dbg("Antenna switch request failed, result: %d, err: %d\n", 2154 resp->resp.result, 
			    resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}

	/* Cache the FW-reported antenna configuration when present. */
	if (resp->antenna_valid)
		plat_priv->antenna = resp->antenna;

	cnss_pr_dbg("Antenna valid: %u, antenna 0x%llx\n",
		    resp->antenna_valid, resp->antenna);

	kfree(req);
	kfree(resp);
	return 0;

out:
	kfree(req);
	kfree(resp);
	return ret;
}

/* Send the host's antenna grant bitmap (plat_priv->grant) to WLAN FW
 * synchronously over the WLFW QMI channel.
 */
int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv)
{
	struct wlfw_antenna_grant_req_msg_v01 *req;
	struct wlfw_antenna_grant_resp_msg_v01 *resp;
	struct qmi_txn txn;
	int ret = 0;

	if (!plat_priv)
		return -ENODEV;

	cnss_pr_dbg("Sending antenna grant sync request, state: 0x%lx, grant 0x%llx\n",
		    plat_priv->driver_state, plat_priv->grant);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req->grant_valid = 1;
	req->grant = plat_priv->grant;

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_antenna_grant_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize txn for antenna grant request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_ANTENNA_GRANT_REQ_V01,
			       WLFW_ANTENNA_GRANT_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_antenna_grant_req_msg_v01_ei, req);
	if (ret < 0) {
		/* Txn must be canceled when the request was never sent. */
		qmi_txn_cancel(&txn);
		cnss_pr_err("Failed to send antenna grant request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Failed to wait for response of antenna grant request, err: %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Antenna grant request failed, result: %d, err: %d\n",
			    resp->resp.result, resp->resp.error);
		/* NOTE(review): negated QMI result, not an errno; matches
		 * the convention used by the sibling senders in this file.
		 */
		ret = -resp->resp.result;
		goto out;
	}

	kfree(req);
	kfree(resp);
	return 0;

out:
	kfree(req);
	kfree(resp);
	return ret;
}

/* Report the host-allocated QDSS trace memory segments (address/size/type)
 * to WLAN FW so it can start streaming trace data into them.
 */
int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
{
	struct wlfw_qdss_trace_mem_info_req_msg_v01 *req;
	struct wlfw_qdss_trace_mem_info_resp_msg_v01 *resp;
	struct qmi_txn txn;
	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
	int ret = 0;
	int i;

	cnss_pr_dbg("Sending QDSS trace mem info, state: 0x%lx\n",
		    plat_priv->driver_state);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	/* Bound check before copying into the fixed-size QMI array. */
	if (plat_priv->qdss_mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
		cnss_pr_err("Invalid seg len %u\n", plat_priv->qdss_mem_seg_len);
		ret = -EINVAL;
		goto out;
	}

	req->mem_seg_len = plat_priv->qdss_mem_seg_len;
	for (i = 0; i < req->mem_seg_len; i++) {
		cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
			    qdss_mem[i].va, &qdss_mem[i].pa,
			    qdss_mem[i].size, qdss_mem[i].type);

		req->mem_seg[i].addr = qdss_mem[i].pa;
		req->mem_seg[i].size = qdss_mem[i].size;
		req->mem_seg[i].type = qdss_mem[i].type;
	}

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_qdss_trace_mem_info_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Fail to initialize txn for QDSS trace mem request: err %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01,
			       WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_qdss_trace_mem_info_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Fail to send QDSS trace mem info request: err %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Fail to wait for response of QDSS trace mem info request, err %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("QDSS trace mem info request failed, result: %d, err: %d\n",
			    resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}

	kfree(req);
	kfree(resp);
	return 0;

out:
	kfree(req);
	kfree(resp);
	return ret;
}

/* Forward a host-originated WFC call status (cfg.mode) to WLAN FW.
 * Only valid once FW has finished initialization (CNSS_FW_READY).
 */
int cnss_wlfw_send_host_wfc_call_status(struct cnss_plat_data *plat_priv,
					struct cnss_wfc_cfg cfg)
{
	struct wlfw_wfc_call_status_req_msg_v01 *req;
	struct wlfw_wfc_call_status_resp_msg_v01 *resp;
	struct qmi_txn txn;
	int ret = 0;

	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
		cnss_pr_err("Drop host WFC indication as FW not initialized\n");
		return -EINVAL;
	}
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req->wfc_call_active_valid = 1;
	req->wfc_call_active = cfg.mode;

	cnss_pr_dbg("CNSS->FW: WFC_CALL_REQ: state: 0x%lx\n",
		    plat_priv->driver_state);

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_wfc_call_status_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Txn Init: Err %d\n",
			    ret);
		goto out;
	}

	cnss_pr_dbg("Send WFC Mode: %d\n", cfg.mode);
	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_WFC_CALL_STATUS_REQ_V01,
			       WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wfc_call_status_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Send Err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("FW->CNSS: WFC_CALL_RSP: QMI Wait Err: %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("FW->CNSS: WFC_CALL_RSP: Result: %d Err: %d\n",
			    resp->resp.result, resp->resp.error);
		/* NOTE(review): this path returns -EINVAL while sibling
		 * senders return -resp->resp.result — confirm intentional.
		 */
		ret = -EINVAL;
		goto out;
	}
	ret = 0;
out:
	kfree(req);
	kfree(resp);
	return ret;

}
/* Relay an IMS WFC call status indication to WLAN FW synchronously. */
static int cnss_wlfw_wfc_call_status_send_sync
	(struct cnss_plat_data *plat_priv,
	 const struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg)
{
	struct wlfw_wfc_call_status_req_msg_v01 *req;
	struct wlfw_wfc_call_status_resp_msg_v01 *resp;
	struct qmi_txn txn;
	int ret = 0;

	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
		cnss_pr_err("Drop IMS WFC indication as FW not initialized\n");
		return -EINVAL;
	}
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	/**
	 * WFC Call r1 design has CNSS as pass thru using opaque hex buffer.
	 * But in r2 update QMI structure is expanded and as an effect qmi
	 * decoded structures have padding. Thus we cannot use buffer design.
	 * For backward compatibility for r1 design copy only wfc_call_active
	 * value in hex buffer.
	 */
	req->wfc_call_status_len = sizeof(ind_msg->wfc_call_active);
	req->wfc_call_status[0] = ind_msg->wfc_call_active;

	/* wfc_call_active is mandatory in IMS indication */
	req->wfc_call_active_valid = 1;
	req->wfc_call_active = ind_msg->wfc_call_active;
	req->all_wfc_calls_held_valid = ind_msg->all_wfc_calls_held_valid;
	req->all_wfc_calls_held = ind_msg->all_wfc_calls_held;
	req->is_wfc_emergency_valid = ind_msg->is_wfc_emergency_valid;
	req->is_wfc_emergency = ind_msg->is_wfc_emergency;
	req->twt_ims_start_valid = ind_msg->twt_ims_start_valid;
	req->twt_ims_start = ind_msg->twt_ims_start;
	req->twt_ims_int_valid = ind_msg->twt_ims_int_valid;
	req->twt_ims_int = ind_msg->twt_ims_int;
	req->media_quality_valid = ind_msg->media_quality_valid;
	req->media_quality =
		(enum wlfw_wfc_media_quality_v01)ind_msg->media_quality;

	cnss_pr_dbg("CNSS->FW: WFC_CALL_REQ: state: 0x%lx\n",
		    plat_priv->driver_state);

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_wfc_call_status_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Txn Init: Err %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_WFC_CALL_STATUS_REQ_V01,
			       WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wfc_call_status_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Send Err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("FW->CNSS: WFC_CALL_RSP: QMI Wait Err: %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("FW->CNSS: WFC_CALL_RSP: Result: %d Err: %d\n",
			    resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}
	ret = 0;
out:
	kfree(req);
	kfree(resp);
	return ret;
}

/* Push the host's dynamic feature mask (plat_priv->dynamic_feature) to
 * WLAN FW synchronously.
 */
int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv)
{
	struct wlfw_dynamic_feature_mask_req_msg_v01 *req;
	struct wlfw_dynamic_feature_mask_resp_msg_v01 *resp;
	struct qmi_txn txn;
	int ret = 0;

	cnss_pr_dbg("Sending dynamic feature mask 0x%llx, state: 0x%lx\n",
		    plat_priv->dynamic_feature,
		    plat_priv->driver_state);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req->mask_valid = 1;
	req->mask = plat_priv->dynamic_feature;

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_dynamic_feature_mask_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Fail to initialize txn for dynamic feature mask request: err %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request
		(&plat_priv->qmi_wlfw, NULL, &txn,
		 QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01,
		 WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN,
		 wlfw_dynamic_feature_mask_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Fail to send dynamic feature mask request: err %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Fail to wait for response of dynamic feature mask request, err %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Dynamic feature mask request failed, result: %d, err: %d\n",
			    resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}

	/* Success and failure share the cleanup path; ret is 0 here. */
out:
	kfree(req);
	kfree(resp);
	return ret;
}

/* Send an opaque "get info" command buffer of cmd_len bytes to WLAN FW.
 * The response arrives asynchronously via the respond_get_info indication.
 */
int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
				 void *cmd, int cmd_len)
{
	struct wlfw_get_info_req_msg_v01 *req;
	struct wlfw_get_info_resp_msg_v01 *resp;
	struct qmi_txn txn;
	int ret = 0;

	cnss_pr_buf("Sending get info message, type: %d, cmd length: %d, state: 0x%lx\n",
		    type, cmd_len, plat_priv->driver_state);

	/* Reject payloads larger than the QMI message data array. */
	if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req->type = type;
	req->data_len = cmd_len;
	memcpy(req->data, cmd, req->data_len);

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_get_info_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize txn for get info request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_GET_INFO_REQ_V01,
			       WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_get_info_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Failed to send get info request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Failed to wait for response of get info request, err: %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Get info request failed, result: %d, err: %d\n",
			    resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}

	kfree(req);
	kfree(resp);
	return 0;

out:
	kfree(req);
	kfree(resp);
	return ret;
}

/* Return the configured QMI timeout (ms) from platform control params. */
unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
{
	return QMI_WLFW_TIMEOUT_MS;
}

/* Indication: FW asks the host to allocate memory segments it needs. */
static void cnss_wlfw_request_mem_ind_cb(struct qmi_handle *qmi_wlfw,
					 struct sockaddr_qrtr *sq,
					 struct qmi_txn
					 *txn, const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
	const struct wlfw_request_mem_ind_msg_v01 *ind_msg = data;
	int i;

	cnss_pr_dbg("Received QMI WLFW request memory indication\n");

	if (!txn) {
		cnss_pr_err("Spurious indication\n");
		return;
	}

	/* Bound check before copying into plat_priv->fw_mem[]. */
	if (ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
		cnss_pr_err("Invalid seg len %u\n", ind_msg->mem_seg_len);
		return;
	}

	plat_priv->fw_mem_seg_len = ind_msg->mem_seg_len;
	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		cnss_pr_dbg("FW requests for memory, size: 0x%x, type: %u\n",
			    ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
		plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type;
		plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size;
		/* DDR segments not yet mapped must be physically contiguous. */
		if (!plat_priv->fw_mem[i].va &&
		    plat_priv->fw_mem[i].type == CNSS_MEM_TYPE_DDR)
			plat_priv->fw_mem[i].attrs |=
				DMA_ATTR_FORCE_CONTIGUOUS;
		if (plat_priv->fw_mem[i].type == CNSS_MEM_CAL_V01)
			plat_priv->cal_mem = &plat_priv->fw_mem[i];
	}

	/* Actual allocation happens in the driver event work context. */
	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
			       0, NULL);
}

/* Indication: FW confirms the previously provided memory is usable. */
static void cnss_wlfw_fw_mem_ready_ind_cb(struct qmi_handle *qmi_wlfw,
					  struct sockaddr_qrtr *sq,
					  struct qmi_txn *txn, const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);

	cnss_pr_dbg("Received QMI WLFW FW memory ready indication\n");

	if (!txn) {
		cnss_pr_err("Spurious indication\n");
		return;
	}

	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_MEM_READY,
			       0, NULL);
}

/**
 * cnss_wlfw_fw_ready_ind_cb: FW ready indication handler (Helium arch)
 *
 * This event is not required for HST/ HSP as FW calibration done is
 * provided in QMI_WLFW_CAL_DONE_IND_V01
 */
static
void cnss_wlfw_fw_ready_ind_cb(struct qmi_handle *qmi_wlfw,
			       struct sockaddr_qrtr *sq,
			       struct qmi_txn *txn, const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
	struct cnss_cal_info *cal_info;

	if (!txn) {
		cnss_pr_err("Spurious indication\n");
		return;
	}

	/* HST (QCA6390) / HSP (QCA6490) signal cal-done separately. */
	if (plat_priv->device_id == QCA6390_DEVICE_ID ||
	    plat_priv->device_id == QCA6490_DEVICE_ID) {
		cnss_pr_dbg("Ignore FW Ready Indication for HST/HSP");
		return;
	}

	cnss_pr_dbg("Received QMI WLFW FW ready indication.\n");
	cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
	if (!cal_info)
		return;

	/* cal_info ownership transfers to the event handler. */
	cal_info->cal_status = CNSS_CAL_DONE;
	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
			       0, cal_info);
}

/* Indication: FW initialization finished; kick the FW_READY event. */
static void cnss_wlfw_fw_init_done_ind_cb(struct qmi_handle *qmi_wlfw,
					  struct sockaddr_qrtr *sq,
					  struct qmi_txn *txn, const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);

	cnss_pr_dbg("Received QMI WLFW FW initialization done indication\n");

	if (!txn) {
		cnss_pr_err("Spurious indication\n");
		return;
	}

	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_READY, 0, NULL);
}

/* Indication: FW reports pin-connect test results; cache valid fields. */
static void cnss_wlfw_pin_result_ind_cb(struct qmi_handle *qmi_wlfw,
					struct sockaddr_qrtr *sq,
					struct qmi_txn *txn, const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
	const struct wlfw_pin_connect_result_ind_msg_v01 *ind_msg = data;

	cnss_pr_dbg("Received QMI WLFW pin connect result indication\n");

	if (!txn) {
		cnss_pr_err("Spurious indication\n");
		return;
	}

	if (ind_msg->pwr_pin_result_valid)
		plat_priv->pin_result.fw_pwr_pin_result =
		    ind_msg->pwr_pin_result;
	if (ind_msg->phy_io_pin_result_valid)
		plat_priv->pin_result.fw_phy_io_pin_result =
		    ind_msg->phy_io_pin_result;
	if (ind_msg->rf_pin_result_valid)
		plat_priv->pin_result.fw_rf_pin_result = ind_msg->rf_pin_result;

	cnss_pr_dbg("Pin connect Result: pwr_pin: 0x%x phy_io_pin: 0x%x rf_io_pin: 0x%x\n",
		    ind_msg->pwr_pin_result, ind_msg->phy_io_pin_result,
		    ind_msg->rf_pin_result);
}

/* Tell FW the size of the calibration file the host downloaded, so FW can
 * decide whether a fresh calibration is needed. Stack-allocated messages:
 * no kfree cleanup path required.
 */
int cnss_wlfw_cal_report_req_send_sync(struct cnss_plat_data *plat_priv,
				       u32 cal_file_download_size)
{
	struct wlfw_cal_report_req_msg_v01 req = {0};
	struct wlfw_cal_report_resp_msg_v01 resp = {0};
	struct qmi_txn txn;
	int ret = 0;

	cnss_pr_dbg("Sending cal file report request. File size: %d, state: 0x%lx\n",
		    cal_file_download_size, plat_priv->driver_state);
	req.cal_file_download_size_valid = 1;
	req.cal_file_download_size = cal_file_download_size;

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_cal_report_resp_msg_v01_ei, &resp);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize txn for Cal Report request, err: %d\n",
			    ret);
		goto out;
	}
	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_CAL_REPORT_REQ_V01,
			       WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cal_report_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Failed to send Cal Report request, err: %d\n",
			    ret);
		goto out;
	}
	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Failed to wait for response of Cal Report request, err: %d\n",
			    ret);
		goto out;
	}
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Cal Report request failed, result: %d, err: %d\n",
			    resp.resp.result, resp.resp.error);
		ret = -resp.resp.result;
		goto out;
	}
out:
	return ret;
}

/* Indication: FW finished cold-boot calibration; record upload size and
 * post COLD_BOOT_CAL_DONE.
 * NOTE(review): ind fields are read for the debug prints before the !txn
 * spurious-indication check — confirm data is always valid here.
 */
static void cnss_wlfw_cal_done_ind_cb(struct qmi_handle *qmi_wlfw,
				      struct sockaddr_qrtr *sq,
				      struct qmi_txn *txn, const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
	const struct wlfw_cal_done_ind_msg_v01 *ind = data;
	struct cnss_cal_info *cal_info;

	cnss_pr_dbg("Received Cal done indication. File size: %d\n",
		    ind->cal_file_upload_size);
	cnss_pr_info("Calibration took %d ms\n",
		     jiffies_to_msecs(jiffies - plat_priv->cal_time));
	if (!txn) {
		cnss_pr_err("Spurious indication\n");
		return;
	}
	if (ind->cal_file_upload_size_valid)
		plat_priv->cal_file_size = ind->cal_file_upload_size;
	cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
	if (!cal_info)
		return;

	/* cal_info ownership transfers to the event handler. */
	cal_info->cal_status = CNSS_CAL_DONE;
	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
			       0, cal_info);
}

/* Indication: FW requests host memory for QDSS trace; record the segment
 * sizes/types and defer allocation to the event work context.
 */
static void cnss_wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi_wlfw,
						struct sockaddr_qrtr *sq,
						struct qmi_txn *txn,
						const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
	const struct wlfw_qdss_trace_req_mem_ind_msg_v01 *ind_msg = data;
	int i;

	cnss_pr_dbg("Received QMI WLFW QDSS trace request mem indication\n");

	if (!txn) {
		cnss_pr_err("Spurious indication\n");
		return;
	}

	/* Refuse a second allocation while one is outstanding. */
	if (plat_priv->qdss_mem_seg_len) {
		cnss_pr_err("Ignore double allocation for QDSS trace, current len %u\n",
			    plat_priv->qdss_mem_seg_len);
		return;
	}

	if (ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
		cnss_pr_err("Invalid seg len %u\n", ind_msg->mem_seg_len);
		return;
	}

	plat_priv->qdss_mem_seg_len = ind_msg->mem_seg_len;
	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
		cnss_pr_dbg("QDSS requests for memory, size: 0x%x, type: %u\n",
			    ind_msg->mem_seg[i].size,
			    ind_msg->mem_seg[i].type);
		plat_priv->qdss_mem[i].type = ind_msg->mem_seg[i].type;
		plat_priv->qdss_mem[i].size = ind_msg->mem_seg[i].size;
	}

	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
			       0, NULL);
}

/**
 * cnss_wlfw_fw_mem_file_save_ind_cb: Save given FW mem to filesystem
 *
 * QDSS_TRACE_SAVE_IND feature is overloaded to provide any host allocated
 * fw memory segment for dumping to file system. Only one type of mem can be
 * saved per indication and is provided in mem seg index 0.
 *
 * Return: None
 */
static void cnss_wlfw_fw_mem_file_save_ind_cb(struct qmi_handle *qmi_wlfw,
					      struct sockaddr_qrtr *sq,
					      struct qmi_txn *txn,
					      const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
	const struct wlfw_qdss_trace_save_ind_msg_v01 *ind_msg = data;
	struct cnss_qmi_event_fw_mem_file_save_data *event_data;
	int i = 0;

	if (!txn || !data) {
		cnss_pr_err("Spurious indication\n");
		return;
	}
	/* NOTE(review): mem_seg[0].type is read here and below before
	 * mem_seg_valid is checked — confirm the decoded array is always
	 * zero-initialized when the optional TLV is absent.
	 */
	cnss_pr_dbg_buf("QMI fw_mem_file_save: source: %d mem_seg: %d type: %u len: %u\n",
			ind_msg->source, ind_msg->mem_seg_valid,
			ind_msg->mem_seg[0].type, ind_msg->mem_seg_len);

	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
	if (!event_data)
		return;

	event_data->mem_type = ind_msg->mem_seg[0].type;
	event_data->mem_seg_len = ind_msg->mem_seg_len;
	event_data->total_size = ind_msg->total_size;

	if (ind_msg->mem_seg_valid) {
		/* NOTE(review): bound is QMI_WLFW_MAX_STR_LEN_V01, not
		 * QMI_WLFW_MAX_NUM_MEM_SEG_V01 — verify against the sizes of
		 * event_data->mem_seg[] and ind_msg->mem_seg[].
		 */
		if (ind_msg->mem_seg_len > QMI_WLFW_MAX_STR_LEN_V01) {
			cnss_pr_err("Invalid seg len indication\n");
			goto free_event_data;
		}
		for (i = 0; i < ind_msg->mem_seg_len; i++) {
			event_data->mem_seg[i].addr = ind_msg->mem_seg[i].addr;
			event_data->mem_seg[i].size = ind_msg->mem_seg[i].size;
			/* All segments in one indication must share a type. */
			if (event_data->mem_type != ind_msg->mem_seg[i].type) {
				cnss_pr_err("FW Mem file save ind cannot have multiple mem types\n");
				goto free_event_data;
			}
			cnss_pr_dbg_buf("seg-%d: addr 0x%llx size 0x%x\n",
					i, ind_msg->mem_seg[i].addr,
					ind_msg->mem_seg[i].size);
		}
	}

	if (ind_msg->file_name_valid)
		strlcpy(event_data->file_name, ind_msg->file_name,
			QMI_WLFW_MAX_STR_LEN_V01 + 1);
	/* source == 1 means WCSS ETB trace; otherwise pick the file name by
	 * memory type (QDSS DDR trace vs generic FW memory dump).
	 */
	if (ind_msg->source == 1) {
		if (!ind_msg->file_name_valid)
			strlcpy(event_data->file_name, "qdss_trace_wcss_etb",
				QMI_WLFW_MAX_STR_LEN_V01 + 1);
		cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA,
				       0, event_data);
	} else {
		if (event_data->mem_type == QMI_WLFW_MEM_QDSS_V01) {
			if (!ind_msg->file_name_valid)
				strlcpy(event_data->file_name, "qdss_trace_ddr",
					QMI_WLFW_MAX_STR_LEN_V01 + 1);
		} else {
			if (!ind_msg->file_name_valid)
				strlcpy(event_data->file_name, "fw_mem_dump",
					QMI_WLFW_MAX_STR_LEN_V01 + 1);
		}

		cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE,
				       0, event_data);
	}

	/* event_data ownership transferred to the posted event. */
	return;

free_event_data:
	kfree(event_data);
}

/* Indication: FW asks the host to release QDSS trace memory. */
static void cnss_wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi_wlfw,
					     struct sockaddr_qrtr *sq,
					     struct qmi_txn *txn,
					     const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);

	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
			       0, NULL);
}

/* Indication: FW answers a previous get_info request; forward the payload
 * to the registered callback, if any.
 */
static void cnss_wlfw_respond_get_info_ind_cb(struct qmi_handle *qmi_wlfw,
					      struct sockaddr_qrtr *sq,
					      struct qmi_txn *txn,
					      const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
	const struct wlfw_respond_get_info_ind_msg_v01 *ind_msg = data;

	cnss_pr_buf("Received QMI WLFW respond get info indication\n");

	if (!txn) {
cnss_pr_err("Spurious indication\n"); 2985 return; 2986 } 2987 2988 cnss_pr_buf("Extract message with event length: %d, type: %d, is last: %d, seq no: %d\n", 2989 ind_msg->data_len, ind_msg->type, 2990 ind_msg->is_last, ind_msg->seq_no); 2991 2992 if (plat_priv->get_info_cb_ctx && plat_priv->get_info_cb) 2993 plat_priv->get_info_cb(plat_priv->get_info_cb_ctx, 2994 (void *)ind_msg->data, 2995 ind_msg->data_len); 2996 } 2997 2998 static int cnss_ims_wfc_call_twt_cfg_send_sync 2999 (struct cnss_plat_data *plat_priv, 3000 const struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg) 3001 { 3002 struct ims_private_service_wfc_call_twt_config_req_msg_v01 *req; 3003 struct ims_private_service_wfc_call_twt_config_rsp_msg_v01 *resp; 3004 struct qmi_txn txn; 3005 int ret = 0; 3006 3007 if (!test_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state)) { 3008 cnss_pr_err("Drop FW WFC indication as IMS QMI not connected\n"); 3009 return -EINVAL; 3010 } 3011 3012 req = kzalloc(sizeof(*req), GFP_KERNEL); 3013 if (!req) 3014 return -ENOMEM; 3015 3016 resp = kzalloc(sizeof(*resp), GFP_KERNEL); 3017 if (!resp) { 3018 kfree(req); 3019 return -ENOMEM; 3020 } 3021 3022 req->twt_sta_start_valid = ind_msg->twt_sta_start_valid; 3023 req->twt_sta_start = ind_msg->twt_sta_start; 3024 req->twt_sta_int_valid = ind_msg->twt_sta_int_valid; 3025 req->twt_sta_int = ind_msg->twt_sta_int; 3026 req->twt_sta_upo_valid = ind_msg->twt_sta_upo_valid; 3027 req->twt_sta_upo = ind_msg->twt_sta_upo; 3028 req->twt_sta_sp_valid = ind_msg->twt_sta_sp_valid; 3029 req->twt_sta_sp = ind_msg->twt_sta_sp; 3030 req->twt_sta_dl_valid = req->twt_sta_dl_valid; 3031 req->twt_sta_dl = req->twt_sta_dl; 3032 req->twt_sta_config_changed_valid = 3033 ind_msg->twt_sta_config_changed_valid; 3034 req->twt_sta_config_changed = ind_msg->twt_sta_config_changed; 3035 3036 cnss_pr_dbg("CNSS->IMS: TWT_CFG_REQ: state: 0x%lx\n", 3037 plat_priv->driver_state); 3038 3039 ret = 3040 qmi_txn_init(&plat_priv->ims_qmi, &txn, 3041 
ims_private_service_wfc_call_twt_config_rsp_msg_v01_ei, 3042 resp); 3043 if (ret < 0) { 3044 cnss_pr_err("CNSS->IMS: TWT_CFG_REQ: QMI Txn Init Err: %d\n", 3045 ret); 3046 goto out; 3047 } 3048 3049 ret = 3050 qmi_send_request(&plat_priv->ims_qmi, NULL, &txn, 3051 QMI_IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_V01, 3052 IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_MSG_V01_MAX_MSG_LEN, 3053 ims_private_service_wfc_call_twt_config_req_msg_v01_ei, req); 3054 if (ret < 0) { 3055 qmi_txn_cancel(&txn); 3056 cnss_pr_err("CNSS->IMS: TWT_CFG_REQ: QMI Send Err: %d\n", ret); 3057 goto out; 3058 } 3059 3060 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); 3061 if (ret < 0) { 3062 cnss_pr_err("IMS->CNSS: TWT_CFG_RSP: QMI Wait Err: %d\n", ret); 3063 goto out; 3064 } 3065 3066 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 3067 cnss_pr_err("IMS->CNSS: TWT_CFG_RSP: Result: %d Err: %d\n", 3068 resp->resp.result, resp->resp.error); 3069 ret = -resp->resp.result; 3070 goto out; 3071 } 3072 ret = 0; 3073 out: 3074 kfree(req); 3075 kfree(resp); 3076 return ret; 3077 } 3078 3079 int cnss_process_twt_cfg_ind_event(struct cnss_plat_data *plat_priv, 3080 void *data) 3081 { 3082 int ret; 3083 struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg = data; 3084 3085 ret = cnss_ims_wfc_call_twt_cfg_send_sync(plat_priv, ind_msg); 3086 kfree(data); 3087 return ret; 3088 } 3089 3090 static void cnss_wlfw_process_twt_cfg_ind(struct qmi_handle *qmi_wlfw, 3091 struct sockaddr_qrtr *sq, 3092 struct qmi_txn *txn, 3093 const void *data) 3094 { 3095 struct cnss_plat_data *plat_priv = 3096 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw); 3097 const struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg = data; 3098 struct wlfw_wfc_call_twt_config_ind_msg_v01 *event_data; 3099 3100 if (!txn) { 3101 cnss_pr_err("FW->CNSS: TWT_CFG_IND: Spurious indication\n"); 3102 return; 3103 } 3104 3105 if (!ind_msg) { 3106 cnss_pr_err("FW->CNSS: TWT_CFG_IND: Invalid indication\n"); 3107 return; 3108 } 3109 
	cnss_pr_dbg("FW->CNSS: TWT_CFG_IND: %x %llx, %x %x, %x %x, %x %x, %x %x, %x %x\n",
		    ind_msg->twt_sta_start_valid, ind_msg->twt_sta_start,
		    ind_msg->twt_sta_int_valid, ind_msg->twt_sta_int,
		    ind_msg->twt_sta_upo_valid, ind_msg->twt_sta_upo,
		    ind_msg->twt_sta_sp_valid, ind_msg->twt_sta_sp,
		    ind_msg->twt_sta_dl_valid, ind_msg->twt_sta_dl,
		    ind_msg->twt_sta_config_changed_valid,
		    ind_msg->twt_sta_config_changed);

	/* Copy outlives this callback; freed by the event handler. */
	event_data = kmemdup(ind_msg, sizeof(*event_data), GFP_KERNEL);
	if (!event_data)
		return;
	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND, 0,
			       event_data);
}

/* Dispatch table mapping WLFW QMI indication IDs to their handlers. */
static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
		.ei = wlfw_request_mem_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_request_mem_ind_msg_v01),
		.fn = cnss_wlfw_request_mem_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
		.ei = wlfw_fw_mem_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_mem_ready_ind_msg_v01),
		.fn = cnss_wlfw_fw_mem_ready_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_READY_IND_V01,
		.ei = wlfw_fw_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
		.fn = cnss_wlfw_fw_ready_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
		.ei = wlfw_fw_init_done_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_init_done_ind_msg_v01),
		.fn = cnss_wlfw_fw_init_done_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01,
		.ei = wlfw_pin_connect_result_ind_msg_v01_ei,
		.decoded_size =
			sizeof(struct wlfw_pin_connect_result_ind_msg_v01),
		.fn = cnss_wlfw_pin_result_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_CAL_DONE_IND_V01,
		.ei = wlfw_cal_done_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_cal_done_ind_msg_v01),
		.fn = cnss_wlfw_cal_done_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01,
		.ei = wlfw_qdss_trace_req_mem_ind_msg_v01_ei,
		.decoded_size =
			sizeof(struct wlfw_qdss_trace_req_mem_ind_msg_v01),
		.fn = cnss_wlfw_qdss_trace_req_mem_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_QDSS_TRACE_SAVE_IND_V01,
		.ei = wlfw_qdss_trace_save_ind_msg_v01_ei,
		.decoded_size =
			sizeof(struct wlfw_qdss_trace_save_ind_msg_v01),
		.fn = cnss_wlfw_fw_mem_file_save_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_QDSS_TRACE_FREE_IND_V01,
		.ei = wlfw_qdss_trace_free_ind_msg_v01_ei,
		.decoded_size =
			sizeof(struct wlfw_qdss_trace_free_ind_msg_v01),
		.fn = cnss_wlfw_qdss_trace_free_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_RESPOND_GET_INFO_IND_V01,
		.ei = wlfw_respond_get_info_ind_msg_v01_ei,
		.decoded_size =
			sizeof(struct wlfw_respond_get_info_ind_msg_v01),
		.fn = cnss_wlfw_respond_get_info_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_WFC_CALL_TWT_CONFIG_IND_V01,
		.ei = wlfw_wfc_call_twt_config_ind_msg_v01_ei,
		.decoded_size =
			sizeof(struct wlfw_wfc_call_twt_config_ind_msg_v01),
		.fn = cnss_wlfw_process_twt_cfg_ind
	},
	{}
};

/* Connect the WLFW QMI socket to the service endpoint announced in the
 * server-arrive event. Takes ownership of (and frees) the event data.
 */
static int cnss_wlfw_connect_to_server(struct cnss_plat_data *plat_priv,
				       void *data)
{
	struct cnss_qmi_event_server_arrive_data *event_data = data;
	struct qmi_handle *qmi_wlfw = &plat_priv->qmi_wlfw;
	struct sockaddr_qrtr sq = { 0 };
	int ret = 0;

	if (!event_data)
		return -EINVAL;

	sq.sq_family = AF_QIPCRTR;
	sq.sq_node = event_data->node;
	sq.sq_port = event_data->port;

	ret =
kernel_connect(qmi_wlfw->sock, (struct sockaddr *)&sq, 3228 sizeof(sq), 0); 3229 if (ret < 0) { 3230 cnss_pr_err("Failed to connect to QMI WLFW remote service port\n"); 3231 goto out; 3232 } 3233 3234 set_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state); 3235 3236 cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n", 3237 plat_priv->driver_state); 3238 3239 kfree(data); 3240 return 0; 3241 3242 out: 3243 CNSS_QMI_ASSERT(); 3244 kfree(data); 3245 return ret; 3246 } 3247 3248 int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv, void *data) 3249 { 3250 int ret = 0; 3251 3252 if (!plat_priv) 3253 return -ENODEV; 3254 3255 if (test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state)) { 3256 cnss_pr_err("Unexpected WLFW server arrive\n"); 3257 CNSS_ASSERT(0); 3258 return -EINVAL; 3259 } 3260 3261 cnss_ignore_qmi_failure(false); 3262 3263 ret = cnss_wlfw_connect_to_server(plat_priv, data); 3264 if (ret < 0) 3265 goto out; 3266 3267 ret = cnss_wlfw_ind_register_send_sync(plat_priv); 3268 if (ret < 0) { 3269 if (ret == -EALREADY) 3270 ret = 0; 3271 goto out; 3272 } 3273 3274 ret = cnss_wlfw_host_cap_send_sync(plat_priv); 3275 if (ret < 0) 3276 goto out; 3277 3278 return 0; 3279 3280 out: 3281 return ret; 3282 } 3283 3284 int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv) 3285 { 3286 int ret; 3287 3288 if (!plat_priv) 3289 return -ENODEV; 3290 3291 clear_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state); 3292 3293 cnss_pr_info("QMI WLFW service disconnected, state: 0x%lx\n", 3294 plat_priv->driver_state); 3295 3296 cnss_qmi_deinit(plat_priv); 3297 3298 clear_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state); 3299 3300 ret = cnss_qmi_init(plat_priv); 3301 if (ret < 0) { 3302 cnss_pr_err("QMI WLFW service registraton failed, ret\n", ret); 3303 CNSS_ASSERT(0); 3304 } 3305 return 0; 3306 } 3307 3308 static int wlfw_new_server(struct qmi_handle *qmi_wlfw, 3309 struct qmi_service *service) 3310 { 3311 struct cnss_plat_data *plat_priv = 3312 
container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw); 3313 struct cnss_qmi_event_server_arrive_data *event_data; 3314 3315 if (plat_priv && test_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state)) { 3316 cnss_pr_info("WLFW server delete in progress, Ignore server arrive, state: 0x%lx\n", 3317 plat_priv->driver_state); 3318 return 0; 3319 } 3320 3321 cnss_pr_dbg("WLFW server arriving: node %u port %u\n", 3322 service->node, service->port); 3323 3324 event_data = kzalloc(sizeof(*event_data), GFP_KERNEL); 3325 if (!event_data) 3326 return -ENOMEM; 3327 3328 event_data->node = service->node; 3329 event_data->port = service->port; 3330 3331 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_SERVER_ARRIVE, 3332 0, event_data); 3333 3334 return 0; 3335 } 3336 3337 static void wlfw_del_server(struct qmi_handle *qmi_wlfw, 3338 struct qmi_service *service) 3339 { 3340 struct cnss_plat_data *plat_priv = 3341 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw); 3342 3343 if (plat_priv && test_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state)) { 3344 cnss_pr_info("WLFW server delete in progress, Ignore server delete, state: 0x%lx\n", 3345 plat_priv->driver_state); 3346 return; 3347 } 3348 3349 cnss_pr_dbg("WLFW server exiting\n"); 3350 3351 if (plat_priv) { 3352 cnss_ignore_qmi_failure(true); 3353 set_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state); 3354 } 3355 3356 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_SERVER_EXIT, 3357 0, NULL); 3358 } 3359 3360 static struct qmi_ops qmi_wlfw_ops = { 3361 .new_server = wlfw_new_server, 3362 .del_server = wlfw_del_server, 3363 }; 3364 3365 static int cnss_qmi_add_lookup(struct cnss_plat_data *plat_priv) 3366 { 3367 unsigned int id = WLFW_SERVICE_INS_ID_V01; 3368 3369 /* In order to support dual wlan card attach case, 3370 * need separate qmi service instance id for each dev 3371 */ 3372 if (cnss_is_dual_wlan_enabled() && plat_priv->qrtr_node_id != 0 && 3373 plat_priv->wlfw_service_instance_id != 0) 3374 id = 
plat_priv->wlfw_service_instance_id;

	return qmi_add_lookup(&plat_priv->qmi_wlfw, WLFW_SERVICE_ID_V01,
			      WLFW_SERVICE_VERS_V01, id);
}

/* Initialize the WLFW QMI handle and register a service lookup. */
int cnss_qmi_init(struct cnss_plat_data *plat_priv)
{
	int ret = 0;

	cnss_get_qrtr_info(plat_priv);

	ret = qmi_handle_init(&plat_priv->qmi_wlfw,
			      QMI_WLFW_MAX_RECV_BUF_SIZE,
			      &qmi_wlfw_ops, qmi_wlfw_msg_handlers);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize WLFW QMI handle, err: %d\n",
			    ret);
		goto out;
	}

	ret = cnss_qmi_add_lookup(plat_priv);
	if (ret < 0)
		cnss_pr_err("Failed to add WLFW QMI lookup, err: %d\n", ret);

out:
	return ret;
}

/* Release the WLFW QMI handle. */
void cnss_qmi_deinit(struct cnss_plat_data *plat_priv)
{
	qmi_handle_release(&plat_priv->qmi_wlfw);
}

/* Query the WLAN MAC address from the modem DMS service and cache the
 * result in plat_priv->dms. Requires an established DMS connection.
 */
int cnss_qmi_get_dms_mac(struct cnss_plat_data *plat_priv)
{
	struct dms_get_mac_address_req_msg_v01 req;
	struct dms_get_mac_address_resp_msg_v01 resp;
	struct qmi_txn txn;
	int ret = 0;

	if (!test_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state)) {
		cnss_pr_err("DMS QMI connection not established\n");
		return -EINVAL;
	}
	cnss_pr_dbg("Requesting DMS MAC address");

	memset(&resp, 0, sizeof(resp));
	ret = qmi_txn_init(&plat_priv->qmi_dms, &txn,
			   dms_get_mac_address_resp_msg_v01_ei, &resp);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize txn for dms, err: %d\n",
			    ret);
		goto out;
	}
	req.device = DMS_DEVICE_MAC_WLAN_V01;
	ret = qmi_send_request(&plat_priv->qmi_dms, NULL, &txn,
			       QMI_DMS_GET_MAC_ADDRESS_REQ_V01,
			       DMS_GET_MAC_ADDRESS_REQ_MSG_V01_MAX_MSG_LEN,
			       dms_get_mac_address_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Failed to send QMI_DMS_GET_MAC_ADDRESS_REQ_V01, err: %d\n",
			    ret);
		goto out;
	}
	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
cnss_pr_err("Failed to wait for QMI_DMS_GET_MAC_ADDRESS_RESP_V01, err: %d\n",
			    ret);
		goto out;
	}

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("QMI_DMS_GET_MAC_ADDRESS_REQ_V01 failed, result: %d, err: %d\n",
			    resp.resp.result, resp.resp.error);
		/* driver convention: return the negated QMI result code */
		ret = -resp.resp.result;
		goto out;
	}
	if (!resp.mac_address_valid ||
	    resp.mac_address_len != QMI_WLFW_MAC_ADDR_SIZE_V01) {
		cnss_pr_err("Invalid MAC address received from DMS\n");
		plat_priv->dms.mac_valid = false;
		goto out;
	}
	plat_priv->dms.mac_valid = true;
	memcpy(plat_priv->dms.mac, resp.mac_address, QMI_WLFW_MAC_ADDR_SIZE_V01);
	cnss_pr_info("Received DMS MAC: [%pM]\n", plat_priv->dms.mac);
out:
	return ret;
}

/* Connect the DMS QMI socket to the advertised service endpoint. */
static int cnss_dms_connect_to_server(struct cnss_plat_data *plat_priv,
				      unsigned int node, unsigned int port)
{
	struct qmi_handle *qmi_dms = &plat_priv->qmi_dms;
	struct sockaddr_qrtr sq = {0};
	int ret = 0;

	sq.sq_family = AF_QIPCRTR;
	sq.sq_node = node;
	sq.sq_port = port;

	ret = kernel_connect(qmi_dms->sock, (struct sockaddr *)&sq,
			     sizeof(sq), 0);
	if (ret < 0) {
		cnss_pr_err("Failed to connect to QMI DMS remote service Node: %d Port: %d\n",
			    node, port);
		goto out;
	}

	set_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state);
	cnss_pr_info("QMI DMS service connected, state: 0x%lx\n",
		     plat_priv->driver_state);
out:
	return ret;
}

/* QMI notifier: DMS service appeared; connect to it. */
static int dms_new_server(struct qmi_handle *qmi_dms,
			  struct qmi_service *service)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_dms, struct cnss_plat_data, qmi_dms);

	if (!service)
		return -EINVAL;

	return cnss_dms_connect_to_server(plat_priv, service->node,
					  service->port);
}

/* Deferred work: tear down and re-initialize the DMS QMI handle after
 * the server was deleted (cannot be done from QMI notifier context).
 */
static void cnss_dms_server_exit_work(struct work_struct *work)
{
	int ret;
	struct
cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL); 3508 3509 cnss_dms_deinit(plat_priv); 3510 3511 cnss_pr_info("QMI DMS Server Exit"); 3512 clear_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state); 3513 3514 ret = cnss_dms_init(plat_priv); 3515 if (ret < 0) 3516 cnss_pr_err("QMI DMS service registraton failed, ret\n", ret); 3517 } 3518 3519 static DECLARE_WORK(cnss_dms_del_work, cnss_dms_server_exit_work); 3520 3521 static void dms_del_server(struct qmi_handle *qmi_dms, 3522 struct qmi_service *service) 3523 { 3524 struct cnss_plat_data *plat_priv = 3525 container_of(qmi_dms, struct cnss_plat_data, qmi_dms); 3526 3527 if (!plat_priv) 3528 return; 3529 3530 if (test_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state)) { 3531 cnss_pr_info("DMS server delete or cnss remove in progress, Ignore server delete: 0x%lx\n", 3532 plat_priv->driver_state); 3533 return; 3534 } 3535 3536 set_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state); 3537 clear_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state); 3538 cnss_pr_info("QMI DMS service disconnected, state: 0x%lx\n", 3539 plat_priv->driver_state); 3540 schedule_work(&cnss_dms_del_work); 3541 } 3542 3543 void cnss_cancel_dms_work(void) 3544 { 3545 cancel_work_sync(&cnss_dms_del_work); 3546 } 3547 3548 static struct qmi_ops qmi_dms_ops = { 3549 .new_server = dms_new_server, 3550 .del_server = dms_del_server, 3551 }; 3552 3553 int cnss_dms_init(struct cnss_plat_data *plat_priv) 3554 { 3555 int ret = 0; 3556 3557 ret = qmi_handle_init(&plat_priv->qmi_dms, DMS_QMI_MAX_MSG_LEN, 3558 &qmi_dms_ops, NULL); 3559 if (ret < 0) { 3560 cnss_pr_err("Failed to initialize DMS handle, err: %d\n", ret); 3561 goto out; 3562 } 3563 3564 ret = qmi_add_lookup(&plat_priv->qmi_dms, DMS_SERVICE_ID_V01, 3565 DMS_SERVICE_VERS_V01, 0); 3566 if (ret < 0) 3567 cnss_pr_err("Failed to add DMS lookup, err: %d\n", ret); 3568 out: 3569 return ret; 3570 } 3571 3572 void cnss_dms_deinit(struct cnss_plat_data *plat_priv) 3573 { 3574 
set_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state);
	qmi_handle_release(&plat_priv->qmi_dms);
}

/* Ask firmware (via the COEX QMI service) to switch the shared antenna
 * to WLAN. Caches the returned grant in plat_priv->grant when valid.
 */
int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv)
{
	int ret;
	struct coex_antenna_switch_to_wlan_req_msg_v01 *req;
	struct coex_antenna_switch_to_wlan_resp_msg_v01 *resp;
	struct qmi_txn txn;

	if (!plat_priv)
		return -ENODEV;

	cnss_pr_dbg("Sending coex antenna switch_to_wlan\n");

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req->antenna = plat_priv->antenna;

	ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
			   coex_antenna_switch_to_wlan_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Fail to init txn for coex antenna switch_to_wlan resp %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request
		(&plat_priv->coex_qmi, NULL, &txn,
		 QMI_COEX_SWITCH_ANTENNA_TO_WLAN_REQ_V01,
		 COEX_ANTENNA_SWITCH_TO_WLAN_REQ_MSG_V01_MAX_MSG_LEN,
		 coex_antenna_switch_to_wlan_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Fail to send coex antenna switch_to_wlan req %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
	if (ret < 0) {
		cnss_pr_err("Coex antenna switch_to_wlan resp wait failed with ret %d\n",
			    ret);
		goto out;
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Coex antenna switch_to_wlan request rejected, result:%d error:%d\n",
			    resp->resp.result, resp->resp.error);
		/* driver convention: return the negated QMI result code */
		ret = -resp->resp.result;
		goto out;
	}

	if (resp->grant_valid)
		plat_priv->grant = resp->grant;

	cnss_pr_dbg("Coex antenna grant: 0x%llx\n", resp->grant);

	kfree(resp);
	kfree(req);
	return 0;

out:
	kfree(resp);
	kfree(req);
	return ret;
}

/* Ask firmware (via the COEX QMI service) to switch the shared antenna
 * back to the modem.
 */
int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
{
	int ret;
	struct coex_antenna_switch_to_mdm_req_msg_v01 *req;
	struct coex_antenna_switch_to_mdm_resp_msg_v01 *resp;
	struct qmi_txn txn;

	if (!plat_priv)
		return -ENODEV;

	cnss_pr_dbg("Sending coex antenna switch_to_mdm\n");

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req->antenna = plat_priv->antenna;

	ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
			   coex_antenna_switch_to_mdm_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Fail to init txn for coex antenna switch_to_mdm resp %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request
		(&plat_priv->coex_qmi, NULL, &txn,
		 QMI_COEX_SWITCH_ANTENNA_TO_MDM_REQ_V01,
		 COEX_ANTENNA_SWITCH_TO_MDM_REQ_MSG_V01_MAX_MSG_LEN,
		 coex_antenna_switch_to_mdm_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Fail to send coex antenna switch_to_mdm req %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
	if (ret < 0) {
		cnss_pr_err("Coex antenna switch_to_mdm resp wait failed with ret %d\n",
			    ret);
		goto out;
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Coex antenna switch_to_mdm request rejected, result:%d error:%d\n",
			    resp->resp.result, resp->resp.error);
		/* driver convention: return the negated QMI result code */
		ret = -resp->resp.result;
		goto out;
	}

	kfree(resp);
	kfree(req);
	return 0;

out:
	kfree(resp);
	kfree(req);
	return ret;
}

/* Send the PCSS recovery (subsystem restart level) setting to firmware.
 * No-op (returns 0) until firmware is ready.
 */
int cnss_send_subsys_restart_level_msg(struct cnss_plat_data *plat_priv)
{
	int ret;
	struct wlfw_subsys_restart_level_req_msg_v01 req;
	struct
wlfw_subsys_restart_level_resp_msg_v01 resp;
	u8 pcss_enabled;

	if (!plat_priv)
		return -ENODEV;

	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
		cnss_pr_dbg("Can't send pcss cmd before fw ready\n");
		return 0;
	}

	pcss_enabled = plat_priv->recovery_pcss_enabled;
	cnss_pr_dbg("Sending pcss recovery status: %d\n", pcss_enabled);

	req.restart_level_type_valid = 1;
	req.restart_level_type = pcss_enabled;

	ret = qmi_send_wait(&plat_priv->qmi_wlfw, &req, &resp,
			    wlfw_subsys_restart_level_req_msg_v01_ei,
			    wlfw_subsys_restart_level_resp_msg_v01_ei,
			    QMI_WLFW_SUBSYS_RESTART_LEVEL_REQ_V01,
			    WLFW_SUBSYS_RESTART_LEVEL_REQ_MSG_V01_MAX_MSG_LEN,
			    QMI_WLFW_TIMEOUT_JF);

	if (ret < 0)
		cnss_pr_err("pcss recovery setting failed with ret %d\n", ret);
	return ret;
}

/* QMI notifier: COEX service appeared; connect the socket to it. */
static int coex_new_server(struct qmi_handle *qmi,
			   struct qmi_service *service)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi, struct cnss_plat_data, coex_qmi);
	struct sockaddr_qrtr sq = { 0 };
	int ret = 0;

	cnss_pr_dbg("COEX server arrive: node %u port %u\n",
		    service->node, service->port);

	sq.sq_family = AF_QIPCRTR;
	sq.sq_node = service->node;
	sq.sq_port = service->port;
	ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
	if (ret < 0) {
		cnss_pr_err("Fail to connect to remote service port\n");
		return ret;
	}

	set_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
	cnss_pr_dbg("COEX Server Connected: 0x%lx\n",
		    plat_priv->driver_state);
	return 0;
}

/* QMI notifier: COEX service went away. */
static void coex_del_server(struct qmi_handle *qmi,
			    struct qmi_service *service)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi, struct cnss_plat_data, coex_qmi);

	cnss_pr_dbg("COEX server exit\n");

	clear_bit(CNSS_COEX_CONNECTED,
&plat_priv->driver_state);
}

static struct qmi_ops coex_qmi_ops = {
	.new_server = coex_new_server,
	.del_server = coex_del_server,
};

/* Initialize the COEX QMI handle and register a service lookup. */
int cnss_register_coex_service(struct cnss_plat_data *plat_priv)
{
	int ret;

	ret = qmi_handle_init(&plat_priv->coex_qmi,
			      COEX_SERVICE_MAX_MSG_LEN,
			      &coex_qmi_ops, NULL);
	if (ret < 0)
		return ret;

	ret = qmi_add_lookup(&plat_priv->coex_qmi, COEX_SERVICE_ID_V01,
			     COEX_SERVICE_VERS_V01, 0);
	return ret;
}

/* Release the COEX QMI handle. */
void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv)
{
	qmi_handle_release(&plat_priv->coex_qmi);
}

/* IMS Service */
/* Asynchronously subscribe to IMS WFC call status indications; the
 * response is delivered later to ims_subscribe_for_indication_resp_cb().
 */
int ims_subscribe_for_indication_send_async(struct cnss_plat_data *plat_priv)
{
	int ret;
	struct ims_private_service_subscribe_for_indications_req_msg_v01 *req;
	struct qmi_txn *txn;

	if (!plat_priv)
		return -ENODEV;

	cnss_pr_dbg("Sending ASYNC ims subscribe for indication\n");

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->wfc_call_status_valid = 1;
	req->wfc_call_status = 1;

	/* txn lives in plat_priv so it survives this function for the
	 * async response
	 */
	txn = &plat_priv->txn;
	ret = qmi_txn_init(&plat_priv->ims_qmi, txn, NULL, NULL);
	if (ret < 0) {
		cnss_pr_err("Fail to init txn for ims subscribe for indication resp %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request
		(&plat_priv->ims_qmi, NULL, txn,
		 QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
		 IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_MSG_V01_MAX_MSG_LEN,
		 ims_private_service_subscribe_for_indications_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(txn);
		cnss_pr_err("Fail to send ims subscribe for indication req %d\n",
			    ret);
		goto out;
	}

	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}

/* QMI response callback for the async IMS subscribe request above. */
static void
ims_subscribe_for_indication_resp_cb(struct qmi_handle *qmi,
				     struct sockaddr_qrtr *sq,
				     struct qmi_txn *txn,
				     const void *data)
{
	const
	struct ims_private_service_subscribe_for_indications_rsp_msg_v01 *resp =
		data;

	cnss_pr_dbg("Received IMS subscribe indication response\n");

	if (!txn) {
		cnss_pr_err("spurious response\n");
		return;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("IMS subscribe for indication request rejected, result:%d error:%d\n",
			    resp->resp.result, resp->resp.error);
		txn->result = -resp->resp.result;
	}
}

/* Driver-event handler: forward a WFC call status indication to
 * firmware. Takes ownership of @data and frees it.
 */
int cnss_process_wfc_call_ind_event(struct cnss_plat_data *plat_priv,
				    void *data)
{
	int ret;
	struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;

	ret = cnss_wlfw_wfc_call_status_send_sync(plat_priv, ind_msg);
	kfree(data);
	return ret;
}

/* QMI indication callback: duplicate the WFC call status indication and
 * post it to the driver event queue for handling in process context.
 */
static void
cnss_ims_process_wfc_call_ind_cb(struct qmi_handle *ims_qmi,
				 struct sockaddr_qrtr *sq,
				 struct qmi_txn *txn, const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(ims_qmi, struct cnss_plat_data, ims_qmi);
	const
	struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;
	struct ims_private_service_wfc_call_status_ind_msg_v01 *event_data;

	if (!txn) {
		cnss_pr_err("IMS->CNSS: WFC_CALL_IND: Spurious indication\n");
		return;
	}

	if (!ind_msg) {
		cnss_pr_err("IMS->CNSS: WFC_CALL_IND: Invalid indication\n");
		return;
	}
	cnss_pr_dbg("IMS->CNSS: WFC_CALL_IND: %x, %x %x, %x %x, %x %llx, %x %x, %x %x\n",
		    ind_msg->wfc_call_active, ind_msg->all_wfc_calls_held_valid,
		    ind_msg->all_wfc_calls_held,
		    ind_msg->is_wfc_emergency_valid, ind_msg->is_wfc_emergency,
		    ind_msg->twt_ims_start_valid, ind_msg->twt_ims_start,
		    ind_msg->twt_ims_int_valid, ind_msg->twt_ims_int,
ind_msg->media_quality_valid, ind_msg->media_quality);

	event_data = kmemdup(ind_msg, sizeof(*event_data), GFP_KERNEL);
	if (!event_data)
		return;
	/* event_data ownership passes to the driver event handler */
	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND,
			       0, event_data);
}

/* Message table for the IMS private service QMI handle. */
static struct qmi_msg_handler qmi_ims_msg_handlers[] = {
	{
		.type = QMI_RESPONSE,
		.msg_id =
		QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
		.ei =
		ims_private_service_subscribe_for_indications_rsp_msg_v01_ei,
		.decoded_size = sizeof(struct
		ims_private_service_subscribe_for_indications_rsp_msg_v01),
		.fn = ims_subscribe_for_indication_resp_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_IMS_PRIVATE_SERVICE_WFC_CALL_STATUS_IND_V01,
		.ei = ims_private_service_wfc_call_status_ind_msg_v01_ei,
		.decoded_size =
		sizeof(struct ims_private_service_wfc_call_status_ind_msg_v01),
		.fn = cnss_ims_process_wfc_call_ind_cb
	},
	{}
};

/* QMI notifier: IMS service appeared; connect and subscribe for WFC
 * call status indications.
 */
static int ims_new_server(struct qmi_handle *qmi,
			  struct qmi_service *service)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi, struct cnss_plat_data, ims_qmi);
	struct sockaddr_qrtr sq = { 0 };
	int ret = 0;

	cnss_pr_dbg("IMS server arrive: node %u port %u\n",
		    service->node, service->port);

	sq.sq_family = AF_QIPCRTR;
	sq.sq_node = service->node;
	sq.sq_port = service->port;
	ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
	if (ret < 0) {
		cnss_pr_err("Fail to connect to remote service port\n");
		return ret;
	}

	set_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
	cnss_pr_dbg("IMS Server Connected: 0x%lx\n",
		    plat_priv->driver_state);

	ret = ims_subscribe_for_indication_send_async(plat_priv);
	return ret;
}

/* QMI notifier: IMS service went away. */
static void ims_del_server(struct qmi_handle *qmi,
			   struct qmi_service *service)
{
	struct
cnss_plat_data *plat_priv =
		container_of(qmi, struct cnss_plat_data, ims_qmi);

	cnss_pr_dbg("IMS server exit\n");

	clear_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
}

static struct qmi_ops ims_qmi_ops = {
	.new_server = ims_new_server,
	.del_server = ims_del_server,
};

/* Initialize the IMS private service QMI handle and register lookup. */
int cnss_register_ims_service(struct cnss_plat_data *plat_priv)
{
	int ret;

	ret = qmi_handle_init(&plat_priv->ims_qmi,
			      IMSPRIVATE_SERVICE_MAX_MSG_LEN,
			      &ims_qmi_ops, qmi_ims_msg_handlers);
	if (ret < 0)
		return ret;

	ret = qmi_add_lookup(&plat_priv->ims_qmi, IMSPRIVATE_SERVICE_ID_V01,
			     IMSPRIVATE_SERVICE_VERS_V01, 0);
	return ret;
}

/* Release the IMS private service QMI handle. */
void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv)
{
	qmi_handle_release(&plat_priv->ims_qmi);
}