// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/version.h>
#include <linux/sched.h>
#include "main.h"
#include "bus.h"
#include "debug.h"
#include "pci.h"
#include "pci_platform.h"
#include "reg.h"

/* Values tracked for the PCIe link state */
#define PCI_LINK_UP 1
#define PCI_LINK_DOWN 0

/* Direction selectors for PCI config-space save/restore helpers */
#define SAVE_PCI_CONFIG_SPACE 1
#define RESTORE_PCI_CONFIG_SPACE 0

/* Device registers are mapped through BAR 0 */
#define PCI_BAR_NUM 0
/* An all-ones readback conventionally indicates a dead/under-reset PCIe link */
#define PCI_INVALID_READ(val) ((val) == U32_MAX)

/* DMA addressing capabilities selected per chip generation */
#define PCI_DMA_MASK_32_BIT DMA_BIT_MASK(32)
#define PCI_DMA_MASK_36_BIT DMA_BIT_MASK(36)
#define PCI_DMA_MASK_64_BIT DMA_BIT_MASK(64)

/* Device-tree node compatible string and MSI set name used for MHI */
#define MHI_NODE_NAME "qcom,mhi"
#define MHI_MSI_NAME "MHI"

/*
 * Per-chip firmware directory prefixes and default firmware image names.
 * The concrete file requested at runtime is assembled elsewhere
 * (see cnss_pci_update_fw_name).
 */
#define QCA6390_PATH_PREFIX "qca6390/"
#define QCA6490_PATH_PREFIX "qca6490/"
#define QCN7605_PATH_PREFIX "qcn7605/"
#define KIWI_PATH_PREFIX "kiwi/"
#define MANGO_PATH_PREFIX "mango/"
#define PEACH_PATH_PREFIX "peach/"
#define DEFAULT_PHY_M3_FILE_NAME "m3.bin"
#define DEFAULT_AUX_FILE_NAME "aux_ucode.elf"
#define DEFAULT_PHY_UCODE_FILE_NAME "phy_ucode.elf"
#define TME_PATCH_FILE_NAME_1_0 "tmel_peach_10.elf"
#define TME_PATCH_FILE_NAME_2_0 "tmel_peach_20.elf"
#define PHY_UCODE_V2_FILE_NAME "phy_ucode20.elf"
#define DEFAULT_FW_FILE_NAME "amss.bin"
#define FW_V2_FILE_NAME "amss20.bin"
/* Low nibble of the hardware version word carries the major version */
#define DEVICE_MAJOR_VERSION_MASK 0xF

/* MSI set name for the wake interrupt */
#define WAKE_MSI_NAME "WAKE"

/* Timeouts in milliseconds */
#define DEV_RDDM_TIMEOUT 5000
#define WAKE_EVENT_TIMEOUT 5000

/* Non-zero when built for emulation platforms (pre-silicon) */
#ifdef CONFIG_CNSS_EMULATION
#define EMULATION_HW 1
#else
#define EMULATION_HW 0
#endif
/* Default ramdump buffer size and a smaller 256 KiB chunk size */
#define RAMDUMP_SIZE_DEFAULT 0x420000
#define CNSS_256KB_SIZE 0x40000
/* Magic pattern associated with device RDDM (RAM-dump mode) detection */
#define DEVICE_RDDM_COOKIE 0xCAFECACE

/* Set once this driver has registered with the PCI core */
static bool cnss_driver_registered;

static DEFINE_SPINLOCK(pci_link_down_lock);
static DEFINE_SPINLOCK(pci_reg_window_lock);
static DEFINE_SPINLOCK(time_sync_lock);

/* MHI timeouts come from runtime-tunable platform control parameters */
#define MHI_TIMEOUT_OVERWRITE_MS (plat_priv->ctrl_params.mhi_timeout)
#define MHI_M2_TIMEOUT_MS (plat_priv->ctrl_params.mhi_m2_timeout)

/* usleep_range() bounds applied after WLAON power-control shutdown */
#define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US 1000
#define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US 2000

/* Link-recovery retry policy while collecting RDDM */
#define RDDM_LINK_RECOVERY_RETRY 20
#define RDDM_LINK_RECOVERY_RETRY_DELAY_MS 20

/* Polling bounds (us) while waiting for device force-wake acknowledge */
#define FORCE_WAKE_DELAY_MIN_US 4000
#define FORCE_WAKE_DELAY_MAX_US 6000
#define FORCE_WAKE_DELAY_TIMEOUT_US 60000

#define REG_RETRY_MAX_TIMES 3

/* Retry policy for MHI suspend attempts */
#define MHI_SUSPEND_RETRY_MAX_TIMES 3
#define MHI_SUSPEND_RETRY_DELAY_US 5000

#define BOOT_DEBUG_TIMEOUT_MS 7000

/*
 * Firmware hang-event data blob: fixed length, located just below the end
 * of a per-chip-family region (HST/HSP/Genoa offsets differ).
 */
#define HANG_DATA_LENGTH 384
#define HST_HANG_DATA_OFFSET ((3 * 1024 * 1024) - HANG_DATA_LENGTH)
#define HSP_HANG_DATA_OFFSET ((2 * 1024 * 1024) - HANG_DATA_LENGTH)
#define GNO_HANG_DATA_OFFSET (0x7d000 - HANG_DATA_LENGTH)

/* AFC memory layout: two 4 KiB slots plus auth-status codes */
#define AFC_SLOT_SIZE 0x1000
#define AFC_MAX_SLOT 2
#define AFC_MEM_SIZE (AFC_SLOT_SIZE * AFC_MAX_SLOT)
#define AFC_AUTH_STATUS_OFFSET 1
#define AFC_AUTH_SUCCESS 1
#define AFC_AUTH_ERROR 0

/*
 * Default MHI channel set: LOOPBACK (0/1), DIAG (4/5) and IPCR (20/21)
 * host->device / device->host pairs, plus optional ADSP satellite
 * channels when CONFIG_MHI_SATELLITE is enabled.
 */
static const struct mhi_channel_config cnss_mhi_channels[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 4,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 5,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 20,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 21,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		/* Inbound IPCR buffers are queued by the MHI core itself */
		.auto_queue = true,
	},
	/* All MHI satellite config to be at the end of data struct */
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	{
		.num = 50,
		.name = "ADSP_0",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 51,
		.name = "ADSP_1",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 70,
		.name = "ADSP_2",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 71,
		.name = "ADSP_3",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
#endif
};

/* Same as cnss_mhi_channels but without the DIAG (4/5) channel pair */
static const struct mhi_channel_config cnss_mhi_channels_no_diag[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 20,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 21,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
	/* All MHI satellite config to be at the end of data struct */
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	{
		.num = 50,
		.name = "ADSP_0",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 51,
		.name = "ADSP_1",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 70,
		.name = "ADSP_2",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 71,
		.name = "ADSP_3",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
#endif
};

/*
 * Channel set for Genoa (QCN7605-class) parts: IPCR uses channels 16/17
 * instead of 20/21 and there are no satellite channels.
 */
static const struct mhi_channel_config cnss_mhi_channels_genoa[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 4,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 5,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 16,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 17,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
};

/*
 * MHI event rings: [0] control ring on MSI 1, [1] data ring on MSI 2,
 * then optional BW-scale and satellite rings. Non-const on >= 5.12
 * kernels (NOTE(review): presumably because the MHI core writes into it
 * there — confirm against the mhi_register_controller API).
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
static struct mhi_event_config cnss_mhi_events[] = {
#else
static const struct mhi_event_config cnss_mhi_events[] = {
#endif
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,
		.priority = 0,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
	{
		.num_elements = 256,
		.irq_moderation_ms = 0,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		/* .data_type intentionally left at its zero/default value */
		.priority = 1,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_BW_SCALE,
		.priority = 2,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
#endif
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	{
		.num_elements = 256,
		.irq_moderation_ms = 0,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_DATA,
		.priority = 1,
		.hardware_event = false,
		.client_managed = true,
		.offload_channel = true,
	},
#endif
};

/*
 * Number of trailing satellite entries in the channel/event tables above;
 * subtracted out by configs that exclude satellite support.
 */
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
#define CNSS_MHI_SATELLITE_CH_CFG_COUNT 4
#define CNSS_MHI_SATELLITE_EVT_COUNT 1
#else
#define CNSS_MHI_SATELLITE_CH_CFG_COUNT 0
#define CNSS_MHI_SATELLITE_EVT_COUNT 0
#endif

/* Controller config using the no-DIAG channel table */
static const struct mhi_controller_config cnss_mhi_config_no_diag = {
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	.max_channels = 72,
#else
	.max_channels = 32,
#endif
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels_no_diag),
	.ch_cfg = cnss_mhi_channels_no_diag,
	.num_events = ARRAY_SIZE(cnss_mhi_events),
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};

/* Default controller config: full channel table incl. DIAG */
static const struct mhi_controller_config cnss_mhi_config_default = {
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	.max_channels = 72,
#else
	.max_channels = 32,
#endif
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels),
	.ch_cfg = cnss_mhi_channels,
	.num_events = ARRAY_SIZE(cnss_mhi_events),
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};

/* Genoa config: no satellite event ring; legacy BHIe offset pre-6.2 */
static const struct mhi_controller_config cnss_mhi_config_genoa = {
	.max_channels = 32,
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels_genoa),
	.ch_cfg = cnss_mhi_channels_genoa,
	.num_events = ARRAY_SIZE(cnss_mhi_events) -
		CNSS_MHI_SATELLITE_EVT_COUNT,
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
	.bhie_offset = 0x0324,
#endif
};

/*
 * Default channel table with the trailing satellite entries excluded —
 * relies on satellite configs being last in cnss_mhi_channels/events.
 */
static const struct mhi_controller_config cnss_mhi_config_no_satellite = {
	.max_channels = 32,
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels) -
			CNSS_MHI_SATELLITE_CH_CFG_COUNT,
	.ch_cfg = cnss_mhi_channels,
	.num_events = ARRAY_SIZE(cnss_mhi_events) -
		CNSS_MHI_SATELLITE_EVT_COUNT,
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};

/* Name/offset pairs for CE source-ring register dumps (NULL-terminated) */
static struct cnss_pci_reg ce_src[] = {
	{ "SRC_RING_BASE_LSB", CE_SRC_RING_BASE_LSB_OFFSET },
	{ "SRC_RING_BASE_MSB", CE_SRC_RING_BASE_MSB_OFFSET },
	{ "SRC_RING_ID", CE_SRC_RING_ID_OFFSET },
	{ "SRC_RING_MISC", CE_SRC_RING_MISC_OFFSET },
	{ "SRC_CTRL", CE_SRC_CTRL_OFFSET },
	{ "SRC_R0_CE_CH_SRC_IS", CE_SRC_R0_CE_CH_SRC_IS_OFFSET },
	{ "SRC_RING_HP", CE_SRC_RING_HP_OFFSET },
	{ "SRC_RING_TP", CE_SRC_RING_TP_OFFSET },
	{ NULL },
};

/* Name/offset pairs for CE destination/status-ring register dumps */
static struct cnss_pci_reg ce_dst[] = {
	{ "DEST_RING_BASE_LSB", CE_DEST_RING_BASE_LSB_OFFSET },
	{ "DEST_RING_BASE_MSB", CE_DEST_RING_BASE_MSB_OFFSET },
	{ "DEST_RING_ID", CE_DEST_RING_ID_OFFSET },
	{ "DEST_RING_MISC", CE_DEST_RING_MISC_OFFSET },
	{ "DEST_CTRL", CE_DEST_CTRL_OFFSET },
	{ "CE_CH_DST_IS", CE_CH_DST_IS_OFFSET },
	{ "CE_CH_DEST_CTRL2", CE_CH_DEST_CTRL2_OFFSET },
	{ "DEST_RING_HP", CE_DEST_RING_HP_OFFSET },
	{ "DEST_RING_TP", CE_DEST_RING_TP_OFFSET },
	{ "STATUS_RING_BASE_LSB", CE_STATUS_RING_BASE_LSB_OFFSET },
	{ "STATUS_RING_BASE_MSB", CE_STATUS_RING_BASE_MSB_OFFSET },
	{ "STATUS_RING_ID", CE_STATUS_RING_ID_OFFSET },
	{ "STATUS_RING_MISC", CE_STATUS_RING_MISC_OFFSET },
	{ "STATUS_RING_HP", CE_STATUS_RING_HP_OFFSET },
	{ "STATUS_RING_TP", CE_STATUS_RING_TP_OFFSET },
	{ NULL },
};

/* CE common (non-per-ring) registers */
static struct cnss_pci_reg ce_cmn[] = {
	{ "GXI_ERR_INTS", CE_COMMON_GXI_ERR_INTS },
	{ "GXI_ERR_STATS", CE_COMMON_GXI_ERR_STATS },
	{ "GXI_WDOG_STATUS", CE_COMMON_GXI_WDOG_STATUS },
	{ "TARGET_IE_0", CE_COMMON_TARGET_IE_0 },
	{ "TARGET_IE_1", CE_COMMON_TARGET_IE_1 },
	{ NULL },
};

/* QDSS CSR registers dumped for trace debugging */
static struct cnss_pci_reg qdss_csr[] = {
	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
	{ NULL },
};

/* SoC PCIe scratch registers */
static struct cnss_pci_reg pci_scratch[] = {
	{ "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG },
	{ "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG },
	{ "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG },
	{ NULL },
};

/* PCIe BHIe debug registers */
static struct cnss_pci_reg pci_bhi_debug[] = {
	{ "PCIE_BHIE_DEBUG_0", PCIE_PCIE_BHIE_DEBUG_0 },
	{ "PCIE_BHIE_DEBUG_1", PCIE_PCIE_BHIE_DEBUG_1 },
	{ "PCIE_BHIE_DEBUG_2", PCIE_PCIE_BHIE_DEBUG_2 },
	{ "PCIE_BHIE_DEBUG_3", PCIE_PCIE_BHIE_DEBUG_3 },
	{ "PCIE_BHIE_DEBUG_4", PCIE_PCIE_BHIE_DEBUG_4 },
	{ "PCIE_BHIE_DEBUG_5", PCIE_PCIE_BHIE_DEBUG_5 },
	{ "PCIE_BHIE_DEBUG_6", PCIE_PCIE_BHIE_DEBUG_6 },
	{ "PCIE_BHIE_DEBUG_7", PCIE_PCIE_BHIE_DEBUG_7 },
	{ "PCIE_BHIE_DEBUG_8", PCIE_PCIE_BHIE_DEBUG_8 },
	{ "PCIE_BHIE_DEBUG_9", PCIE_PCIE_BHIE_DEBUG_9 },
	{ "PCIE_BHIE_DEBUG_10", PCIE_PCIE_BHIE_DEBUG_10 },
	{ NULL },
};

/* First field of the structure is the device bit mask. Use
 * enum cnss_pci_reg_mask as reference for the value.
*/
/*
 * WCSS register access/dump sequence for QCA6390. Entry layout appears to
 * be {device mask, write flag, register offset, value-if-write} — confirm
 * against struct cnss_misc_reg. Order matters: writes select debug/test
 * bus sources before the subsequent reads sample them.
 */
static struct cnss_misc_reg wcss_reg_access_seq[] = {
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
	{1, 1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
	{1, 0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
	/* L2VIC interrupt status / enable / pending banks 0-6 */
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
	/* Select test-bus source 0x30040, then sample VALUE three times */
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	/* Select test-bus source 0x30105, then sample VALUE seven times */
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
};

/* PCIe/PARF and related TLMM GPIO registers dumped for link debugging */
static struct cnss_misc_reg pcie_reg_access_seq[] = {
	{1, 0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
	{1, 0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
	{1, 0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
	{1, 0, QCA6390_PCIE_PCIE_BHI_EXECENV_REG, 0},
};

/*
 * WLAON (WLAN always-on) register dump sequence. The leading device mask
 * (2 vs 3) gates which chip families each entry applies to — see
 * enum cnss_pci_reg_mask.
 */
static struct cnss_misc_reg wlaon_reg_access_seq[] = {
	{3, 0, WLAON_SOC_POWER_CTRL, 0},
	{3, 0, WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
	{3, 0, WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
	{3, 0, WLAON_SW_COLD_RESET, 0},
	{3, 0, WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
	{3, 0, WLAON_GDSC_DELAY_SETTING, 0},
	{3, 0, WLAON_GDSC_DELAY_SETTING2, 0},
	{3, 0, WLAON_WL_PWR_STATUS_REG, 0},
	{3, 0, WLAON_WL_AON_DBG_CFG_REG, 0},
	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP0_REG, 0},
	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP1_REG, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL0, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL2, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL3, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL4, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL7, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL10, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL11, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL12, 0},
	{2, 0, WLAON_WL_AON_APM_OVERRIDE_REG, 0},
	{2, 0, WLAON_WL_AON_CXPC_REG, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS0, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS1, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS2, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS3, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS4, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS5, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS6, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL1, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL6, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL7, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL3, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL4, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL5, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL8, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL2, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL9, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL1, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL2, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL3, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL4, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL5, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL6, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL7, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL8, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL9, 0},
	{3, 0, WLAON_WCSSAON_CONFIG_REG, 0},
	{3, 0, WLAON_WLAN_OEM_DEBUG_REG, 0},
	{3, 0, WLAON_WLAN_RAM_DUMP_REG, 0},
	{3, 0, WLAON_QDSS_WCSS_REG, 0},
	{3, 0, WLAON_QDSS_WCSS_ACK, 0},
	{3, 0, WLAON_WL_CLK_CNTL_KDF_REG, 0},
	{3, 0, WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
	{3, 0, WLAON_QFPROM_PWR_CTRL_REG, 0},
	{3, 0, WLAON_DLY_CONFIG, 0},
	{3, 0, WLAON_WLAON_Q6_IRQ_REG, 0},
	{3, 0, WLAON_PCIE_INTF_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
	{3, 0, WLAON_Q6_COOKIE_BIT, 0},
	{3, 0, WLAON_WARM_SW_ENTRY, 0},
	{3, 0, WLAON_RESET_DBG_SW_ENTRY, 0},
	{3, 0, WLAON_WL_PMUNOC_CFG_REG, 0},
	{3, 0, WLAON_RESET_CAUSE_CFG_REG, 0},
	{3, 0, WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
	{3, 0, WLAON_DEBUG, 0},
	{3, 0, WLAON_SOC_PARAMETERS, 0},
	{3, 0, WLAON_WLPM_SIGNAL, 0},
	{3, 0, WLAON_SOC_RESET_CAUSE_REG, 0},
	{3, 0, WLAON_WAKEUP_PCIE_SOC_REG, 0},
	{3, 0, WLAON_PBL_STACK_CANARY, 0},
	{3, 0, WLAON_MEM_TOT_NUM_GRP_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
	{3, 0, WLAON_MEM_CNT_SEL_REG, 0},
	{3, 0, WLAON_MEM_NO_EXTBHS_REG, 0},
	{3, 0, WLAON_MEM_DEBUG_REG, 0},
	{3, 0, WLAON_MEM_DEBUG_BUS_REG, 0},
	{3, 0, WLAON_MEM_REDUN_CFG_REG, 0},
	{3, 0, WLAON_WL_AON_SPARE2, 0},
	{3, 0, WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
	{3, 0, WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
	{3, 0, WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
	{3, 0, WLAON_WLPM_CHICKEN_BITS, 0},
	{3, 0, WLAON_PCIE_PHY_PWR_REG, 0},
	{3, 0, WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
	{3, 0, WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
	{3, 0, WLAON_POWERCTRL_PMU_REG, 0},
	{3, 0, WLAON_POWERCTRL_MEM_REG, 0},
	{3, 0, WLAON_PCIE_PWR_CTRL_REG, 0},
	{3, 0, WLAON_SOC_PWR_PROFILE_REG, 0},
	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
	{3, 0, WLAON_MEM_SVS_CFG_REG, 0},
	{3, 0, WLAON_CMN_AON_MISC_REG, 0},
	{3, 0, WLAON_INTR_STATUS, 0},
	{2, 0, WLAON_INTR_ENABLE, 0},
	{2, 0, WLAON_NOC_DBG_BUS_SEL_REG, 0},
	{2, 0, WLAON_NOC_DBG_BUS_REG, 0},
	{2, 0, WLAON_WL_CTRL_MISC_REG, 0},
	{2, 0, WLAON_DBG_STATUS0, 0},
	{2, 0, WLAON_DBG_STATUS1, 0},
	{2, 0, WLAON_TIMERSYNC_OFFSET_L, 0},
	{2, 0, WLAON_TIMERSYNC_OFFSET_H, 0},
	{2, 0, WLAON_PMU_LDO_SETTLE_REG, 0},
};

/*
 * System PM register dump sequence for QCA6390; the SR status register is
 * deliberately sampled repeatedly (it changes over time).
 */
static struct cnss_misc_reg syspm_reg_access_seq[] = {
	{1, 0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
	{1, 0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
};

/* State used to rate-limit/de-duplicate repeated log prints */
static struct cnss_print_optimize print_optimize;

#define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
#define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
#define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
#define SYSPM_REG_SIZE ARRAY_SIZE(syspm_reg_access_seq)

/* Forward declarations for helpers defined later in this file */
static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv);
static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev);
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev);
static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
				       enum cnss_bus_event_type type,
				       void *data);

/*
 * Thin wrappers around optional MHI-bus-misc APIs; stubbed out below when
 * CONFIG_MHI_BUS_MISC is disabled.
 */
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
static void
cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv) 940 { 941 mhi_debug_reg_dump(pci_priv->mhi_ctrl); 942 } 943 944 static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv) 945 { 946 mhi_dump_sfr(pci_priv->mhi_ctrl); 947 } 948 949 static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv, 950 u32 cookie) 951 { 952 return mhi_scan_rddm_cookie(pci_priv->mhi_ctrl, cookie); 953 } 954 955 static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv, 956 bool notify_clients) 957 { 958 return mhi_pm_fast_suspend(pci_priv->mhi_ctrl, notify_clients); 959 } 960 961 static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv, 962 bool notify_clients) 963 { 964 return mhi_pm_fast_resume(pci_priv->mhi_ctrl, notify_clients); 965 } 966 967 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv, 968 u32 timeout) 969 { 970 return mhi_set_m2_timeout_ms(pci_priv->mhi_ctrl, timeout); 971 } 972 973 static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv, 974 int timeout_us, bool in_panic) 975 { 976 return mhi_device_get_sync_atomic(pci_priv->mhi_ctrl->mhi_dev, 977 timeout_us, in_panic); 978 } 979 980 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT 981 static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv) 982 { 983 return mhi_host_notify_db_disable_trace(pci_priv->mhi_ctrl); 984 } 985 #endif 986 987 static void 988 cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv, 989 int (*cb)(struct mhi_controller *mhi_ctrl, 990 struct mhi_link_info *link_info)) 991 { 992 mhi_controller_set_bw_scale_cb(pci_priv->mhi_ctrl, cb); 993 } 994 995 static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv) 996 { 997 return mhi_force_reset(pci_priv->mhi_ctrl); 998 } 999 1000 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv, 1001 phys_addr_t base) 1002 { 1003 return mhi_controller_set_base(pci_priv->mhi_ctrl, base); 1004 } 1005 #else 1006 static void cnss_mhi_debug_reg_dump(struct cnss_pci_data 
*pci_priv)
{
}

/* Stub: MHI misc support compiled out. */
static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
{
}

/* Stub: no RDDM cookie support without MHI misc. */
static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
				      u32 cookie)
{
	return false;
}

/* Stub: fast suspend unsupported without MHI misc. */
static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
				    bool notify_clients)
{
	return -EOPNOTSUPP;
}

/* Stub: fast resume unsupported without MHI misc. */
static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
				   bool notify_clients)
{
	return -EOPNOTSUPP;
}

/* Stub: M2 timeout configuration is a no-op. */
static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
				       u32 timeout)
{
}

/* Stub: atomic device wake unsupported. */
static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
					   int timeout_us, bool in_panic)
{
	return -EOPNOTSUPP;
}

#ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
/* Stub: doorbell trace-disable notification unsupported. */
static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}
#endif

/* Stub: bandwidth-scale callback registration is a no-op. */
static void
cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
				    int (*cb)(struct mhi_controller *mhi_ctrl,
					      struct mhi_link_info *link_info))
{
}

/* Stub: force reset unsupported. */
static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}

/* Stub: controller base programming is a no-op. */
void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
				  phys_addr_t base)
{
}
#endif /* CONFIG_MHI_BUS_MISC */

#ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
#define CNSS_MHI_WAKE_TIMEOUT 500000

/*
 * Record a timestamp (sched_clock) for one phase of SMMU fault handling;
 * out-of-range ids are silently ignored.
 */
static void cnss_record_smmu_fault_timestamp(struct cnss_pci_data *pci_priv,
					     enum cnss_smmu_fault_time id)
{
	if (id >= SMMU_CB_MAX)
		return;

	pci_priv->smmu_fault_timestamp[id] = sched_clock();
}

/*
 * IOMMU fault-handler IRQ callback: wake the device to M0 and ask FW to
 * stop trace collection, timestamping entry/doorbell/exit phases.
 */
static void cnss_pci_smmu_fault_handler_irq(struct iommu_domain *domain,
					    void *handler_token)
{
	struct cnss_pci_data *pci_priv = handler_token;
	int ret = 0;

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_ENTRY);
	/* Bring MHI to M0 before ringing the doorbell; bail if wake fails. */
	ret = cnss_mhi_device_get_sync_atomic(pci_priv,
					      CNSS_MHI_WAKE_TIMEOUT, true);
	if (ret < 0) {
		cnss_pr_err("Failed to bring mhi in M0 state, ret %d\n", ret);
		return;
	}

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_DOORBELL_RING);
	ret = cnss_mhi_host_notify_db_disable_trace(pci_priv);
	if (ret < 0)
		cnss_pr_err("Fail to notify wlan fw to stop trace collection, ret %d\n", ret);

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_EXIT);
}

/* Hook the SMMU fault handler IRQ callback into the device's IOMMU domain. */
void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
{
	qcom_iommu_set_fault_handler_irq(pci_priv->iommu_domain,
					 cnss_pci_smmu_fault_handler_irq, pci_priv);
}
#else
/* Stub: SMMU doorbell support compiled out. */
void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
{
}
#endif

/*
 * Verify the PCIe link is usable for register access.
 *
 * Returns 0 when up; -EACCES when the link is suspended, -EIO when a link
 * down has been flagged or the config-space device ID no longer matches
 * the recorded one (a strong sign the link dropped underneath us).
 */
int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
{
	u16 device_id;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_dbg("%ps: PCIe link is in suspend state\n",
			    (void *)_RET_IP_);
		return -EACCES;
	}

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_err("%ps: PCIe link is down\n", (void *)_RET_IP_);
		return -EIO;
	}

	/* Read the device ID from config space as a link-liveness probe. */
	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
	if (device_id != pci_priv->device_id) {
		cnss_fatal_err("%ps: PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
			       (void *)_RET_IP_, device_id,
			       pci_priv->device_id);
		return -EIO;
	}

	return 0;
}

/*
 * Program the BAR remap window so that @offset becomes reachable through
 * the windowed aperture, then read back to confirm the write landed.
 */
static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
	u32 window_enable = WINDOW_ENABLE_BIT | window;
	u32 val;

	/* QCN7605 uses a different window-enable encoding. */
	if (plat_priv->device_id == QCN7605_DEVICE_ID)
		window_enable = QCN7605_WINDOW_ENABLE_BIT | window;

	/* PEACH has its own remap-control register offset. */
	if (plat_priv->device_id == PEACH_DEVICE_ID) {
		writel_relaxed(window_enable, pci_priv->bar +
			       PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
	} else {
		writel_relaxed(window_enable, pci_priv->bar +
			       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
	}

	/* Only log when the window actually changed, to limit log noise. */
	if (window != pci_priv->remap_window) {
		pci_priv->remap_window = window;
		cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
			    window_enable);
	}

	/* Read it back to make sure the write has taken effect */
	if (plat_priv->device_id == PEACH_DEVICE_ID) {
		val = readl_relaxed(pci_priv->bar +
				    PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
	} else {
		val = readl_relaxed(pci_priv->bar +
				    QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
	}
	if (val != window_enable) {
		cnss_pr_err("Failed to config window register to 0x%x, current value: 0x%x\n",
			    window_enable, val);
		/* Assert only if the link is up and we are not in panic. */
		if (!cnss_pci_check_link_status(pci_priv) &&
		    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
			CNSS_ASSERT(0);
	}
}

/*
 * Read a device register through the BAR.
 *
 * Low offsets (and all of QCA6174) are read directly; higher offsets go
 * through the remap window under pci_reg_window_lock. Returns 0 on
 * success or a cnss_pci_check_link_status() error code.
 */
static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
			     u32 offset, u32 *val)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	/* Link check sleeps in config-space access; skip in atomic ctx. */
	if (!in_interrupt() && !irqs_disabled()) {
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret)
			return ret;
	}

	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		*val = readl_relaxed(pci_priv->bar + offset);
		return 0;
	}

	/* If in panic, assumption is kernel panic handler will hold all threads
	 * and interrupts. Further pci_reg_window_lock could be held before
	 * panic. So only lock during normal operation.
1202 */ 1203 if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) { 1204 cnss_pci_select_window(pci_priv, offset); 1205 *val = readl_relaxed(pci_priv->bar + WINDOW_START + 1206 (offset & WINDOW_RANGE_MASK)); 1207 } else { 1208 spin_lock_bh(&pci_reg_window_lock); 1209 cnss_pci_select_window(pci_priv, offset); 1210 *val = readl_relaxed(pci_priv->bar + WINDOW_START + 1211 (offset & WINDOW_RANGE_MASK)); 1212 spin_unlock_bh(&pci_reg_window_lock); 1213 } 1214 1215 return 0; 1216 } 1217 1218 static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset, 1219 u32 val) 1220 { 1221 int ret; 1222 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 1223 1224 if (!in_interrupt() && !irqs_disabled()) { 1225 ret = cnss_pci_check_link_status(pci_priv); 1226 if (ret) 1227 return ret; 1228 } 1229 1230 if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID || 1231 offset < MAX_UNWINDOWED_ADDRESS) { 1232 writel_relaxed(val, pci_priv->bar + offset); 1233 return 0; 1234 } 1235 1236 /* Same constraint as PCI register read in panic */ 1237 if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) { 1238 cnss_pci_select_window(pci_priv, offset); 1239 writel_relaxed(val, pci_priv->bar + WINDOW_START + 1240 (offset & WINDOW_RANGE_MASK)); 1241 } else { 1242 spin_lock_bh(&pci_reg_window_lock); 1243 cnss_pci_select_window(pci_priv, offset); 1244 writel_relaxed(val, pci_priv->bar + WINDOW_START + 1245 (offset & WINDOW_RANGE_MASK)); 1246 spin_unlock_bh(&pci_reg_window_lock); 1247 } 1248 1249 return 0; 1250 } 1251 1252 static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv) 1253 { 1254 struct device *dev = &pci_priv->pci_dev->dev; 1255 int ret; 1256 1257 ret = cnss_pci_force_wake_request_sync(dev, 1258 FORCE_WAKE_DELAY_TIMEOUT_US); 1259 if (ret) { 1260 if (ret != -EAGAIN) 1261 cnss_pr_err("Failed to request force wake\n"); 1262 return ret; 1263 } 1264 1265 /* If device's M1 state-change event races here, it can be ignored, 1266 * as the device is expected to immediately move 
 from M2 to M0
	 * without entering low power state.
	 */
	if (cnss_pci_is_device_awake(dev) != true)
		cnss_pr_warn("MHI not in M0, while reg still accessible\n");

	return 0;
}

/*
 * Release the force-wake vote taken by cnss_pci_force_wake_get().
 * -EAGAIN is not logged (treated as a benign race), but is returned.
 */
static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
{
	struct device *dev = &pci_priv->pci_dev->dev;
	int ret;

	ret = cnss_pci_force_wake_release(dev);
	if (ret && ret != -EAGAIN)
		cnss_pr_err("Failed to release force wake\n");

	return ret;
}

#if IS_ENABLED(CONFIG_INTERCONNECT)
/**
 * cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
 * @plat_priv: Platform private data struct
 * @bw: bandwidth level (index into the per-path config table)
 * @save: toggle flag to save bandwidth to current_bw_vote
 *
 * Setup bandwidth votes for configured interconnect paths. Stops at the
 * first failing path; current_bw_vote is updated only on full success.
 *
 * Return: 0 for success, -EOPNOTSUPP when no ICC paths are configured,
 * -EINVAL for an out-of-range level, or the icc_set_bw() error.
 */
static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
				    u32 bw, bool save)
{
	int ret = 0;
	struct cnss_bus_bw_info *bus_bw_info;

	if (!plat_priv->icc.path_count)
		return -EOPNOTSUPP;

	if (bw >= plat_priv->icc.bus_bw_cfg_count) {
		cnss_pr_err("Invalid bus bandwidth Type: %d", bw);
		return -EINVAL;
	}

	cnss_pr_buf("Bandwidth vote to %d, save %d\n", bw, save);

	list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
		ret = icc_set_bw(bus_bw_info->icc_path,
				 bus_bw_info->cfg_table[bw].avg_bw,
				 bus_bw_info->cfg_table[bw].peak_bw);
		if (ret) {
			cnss_pr_err("Could not set BW Cfg: %d, err = %d ICC Path: %s Val: %d %d\n",
				    bw, ret, bus_bw_info->icc_name,
				    bus_bw_info->cfg_table[bw].avg_bw,
				    bus_bw_info->cfg_table[bw].peak_bw);
			break;
		}
	}
	if (ret == 0 && save)
		plat_priv->icc.current_bw_vote = bw;
	return ret;
}

/*
 * Public entry: vote @bandwidth (a non-negative level) for this device
 * and remember it as the current vote.
 */
int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
{
	struct cnss_plat_data *plat_priv
		= cnss_bus_dev_to_plat_priv(dev);

	if (!plat_priv)
		return -ENODEV;

	if (bandwidth < 0)
		return -EINVAL;

	return cnss_setup_bus_bandwidth(plat_priv, (u32)bandwidth, true);
}
#else
/* Stub: interconnect framework not enabled, votes are no-ops. */
static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
				    u32 bw, bool save)
{
	return 0;
}

int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
{
	return 0;
}
#endif
EXPORT_SYMBOL(cnss_request_bus_bandwidth);

/*
 * Debugfs-style register read. @raw_access bypasses all power-state
 * handling; otherwise the device is runtime-resumed and force-woken
 * first, and the wake/runtime votes are dropped on all exit paths.
 */
int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
			    u32 *val, bool raw_access)
{
	int ret = 0;
	bool do_force_wake_put = true;

	if (raw_access) {
		ret = cnss_pci_reg_read(pci_priv, offset, val);
		goto out;
	}

	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
	if (ret)
		goto out;

	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
	if (ret < 0)
		goto runtime_pm_put;

	/* If the wake request failed there is no vote to release later. */
	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		do_force_wake_put = false;

	ret = cnss_pci_reg_read(pci_priv, offset, val);
	if (ret) {
		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
			    offset, ret);
		goto force_wake_put;
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
out:
	return ret;
}

/*
 * Debugfs-style register write; same wake/runtime-PM choreography as
 * cnss_pci_debug_reg_read().
 */
int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
			     u32 val, bool raw_access)
{
	int ret = 0;
	bool do_force_wake_put = true;

	if (raw_access) {
		ret = cnss_pci_reg_write(pci_priv, offset, val);
		goto out;
	}

	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
	if (ret)
		goto out;

	ret = cnss_pci_pm_runtime_get_sync(pci_priv,
					   RTPM_ID_CNSS);
	if (ret < 0)
		goto runtime_pm_put;

	/* If the wake request failed there is no vote to release later. */
	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		do_force_wake_put = false;

	ret = cnss_pci_reg_write(pci_priv, offset, val);
	if (ret) {
		cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
			    val, offset, ret);
		goto force_wake_put;
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
out:
	return ret;
}

/*
 * Save (@save == SAVE_PCI_CONFIG_SPACE) or restore the device's PCI
 * config space. During link-down/recovery the saved state is discarded
 * on save and the pristine default_state is restored instead.
 */
static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	bool link_down_or_recovery;

	if (!plat_priv)
		return -ENODEV;

	link_down_or_recovery = pci_priv->pci_link_down_ind ||
		(test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));

	if (save) {
		if (link_down_or_recovery) {
			/* Stale state is useless after a link drop. */
			pci_priv->saved_state = NULL;
		} else {
			pci_save_state(pci_dev);
			pci_priv->saved_state = pci_store_saved_state(pci_dev);
		}
	} else {
		if (link_down_or_recovery) {
			pci_load_saved_state(pci_dev, pci_priv->default_state);
			pci_restore_state(pci_dev);
		} else if (pci_priv->saved_state) {
			/* Consumes and frees saved_state. */
			pci_load_and_free_saved_state(pci_dev,
						      &pci_priv->saved_state);
			pci_restore_state(pci_dev);
		}
	}

	return 0;
}

/*
 * KIWI only: read "qcom,target-link-speed" from the root port's parent
 * DT node into plat_priv->supported_link_speed (0 when absent).
 */
static int cnss_update_supported_link_info(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *root_port;
	struct device_node *root_of_node;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -EINVAL;

	if (pci_priv->device_id != KIWI_DEVICE_ID)
		return ret;

	plat_priv = pci_priv->plat_priv;
	root_port =
 pcie_find_root_port(pci_priv->pci_dev);

	if (!root_port) {
		cnss_pr_err("PCIe root port is null\n");
		return -EINVAL;
	}

	root_of_node = root_port->dev.of_node;
	if (root_of_node && root_of_node->parent) {
		ret = of_property_read_u32(root_of_node->parent,
					   "qcom,target-link-speed",
					   &plat_priv->supported_link_speed);
		if (!ret)
			cnss_pr_dbg("Supported PCIe Link Speed: %d\n",
				    plat_priv->supported_link_speed);
		else
			plat_priv->supported_link_speed = 0;
	}

	return ret;
}

/*
 * Snapshot the negotiated PCIe link speed/width from LNKSTA into
 * def_link_speed/def_link_width, and seed cur_link_speed from it.
 */
static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
{
	u16 link_status;
	int ret;

	ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
					&link_status);
	if (ret)
		return ret;

	cnss_pr_dbg("Get PCI link status register: %u\n", link_status);

	pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
	pci_priv->def_link_width =
		(link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
	pci_priv->cur_link_speed = pci_priv->def_link_speed;

	cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
		    pci_priv->def_link_speed, pci_priv->def_link_width);

	return 0;
}

/*
 * Dump the SOC scratch registers (table pci_scratch[], NULL-name
 * terminated) for supported chips. Skipped in atomic context or when
 * the link is not up, since register reads could then misbehave.
 */
static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv)
{
	u32 reg_offset, val;
	int i;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Start to dump SOC Scratch registers\n");

	for (i = 0; pci_scratch[i].name; i++) {
		reg_offset = pci_scratch[i].offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
			return;
		cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n",
			    pci_scratch[i].name, val);
	}
}

/* PEACH only: dump the WLAON SOC reset-cause shadow register. */
static void cnss_pci_soc_reset_cause_reg_dump(struct cnss_pci_data *pci_priv)
{
	u32 val;

	switch (pci_priv->device_id) {
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	/* Register reads below may sleep; skip in atomic context. */
	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Start to dump SOC Reset Cause registers\n");

	if (cnss_pci_reg_read(pci_priv, WLAON_SOC_RESET_CAUSE_SHADOW_REG,
			      &val))
		return;
	cnss_pr_dbg("WLAON_SOC_RESET_CAUSE_SHADOW_REG = 0x%x\n",
		    val);

}

/* PEACH only: dump the BHI debug register table (pci_bhi_debug[]). */
static void cnss_pci_bhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
{
	u32 reg_offset, val;
	int i;

	switch (pci_priv->device_id) {
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Start to dump PCIE BHIE DEBUG registers\n");

	for (i = 0; pci_bhi_debug[i].name; i++) {
		reg_offset = pci_bhi_debug[i].offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
			return;
		cnss_pr_dbg("PCIE__%s = 0x%x\n",
			    pci_bhi_debug[i].name, val);
	}
}

/*
 * Take the PCIe link down: stop bus mastering, save config space,
 * disable the device, enter D3hot (except QCA6174) and drop the link.
 * Idempotent when the link is already suspended.
 */
int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_info("PCI link is already suspended\n");
		goto out;
	}

	pci_clear_master(pci_priv->pci_dev);

	ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
	if (ret)
		goto out;

	pci_disable_device(pci_priv->pci_dev);

	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
		if (ret)
			/* Non-fatal: proceed with the link-down anyway. */
			cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
	}

	/* Always do PCIe L2 suspend during power off/PCIe link recovery */
	pci_priv->drv_connected_last = 0;

	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
	if (ret)
		goto out;

	pci_priv->pci_link_state = PCI_LINK_DOWN;

	return 0;
out:
	return ret;
}

/*
 * Bring the PCIe link back up: retrain the link, enter D0 (except
 * QCA6174), restore config space, re-enable the device and bus
 * mastering, then clear any pending link-down indication.
 * Idempotent when the link is already up.
 */
int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	if (pci_priv->pci_link_state == PCI_LINK_UP) {
		cnss_pr_info("PCI link is already resumed\n");
		goto out;
	}

	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
	if (ret) {
		/* Map any failure to -EAGAIN and notify registered driver. */
		ret = -EAGAIN;
		cnss_pci_update_link_event(pci_priv,
					   BUS_EVENT_PCI_LINK_RESUME_FAIL, NULL);
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;

	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D0);
		if (ret) {
			cnss_pr_err("Failed to set D0, err = %d\n", ret);
			goto out;
		}
	}

	ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
	if (ret)
		goto out;

	ret = pci_enable_device(pci_priv->pci_dev);
	if (ret) {
		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
		goto out;
	}

	pci_set_master(pci_priv->pci_dev);

	if (pci_priv->pci_link_down_ind)
		pci_priv->pci_link_down_ind = false;

	return 0;
out:
	return ret;
}

/* Deliver a bus event (type + opaque data) to the WLAN driver uevent. */
static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
				       enum cnss_bus_event_type type,
				       void *data)
{
	struct cnss_bus_event bus_event;

	bus_event.etype = type;
	bus_event.event_data = data;
	cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
}

/*
 * Central PCIe link-down handler: flag the condition exactly once
 * (under pci_link_down_lock), notify MHI and the WLAN driver, and
 * schedule recovery.
 */
void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct pci_dev *pci_dev =
 pci_priv->pci_dev;
	unsigned long flags;

	/* Optional debug quirk: crash immediately instead of recovering. */
	if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
		     &plat_priv->ctrl_params.quirks))
		panic("cnss: PCI link is down\n");

	spin_lock_irqsave(&pci_link_down_lock, flags);
	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
		spin_unlock_irqrestore(&pci_link_down_lock, flags);
		return;
	}
	pci_priv->pci_link_down_ind = true;
	spin_unlock_irqrestore(&pci_link_down_lock, flags);

	if (pci_priv->mhi_ctrl) {
		/* Notify MHI about link down*/
		mhi_report_error(pci_priv->mhi_ctrl);
	}

	if (pci_dev->device == QCA6174_DEVICE_ID)
		disable_irq_nosync(pci_dev->irq);

	/* Notify bus related event. Now for all supported chips.
	 * Here PCIe LINK_DOWN notification taken care.
	 * uevent buffer can be extended later, to cover more bus info.
	 */
	cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL);

	cnss_fatal_err("PCI link down, schedule recovery\n");
	reinit_completion(&pci_priv->wake_event_complete);
	cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
}

/*
 * Exported hook for the WLAN driver to report a suspected link down.
 * Asserts PERST; only falls back to the generic link-down handler when
 * the PERST assert itself fails. Returns -EBUSY if recovery is already
 * in flight.
 */
int cnss_pci_link_down(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv = NULL;
	int ret;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -EINVAL;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is already in progress\n");
		return -EBUSY;
	}

	/* DT opt-in: enable self recovery when a driver was connected. */
	if (pci_priv->drv_connected_last &&
	    of_property_read_bool(plat_priv->plat_dev->dev.of_node,
				  "cnss-enable-self-recovery"))
		plat_priv->ctrl_params.quirks |=
 BIT(LINK_DOWN_SELF_RECOVERY);

	cnss_pr_err("PCI link down is detected by drivers\n");

	ret = cnss_pci_assert_perst(pci_priv);
	if (ret)
		cnss_pci_handle_linkdown(pci_priv);

	return ret;
}
EXPORT_SYMBOL(cnss_pci_link_down);

/*
 * Copy device register dump data into @buffer (up to @len bytes).
 * Refused while the link is suspended (D3) since registers are
 * unreachable then.
 */
int cnss_pci_get_reg_dump(struct device *dev, uint8_t *buffer, uint32_t len)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_dbg("No PCIe reg dump since PCIe is suspended(D3)\n");
		return -EACCES;
	}

	cnss_pr_dbg("Start to get PCIe reg dump\n");

	return _cnss_pci_get_reg_dump(pci_priv, buffer, len);
}
EXPORT_SYMBOL(cnss_pci_get_reg_dump);

/*
 * Non-zero when the device is in an error state: either a device-error
 * notification is pending or a link down has been flagged (the two
 * conditions are combined bitwise; both are 0/1 flags here).
 */
int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) |
		pci_priv->pci_link_down_ind;
}

/* device-pointer wrapper around cnss_pcie_is_device_down(). */
int cnss_pci_is_device_down(struct device *dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));

	return cnss_pcie_is_device_down(pci_priv);
}
EXPORT_SYMBOL(cnss_pci_is_device_down);

/*
 * Shutdown cleanup: cancel the pending RDDM timer. Returns the
 * del_timer() result (1 if the timer was still active, 0 otherwise).
 */
int cnss_pci_shutdown_cleanup(struct cnss_pci_data *pci_priv)
{
	int ret;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	ret = del_timer(&pci_priv->dev_rddm_timer);
	cnss_pr_dbg("%s RDDM timer deleted", ret ?
 "Active" : "Inactive");
	return ret;
}

/*
 * Exported register-window lock helpers for clients that perform
 * multiple windowed accesses atomically. @dev/@flags are unused; the
 * lock is the global pci_reg_window_lock taken _bh.
 */
void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
{
	spin_lock_bh(&pci_reg_window_lock);
}
EXPORT_SYMBOL(cnss_pci_lock_reg_window);

void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
{
	spin_unlock_bh(&pci_reg_window_lock);
}
EXPORT_SYMBOL(cnss_pci_unlock_reg_window);

/* Return the PCIe root-complex number (rc_num) for this device. */
int cnss_get_pci_slot(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv = NULL;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -EINVAL;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	return plat_priv->rc_num;
}
EXPORT_SYMBOL(cnss_get_pci_slot);

/**
 * cnss_pci_dump_bl_sram_mem - Dump WLAN device bootloader debug log
 * @pci_priv: driver PCI bus context pointer
 *
 * Dump primary and secondary bootloader debug log data. For SBL check the
 * log struct address and size for validity.
 *
 * Return: None
 */
static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
{
	enum mhi_ee_type ee;
	u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
	u32 pbl_log_sram_start;
	u32 pbl_stage, sbl_log_start, sbl_log_size;
	u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
	u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
	u32 sbl_log_def_start = SRAM_START;
	u32 sbl_log_def_end = SRAM_END;
	int i;

	cnss_pci_soc_reset_cause_reg_dump(pci_priv);

	/* Per-chip PBL log SRAM window and max PBL/SBL log sizes. */
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		pbl_log_sram_start = QCA6390_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case QCA6490_DEVICE_ID:
		pbl_log_sram_start = QCA6490_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case KIWI_DEVICE_ID:
		pbl_bootstrap_status_reg = KIWI_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = KIWI_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = KIWI_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case MANGO_DEVICE_ID:
		pbl_bootstrap_status_reg = MANGO_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = MANGO_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case PEACH_DEVICE_ID:
		pbl_bootstrap_status_reg = PEACH_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = PEACH_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = PEACH_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = PEACH_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	default:
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG,
			  &pbl_stage);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
	cnss_pci_reg_read(pci_priv, pbl_bootstrap_status_reg,
			  &pbl_bootstrap_status);
	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x\n",
		    pbl_stage, sbl_log_start, sbl_log_size);
	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
		    pbl_wlan_boot_cfg, pbl_bootstrap_status);

	/* In mission mode the PBL log region is no longer meaningful. */
	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
		cnss_pr_err("Avoid Dumping PBL log data in Mission mode\n");
		return;
	}

	cnss_pr_dbg("Dumping PBL log data\n");
	for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
		mem_addr = pbl_log_sram_start + i;
		/* Stop on the first failed read (link likely gone). */
		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
			break;
		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
	}

	/* Clamp the device-reported SBL log size to the per-chip max. */
	sbl_log_size = (sbl_log_size > sbl_log_max_size ?
			sbl_log_max_size : sbl_log_size);
	/* Reject SBL log windows that fall outside device SRAM. */
	if (sbl_log_start < sbl_log_def_start ||
	    sbl_log_start > sbl_log_def_end ||
	    (sbl_log_start + sbl_log_size) > sbl_log_def_end) {
		cnss_pr_err("Invalid SBL log data\n");
		return;
	}

	/* Re-check exec env: it may have advanced since the PBL dump. */
	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
		cnss_pr_err("Avoid Dumping SBL log data in Mission mode\n");
		return;
	}

	cnss_pr_dbg("Dumping SBL log data\n");
	for (i = 0; i < sbl_log_size; i += sizeof(val)) {
		mem_addr = sbl_log_start + i;
		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
			break;
		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
	}
}

#ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
/* SRAM dump compiled out. */
static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
{
}
#else
/*
 * Copy device SRAM (SRAM_DUMP_SIZE bytes from SRAM_START) into the
 * preallocated plat_priv->sram_dump buffer. QCA6490 primary-build only.
 */
static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;
	u32 i, mem_addr;
	u32 *dump_ptr;

	plat_priv = pci_priv->plat_priv;

	if (plat_priv->device_id != QCA6490_DEVICE_ID ||
	    cnss_get_host_build_type() != QMI_HOST_BUILD_TYPE_PRIMARY_V01)
		return;

	if (!plat_priv->sram_dump) {
		cnss_pr_err("SRAM dump memory is not allocated\n");
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Dumping SRAM at 0x%lx\n", plat_priv->sram_dump);

	for (i = 0; i < SRAM_DUMP_SIZE; i += sizeof(u32)) {
		mem_addr = SRAM_START + i;
		dump_ptr = (u32 *)(plat_priv->sram_dump + i);
		if (cnss_pci_reg_read(pci_priv, mem_addr, dump_ptr)) {
			cnss_pr_err("SRAM Dump failed at 0x%x\n", mem_addr);
			break;
		}
		/* Relinquish CPU after dumping 256KB chunks*/
		if (!(i % CNSS_256KB_SIZE))
			cond_resched();
	}
}
#endif

/*
 * Handle MHI power-up timeout: wait for RDDM when the RDDM cookie or
 * device SOL GPIO indicates a dump is coming; otherwise dump debug
 * registers and bootloader logs and report -ETIMEDOUT.
 */
static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv =
 pci_priv->plat_priv;

	cnss_fatal_err("MHI power up returns timeout\n");

	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE) ||
	    cnss_get_dev_sol_value(plat_priv) > 0) {
		/* Wait for RDDM if RDDM cookie is set or device SOL GPIO is
		 * high. If RDDM times out, PBL/SBL error region may have been
		 * erased so no need to dump them either.
		 */
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
		    !pci_priv->pci_link_down_ind) {
			mod_timer(&pci_priv->dev_rddm_timer,
				  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
		}
	} else {
		cnss_pr_dbg("RDDM cookie is not set and device SOL is low\n");
		cnss_mhi_debug_reg_dump(pci_priv);
		cnss_pci_bhi_debug_reg_dump(pci_priv);
		cnss_pci_soc_scratch_reg_dump(pci_priv);
		/* Dump PBL/SBL error log if RDDM cookie is not set */
		cnss_pci_dump_bl_sram_mem(pci_priv);
		cnss_pci_dump_sram(pci_priv);
		return -ETIMEDOUT;
	}

	return 0;
}

/* Map a cnss_mhi_state enum value to a printable name for logging. */
static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		return "INIT";
	case CNSS_MHI_DEINIT:
		return "DEINIT";
	case CNSS_MHI_POWER_ON:
		return "POWER_ON";
	case CNSS_MHI_POWERING_OFF:
		return "POWERING_OFF";
	case CNSS_MHI_POWER_OFF:
		return "POWER_OFF";
	case CNSS_MHI_FORCE_POWER_OFF:
		return "FORCE_POWER_OFF";
	case CNSS_MHI_SUSPEND:
		return "SUSPEND";
	case CNSS_MHI_RESUME:
		return "RESUME";
	case CNSS_MHI_TRIGGER_RDDM:
		return "TRIGGER_RDDM";
	case CNSS_MHI_RDDM_DONE:
		return "RDDM_DONE";
	default:
		return "UNKNOWN";
	}
};

/*
 * Validate that @mhi_state is a legal transition from the current
 * pci_priv->mhi_state bitmask. Returns 0 when allowed, -EINVAL (with an
 * assert unless the target is TRIGGER_RDDM) otherwise.
 */
static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
					enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_DEINIT:
	/* DEINIT and POWER_ON share a precondition: device must be inited
	 * but not yet powered on.
	 */
	case CNSS_MHI_POWER_ON:
		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_FORCE_POWER_OFF:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_POWER_OFF:
	case CNSS_MHI_SUSPEND:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RESUME:
		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RDDM_DONE:
		/* RDDM_DONE is always accepted */
		return 0;
	default:
		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
			    cnss_mhi_state_to_str(mhi_state), mhi_state);
	}

	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state,
		    pci_priv->mhi_state);
	if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
		CNSS_ASSERT(0);

	return -EINVAL;
}

/* Write the 0xACE55 debug pattern to the GCC spare register and read it
 * back, then read the pre-ARES debug timer value. QCA6490 only; used as a
 * pre-RDDM debug aid. Messages are logged at error level on purpose so they
 * show up in failure logs.
 */
static int cnss_rddm_trigger_debug(struct cnss_pci_data *pci_priv)
{
	int read_val, ret;

	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
		return -EOPNOTSUPP;

	if (cnss_pci_check_link_status(pci_priv))
		return -EINVAL;

	cnss_pr_err("Write GCC Spare with ACE55 Pattern");
	cnss_pci_reg_write(pci_priv, GCC_GCC_SPARE_REG_1, 0xACE55);
	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
	cnss_pr_err("Read back GCC Spare: 0x%x, ret: %d", read_val, ret);
	ret = cnss_pci_reg_read(pci_priv, GCC_PRE_ARES_DEBUG_TIMER_VAL,
				&read_val);
	cnss_pr_err("Warm reset allowed check: 0x%x, ret: %d", read_val, ret);
	return ret;
}

/* Companion to cnss_rddm_trigger_debug(): after a failed RDDM trigger, dump
 * the GCC spare register plus PBL/SBL boot-stage registers for post-mortem.
 * QCA6490 only.
 */
static int cnss_rddm_trigger_check(struct cnss_pci_data *pci_priv)
{
	int read_val, ret;
	u32 pbl_stage, sbl_log_start, sbl_log_size, pbl_wlan_boot_cfg;

	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
		return -EOPNOTSUPP;

	if (cnss_pci_check_link_status(pci_priv))
		return -EINVAL;

	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
	cnss_pr_err("Read GCC spare to check reset status: 0x%x, ret: %d",
		    read_val, ret);

	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x \n",
		    pbl_stage, sbl_log_start, sbl_log_size);
	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x\n", pbl_wlan_boot_cfg);

	return ret;
}

/* Record a completed MHI state transition in the driver-internal bitmask
 * pci_priv->mhi_state. POWER_OFF / FORCE_POWER_OFF clear the whole
 * power-related bit set in one go.
 */
static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
				       enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_DEINIT:
		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWER_ON:
		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWERING_OFF:
		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWER_OFF:
	case CNSS_MHI_FORCE_POWER_OFF:
		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_SUSPEND:
		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_RESUME:
		clear_bit(CNSS_MHI_SUSPEND,
			  &pci_priv->mhi_state);
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_RDDM_DONE:
		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
		break;
	default:
		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
	}
}

/* mhi_pm_resume_force() only exists on kernels >= 5.15; fall back to the
 * regular resume path on older kernels.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
{
	return mhi_pm_resume_force(pci_priv->mhi_ctrl);
}
#else
static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
{
	return mhi_pm_resume(pci_priv->mhi_ctrl);
}
#endif

/* Drive the MHI layer into the requested state after validating the
 * transition against the driver-internal state bitmask. On success the
 * corresponding state bit is updated; on failure the target state and the
 * error are logged. No-op for QCA6174, which has no MHI.
 */
static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
				  enum cnss_mhi_state mhi_state)
{
	int ret = 0, retry = 0;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	if (mhi_state < 0) {
		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
		return -EINVAL;
	}

	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
	if (ret)
		goto out;

	cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
		     cnss_mhi_state_to_str(mhi_state), mhi_state);

	switch (mhi_state) {
	case CNSS_MHI_INIT:
		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
		break;
	case CNSS_MHI_DEINIT:
		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
		ret = 0;
		break;
	case CNSS_MHI_POWER_ON:
		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
		/* Only set img_pre_alloc when power up succeeds */
		if (!ret && !pci_priv->mhi_ctrl->img_pre_alloc) {
			cnss_pr_dbg("Notify MHI to use already allocated images\n");
			pci_priv->mhi_ctrl->img_pre_alloc = true;
		}
#endif
		break;
	case CNSS_MHI_POWER_OFF:
		/* graceful power down */
		mhi_power_down(pci_priv->mhi_ctrl, true);
		ret = 0;
		break;
	case CNSS_MHI_FORCE_POWER_OFF:
		/* ungraceful power down, e.g. after link down */
		mhi_power_down(pci_priv->mhi_ctrl, false);
		ret = 0;
		break;
	case CNSS_MHI_SUSPEND:
retry_mhi_suspend:
		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
		if (pci_priv->drv_connected_last)
			ret = cnss_mhi_pm_fast_suspend(pci_priv, true);
		else
			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
		/* Retry a bounded number of times when MHI is busy */
		if (ret == -EBUSY && retry++ < MHI_SUSPEND_RETRY_MAX_TIMES) {
			cnss_pr_vdbg("Retry MHI suspend #%d\n", retry);
			usleep_range(MHI_SUSPEND_RETRY_DELAY_US,
				     MHI_SUSPEND_RETRY_DELAY_US + 1000);
			goto retry_mhi_suspend;
		}
		break;
	case CNSS_MHI_RESUME:
		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
		if (pci_priv->drv_connected_last) {
			/* Fast resume needs L1 blocked for its duration */
			ret = cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
			if (ret) {
				mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
				break;
			}
			ret = cnss_mhi_pm_fast_resume(pci_priv, true);
			cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
		} else {
			if (pci_priv->device_id == QCA6390_DEVICE_ID)
				ret = cnss_mhi_pm_force_resume(pci_priv);
			else
				ret = mhi_pm_resume(pci_priv->mhi_ctrl);
		}
		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		cnss_rddm_trigger_debug(pci_priv);
		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
		if (ret) {
			cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
			cnss_rddm_trigger_check(pci_priv);
		}
		break;
	case CNSS_MHI_RDDM_DONE:
		break;
	default:
		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
		ret = -EINVAL;
	}

	if (ret)
		goto out;

	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);

	return 0;

out:
	cnss_pr_err("Failed to set MHI state: %s(%d), err = %d\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state, ret);
	return ret;
}

/* Read the MSI-X match address from the "msix-match-addr" DT property into
 * pci_priv->msix_addr. No-op (returns 0) when MSI-X is not enabled.
 */
static int cnss_pci_config_msi_addr(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_plat_data *plat_priv;

	if (!pci_dev)
		return -ENODEV;

	if (!pci_dev->msix_enabled)
		return ret;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
				   "msix-match-addr",
				   &pci_priv->msix_addr);
	cnss_pr_dbg("MSI-X Match address is 0x%X\n",
		    pci_priv->msix_addr);

	return ret;
}

/* Cache the MSI (or MSI-X) endpoint base data in
 * pci_priv->msi_ep_base_data: from the MSI config table for MSI-X, or from
 * the IRQ's MSI descriptor for plain MSI.
 */
static int cnss_pci_config_msi_data(struct cnss_pci_data *pci_priv)
{
	struct msi_desc *msi_desc;
	struct cnss_msi_config *msi_config;
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	msi_config = pci_priv->msi_config;

	if (pci_dev->msix_enabled) {
		pci_priv->msi_ep_base_data = msi_config->users[0].base_vector;
		cnss_pr_dbg("MSI-X base data is %d\n",
			    pci_priv->msi_ep_base_data);
		return 0;
	}

	msi_desc = irq_get_msi_desc(pci_dev->irq);
	if (!msi_desc) {
		cnss_pr_err("msi_desc is NULL!\n");
		return -EINVAL;
	}

	pci_priv->msi_ep_base_data = msi_desc->msg.data;
	cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);

	return 0;
}

#ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
#define PLC_PCIE_NAME_LEN 14

/* Resolve the platform instance that a registering WLAN driver should be
 * bound to, in order of preference: (1) name match against the DT-provided
 * pld_bus_ops_name, (2) an instance already bound to these driver_ops,
 * (3) the first instance with no driver bound yet.
 */
static struct cnss_plat_data *
cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
{
	int plat_env_count = cnss_get_plat_env_count();
	struct cnss_plat_data *plat_env;
	struct cnss_pci_data *pci_priv;
	int i = 0;

	if (!driver_ops) {
		cnss_pr_err("No cnss driver\n");
		return NULL;
	}

	for (i = 0; i < plat_env_count; i++) {
		plat_env = cnss_get_plat_env(i);
		if (!plat_env)
			continue;
		if (driver_ops->name && plat_env->pld_bus_ops_name) {
			/* driver_ops->name = PLD_PCIE_OPS_NAME
			 * #ifdef MULTI_IF_NAME
			 * #define PLD_PCIE_OPS_NAME "pld_pcie_" MULTI_IF_NAME
			 * #else
			 * #define PLD_PCIE_OPS_NAME "pld_pcie"
			 * #endif
			 */
			if (memcmp(driver_ops->name,
				   plat_env->pld_bus_ops_name,
				   PLC_PCIE_NAME_LEN) == 0)
				return plat_env;
		}
	}

	cnss_pr_vdbg("Invalid cnss driver name from ko %s\n", driver_ops->name);
	/* In the dual wlan card case, the pld_bus_ops_name from dts
	 * and driver_ops->name from the ko should match, otherwise the
	 * wlanhost driver doesn't know which plat_env it can use;
	 * if no match is found, fall back to the first available
	 * instance instead.
	 */

	for (i = 0; i < plat_env_count; i++) {
		plat_env = cnss_get_plat_env(i);

		if (!plat_env)
			continue;

		pci_priv = plat_env->bus_priv;
		if (!pci_priv) {
			cnss_pr_err("pci_priv is NULL\n");
			continue;
		}

		if (driver_ops == pci_priv->driver_ops)
			return plat_env;
	}
	/* No existing instance found, so return the first empty instance */
	for (i = 0; i < plat_env_count; i++) {
		plat_env = cnss_get_plat_env(i);

		if (!plat_env)
			continue;
		pci_priv = plat_env->bus_priv;
		if (!pci_priv) {
			cnss_pr_err("pci_priv is NULL\n");
			continue;
		}

		if (!pci_priv->driver_ops)
			return plat_env;
	}

	return NULL;
}

/* Publish this instance's qrtr node id to firmware via a PCIe scratch
 * register and verify the write by reading it back.
 */
static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	u32 scratch = QCA6390_PCIE_SOC_PCIE_REG_PCIE_SCRATCH_2_SOC_PCIE_REG;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	/**
	 * In the single wlan chipset case, plat_priv->qrtr_node_id is
	 * always 0 and wlan fw will use the hardcoded 7 as the qrtr node id.
	 * In the dual Hastings case, we read the qrtr node id
	 * from device tree into plat_priv->qrtr_node_id,
	 * which is always non-zero, and store this new value
	 * into a pcie register; wlan fw reads the qrtr node id
	 * out of this register and overwrites the hardcoded one
	 * while doing initialization for the ipc router.
	 * Without this change, two Hastings would use the same
	 * qrtr node instance id, which would mess up qmi message
	 * exchange. According to the qrtr spec, every node should
	 * have a unique qrtr node id.
	 */
	if (plat_priv->device_id == QCA6390_DEVICE_ID &&
	    plat_priv->qrtr_node_id) {
		u32 val;

		cnss_pr_dbg("write 0x%x to SCRATCH REG\n",
			    plat_priv->qrtr_node_id);
		ret = cnss_pci_reg_write(pci_priv, scratch,
					 plat_priv->qrtr_node_id);
		if (ret) {
			cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
				    scratch, ret);
			goto out;
		}

		ret = cnss_pci_reg_read(pci_priv, scratch, &val);
		if (ret) {
			cnss_pr_err("Failed to read SCRATCH REG");
			goto out;
		}

		if (val != plat_priv->qrtr_node_id) {
			cnss_pr_err("qrtr node id write to register doesn't match with readout value");
			return -ERANGE;
		}
	}
out:
	return ret;
}
#else
/* Single-device build: there is only one platform instance. */
static struct cnss_plat_data *
cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
{
	return cnss_bus_dev_to_plat_priv(NULL);
}

/* Single-device build: no qrtr node id handover needed. */
static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
{
	return 0;
}
#endif

/* Bring up the MHI stack for the device: prepare (INIT), publish the qrtr
 * node id, then power on with a build-type dependent boosted timeout while
 * a boot-debug timer periodically dumps MHI/PBL/SBL state.
 */
int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;
	unsigned int timeout = 0;
	int retry = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	if (test_bit(FBC_BYPASS,
		     &plat_priv->ctrl_params.quirks))
		return 0;

	if (MHI_TIMEOUT_OVERWRITE_MS)
		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
	cnss_mhi_set_m2_timeout_ms(pci_priv, MHI_M2_TIMEOUT_MS);

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
	if (ret)
		return ret;

	/* Save the configured timeout; it is boosted for power-on below and
	 * restored afterwards.
	 */
	timeout = pci_priv->mhi_ctrl->timeout_ms;
	/* For non-perf builds the timeout is 10 (default) * 6 seconds */
	if (cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
		pci_priv->mhi_ctrl->timeout_ms *= 6;
	else /* For perf builds the timeout is 10 (default) * 3 seconds */
		pci_priv->mhi_ctrl->timeout_ms *= 3;

retry:
	ret = cnss_pci_store_qrtr_node_id(pci_priv);
	if (ret) {
		if (retry++ < REG_RETRY_MAX_TIMES)
			goto retry;
		else
			return ret;
	}

	/* Start the timer to dump MHI/PBL/SBL debug data periodically */
	mod_timer(&pci_priv->boot_debug_timer,
		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
	del_timer_sync(&pci_priv->boot_debug_timer);
	if (ret == 0)
		cnss_wlan_adsp_pc_enable(pci_priv, false);

	pci_priv->mhi_ctrl->timeout_ms = timeout;

	if (ret == -ETIMEDOUT) {
		/* This is a special case needs to be handled that if MHI
		 * power on returns -ETIMEDOUT, controller needs to take care
		 * the cleanup by calling MHI power down. Force to set the bit
		 * for driver internal MHI state to make sure it can be handled
		 * properly later.
		 */
		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		ret = cnss_pci_handle_mhi_poweron_timeout(pci_priv);
	} else if (!ret) {
		/* kernel may allocate a dummy vector before request_irq and
		 * then allocate a real vector when request_irq is called.
		 * So get msi_data here again to avoid spurious interrupt
		 * as msi_data will configured to srngs.
		 */
		if (cnss_pci_is_one_msi(pci_priv))
			ret = cnss_pci_config_msi_data(pci_priv);
	}

	return ret;
}

/* Power down MHI. Picks the graceful path normally, or the forced path when
 * a PCI link-down has been indicated. RESUME/POWERING_OFF bits are updated
 * first so the state check in cnss_pci_set_mhi_state() passes.
 */
static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return;

	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) {
		cnss_pr_dbg("MHI is already powered off\n");
		return;
	}
	cnss_wlan_adsp_pc_enable(pci_priv, true);
	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);

	if (!pci_priv->pci_link_down_ind)
		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
	else
		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
}

/* Tear down MHI (DEINIT) if it was initialized; counterpart of the INIT
 * step in cnss_pci_start_mhi().
 */
static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return;

	if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) {
		cnss_pr_dbg("MHI is already deinited\n");
		return;
	}

	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
}

/* Read-modify-write the WLAON QFPROM power control register to set/clear
 * the VDD4BLOW and SHUTDOWN enable bits, optionally holding a force-wake
 * vote around the register accesses. Silently returns when the feature is
 * disabled or the PCI link is down.
 */
static void cnss_pci_set_wlaon_pwr_ctrl(struct cnss_pci_data *pci_priv,
					bool set_vddd4blow, bool set_shutdown,
					bool do_force_wake)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	int ret;
	u32 val;

	if (!plat_priv->set_wlaon_pwr_ctrl)
		return;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN ||
	    pci_priv->pci_link_down_ind)
		return;

	if (do_force_wake)
		if (cnss_pci_force_wake_get(pci_priv))
			return;

	ret = cnss_pci_reg_read(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, &val);
	if (ret) {
		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
			    WLAON_QFPROM_PWR_CTRL_REG, ret);
		goto force_wake_put;
2681 } 2682 2683 cnss_pr_dbg("Read register offset 0x%x, val = 0x%x\n", 2684 WLAON_QFPROM_PWR_CTRL_REG, val); 2685 2686 if (set_vddd4blow) 2687 val |= QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK; 2688 else 2689 val &= ~QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK; 2690 2691 if (set_shutdown) 2692 val |= QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK; 2693 else 2694 val &= ~QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK; 2695 2696 ret = cnss_pci_reg_write(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, val); 2697 if (ret) { 2698 cnss_pr_err("Failed to write register offset 0x%x, err = %d\n", 2699 WLAON_QFPROM_PWR_CTRL_REG, ret); 2700 goto force_wake_put; 2701 } 2702 2703 cnss_pr_dbg("Write val 0x%x to register offset 0x%x\n", val, 2704 WLAON_QFPROM_PWR_CTRL_REG); 2705 2706 if (set_shutdown) 2707 usleep_range(WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US, 2708 WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US); 2709 2710 force_wake_put: 2711 if (do_force_wake) 2712 cnss_pci_force_wake_put(pci_priv); 2713 } 2714 2715 static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv, 2716 u64 *time_us) 2717 { 2718 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 2719 u32 low, high; 2720 u64 device_ticks; 2721 2722 if (!plat_priv->device_freq_hz) { 2723 cnss_pr_err("Device time clock frequency is not valid\n"); 2724 return -EINVAL; 2725 } 2726 2727 switch (pci_priv->device_id) { 2728 case KIWI_DEVICE_ID: 2729 case MANGO_DEVICE_ID: 2730 case PEACH_DEVICE_ID: 2731 cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_LOW, &low); 2732 cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_HIGH, &high); 2733 break; 2734 default: 2735 cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL3, &low); 2736 cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL4, &high); 2737 break; 2738 } 2739 2740 device_ticks = (u64)high << 32 | low; 2741 do_div(device_ticks, plat_priv->device_freq_hz / 100000); 2742 *time_us = device_ticks * 10; 2743 2744 return 0; 2745 } 2746 2747 static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv) 2748 { 2749 switch 
(pci_priv->device_id) { 2750 case KIWI_DEVICE_ID: 2751 case MANGO_DEVICE_ID: 2752 case PEACH_DEVICE_ID: 2753 return; 2754 default: 2755 break; 2756 } 2757 2758 cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5, 2759 TIME_SYNC_ENABLE); 2760 } 2761 2762 static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv) 2763 { 2764 switch (pci_priv->device_id) { 2765 case KIWI_DEVICE_ID: 2766 case MANGO_DEVICE_ID: 2767 case PEACH_DEVICE_ID: 2768 return; 2769 default: 2770 break; 2771 } 2772 2773 cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5, 2774 TIME_SYNC_CLEAR); 2775 } 2776 2777 2778 static void cnss_pci_time_sync_reg_update(struct cnss_pci_data *pci_priv, 2779 u32 low, u32 high) 2780 { 2781 u32 time_reg_low; 2782 u32 time_reg_high; 2783 2784 switch (pci_priv->device_id) { 2785 case KIWI_DEVICE_ID: 2786 case MANGO_DEVICE_ID: 2787 case PEACH_DEVICE_ID: 2788 /* Use the next two shadow registers after host's usage */ 2789 time_reg_low = PCIE_SHADOW_REG_VALUE_0 + 2790 (pci_priv->plat_priv->num_shadow_regs_v3 * 2791 SHADOW_REG_LEN_BYTES); 2792 time_reg_high = time_reg_low + SHADOW_REG_LEN_BYTES; 2793 break; 2794 default: 2795 time_reg_low = PCIE_SHADOW_REG_VALUE_34; 2796 time_reg_high = PCIE_SHADOW_REG_VALUE_35; 2797 break; 2798 } 2799 2800 cnss_pci_reg_write(pci_priv, time_reg_low, low); 2801 cnss_pci_reg_write(pci_priv, time_reg_high, high); 2802 2803 cnss_pci_reg_read(pci_priv, time_reg_low, &low); 2804 cnss_pci_reg_read(pci_priv, time_reg_high, &high); 2805 2806 cnss_pr_dbg("Updated time sync regs [0x%x] = 0x%x, [0x%x] = 0x%x\n", 2807 time_reg_low, low, time_reg_high, high); 2808 } 2809 2810 static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv) 2811 { 2812 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 2813 struct device *dev = &pci_priv->pci_dev->dev; 2814 unsigned long flags = 0; 2815 u64 host_time_us, device_time_us, offset; 2816 u32 low, high; 2817 int ret; 2818 2819 ret = cnss_pci_prevent_l1(dev); 2820 if (ret) 
		goto out;

	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		goto allow_l1;

	/* Sample both clocks as close together as possible */
	spin_lock_irqsave(&time_sync_lock, flags);
	cnss_pci_clear_time_sync_counter(pci_priv);
	cnss_pci_enable_time_sync_counter(pci_priv);
	host_time_us = cnss_get_host_timestamp(plat_priv);
	ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
	cnss_pci_clear_time_sync_counter(pci_priv);
	spin_unlock_irqrestore(&time_sync_lock, flags);
	if (ret)
		goto force_wake_put;

	if (host_time_us < device_time_us) {
		cnss_pr_err("Host time (%llu us) is smaller than device time (%llu us), stop\n",
			    host_time_us, device_time_us);
		ret = -EINVAL;
		goto force_wake_put;
	}

	offset = host_time_us - device_time_us;
	cnss_pr_dbg("Host time = %llu us, device time = %llu us, offset = %llu us\n",
		    host_time_us, device_time_us, offset);

	low = offset & 0xFFFFFFFF;
	high = offset >> 32;

	cnss_pci_time_sync_reg_update(pci_priv, low, high);

force_wake_put:
	cnss_pci_force_wake_put(pci_priv);
allow_l1:
	cnss_pci_allow_l1(dev);
out:
	return ret;
}

/* Delayed-work handler: run one time-sync update under bus_lock and
 * re-arm itself with the configured period. Bails out early when time sync
 * is disabled, the period is 0, or the device is down.
 */
static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
{
	struct cnss_pci_data *pci_priv =
		container_of(work, struct cnss_pci_data, time_sync_work.work);
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	unsigned int time_sync_period_ms =
		plat_priv->ctrl_params.time_sync_period;

	if (test_bit(DISABLE_TIME_SYNC, &plat_priv->ctrl_params.quirks)) {
		cnss_pr_dbg("Time sync is disabled\n");
		return;
	}

	if (!time_sync_period_ms) {
		cnss_pr_dbg("Skip time sync as time period is 0\n");
		return;
	}

	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
		return;

	/* get_sync increments the usage count even on failure, so the
	 * put_autosuspend below is still required.
	 */
	if (cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS) < 0)
		goto runtime_pm_put;

	mutex_lock(&pci_priv->bus_lock);
	cnss_pci_update_timestamp(pci_priv);
	mutex_unlock(&pci_priv->bus_lock);
	schedule_delayed_work(&pci_priv->time_sync_work,
			      msecs_to_jiffies(time_sync_period_ms));

runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
}

/* Kick off the periodic time-sync work for targets that support it; the
 * handler reschedules itself afterwards.
 */
static int cnss_pci_start_time_sync_update(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!plat_priv->device_freq_hz) {
		cnss_pr_dbg("Device time clock frequency is not valid, skip time sync\n");
		return -EINVAL;
	}

	cnss_pci_time_sync_work_hdlr(&pci_priv->time_sync_work.work);

	return 0;
}

/* Stop the periodic time-sync work, waiting for an in-flight run. */
static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
{
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	cancel_delayed_work_sync(&pci_priv->time_sync_work);
}

/* Forward a thermal cooling-device state change to the registered WLAN
 * driver's set_therm_cdev_state callback.
 */
int cnss_pci_set_therm_cdev_state(struct cnss_pci_data *pci_priv,
				  unsigned long thermal_state,
				  int tcdev_id)
{
	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL!\n");
		return -ENODEV;
	}

	if (!pci_priv->driver_ops || !pci_priv->driver_ops->set_therm_cdev_state) {
		cnss_pr_err("driver_ops or set_therm_cdev_state is NULL\n");
		return -EINVAL;
	}

	return pci_priv->driver_ops->set_therm_cdev_state(pci_priv->pci_dev,
							  thermal_state,
							  tcdev_id);
}

/* Change the time-sync period: stop the work, store the new period, then
 * restart so the new value takes effect immediately.
 */
int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
				     unsigned int time_sync_period)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;

	cnss_pci_stop_time_sync_update(pci_priv);
	plat_priv->ctrl_params.time_sync_period = time_sync_period;
	cnss_pci_start_time_sync_update(pci_priv);
	cnss_pr_dbg("WLAN time sync period %u ms\n",
		    plat_priv->ctrl_params.time_sync_period);

	return 0;
}

/* Dispatch the appropriate driver_ops entry point (reinit / probe /
 * idle_restart) based on the current driver_state bits, completing the
 * matching completion objects so waiters in the power-up path unblock.
 * Finishes by clearing the recovery state and starting time sync.
 */
int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_err("Reboot is in progress, skip driver probe\n");
		return -EINVAL;
	}

	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		cnss_pr_dbg("Skip driver probe\n");
		goto out;
	}

	if (!pci_priv->driver_ops) {
		cnss_pr_err("driver_ops is NULL\n");
		ret = -EINVAL;
		goto out;
	}

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
		/* Recovery with a probed driver: re-init rather than probe */
		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
						   pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to reinit host driver, err = %d\n",
				    ret);
			goto out;
		}
		complete(&plat_priv->recovery_complete);
	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
						  pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to probe host driver, err = %d\n",
				    ret);
			complete_all(&plat_priv->power_up_complete);
			goto out;
		}
		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
		cnss_pci_free_blob_mem(pci_priv);
		complete_all(&plat_priv->power_up_complete);
	} else if (test_bit(CNSS_DRIVER_IDLE_RESTART,
			    &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->idle_restart(pci_priv->pci_dev,
							 pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to idle restart host driver, err = %d\n",
				    ret);
			plat_priv->power_up_error = ret;
			complete_all(&plat_priv->power_up_complete);
			goto out;
		}
		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
		complete_all(&plat_priv->power_up_complete);
	} else {
		complete(&plat_priv->power_up_complete);
	}

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		__pm_relax(plat_priv->recovery_ws);
	}

	cnss_pci_start_time_sync_update(pci_priv);

	return 0;

out:
	return ret;
}

/* Dispatch the appropriate driver_ops teardown entry point (shutdown /
 * remove / idle_shutdown) based on the current driver_state bits, after
 * stopping time sync. Skipped entirely during cold-boot cal, FW boot
 * recovery, or driver-debug mode.
 */
int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;
	int ret;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;

	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
		cnss_pr_dbg("Skip driver remove\n");
		return 0;
	}

	if (!pci_priv->driver_ops) {
		cnss_pr_err("driver_ops is NULL\n");
		return -EINVAL;
	}

	cnss_pci_stop_time_sync_update(pci_priv);

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
		complete(&plat_priv->rddm_complete);
		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
		pci_priv->driver_ops->remove(pci_priv->pci_dev);
		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
	}
else if (test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, 3086 &plat_priv->driver_state)) { 3087 ret = pci_priv->driver_ops->idle_shutdown(pci_priv->pci_dev); 3088 if (ret == -EAGAIN) { 3089 clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, 3090 &plat_priv->driver_state); 3091 return ret; 3092 } 3093 } 3094 3095 plat_priv->get_info_cb_ctx = NULL; 3096 plat_priv->get_info_cb = NULL; 3097 3098 return 0; 3099 } 3100 3101 int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv, 3102 int modem_current_status) 3103 { 3104 struct cnss_wlan_driver *driver_ops; 3105 3106 if (!pci_priv) 3107 return -ENODEV; 3108 3109 driver_ops = pci_priv->driver_ops; 3110 if (!driver_ops || !driver_ops->modem_status) 3111 return -EINVAL; 3112 3113 driver_ops->modem_status(pci_priv->pci_dev, modem_current_status); 3114 3115 return 0; 3116 } 3117 3118 int cnss_pci_update_status(struct cnss_pci_data *pci_priv, 3119 enum cnss_driver_status status) 3120 { 3121 struct cnss_wlan_driver *driver_ops; 3122 3123 if (!pci_priv) 3124 return -ENODEV; 3125 3126 driver_ops = pci_priv->driver_ops; 3127 if (!driver_ops || !driver_ops->update_status) 3128 return -EINVAL; 3129 3130 cnss_pr_dbg("Update driver status: %d\n", status); 3131 3132 driver_ops->update_status(pci_priv->pci_dev, status); 3133 3134 return 0; 3135 } 3136 3137 static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv, 3138 struct cnss_misc_reg *misc_reg, 3139 u32 misc_reg_size, 3140 char *reg_name) 3141 { 3142 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 3143 bool do_force_wake_put = true; 3144 int i; 3145 3146 if (!misc_reg) 3147 return; 3148 3149 if (in_interrupt() || irqs_disabled()) 3150 return; 3151 3152 if (cnss_pci_check_link_status(pci_priv)) 3153 return; 3154 3155 if (cnss_pci_force_wake_get(pci_priv)) { 3156 /* Continue to dump when device has entered RDDM already */ 3157 if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) 3158 return; 3159 do_force_wake_put = false; 3160 } 3161 3162 cnss_pr_dbg("Start to dump %s 
registers\n", reg_name); 3163 3164 for (i = 0; i < misc_reg_size; i++) { 3165 if (!test_bit(pci_priv->misc_reg_dev_mask, 3166 &misc_reg[i].dev_mask)) 3167 continue; 3168 3169 if (misc_reg[i].wr) { 3170 if (misc_reg[i].offset == 3171 QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG && 3172 i >= 1) 3173 misc_reg[i].val = 3174 QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK | 3175 misc_reg[i - 1].val; 3176 if (cnss_pci_reg_write(pci_priv, 3177 misc_reg[i].offset, 3178 misc_reg[i].val)) 3179 goto force_wake_put; 3180 cnss_pr_vdbg("Write 0x%X to 0x%X\n", 3181 misc_reg[i].val, 3182 misc_reg[i].offset); 3183 3184 } else { 3185 if (cnss_pci_reg_read(pci_priv, 3186 misc_reg[i].offset, 3187 &misc_reg[i].val)) 3188 goto force_wake_put; 3189 } 3190 } 3191 3192 force_wake_put: 3193 if (do_force_wake_put) 3194 cnss_pci_force_wake_put(pci_priv); 3195 } 3196 3197 static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv) 3198 { 3199 if (in_interrupt() || irqs_disabled()) 3200 return; 3201 3202 if (cnss_pci_check_link_status(pci_priv)) 3203 return; 3204 3205 cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg, 3206 WCSS_REG_SIZE, "wcss"); 3207 cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg, 3208 PCIE_REG_SIZE, "pcie"); 3209 cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg, 3210 WLAON_REG_SIZE, "wlaon"); 3211 cnss_pci_misc_reg_dump(pci_priv, pci_priv->syspm_reg, 3212 SYSPM_REG_SIZE, "syspm"); 3213 } 3214 3215 static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv) 3216 { 3217 int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT; 3218 u32 reg_offset; 3219 bool do_force_wake_put = true; 3220 3221 if (in_interrupt() || irqs_disabled()) 3222 return; 3223 3224 if (cnss_pci_check_link_status(pci_priv)) 3225 return; 3226 3227 if (!pci_priv->debug_reg) { 3228 pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev, 3229 sizeof(*pci_priv->debug_reg) 3230 * array_size, GFP_KERNEL); 3231 if (!pci_priv->debug_reg) 3232 return; 3233 } 3234 3235 if 
(cnss_pci_force_wake_get(pci_priv))
		do_force_wake_put = false;

	cnss_pr_dbg("Start to dump shadow registers\n");

	for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
		reg_offset = PCIE_SHADOW_REG_VALUE_0 + i * 4;
		pci_priv->debug_reg[j].offset = reg_offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset,
				      &pci_priv->debug_reg[j].val))
			goto force_wake_put;
	}

	for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
		reg_offset = PCIE_SHADOW_REG_INTER_0 + i * 4;
		pci_priv->debug_reg[j].offset = reg_offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset,
				      &pci_priv->debug_reg[j].val))
			goto force_wake_put;
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
}

/* QCA6174 power-up: power on the device, resume the PCI link, then probe
 * the host driver. Uses goto-based unwind on failure.
 */
static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	ret = cnss_power_on_device(plat_priv, false);
	if (ret) {
		cnss_pr_err("Failed to power on device, err = %d\n", ret);
		goto out;
	}

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		goto power_off;
	}

	ret = cnss_pci_call_driver_probe(pci_priv);
	if (ret)
		goto suspend_link;

	return 0;
suspend_link:
	cnss_suspend_pci_link(pci_priv);
power_off:
	cnss_power_off_device(plat_priv);
out:
	return ret;
}

/* QCA6174 shutdown: remove the host driver, drop bandwidth votes, suspend
 * the PCI link and power the device off. -EAGAIN from driver remove aborts
 * the shutdown so it can be retried.
 */
static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	cnss_pci_pm_runtime_resume(pci_priv);

	ret = cnss_pci_call_driver_remove(pci_priv);
	if (ret == -EAGAIN)
		goto out;

	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
				   CNSS_BUS_WIDTH_NONE);
	cnss_pci_set_monitor_wake_intr(pci_priv, false);
	cnss_pci_set_auto_suspended(pci_priv, 0);

	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	cnss_power_off_device(plat_priv);

	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);

out:
	return ret;
}

/* QCA6174 crash shutdown: delegate to the host driver's crash_shutdown
 * callback when registered.
 */
static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
}

/* QCA6174 ramdump: run the common ramdump flow if a ramdump region was
 * configured.
 */
static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_ramdump_info *ramdump_info;

	ramdump_info = &plat_priv->ramdump_info;
	if (!ramdump_info->ramdump_size)
		return -EINVAL;

	return cnss_do_ramdump(plat_priv);
}

/* QCA6290-family power-up: clean up any stale dump state, then power on
 * and resume the PCI link with bounded retries (toggling BT_EN low on the
 * final retry), and finally start MHI.
 */
static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	unsigned int timeout;
	int retry = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
	int sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;

	if (plat_priv->ramdump_info_v2.dump_data_valid) {
		cnss_pci_clear_dump_info(pci_priv);
		cnss_pci_power_off_mhi(pci_priv);
		cnss_suspend_pci_link(pci_priv);
		cnss_pci_deinit_mhi(pci_priv);
		cnss_power_off_device(plat_priv);
	}

	/* Clear QMI send usage count during every power up */
	pci_priv->qmi_send_usage_count = 0;

	plat_priv->power_up_error = 0;
retry:
	ret = cnss_power_on_device(plat_priv, false);
	if (ret) {
		cnss_pr_err("Failed to power on device, err = %d\n", ret);
		goto out;
	}

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n", 3369 cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio)); 3370 if (test_bit(IGNORE_PCI_LINK_FAILURE, 3371 &plat_priv->ctrl_params.quirks)) { 3372 cnss_pr_dbg("Ignore PCI link resume failure\n"); 3373 ret = 0; 3374 goto out; 3375 } 3376 if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) { 3377 cnss_power_off_device(plat_priv); 3378 /* Force toggle BT_EN GPIO low */ 3379 if (retry == POWER_ON_RETRY_MAX_TIMES) { 3380 cnss_pr_dbg("Retry #%d. Set BT_EN GPIO(%u) low\n", 3381 retry, bt_en_gpio); 3382 if (bt_en_gpio >= 0) 3383 gpio_direction_output(bt_en_gpio, 0); 3384 cnss_pr_dbg("BT_EN GPIO val: %d\n", 3385 gpio_get_value(bt_en_gpio)); 3386 } 3387 cnss_pr_dbg("Retry to resume PCI link #%d\n", retry); 3388 cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n", 3389 cnss_get_input_gpio_value(plat_priv, 3390 sw_ctrl_gpio)); 3391 msleep(POWER_ON_RETRY_DELAY_MS * retry); 3392 goto retry; 3393 } 3394 /* Assert when it reaches maximum retries */ 3395 CNSS_ASSERT(0); 3396 goto power_off; 3397 } 3398 3399 cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false); 3400 timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI); 3401 3402 ret = cnss_pci_start_mhi(pci_priv); 3403 if (ret) { 3404 cnss_fatal_err("Failed to start MHI, err = %d\n", ret); 3405 if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) && 3406 !pci_priv->pci_link_down_ind && timeout) { 3407 /* Start recovery directly for MHI start failures */ 3408 cnss_schedule_recovery(&pci_priv->pci_dev->dev, 3409 CNSS_REASON_DEFAULT); 3410 } 3411 return 0; 3412 } 3413 3414 if (test_bit(USE_CORE_ONLY_FW, &plat_priv->ctrl_params.quirks)) { 3415 clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state); 3416 clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state); 3417 return 0; 3418 } 3419 3420 cnss_set_pin_connect_status(plat_priv); 3421 3422 if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) { 3423 ret = cnss_pci_call_driver_probe(pci_priv); 3424 if (ret) 3425 goto 
stop_mhi; 3426 } else if (timeout) { 3427 if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) 3428 timeout += WLAN_COLD_BOOT_CAL_TIMEOUT; 3429 else 3430 timeout += WLAN_MISSION_MODE_TIMEOUT; 3431 mod_timer(&plat_priv->fw_boot_timer, 3432 jiffies + msecs_to_jiffies(timeout)); 3433 } 3434 3435 return 0; 3436 3437 stop_mhi: 3438 cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, true); 3439 cnss_pci_power_off_mhi(pci_priv); 3440 cnss_suspend_pci_link(pci_priv); 3441 cnss_pci_deinit_mhi(pci_priv); 3442 power_off: 3443 cnss_power_off_device(plat_priv); 3444 out: 3445 return ret; 3446 } 3447 3448 static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv) 3449 { 3450 int ret = 0; 3451 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 3452 int do_force_wake = true; 3453 3454 cnss_pci_pm_runtime_resume(pci_priv); 3455 3456 ret = cnss_pci_call_driver_remove(pci_priv); 3457 if (ret == -EAGAIN) 3458 goto out; 3459 3460 cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev, 3461 CNSS_BUS_WIDTH_NONE); 3462 cnss_pci_set_monitor_wake_intr(pci_priv, false); 3463 cnss_pci_set_auto_suspended(pci_priv, 0); 3464 3465 if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) || 3466 test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) || 3467 test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) || 3468 test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) || 3469 test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) && 3470 test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) { 3471 del_timer(&pci_priv->dev_rddm_timer); 3472 cnss_pci_collect_dump_info(pci_priv, false); 3473 3474 if (!plat_priv->recovery_enabled) 3475 CNSS_ASSERT(0); 3476 } 3477 3478 if (!cnss_is_device_powered_on(plat_priv)) { 3479 cnss_pr_dbg("Device is already powered off, ignore\n"); 3480 goto skip_power_off; 3481 } 3482 3483 if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) 3484 do_force_wake = false; 3485 cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, 
true, do_force_wake); 3486 3487 /* FBC image will be freed after powering off MHI, so skip 3488 * if RAM dump data is still valid. 3489 */ 3490 if (plat_priv->ramdump_info_v2.dump_data_valid) 3491 goto skip_power_off; 3492 3493 cnss_pci_power_off_mhi(pci_priv); 3494 ret = cnss_suspend_pci_link(pci_priv); 3495 if (ret) 3496 cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret); 3497 cnss_pci_deinit_mhi(pci_priv); 3498 cnss_power_off_device(plat_priv); 3499 3500 skip_power_off: 3501 pci_priv->remap_window = 0; 3502 3503 clear_bit(CNSS_FW_READY, &plat_priv->driver_state); 3504 clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state); 3505 if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) || 3506 test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) { 3507 clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state); 3508 pci_priv->pci_link_down_ind = false; 3509 } 3510 clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state); 3511 clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state); 3512 memset(&print_optimize, 0, sizeof(print_optimize)); 3513 3514 out: 3515 return ret; 3516 } 3517 3518 static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv) 3519 { 3520 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 3521 3522 set_bit(CNSS_IN_PANIC, &plat_priv->driver_state); 3523 cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n", 3524 plat_priv->driver_state); 3525 3526 cnss_pci_collect_dump_info(pci_priv, true); 3527 clear_bit(CNSS_IN_PANIC, &plat_priv->driver_state); 3528 } 3529 3530 static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv) 3531 { 3532 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 3533 struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2; 3534 struct cnss_dump_data *dump_data = &info_v2->dump_data; 3535 struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr; 3536 int ret = 0; 3537 3538 if (!info_v2->dump_data_valid || !dump_seg || 3539 dump_data->nentries == 0) 3540 
return 0; 3541 3542 ret = cnss_do_elf_ramdump(plat_priv); 3543 3544 cnss_pci_clear_dump_info(pci_priv); 3545 cnss_pci_power_off_mhi(pci_priv); 3546 cnss_suspend_pci_link(pci_priv); 3547 cnss_pci_deinit_mhi(pci_priv); 3548 cnss_power_off_device(plat_priv); 3549 3550 return ret; 3551 } 3552 3553 int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv) 3554 { 3555 int ret = 0; 3556 3557 if (!pci_priv) { 3558 cnss_pr_err("pci_priv is NULL\n"); 3559 return -ENODEV; 3560 } 3561 3562 switch (pci_priv->device_id) { 3563 case QCA6174_DEVICE_ID: 3564 ret = cnss_qca6174_powerup(pci_priv); 3565 break; 3566 case QCA6290_DEVICE_ID: 3567 case QCA6390_DEVICE_ID: 3568 case QCN7605_DEVICE_ID: 3569 case QCA6490_DEVICE_ID: 3570 case KIWI_DEVICE_ID: 3571 case MANGO_DEVICE_ID: 3572 case PEACH_DEVICE_ID: 3573 ret = cnss_qca6290_powerup(pci_priv); 3574 break; 3575 default: 3576 cnss_pr_err("Unknown device_id found: 0x%x\n", 3577 pci_priv->device_id); 3578 ret = -ENODEV; 3579 } 3580 3581 return ret; 3582 } 3583 3584 int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv) 3585 { 3586 int ret = 0; 3587 3588 if (!pci_priv) { 3589 cnss_pr_err("pci_priv is NULL\n"); 3590 return -ENODEV; 3591 } 3592 3593 switch (pci_priv->device_id) { 3594 case QCA6174_DEVICE_ID: 3595 ret = cnss_qca6174_shutdown(pci_priv); 3596 break; 3597 case QCA6290_DEVICE_ID: 3598 case QCA6390_DEVICE_ID: 3599 case QCN7605_DEVICE_ID: 3600 case QCA6490_DEVICE_ID: 3601 case KIWI_DEVICE_ID: 3602 case MANGO_DEVICE_ID: 3603 case PEACH_DEVICE_ID: 3604 ret = cnss_qca6290_shutdown(pci_priv); 3605 break; 3606 default: 3607 cnss_pr_err("Unknown device_id found: 0x%x\n", 3608 pci_priv->device_id); 3609 ret = -ENODEV; 3610 } 3611 3612 return ret; 3613 } 3614 3615 int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv) 3616 { 3617 int ret = 0; 3618 3619 if (!pci_priv) { 3620 cnss_pr_err("pci_priv is NULL\n"); 3621 return -ENODEV; 3622 } 3623 3624 switch (pci_priv->device_id) { 3625 case QCA6174_DEVICE_ID: 3626 
cnss_qca6174_crash_shutdown(pci_priv); 3627 break; 3628 case QCA6290_DEVICE_ID: 3629 case QCA6390_DEVICE_ID: 3630 case QCN7605_DEVICE_ID: 3631 case QCA6490_DEVICE_ID: 3632 case KIWI_DEVICE_ID: 3633 case MANGO_DEVICE_ID: 3634 case PEACH_DEVICE_ID: 3635 cnss_qca6290_crash_shutdown(pci_priv); 3636 break; 3637 default: 3638 cnss_pr_err("Unknown device_id found: 0x%x\n", 3639 pci_priv->device_id); 3640 ret = -ENODEV; 3641 } 3642 3643 return ret; 3644 } 3645 3646 int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv) 3647 { 3648 int ret = 0; 3649 3650 if (!pci_priv) { 3651 cnss_pr_err("pci_priv is NULL\n"); 3652 return -ENODEV; 3653 } 3654 3655 switch (pci_priv->device_id) { 3656 case QCA6174_DEVICE_ID: 3657 ret = cnss_qca6174_ramdump(pci_priv); 3658 break; 3659 case QCA6290_DEVICE_ID: 3660 case QCA6390_DEVICE_ID: 3661 case QCN7605_DEVICE_ID: 3662 case QCA6490_DEVICE_ID: 3663 case KIWI_DEVICE_ID: 3664 case MANGO_DEVICE_ID: 3665 case PEACH_DEVICE_ID: 3666 ret = cnss_qca6290_ramdump(pci_priv); 3667 break; 3668 default: 3669 cnss_pr_err("Unknown device_id found: 0x%x\n", 3670 pci_priv->device_id); 3671 ret = -ENODEV; 3672 } 3673 3674 return ret; 3675 } 3676 3677 int cnss_pci_is_drv_connected(struct device *dev) 3678 { 3679 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev)); 3680 3681 if (!pci_priv) 3682 return -ENODEV; 3683 3684 return pci_priv->drv_connected_last; 3685 } 3686 EXPORT_SYMBOL(cnss_pci_is_drv_connected); 3687 3688 static void cnss_wlan_reg_driver_work(struct work_struct *work) 3689 { 3690 struct cnss_plat_data *plat_priv = 3691 container_of(work, struct cnss_plat_data, wlan_reg_driver_work.work); 3692 struct cnss_pci_data *pci_priv = plat_priv->bus_priv; 3693 struct cnss_cal_info *cal_info; 3694 unsigned int timeout; 3695 3696 if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) 3697 return; 3698 3699 if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) { 3700 goto reg_driver; 3701 } else { 3702 if 
(plat_priv->charger_mode) { 3703 cnss_pr_err("Ignore calibration timeout in charger mode\n"); 3704 return; 3705 } 3706 if (!test_bit(CNSS_IN_COLD_BOOT_CAL, 3707 &plat_priv->driver_state)) { 3708 timeout = cnss_get_timeout(plat_priv, 3709 CNSS_TIMEOUT_CALIBRATION); 3710 cnss_pr_dbg("File system not ready to start calibration. Wait for %ds..\n", 3711 timeout / 1000); 3712 schedule_delayed_work(&plat_priv->wlan_reg_driver_work, 3713 msecs_to_jiffies(timeout)); 3714 return; 3715 } 3716 3717 del_timer(&plat_priv->fw_boot_timer); 3718 if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) && 3719 !test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) { 3720 cnss_pr_err("Timeout waiting for calibration to complete\n"); 3721 CNSS_ASSERT(0); 3722 } 3723 cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL); 3724 if (!cal_info) 3725 return; 3726 cal_info->cal_status = CNSS_CAL_TIMEOUT; 3727 cnss_driver_event_post(plat_priv, 3728 CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE, 3729 0, cal_info); 3730 } 3731 reg_driver: 3732 if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) { 3733 cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n"); 3734 return; 3735 } 3736 reinit_completion(&plat_priv->power_up_complete); 3737 cnss_driver_event_post(plat_priv, 3738 CNSS_DRIVER_EVENT_REGISTER_DRIVER, 3739 CNSS_EVENT_SYNC_UNKILLABLE, 3740 pci_priv->driver_ops); 3741 } 3742 3743 int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops) 3744 { 3745 int ret = 0; 3746 struct cnss_plat_data *plat_priv; 3747 struct cnss_pci_data *pci_priv; 3748 const struct pci_device_id *id_table = driver_ops->id_table; 3749 unsigned int timeout; 3750 3751 if (!cnss_check_driver_loading_allowed()) { 3752 cnss_pr_info("No cnss2 dtsi entry present"); 3753 return -ENODEV; 3754 } 3755 3756 plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops); 3757 3758 if (!plat_priv) { 3759 cnss_pr_buf("plat_priv is not ready for register driver\n"); 3760 return -EAGAIN; 3761 } 3762 3763 pci_priv = 
plat_priv->bus_priv; 3764 if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) { 3765 while (id_table && id_table->device) { 3766 if (plat_priv->device_id == id_table->device) { 3767 if (plat_priv->device_id == KIWI_DEVICE_ID && 3768 driver_ops->chip_version != 2) { 3769 cnss_pr_err("WLAN HW disabled. kiwi_v2 only supported\n"); 3770 return -ENODEV; 3771 } 3772 cnss_pr_info("WLAN register driver deferred for device ID: 0x%x due to HW disable\n", 3773 id_table->device); 3774 plat_priv->driver_ops = driver_ops; 3775 return 0; 3776 } 3777 id_table++; 3778 } 3779 return -ENODEV; 3780 } 3781 3782 if (!test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state)) { 3783 cnss_pr_info("pci probe not yet done for register driver\n"); 3784 return -EAGAIN; 3785 } 3786 3787 if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) { 3788 cnss_pr_err("Driver has already registered\n"); 3789 return -EEXIST; 3790 } 3791 3792 if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) { 3793 cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n"); 3794 return -EINVAL; 3795 } 3796 3797 if (!id_table || !pci_dev_present(id_table)) { 3798 /* id_table pointer will move from pci_dev_present(), 3799 * so check again using local pointer. 
3800 */ 3801 id_table = driver_ops->id_table; 3802 while (id_table && id_table->vendor) { 3803 cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n", 3804 id_table->device); 3805 id_table++; 3806 } 3807 cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n", 3808 pci_priv->device_id); 3809 return -ENODEV; 3810 } 3811 3812 if (driver_ops->chip_version != CNSS_CHIP_VER_ANY && 3813 driver_ops->chip_version != plat_priv->device_version.major_version) { 3814 cnss_pr_err("Driver built for chip ver 0x%x, enumerated ver 0x%x, reject unsupported driver\n", 3815 driver_ops->chip_version, 3816 plat_priv->device_version.major_version); 3817 return -ENODEV; 3818 } 3819 set_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state); 3820 3821 if (!plat_priv->cbc_enabled || 3822 test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) 3823 goto register_driver; 3824 3825 pci_priv->driver_ops = driver_ops; 3826 /* If Cold Boot Calibration is enabled, it is the 1st step in init 3827 * sequence.CBC is done on file system_ready trigger. 
Qcacld will be 3828 * loaded from vendor_modprobe.sh at early boot and must be deferred 3829 * until CBC is complete 3830 */ 3831 timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION); 3832 INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work, 3833 cnss_wlan_reg_driver_work); 3834 schedule_delayed_work(&plat_priv->wlan_reg_driver_work, 3835 msecs_to_jiffies(timeout)); 3836 cnss_pr_info("WLAN register driver deferred for Calibration\n"); 3837 return 0; 3838 register_driver: 3839 reinit_completion(&plat_priv->power_up_complete); 3840 ret = cnss_driver_event_post(plat_priv, 3841 CNSS_DRIVER_EVENT_REGISTER_DRIVER, 3842 CNSS_EVENT_SYNC_UNKILLABLE, 3843 driver_ops); 3844 3845 return ret; 3846 } 3847 EXPORT_SYMBOL(cnss_wlan_register_driver); 3848 3849 void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops) 3850 { 3851 struct cnss_plat_data *plat_priv; 3852 int ret = 0; 3853 unsigned int timeout; 3854 3855 plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops); 3856 if (!plat_priv) { 3857 cnss_pr_err("plat_priv is NULL\n"); 3858 return; 3859 } 3860 3861 mutex_lock(&plat_priv->driver_ops_lock); 3862 3863 if (plat_priv->device_id == QCA6174_DEVICE_ID) 3864 goto skip_wait_power_up; 3865 3866 timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG); 3867 ret = wait_for_completion_timeout(&plat_priv->power_up_complete, 3868 msecs_to_jiffies(timeout)); 3869 if (!ret) { 3870 cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n", 3871 timeout); 3872 CNSS_ASSERT(0); 3873 } 3874 3875 skip_wait_power_up: 3876 if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) && 3877 !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) 3878 goto skip_wait_recovery; 3879 3880 reinit_completion(&plat_priv->recovery_complete); 3881 timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY); 3882 ret = wait_for_completion_timeout(&plat_priv->recovery_complete, 3883 msecs_to_jiffies(timeout)); 3884 if (!ret) { 3885 cnss_pr_err("Timeout 
(%ums) waiting for recovery to complete\n", 3886 timeout); 3887 CNSS_ASSERT(0); 3888 } 3889 3890 skip_wait_recovery: 3891 cnss_driver_event_post(plat_priv, 3892 CNSS_DRIVER_EVENT_UNREGISTER_DRIVER, 3893 CNSS_EVENT_SYNC_UNKILLABLE, NULL); 3894 3895 mutex_unlock(&plat_priv->driver_ops_lock); 3896 } 3897 EXPORT_SYMBOL(cnss_wlan_unregister_driver); 3898 3899 int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv, 3900 void *data) 3901 { 3902 int ret = 0; 3903 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 3904 3905 if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) { 3906 cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n"); 3907 return -EINVAL; 3908 } 3909 3910 set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state); 3911 pci_priv->driver_ops = data; 3912 3913 ret = cnss_pci_dev_powerup(pci_priv); 3914 if (ret) { 3915 clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state); 3916 pci_priv->driver_ops = NULL; 3917 } else { 3918 set_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state); 3919 } 3920 3921 return ret; 3922 } 3923 3924 int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv) 3925 { 3926 struct cnss_plat_data *plat_priv; 3927 3928 if (!pci_priv) 3929 return -EINVAL; 3930 3931 plat_priv = pci_priv->plat_priv; 3932 set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state); 3933 cnss_pci_dev_shutdown(pci_priv); 3934 pci_priv->driver_ops = NULL; 3935 clear_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state); 3936 3937 return 0; 3938 } 3939 3940 static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv) 3941 { 3942 struct pci_dev *pci_dev = pci_priv->pci_dev; 3943 struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops; 3944 int ret = 0; 3945 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 3946 3947 pm_message_t state = { .event = PM_EVENT_SUSPEND }; 3948 3949 if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) && 3950 driver_ops && driver_ops->suspend) { 3951 ret = 
driver_ops->suspend(pci_dev, state); 3952 if (ret) { 3953 cnss_pr_err("Failed to suspend host driver, err = %d\n", 3954 ret); 3955 ret = -EAGAIN; 3956 } 3957 } 3958 3959 return ret; 3960 } 3961 3962 static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv) 3963 { 3964 struct pci_dev *pci_dev = pci_priv->pci_dev; 3965 struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops; 3966 int ret = 0; 3967 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 3968 3969 if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) && 3970 driver_ops && driver_ops->resume) { 3971 ret = driver_ops->resume(pci_dev); 3972 if (ret) 3973 cnss_pr_err("Failed to resume host driver, err = %d\n", 3974 ret); 3975 } 3976 3977 return ret; 3978 } 3979 3980 int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv) 3981 { 3982 struct pci_dev *pci_dev = pci_priv->pci_dev; 3983 int ret = 0; 3984 3985 if (pci_priv->pci_link_state == PCI_LINK_DOWN) 3986 goto out; 3987 3988 if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) { 3989 ret = -EAGAIN; 3990 goto out; 3991 } 3992 3993 if (pci_priv->drv_connected_last) 3994 goto skip_disable_pci; 3995 3996 pci_clear_master(pci_dev); 3997 cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE); 3998 pci_disable_device(pci_dev); 3999 4000 ret = pci_set_power_state(pci_dev, PCI_D3hot); 4001 if (ret) 4002 cnss_pr_err("Failed to set D3Hot, err = %d\n", ret); 4003 4004 skip_disable_pci: 4005 if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) { 4006 ret = -EAGAIN; 4007 goto resume_mhi; 4008 } 4009 pci_priv->pci_link_state = PCI_LINK_DOWN; 4010 4011 return 0; 4012 4013 resume_mhi: 4014 if (!pci_is_enabled(pci_dev)) 4015 if (pci_enable_device(pci_dev)) 4016 cnss_pr_err("Failed to enable PCI device\n"); 4017 if (pci_priv->saved_state) 4018 cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE); 4019 pci_set_master(pci_dev); 4020 cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME); 4021 out: 4022 return ret; 4023 } 4024 4025 int 
cnss_pci_resume_bus(struct cnss_pci_data *pci_priv) 4026 { 4027 struct pci_dev *pci_dev = pci_priv->pci_dev; 4028 int ret = 0; 4029 4030 if (pci_priv->pci_link_state == PCI_LINK_UP) 4031 goto out; 4032 4033 if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) { 4034 cnss_fatal_err("Failed to resume PCI link from suspend\n"); 4035 cnss_pci_link_down(&pci_dev->dev); 4036 ret = -EAGAIN; 4037 goto out; 4038 } 4039 4040 pci_priv->pci_link_state = PCI_LINK_UP; 4041 4042 if (pci_priv->drv_connected_last) 4043 goto skip_enable_pci; 4044 4045 ret = pci_enable_device(pci_dev); 4046 if (ret) { 4047 cnss_pr_err("Failed to enable PCI device, err = %d\n", 4048 ret); 4049 goto out; 4050 } 4051 4052 if (pci_priv->saved_state) 4053 cnss_set_pci_config_space(pci_priv, 4054 RESTORE_PCI_CONFIG_SPACE); 4055 pci_set_master(pci_dev); 4056 4057 skip_enable_pci: 4058 cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME); 4059 out: 4060 return ret; 4061 } 4062 4063 static int cnss_pci_suspend(struct device *dev) 4064 { 4065 int ret = 0; 4066 struct pci_dev *pci_dev = to_pci_dev(dev); 4067 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4068 struct cnss_plat_data *plat_priv; 4069 4070 if (!pci_priv) 4071 goto out; 4072 4073 plat_priv = pci_priv->plat_priv; 4074 if (!plat_priv) 4075 goto out; 4076 4077 if (!cnss_is_device_powered_on(plat_priv)) 4078 goto out; 4079 4080 /* No mhi state bit set if only finish pcie enumeration, 4081 * so test_bit is not applicable to check if it is INIT state. 4082 */ 4083 if (pci_priv->mhi_state == CNSS_MHI_INIT) { 4084 bool suspend = cnss_should_suspend_pwroff(pci_dev); 4085 4086 /* Do PCI link suspend and power off in the LPM case 4087 * if chipset didn't do that after pcie enumeration. 
4088 */ 4089 if (!suspend) { 4090 ret = cnss_suspend_pci_link(pci_priv); 4091 if (ret) 4092 cnss_pr_err("Failed to suspend PCI link, err = %d\n", 4093 ret); 4094 cnss_power_off_device(plat_priv); 4095 goto out; 4096 } 4097 } 4098 4099 if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) && 4100 pci_priv->drv_supported) { 4101 pci_priv->drv_connected_last = 4102 cnss_pci_get_drv_connected(pci_priv); 4103 if (!pci_priv->drv_connected_last) { 4104 cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n"); 4105 ret = -EAGAIN; 4106 goto out; 4107 } 4108 } 4109 4110 set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state); 4111 4112 ret = cnss_pci_suspend_driver(pci_priv); 4113 if (ret) 4114 goto clear_flag; 4115 4116 if (!pci_priv->disable_pc) { 4117 mutex_lock(&pci_priv->bus_lock); 4118 ret = cnss_pci_suspend_bus(pci_priv); 4119 mutex_unlock(&pci_priv->bus_lock); 4120 if (ret) 4121 goto resume_driver; 4122 } 4123 4124 cnss_pci_set_monitor_wake_intr(pci_priv, false); 4125 4126 return 0; 4127 4128 resume_driver: 4129 cnss_pci_resume_driver(pci_priv); 4130 clear_flag: 4131 pci_priv->drv_connected_last = 0; 4132 clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state); 4133 out: 4134 return ret; 4135 } 4136 4137 static int cnss_pci_resume(struct device *dev) 4138 { 4139 int ret = 0; 4140 struct pci_dev *pci_dev = to_pci_dev(dev); 4141 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4142 struct cnss_plat_data *plat_priv; 4143 4144 if (!pci_priv) 4145 goto out; 4146 4147 plat_priv = pci_priv->plat_priv; 4148 if (!plat_priv) 4149 goto out; 4150 4151 if (pci_priv->pci_link_down_ind) 4152 goto out; 4153 4154 if (!cnss_is_device_powered_on(pci_priv->plat_priv)) 4155 goto out; 4156 4157 if (!pci_priv->disable_pc) { 4158 mutex_lock(&pci_priv->bus_lock); 4159 ret = cnss_pci_resume_bus(pci_priv); 4160 mutex_unlock(&pci_priv->bus_lock); 4161 if (ret) 4162 goto out; 4163 } 4164 4165 ret = cnss_pci_resume_driver(pci_priv); 4166 4167 
pci_priv->drv_connected_last = 0; 4168 clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state); 4169 4170 out: 4171 return ret; 4172 } 4173 4174 static int cnss_pci_suspend_noirq(struct device *dev) 4175 { 4176 int ret = 0; 4177 struct pci_dev *pci_dev = to_pci_dev(dev); 4178 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4179 struct cnss_wlan_driver *driver_ops; 4180 struct cnss_plat_data *plat_priv; 4181 4182 if (!pci_priv) 4183 goto out; 4184 4185 if (!cnss_is_device_powered_on(pci_priv->plat_priv)) 4186 goto out; 4187 4188 driver_ops = pci_priv->driver_ops; 4189 plat_priv = pci_priv->plat_priv; 4190 if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) && 4191 driver_ops && driver_ops->suspend_noirq) 4192 ret = driver_ops->suspend_noirq(pci_dev); 4193 4194 if (pci_priv->disable_pc && !pci_dev->state_saved && 4195 !pci_priv->plat_priv->use_pm_domain) 4196 pci_save_state(pci_dev); 4197 4198 out: 4199 return ret; 4200 } 4201 4202 static int cnss_pci_resume_noirq(struct device *dev) 4203 { 4204 int ret = 0; 4205 struct pci_dev *pci_dev = to_pci_dev(dev); 4206 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4207 struct cnss_wlan_driver *driver_ops; 4208 struct cnss_plat_data *plat_priv; 4209 4210 if (!pci_priv) 4211 goto out; 4212 4213 if (!cnss_is_device_powered_on(pci_priv->plat_priv)) 4214 goto out; 4215 4216 plat_priv = pci_priv->plat_priv; 4217 driver_ops = pci_priv->driver_ops; 4218 if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) && 4219 driver_ops && driver_ops->resume_noirq && 4220 !pci_priv->pci_link_down_ind) 4221 ret = driver_ops->resume_noirq(pci_dev); 4222 4223 out: 4224 return ret; 4225 } 4226 4227 static int cnss_pci_runtime_suspend(struct device *dev) 4228 { 4229 int ret = 0; 4230 struct pci_dev *pci_dev = to_pci_dev(dev); 4231 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4232 struct cnss_plat_data *plat_priv; 4233 struct cnss_wlan_driver *driver_ops; 4234 4235 if (!pci_priv) 4236 
return -EAGAIN; 4237 4238 plat_priv = pci_priv->plat_priv; 4239 if (!plat_priv) 4240 return -EAGAIN; 4241 4242 if (!cnss_is_device_powered_on(pci_priv->plat_priv)) 4243 return -EAGAIN; 4244 4245 if (pci_priv->pci_link_down_ind) { 4246 cnss_pr_dbg("PCI link down recovery is in progress!\n"); 4247 return -EAGAIN; 4248 } 4249 4250 if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) && 4251 pci_priv->drv_supported) { 4252 pci_priv->drv_connected_last = 4253 cnss_pci_get_drv_connected(pci_priv); 4254 if (!pci_priv->drv_connected_last) { 4255 cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n"); 4256 return -EAGAIN; 4257 } 4258 } 4259 4260 cnss_pr_vdbg("Runtime suspend start\n"); 4261 4262 driver_ops = pci_priv->driver_ops; 4263 if (driver_ops && driver_ops->runtime_ops && 4264 driver_ops->runtime_ops->runtime_suspend) 4265 ret = driver_ops->runtime_ops->runtime_suspend(pci_dev); 4266 else 4267 ret = cnss_auto_suspend(dev); 4268 4269 if (ret) 4270 pci_priv->drv_connected_last = 0; 4271 4272 cnss_pr_vdbg("Runtime suspend status: %d\n", ret); 4273 4274 return ret; 4275 } 4276 4277 static int cnss_pci_runtime_resume(struct device *dev) 4278 { 4279 int ret = 0; 4280 struct pci_dev *pci_dev = to_pci_dev(dev); 4281 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4282 struct cnss_wlan_driver *driver_ops; 4283 4284 if (!pci_priv) 4285 return -EAGAIN; 4286 4287 if (!cnss_is_device_powered_on(pci_priv->plat_priv)) 4288 return -EAGAIN; 4289 4290 if (pci_priv->pci_link_down_ind) { 4291 cnss_pr_dbg("PCI link down recovery is in progress!\n"); 4292 return -EAGAIN; 4293 } 4294 4295 cnss_pr_vdbg("Runtime resume start\n"); 4296 4297 driver_ops = pci_priv->driver_ops; 4298 if (driver_ops && driver_ops->runtime_ops && 4299 driver_ops->runtime_ops->runtime_resume) 4300 ret = driver_ops->runtime_ops->runtime_resume(pci_dev); 4301 else 4302 ret = cnss_auto_resume(dev); 4303 4304 cnss_pr_vdbg("Runtime resume status: %d\n", ret); 4305 4306 return ret; 4307 } 
4308 4309 static int cnss_pci_runtime_idle(struct device *dev) 4310 { 4311 cnss_pr_vdbg("Runtime idle\n"); 4312 4313 pm_request_autosuspend(dev); 4314 4315 return -EBUSY; 4316 } 4317 4318 int cnss_wlan_pm_control(struct device *dev, bool vote) 4319 { 4320 struct pci_dev *pci_dev = to_pci_dev(dev); 4321 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4322 int ret = 0; 4323 4324 if (!pci_priv) 4325 return -ENODEV; 4326 4327 ret = cnss_pci_disable_pc(pci_priv, vote); 4328 if (ret) 4329 return ret; 4330 4331 pci_priv->disable_pc = vote; 4332 cnss_pr_dbg("%s PCIe power collapse\n", vote ? "disable" : "enable"); 4333 4334 return 0; 4335 } 4336 EXPORT_SYMBOL(cnss_wlan_pm_control); 4337 4338 static void cnss_pci_pm_runtime_get_record(struct cnss_pci_data *pci_priv, 4339 enum cnss_rtpm_id id) 4340 { 4341 if (id >= RTPM_ID_MAX) 4342 return; 4343 4344 atomic_inc(&pci_priv->pm_stats.runtime_get); 4345 atomic_inc(&pci_priv->pm_stats.runtime_get_id[id]); 4346 pci_priv->pm_stats.runtime_get_timestamp_id[id] = 4347 cnss_get_host_timestamp(pci_priv->plat_priv); 4348 } 4349 4350 static void cnss_pci_pm_runtime_put_record(struct cnss_pci_data *pci_priv, 4351 enum cnss_rtpm_id id) 4352 { 4353 if (id >= RTPM_ID_MAX) 4354 return; 4355 4356 atomic_inc(&pci_priv->pm_stats.runtime_put); 4357 atomic_inc(&pci_priv->pm_stats.runtime_put_id[id]); 4358 pci_priv->pm_stats.runtime_put_timestamp_id[id] = 4359 cnss_get_host_timestamp(pci_priv->plat_priv); 4360 } 4361 4362 void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv) 4363 { 4364 struct device *dev; 4365 4366 if (!pci_priv) 4367 return; 4368 4369 dev = &pci_priv->pci_dev->dev; 4370 4371 cnss_pr_dbg("Runtime PM usage count: %d\n", 4372 atomic_read(&dev->power.usage_count)); 4373 } 4374 4375 int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv) 4376 { 4377 struct device *dev; 4378 enum rpm_status status; 4379 4380 if (!pci_priv) 4381 return -ENODEV; 4382 4383 dev = &pci_priv->pci_dev->dev; 4384 4385 
status = dev->power.runtime_status; 4386 if (status == RPM_SUSPENDING || status == RPM_SUSPENDED) 4387 cnss_pr_vdbg("Runtime PM resume is requested by %ps\n", 4388 (void *)_RET_IP_); 4389 4390 return pm_request_resume(dev); 4391 } 4392 4393 int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv) 4394 { 4395 struct device *dev; 4396 enum rpm_status status; 4397 4398 if (!pci_priv) 4399 return -ENODEV; 4400 4401 dev = &pci_priv->pci_dev->dev; 4402 4403 status = dev->power.runtime_status; 4404 if (status == RPM_SUSPENDING || status == RPM_SUSPENDED) 4405 cnss_pr_vdbg("Runtime PM resume is requested by %ps\n", 4406 (void *)_RET_IP_); 4407 4408 return pm_runtime_resume(dev); 4409 } 4410 4411 int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv, 4412 enum cnss_rtpm_id id) 4413 { 4414 struct device *dev; 4415 enum rpm_status status; 4416 4417 if (!pci_priv) 4418 return -ENODEV; 4419 4420 dev = &pci_priv->pci_dev->dev; 4421 4422 status = dev->power.runtime_status; 4423 if (status == RPM_SUSPENDING || status == RPM_SUSPENDED) 4424 cnss_pr_vdbg("Runtime PM resume is requested by %ps\n", 4425 (void *)_RET_IP_); 4426 4427 cnss_pci_pm_runtime_get_record(pci_priv, id); 4428 4429 return pm_runtime_get(dev); 4430 } 4431 4432 int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv, 4433 enum cnss_rtpm_id id) 4434 { 4435 struct device *dev; 4436 enum rpm_status status; 4437 4438 if (!pci_priv) 4439 return -ENODEV; 4440 4441 dev = &pci_priv->pci_dev->dev; 4442 4443 status = dev->power.runtime_status; 4444 if (status == RPM_SUSPENDING || status == RPM_SUSPENDED) 4445 cnss_pr_vdbg("Runtime PM resume is requested by %ps\n", 4446 (void *)_RET_IP_); 4447 4448 cnss_pci_pm_runtime_get_record(pci_priv, id); 4449 4450 return pm_runtime_get_sync(dev); 4451 } 4452 4453 void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv, 4454 enum cnss_rtpm_id id) 4455 { 4456 if (!pci_priv) 4457 return; 4458 4459 cnss_pci_pm_runtime_get_record(pci_priv, id); 4460 
pm_runtime_get_noresume(&pci_priv->pci_dev->dev); 4461 } 4462 4463 int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv, 4464 enum cnss_rtpm_id id) 4465 { 4466 struct device *dev; 4467 4468 if (!pci_priv) 4469 return -ENODEV; 4470 4471 dev = &pci_priv->pci_dev->dev; 4472 4473 if (atomic_read(&dev->power.usage_count) == 0) { 4474 cnss_pr_dbg("Ignore excessive runtime PM put operation\n"); 4475 return -EINVAL; 4476 } 4477 4478 cnss_pci_pm_runtime_put_record(pci_priv, id); 4479 4480 return pm_runtime_put_autosuspend(&pci_priv->pci_dev->dev); 4481 } 4482 4483 void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv, 4484 enum cnss_rtpm_id id) 4485 { 4486 struct device *dev; 4487 4488 if (!pci_priv) 4489 return; 4490 4491 dev = &pci_priv->pci_dev->dev; 4492 4493 if (atomic_read(&dev->power.usage_count) == 0) { 4494 cnss_pr_dbg("Ignore excessive runtime PM put operation\n"); 4495 return; 4496 } 4497 4498 cnss_pci_pm_runtime_put_record(pci_priv, id); 4499 pm_runtime_put_noidle(&pci_priv->pci_dev->dev); 4500 } 4501 4502 void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv) 4503 { 4504 if (!pci_priv) 4505 return; 4506 4507 pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev); 4508 } 4509 4510 int cnss_auto_suspend(struct device *dev) 4511 { 4512 int ret = 0; 4513 struct pci_dev *pci_dev = to_pci_dev(dev); 4514 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4515 struct cnss_plat_data *plat_priv; 4516 4517 if (!pci_priv) 4518 return -ENODEV; 4519 4520 plat_priv = pci_priv->plat_priv; 4521 if (!plat_priv) 4522 return -ENODEV; 4523 4524 mutex_lock(&pci_priv->bus_lock); 4525 if (!pci_priv->qmi_send_usage_count) { 4526 ret = cnss_pci_suspend_bus(pci_priv); 4527 if (ret) { 4528 mutex_unlock(&pci_priv->bus_lock); 4529 return ret; 4530 } 4531 } 4532 4533 cnss_pci_set_auto_suspended(pci_priv, 1); 4534 mutex_unlock(&pci_priv->bus_lock); 4535 4536 cnss_pci_set_monitor_wake_intr(pci_priv, true); 4537 4538 /* For suspend 
temporarily set bandwidth vote to NONE and dont save in 4539 * current_bw_vote as in resume path we should vote for last used 4540 * bandwidth vote. Also ignore error if bw voting is not setup. 4541 */ 4542 cnss_setup_bus_bandwidth(plat_priv, CNSS_BUS_WIDTH_NONE, false); 4543 return 0; 4544 } 4545 EXPORT_SYMBOL(cnss_auto_suspend); 4546 4547 int cnss_auto_resume(struct device *dev) 4548 { 4549 int ret = 0; 4550 struct pci_dev *pci_dev = to_pci_dev(dev); 4551 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4552 struct cnss_plat_data *plat_priv; 4553 4554 if (!pci_priv) 4555 return -ENODEV; 4556 4557 plat_priv = pci_priv->plat_priv; 4558 if (!plat_priv) 4559 return -ENODEV; 4560 4561 mutex_lock(&pci_priv->bus_lock); 4562 ret = cnss_pci_resume_bus(pci_priv); 4563 if (ret) { 4564 mutex_unlock(&pci_priv->bus_lock); 4565 return ret; 4566 } 4567 4568 cnss_pci_set_auto_suspended(pci_priv, 0); 4569 mutex_unlock(&pci_priv->bus_lock); 4570 4571 cnss_request_bus_bandwidth(dev, plat_priv->icc.current_bw_vote); 4572 pci_priv->drv_connected_last = 0; 4573 4574 return 0; 4575 } 4576 EXPORT_SYMBOL(cnss_auto_resume); 4577 4578 int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us) 4579 { 4580 struct pci_dev *pci_dev = to_pci_dev(dev); 4581 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4582 struct cnss_plat_data *plat_priv; 4583 struct mhi_controller *mhi_ctrl; 4584 4585 if (!pci_priv) 4586 return -ENODEV; 4587 4588 switch (pci_priv->device_id) { 4589 case QCA6390_DEVICE_ID: 4590 case QCA6490_DEVICE_ID: 4591 case KIWI_DEVICE_ID: 4592 case MANGO_DEVICE_ID: 4593 case PEACH_DEVICE_ID: 4594 break; 4595 default: 4596 return 0; 4597 } 4598 4599 mhi_ctrl = pci_priv->mhi_ctrl; 4600 if (!mhi_ctrl) 4601 return -EINVAL; 4602 4603 plat_priv = pci_priv->plat_priv; 4604 if (!plat_priv) 4605 return -ENODEV; 4606 4607 if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) 4608 return -EAGAIN; 4609 4610 if (timeout_us) { 4611 /* Busy wait for 
timeout_us */ 4612 return cnss_mhi_device_get_sync_atomic(pci_priv, 4613 timeout_us, false); 4614 } else { 4615 /* Sleep wait for mhi_ctrl->timeout_ms */ 4616 return mhi_device_get_sync(mhi_ctrl->mhi_dev); 4617 } 4618 } 4619 EXPORT_SYMBOL(cnss_pci_force_wake_request_sync); 4620 4621 int cnss_pci_force_wake_request(struct device *dev) 4622 { 4623 struct pci_dev *pci_dev = to_pci_dev(dev); 4624 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4625 struct cnss_plat_data *plat_priv; 4626 struct mhi_controller *mhi_ctrl; 4627 4628 if (!pci_priv) 4629 return -ENODEV; 4630 4631 switch (pci_priv->device_id) { 4632 case QCA6390_DEVICE_ID: 4633 case QCA6490_DEVICE_ID: 4634 case KIWI_DEVICE_ID: 4635 case MANGO_DEVICE_ID: 4636 case PEACH_DEVICE_ID: 4637 break; 4638 default: 4639 return 0; 4640 } 4641 4642 mhi_ctrl = pci_priv->mhi_ctrl; 4643 if (!mhi_ctrl) 4644 return -EINVAL; 4645 4646 plat_priv = pci_priv->plat_priv; 4647 if (!plat_priv) 4648 return -ENODEV; 4649 4650 if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) 4651 return -EAGAIN; 4652 4653 mhi_device_get(mhi_ctrl->mhi_dev); 4654 4655 return 0; 4656 } 4657 EXPORT_SYMBOL(cnss_pci_force_wake_request); 4658 4659 int cnss_pci_is_device_awake(struct device *dev) 4660 { 4661 struct pci_dev *pci_dev = to_pci_dev(dev); 4662 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4663 struct mhi_controller *mhi_ctrl; 4664 4665 if (!pci_priv) 4666 return -ENODEV; 4667 4668 switch (pci_priv->device_id) { 4669 case QCA6390_DEVICE_ID: 4670 case QCA6490_DEVICE_ID: 4671 case KIWI_DEVICE_ID: 4672 case MANGO_DEVICE_ID: 4673 case PEACH_DEVICE_ID: 4674 break; 4675 default: 4676 return 0; 4677 } 4678 4679 mhi_ctrl = pci_priv->mhi_ctrl; 4680 if (!mhi_ctrl) 4681 return -EINVAL; 4682 4683 return (mhi_ctrl->dev_state == MHI_STATE_M0); 4684 } 4685 EXPORT_SYMBOL(cnss_pci_is_device_awake); 4686 4687 int cnss_pci_force_wake_release(struct device *dev) 4688 { 4689 struct pci_dev *pci_dev = to_pci_dev(dev); 4690 struct 
cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); 4691 struct cnss_plat_data *plat_priv; 4692 struct mhi_controller *mhi_ctrl; 4693 4694 if (!pci_priv) 4695 return -ENODEV; 4696 4697 switch (pci_priv->device_id) { 4698 case QCA6390_DEVICE_ID: 4699 case QCA6490_DEVICE_ID: 4700 case KIWI_DEVICE_ID: 4701 case MANGO_DEVICE_ID: 4702 case PEACH_DEVICE_ID: 4703 break; 4704 default: 4705 return 0; 4706 } 4707 4708 mhi_ctrl = pci_priv->mhi_ctrl; 4709 if (!mhi_ctrl) 4710 return -EINVAL; 4711 4712 plat_priv = pci_priv->plat_priv; 4713 if (!plat_priv) 4714 return -ENODEV; 4715 4716 if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) 4717 return -EAGAIN; 4718 4719 mhi_device_put(mhi_ctrl->mhi_dev); 4720 4721 return 0; 4722 } 4723 EXPORT_SYMBOL(cnss_pci_force_wake_release); 4724 4725 int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv) 4726 { 4727 int ret = 0; 4728 4729 if (!pci_priv) 4730 return -ENODEV; 4731 4732 mutex_lock(&pci_priv->bus_lock); 4733 if (cnss_pci_get_auto_suspended(pci_priv) && 4734 !pci_priv->qmi_send_usage_count) 4735 ret = cnss_pci_resume_bus(pci_priv); 4736 pci_priv->qmi_send_usage_count++; 4737 cnss_pr_buf("Increased QMI send usage count to %d\n", 4738 pci_priv->qmi_send_usage_count); 4739 mutex_unlock(&pci_priv->bus_lock); 4740 4741 return ret; 4742 } 4743 4744 int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv) 4745 { 4746 int ret = 0; 4747 4748 if (!pci_priv) 4749 return -ENODEV; 4750 4751 mutex_lock(&pci_priv->bus_lock); 4752 if (pci_priv->qmi_send_usage_count) 4753 pci_priv->qmi_send_usage_count--; 4754 cnss_pr_buf("Decreased QMI send usage count to %d\n", 4755 pci_priv->qmi_send_usage_count); 4756 if (cnss_pci_get_auto_suspended(pci_priv) && 4757 !pci_priv->qmi_send_usage_count && 4758 !cnss_pcie_is_device_down(pci_priv)) 4759 ret = cnss_pci_suspend_bus(pci_priv); 4760 mutex_unlock(&pci_priv->bus_lock); 4761 4762 return ret; 4763 } 4764 4765 int cnss_send_buffer_to_afcmem(struct device *dev, const uint8_t *afcdb, 4766 
uint32_t len, uint8_t slotid) 4767 { 4768 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); 4769 struct cnss_fw_mem *fw_mem; 4770 void *mem = NULL; 4771 int i, ret; 4772 u32 *status; 4773 4774 if (!plat_priv) 4775 return -EINVAL; 4776 4777 fw_mem = plat_priv->fw_mem; 4778 if (slotid >= AFC_MAX_SLOT) { 4779 cnss_pr_err("Invalid slot id %d\n", slotid); 4780 ret = -EINVAL; 4781 goto err; 4782 } 4783 if (len > AFC_SLOT_SIZE) { 4784 cnss_pr_err("len %d greater than slot size", len); 4785 ret = -EINVAL; 4786 goto err; 4787 } 4788 4789 for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { 4790 if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) { 4791 mem = fw_mem[i].va; 4792 status = mem + (slotid * AFC_SLOT_SIZE); 4793 break; 4794 } 4795 } 4796 4797 if (!mem) { 4798 cnss_pr_err("AFC mem is not available\n"); 4799 ret = -ENOMEM; 4800 goto err; 4801 } 4802 4803 memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len); 4804 if (len < AFC_SLOT_SIZE) 4805 memset(mem + (slotid * AFC_SLOT_SIZE) + len, 4806 0, AFC_SLOT_SIZE - len); 4807 status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS); 4808 4809 return 0; 4810 err: 4811 return ret; 4812 } 4813 EXPORT_SYMBOL(cnss_send_buffer_to_afcmem); 4814 4815 int cnss_reset_afcmem(struct device *dev, uint8_t slotid) 4816 { 4817 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); 4818 struct cnss_fw_mem *fw_mem; 4819 void *mem = NULL; 4820 int i, ret; 4821 4822 if (!plat_priv) 4823 return -EINVAL; 4824 4825 fw_mem = plat_priv->fw_mem; 4826 if (slotid >= AFC_MAX_SLOT) { 4827 cnss_pr_err("Invalid slot id %d\n", slotid); 4828 ret = -EINVAL; 4829 goto err; 4830 } 4831 4832 for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { 4833 if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) { 4834 mem = fw_mem[i].va; 4835 break; 4836 } 4837 } 4838 4839 if (!mem) { 4840 cnss_pr_err("AFC mem is not available\n"); 4841 ret = -ENOMEM; 4842 goto err; 4843 } 4844 4845 memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE); 4846 return 0; 4847 
4848 err: 4849 return ret; 4850 } 4851 EXPORT_SYMBOL(cnss_reset_afcmem); 4852 4853 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv) 4854 { 4855 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 4856 struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; 4857 struct device *dev = &pci_priv->pci_dev->dev; 4858 int i; 4859 4860 for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { 4861 if (!fw_mem[i].va && fw_mem[i].size) { 4862 retry: 4863 fw_mem[i].va = 4864 dma_alloc_attrs(dev, fw_mem[i].size, 4865 &fw_mem[i].pa, GFP_KERNEL, 4866 fw_mem[i].attrs); 4867 4868 if (!fw_mem[i].va) { 4869 if ((fw_mem[i].attrs & 4870 DMA_ATTR_FORCE_CONTIGUOUS)) { 4871 fw_mem[i].attrs &= 4872 ~DMA_ATTR_FORCE_CONTIGUOUS; 4873 4874 cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n", 4875 fw_mem[i].type); 4876 goto retry; 4877 } 4878 cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n", 4879 fw_mem[i].size, fw_mem[i].type); 4880 CNSS_ASSERT(0); 4881 return -ENOMEM; 4882 } 4883 } 4884 } 4885 4886 return 0; 4887 } 4888 4889 static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv) 4890 { 4891 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 4892 struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; 4893 struct device *dev = &pci_priv->pci_dev->dev; 4894 int i; 4895 4896 for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { 4897 if (fw_mem[i].va && fw_mem[i].size) { 4898 cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n", 4899 fw_mem[i].va, &fw_mem[i].pa, 4900 fw_mem[i].size, fw_mem[i].type); 4901 dma_free_attrs(dev, fw_mem[i].size, 4902 fw_mem[i].va, fw_mem[i].pa, 4903 fw_mem[i].attrs); 4904 fw_mem[i].va = NULL; 4905 fw_mem[i].pa = 0; 4906 fw_mem[i].size = 0; 4907 fw_mem[i].type = 0; 4908 } 4909 } 4910 4911 plat_priv->fw_mem_seg_len = 0; 4912 } 4913 4914 int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv) 4915 { 4916 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 4917 struct cnss_fw_mem *qdss_mem = 
plat_priv->qdss_mem; 4918 int i, j; 4919 4920 for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) { 4921 if (!qdss_mem[i].va && qdss_mem[i].size) { 4922 qdss_mem[i].va = 4923 dma_alloc_coherent(&pci_priv->pci_dev->dev, 4924 qdss_mem[i].size, 4925 &qdss_mem[i].pa, 4926 GFP_KERNEL); 4927 if (!qdss_mem[i].va) { 4928 cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chuck-ID: %d\n", 4929 qdss_mem[i].size, 4930 qdss_mem[i].type, i); 4931 break; 4932 } 4933 } 4934 } 4935 4936 /* Best-effort allocation for QDSS trace */ 4937 if (i < plat_priv->qdss_mem_seg_len) { 4938 for (j = i; j < plat_priv->qdss_mem_seg_len; j++) { 4939 qdss_mem[j].type = 0; 4940 qdss_mem[j].size = 0; 4941 } 4942 plat_priv->qdss_mem_seg_len = i; 4943 } 4944 4945 return 0; 4946 } 4947 4948 void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv) 4949 { 4950 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 4951 struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem; 4952 int i; 4953 4954 for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) { 4955 if (qdss_mem[i].va && qdss_mem[i].size) { 4956 cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n", 4957 &qdss_mem[i].pa, qdss_mem[i].size, 4958 qdss_mem[i].type); 4959 dma_free_coherent(&pci_priv->pci_dev->dev, 4960 qdss_mem[i].size, qdss_mem[i].va, 4961 qdss_mem[i].pa); 4962 qdss_mem[i].va = NULL; 4963 qdss_mem[i].pa = 0; 4964 qdss_mem[i].size = 0; 4965 qdss_mem[i].type = 0; 4966 } 4967 } 4968 plat_priv->qdss_mem_seg_len = 0; 4969 } 4970 4971 int cnss_pci_load_tme_patch(struct cnss_pci_data *pci_priv) 4972 { 4973 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 4974 struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem; 4975 char filename[MAX_FIRMWARE_NAME_LEN]; 4976 char *tme_patch_filename = NULL; 4977 const struct firmware *fw_entry; 4978 int ret = 0; 4979 4980 switch (pci_priv->device_id) { 4981 case PEACH_DEVICE_ID: 4982 if (plat_priv->device_version.major_version == FW_V1_NUMBER) 4983 
tme_patch_filename = TME_PATCH_FILE_NAME_1_0; 4984 else if (plat_priv->device_version.major_version == FW_V2_NUMBER) 4985 tme_patch_filename = TME_PATCH_FILE_NAME_2_0; 4986 break; 4987 case QCA6174_DEVICE_ID: 4988 case QCA6290_DEVICE_ID: 4989 case QCA6390_DEVICE_ID: 4990 case QCA6490_DEVICE_ID: 4991 case KIWI_DEVICE_ID: 4992 case MANGO_DEVICE_ID: 4993 default: 4994 cnss_pr_dbg("TME-L not supported for device ID: (0x%x)\n", 4995 pci_priv->device_id); 4996 return 0; 4997 } 4998 4999 if (!tme_lite_mem->va && !tme_lite_mem->size) { 5000 scnprintf(filename, MAX_FIRMWARE_NAME_LEN, "%s", tme_patch_filename); 5001 5002 ret = firmware_request_nowarn(&fw_entry, filename, 5003 &pci_priv->pci_dev->dev); 5004 if (ret) { 5005 cnss_pr_err("Failed to load TME-L patch: %s, ret: %d\n", 5006 filename, ret); 5007 return ret; 5008 } 5009 5010 tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev, 5011 fw_entry->size, &tme_lite_mem->pa, 5012 GFP_KERNEL); 5013 if (!tme_lite_mem->va) { 5014 cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n", 5015 fw_entry->size); 5016 release_firmware(fw_entry); 5017 return -ENOMEM; 5018 } 5019 5020 memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size); 5021 tme_lite_mem->size = fw_entry->size; 5022 release_firmware(fw_entry); 5023 } 5024 5025 return 0; 5026 } 5027 5028 static void cnss_pci_free_tme_lite_mem(struct cnss_pci_data *pci_priv) 5029 { 5030 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 5031 struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem; 5032 5033 if (tme_lite_mem->va && tme_lite_mem->size) { 5034 cnss_pr_dbg("Freeing memory for TME patch, va: 0x%pK, pa: %pa, size: 0x%zx\n", 5035 tme_lite_mem->va, &tme_lite_mem->pa, tme_lite_mem->size); 5036 dma_free_coherent(&pci_priv->pci_dev->dev, tme_lite_mem->size, 5037 tme_lite_mem->va, tme_lite_mem->pa); 5038 } 5039 5040 tme_lite_mem->va = NULL; 5041 tme_lite_mem->pa = 0; 5042 tme_lite_mem->size = 0; 5043 } 5044 5045 int cnss_pci_load_tme_opt_file(struct 
cnss_pci_data *pci_priv, 5046 enum wlfw_tme_lite_file_type_v01 file) 5047 { 5048 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 5049 struct cnss_fw_mem *tme_lite_mem = NULL; 5050 char filename[MAX_FIRMWARE_NAME_LEN]; 5051 char *tme_opt_filename = NULL; 5052 const struct firmware *fw_entry; 5053 int ret = 0; 5054 5055 switch (pci_priv->device_id) { 5056 case PEACH_DEVICE_ID: 5057 if (file == WLFW_TME_LITE_OEM_FUSE_FILE_V01) { 5058 tme_opt_filename = TME_OEM_FUSE_FILE_NAME; 5059 tme_lite_mem = &plat_priv->tme_opt_file_mem[0]; 5060 } else if (file == WLFW_TME_LITE_RPR_FILE_V01) { 5061 tme_opt_filename = TME_RPR_FILE_NAME; 5062 tme_lite_mem = &plat_priv->tme_opt_file_mem[1]; 5063 } else if (file == WLFW_TME_LITE_DPR_FILE_V01) { 5064 tme_opt_filename = TME_DPR_FILE_NAME; 5065 tme_lite_mem = &plat_priv->tme_opt_file_mem[2]; 5066 } 5067 break; 5068 case QCA6174_DEVICE_ID: 5069 case QCA6290_DEVICE_ID: 5070 case QCA6390_DEVICE_ID: 5071 case QCA6490_DEVICE_ID: 5072 case KIWI_DEVICE_ID: 5073 case MANGO_DEVICE_ID: 5074 default: 5075 cnss_pr_dbg("TME-L opt file: %s not supported for device ID: (0x%x)\n", 5076 tme_opt_filename, pci_priv->device_id); 5077 return 0; 5078 } 5079 5080 if (!tme_lite_mem) 5081 return 0; 5082 5083 if (!tme_lite_mem->va && !tme_lite_mem->size) { 5084 cnss_pci_add_fw_prefix_name(pci_priv, filename, 5085 tme_opt_filename); 5086 5087 ret = firmware_request_nowarn(&fw_entry, filename, 5088 &pci_priv->pci_dev->dev); 5089 if (ret) { 5090 cnss_pr_err("Failed to load TME-L opt file: %s, ret: %d\n", 5091 filename, ret); 5092 return ret; 5093 } 5094 5095 tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev, 5096 fw_entry->size, &tme_lite_mem->pa, 5097 GFP_KERNEL); 5098 if (!tme_lite_mem->va) { 5099 cnss_pr_err("Failed to allocate memory for TME-L opt file %s,size: 0x%zx\n", 5100 filename, fw_entry->size); 5101 release_firmware(fw_entry); 5102 return -ENOMEM; 5103 } 5104 5105 memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size); 5106 
tme_lite_mem->size = fw_entry->size; 5107 release_firmware(fw_entry); 5108 } 5109 5110 return 0; 5111 } 5112 5113 static void cnss_pci_free_tme_opt_file_mem(struct cnss_pci_data *pci_priv) 5114 { 5115 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 5116 struct cnss_fw_mem *tme_opt_file_mem = plat_priv->tme_opt_file_mem; 5117 int i = 0; 5118 5119 for (i = 0; i < QMI_WLFW_MAX_TME_OPT_FILE_NUM; i++) { 5120 if (tme_opt_file_mem[i].va && tme_opt_file_mem[i].size) { 5121 cnss_pr_dbg("Free memory for TME opt file,va:0x%pK, pa:%pa, size:0x%zx\n", 5122 tme_opt_file_mem[i].va, &tme_opt_file_mem[i].pa, 5123 tme_opt_file_mem[i].size); 5124 dma_free_coherent(&pci_priv->pci_dev->dev, tme_opt_file_mem[i].size, 5125 tme_opt_file_mem[i].va, tme_opt_file_mem[i].pa); 5126 } 5127 tme_opt_file_mem[i].va = NULL; 5128 tme_opt_file_mem[i].pa = 0; 5129 tme_opt_file_mem[i].size = 0; 5130 } 5131 } 5132 5133 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv) 5134 { 5135 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 5136 struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem; 5137 char filename[MAX_FIRMWARE_NAME_LEN]; 5138 char *phy_filename = DEFAULT_PHY_UCODE_FILE_NAME; 5139 const struct firmware *fw_entry; 5140 int ret = 0; 5141 5142 /* Use forward compatibility here since for any recent device 5143 * it should use DEFAULT_PHY_UCODE_FILE_NAME. 
5144 */ 5145 switch (pci_priv->device_id) { 5146 case QCA6174_DEVICE_ID: 5147 cnss_pr_err("Invalid device ID (0x%x) to load phy image\n", 5148 pci_priv->device_id); 5149 return -EINVAL; 5150 case QCA6290_DEVICE_ID: 5151 case QCA6390_DEVICE_ID: 5152 case QCA6490_DEVICE_ID: 5153 phy_filename = DEFAULT_PHY_M3_FILE_NAME; 5154 break; 5155 case KIWI_DEVICE_ID: 5156 case MANGO_DEVICE_ID: 5157 case PEACH_DEVICE_ID: 5158 switch (plat_priv->device_version.major_version) { 5159 case FW_V2_NUMBER: 5160 phy_filename = PHY_UCODE_V2_FILE_NAME; 5161 break; 5162 default: 5163 break; 5164 } 5165 break; 5166 default: 5167 break; 5168 } 5169 5170 if (!m3_mem->va && !m3_mem->size) { 5171 cnss_pci_add_fw_prefix_name(pci_priv, filename, 5172 phy_filename); 5173 5174 ret = firmware_request_nowarn(&fw_entry, filename, 5175 &pci_priv->pci_dev->dev); 5176 if (ret) { 5177 cnss_pr_err("Failed to load M3 image: %s\n", filename); 5178 return ret; 5179 } 5180 5181 m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev, 5182 fw_entry->size, &m3_mem->pa, 5183 GFP_KERNEL); 5184 if (!m3_mem->va) { 5185 cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n", 5186 fw_entry->size); 5187 release_firmware(fw_entry); 5188 return -ENOMEM; 5189 } 5190 5191 memcpy(m3_mem->va, fw_entry->data, fw_entry->size); 5192 m3_mem->size = fw_entry->size; 5193 release_firmware(fw_entry); 5194 } 5195 5196 return 0; 5197 } 5198 5199 static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv) 5200 { 5201 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 5202 struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem; 5203 5204 if (m3_mem->va && m3_mem->size) { 5205 cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n", 5206 m3_mem->va, &m3_mem->pa, m3_mem->size); 5207 dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size, 5208 m3_mem->va, m3_mem->pa); 5209 } 5210 5211 m3_mem->va = NULL; 5212 m3_mem->pa = 0; 5213 m3_mem->size = 0; 5214 } 5215 5216 #ifdef CONFIG_FREE_M3_BLOB_MEM 5217 void 
cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv) 5218 { 5219 cnss_pci_free_m3_mem(pci_priv); 5220 } 5221 #else 5222 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv) 5223 { 5224 } 5225 #endif 5226 5227 int cnss_pci_load_aux(struct cnss_pci_data *pci_priv) 5228 { 5229 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 5230 struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem; 5231 char filename[MAX_FIRMWARE_NAME_LEN]; 5232 char *aux_filename = DEFAULT_AUX_FILE_NAME; 5233 const struct firmware *fw_entry; 5234 int ret = 0; 5235 5236 if (!aux_mem->va && !aux_mem->size) { 5237 cnss_pci_add_fw_prefix_name(pci_priv, filename, 5238 aux_filename); 5239 5240 ret = firmware_request_nowarn(&fw_entry, filename, 5241 &pci_priv->pci_dev->dev); 5242 if (ret) { 5243 cnss_pr_err("Failed to load AUX image: %s\n", filename); 5244 return ret; 5245 } 5246 5247 aux_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev, 5248 fw_entry->size, &aux_mem->pa, 5249 GFP_KERNEL); 5250 if (!aux_mem->va) { 5251 cnss_pr_err("Failed to allocate memory for AUX, size: 0x%zx\n", 5252 fw_entry->size); 5253 release_firmware(fw_entry); 5254 return -ENOMEM; 5255 } 5256 5257 memcpy(aux_mem->va, fw_entry->data, fw_entry->size); 5258 aux_mem->size = fw_entry->size; 5259 release_firmware(fw_entry); 5260 } 5261 5262 return 0; 5263 } 5264 5265 static void cnss_pci_free_aux_mem(struct cnss_pci_data *pci_priv) 5266 { 5267 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 5268 struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem; 5269 5270 if (aux_mem->va && aux_mem->size) { 5271 cnss_pr_dbg("Freeing memory for AUX, va: 0x%pK, pa: %pa, size: 0x%zx\n", 5272 aux_mem->va, &aux_mem->pa, aux_mem->size); 5273 dma_free_coherent(&pci_priv->pci_dev->dev, aux_mem->size, 5274 aux_mem->va, aux_mem->pa); 5275 } 5276 5277 aux_mem->va = NULL; 5278 aux_mem->pa = 0; 5279 aux_mem->size = 0; 5280 } 5281 5282 void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv) 5283 { 5284 struct cnss_plat_data 
*plat_priv; 5285 5286 if (!pci_priv) 5287 return; 5288 5289 cnss_fatal_err("Timeout waiting for FW ready indication\n"); 5290 5291 plat_priv = pci_priv->plat_priv; 5292 if (!plat_priv) 5293 return; 5294 5295 if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) { 5296 cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n"); 5297 return; 5298 } 5299 5300 cnss_schedule_recovery(&pci_priv->pci_dev->dev, 5301 CNSS_REASON_TIMEOUT); 5302 } 5303 5304 static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv) 5305 { 5306 pci_priv->iommu_domain = NULL; 5307 } 5308 5309 int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size) 5310 { 5311 if (!pci_priv) 5312 return -ENODEV; 5313 5314 if (!pci_priv->smmu_iova_len) 5315 return -EINVAL; 5316 5317 *addr = pci_priv->smmu_iova_start; 5318 *size = pci_priv->smmu_iova_len; 5319 5320 return 0; 5321 } 5322 5323 int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size) 5324 { 5325 if (!pci_priv) 5326 return -ENODEV; 5327 5328 if (!pci_priv->smmu_iova_ipa_len) 5329 return -EINVAL; 5330 5331 *addr = pci_priv->smmu_iova_ipa_start; 5332 *size = pci_priv->smmu_iova_ipa_len; 5333 5334 return 0; 5335 } 5336 5337 bool cnss_pci_is_smmu_s1_enabled(struct cnss_pci_data *pci_priv) 5338 { 5339 if (pci_priv) 5340 return pci_priv->smmu_s1_enable; 5341 5342 return false; 5343 } 5344 struct iommu_domain *cnss_smmu_get_domain(struct device *dev) 5345 { 5346 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev)); 5347 5348 if (!pci_priv) 5349 return NULL; 5350 5351 return pci_priv->iommu_domain; 5352 } 5353 EXPORT_SYMBOL(cnss_smmu_get_domain); 5354 5355 int cnss_smmu_map(struct device *dev, 5356 phys_addr_t paddr, uint32_t *iova_addr, size_t size) 5357 { 5358 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev)); 5359 struct cnss_plat_data *plat_priv; 5360 unsigned long iova; 5361 size_t len; 5362 int ret = 0; 5363 int flag = IOMMU_READ | IOMMU_WRITE; 5364 struct 
pci_dev *root_port; 5365 struct device_node *root_of_node; 5366 bool dma_coherent = false; 5367 5368 if (!pci_priv) 5369 return -ENODEV; 5370 5371 if (!iova_addr) { 5372 cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n", 5373 &paddr, size); 5374 return -EINVAL; 5375 } 5376 5377 plat_priv = pci_priv->plat_priv; 5378 5379 len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE); 5380 iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE); 5381 5382 if (pci_priv->iommu_geometry && 5383 iova >= pci_priv->smmu_iova_ipa_start + 5384 pci_priv->smmu_iova_ipa_len) { 5385 cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n", 5386 iova, 5387 &pci_priv->smmu_iova_ipa_start, 5388 pci_priv->smmu_iova_ipa_len); 5389 return -ENOMEM; 5390 } 5391 5392 if (!test_bit(DISABLE_IO_COHERENCY, 5393 &plat_priv->ctrl_params.quirks)) { 5394 root_port = pcie_find_root_port(pci_priv->pci_dev); 5395 if (!root_port) { 5396 cnss_pr_err("Root port is null, so dma_coherent is disabled\n"); 5397 } else { 5398 root_of_node = root_port->dev.of_node; 5399 if (root_of_node && root_of_node->parent) { 5400 dma_coherent = 5401 of_property_read_bool(root_of_node->parent, 5402 "dma-coherent"); 5403 cnss_pr_dbg("dma-coherent is %s\n", 5404 dma_coherent ? 
"enabled" : "disabled"); 5405 if (dma_coherent) 5406 flag |= IOMMU_CACHE; 5407 } 5408 } 5409 } 5410 5411 cnss_pr_dbg("IOMMU map: iova %lx, len %zu\n", iova, len); 5412 5413 ret = cnss_iommu_map(pci_priv->iommu_domain, iova, 5414 rounddown(paddr, PAGE_SIZE), len, flag); 5415 if (ret) { 5416 cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret); 5417 return ret; 5418 } 5419 5420 pci_priv->smmu_iova_ipa_current = iova + len; 5421 *iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE)); 5422 cnss_pr_dbg("IOMMU map: iova_addr %lx\n", *iova_addr); 5423 5424 return 0; 5425 } 5426 EXPORT_SYMBOL(cnss_smmu_map); 5427 5428 int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size) 5429 { 5430 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev)); 5431 unsigned long iova; 5432 size_t unmapped; 5433 size_t len; 5434 5435 if (!pci_priv) 5436 return -ENODEV; 5437 5438 iova = rounddown(iova_addr, PAGE_SIZE); 5439 len = roundup(size + iova_addr - iova, PAGE_SIZE); 5440 5441 if (iova >= pci_priv->smmu_iova_ipa_start + 5442 pci_priv->smmu_iova_ipa_len) { 5443 cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n", 5444 iova, 5445 &pci_priv->smmu_iova_ipa_start, 5446 pci_priv->smmu_iova_ipa_len); 5447 return -ENOMEM; 5448 } 5449 5450 cnss_pr_dbg("IOMMU unmap: iova %lx, len %zu\n", iova, len); 5451 5452 unmapped = iommu_unmap(pci_priv->iommu_domain, iova, len); 5453 if (unmapped != len) { 5454 cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n", 5455 unmapped, len); 5456 return -EINVAL; 5457 } 5458 5459 pci_priv->smmu_iova_ipa_current = iova; 5460 return 0; 5461 } 5462 EXPORT_SYMBOL(cnss_smmu_unmap); 5463 5464 int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info) 5465 { 5466 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev)); 5467 struct cnss_plat_data *plat_priv; 5468 5469 if (!pci_priv) 5470 return -ENODEV; 5471 5472 plat_priv = 
pci_priv->plat_priv; 5473 if (!plat_priv) 5474 return -ENODEV; 5475 5476 info->va = pci_priv->bar; 5477 info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM); 5478 info->chip_id = plat_priv->chip_info.chip_id; 5479 info->chip_family = plat_priv->chip_info.chip_family; 5480 info->board_id = plat_priv->board_info.board_id; 5481 info->soc_id = plat_priv->soc_info.soc_id; 5482 info->fw_version = plat_priv->fw_version_info.fw_version; 5483 strlcpy(info->fw_build_timestamp, 5484 plat_priv->fw_version_info.fw_build_timestamp, 5485 sizeof(info->fw_build_timestamp)); 5486 memcpy(&info->device_version, &plat_priv->device_version, 5487 sizeof(info->device_version)); 5488 memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info, 5489 sizeof(info->dev_mem_info)); 5490 memcpy(&info->fw_build_id, &plat_priv->fw_build_id, 5491 sizeof(info->fw_build_id)); 5492 5493 return 0; 5494 } 5495 EXPORT_SYMBOL(cnss_get_soc_info); 5496 5497 int cnss_pci_get_user_msi_assignment(struct cnss_pci_data *pci_priv, 5498 char *user_name, 5499 int *num_vectors, 5500 u32 *user_base_data, 5501 u32 *base_vector) 5502 { 5503 return cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev, 5504 user_name, 5505 num_vectors, 5506 user_base_data, 5507 base_vector); 5508 } 5509 5510 static int cnss_pci_irq_set_affinity_hint(struct cnss_pci_data *pci_priv, 5511 unsigned int vec, 5512 const struct cpumask *cpumask) 5513 { 5514 int ret; 5515 struct pci_dev *pci_dev = pci_priv->pci_dev; 5516 5517 ret = irq_set_affinity_hint(pci_irq_vector(pci_dev, vec), 5518 cpumask); 5519 5520 return ret; 5521 } 5522 5523 static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv) 5524 { 5525 int ret = 0; 5526 struct pci_dev *pci_dev = pci_priv->pci_dev; 5527 int num_vectors; 5528 struct cnss_msi_config *msi_config; 5529 5530 if (pci_priv->device_id == QCA6174_DEVICE_ID) 5531 return 0; 5532 5533 if (cnss_pci_is_force_one_msi(pci_priv)) { 5534 ret = cnss_pci_get_one_msi_assignment(pci_priv); 5535 cnss_pr_dbg("force one msi\n"); 
5536 } else { 5537 ret = cnss_pci_get_msi_assignment(pci_priv); 5538 } 5539 if (ret) { 5540 cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret); 5541 goto out; 5542 } 5543 5544 msi_config = pci_priv->msi_config; 5545 if (!msi_config) { 5546 cnss_pr_err("msi_config is NULL!\n"); 5547 ret = -EINVAL; 5548 goto out; 5549 } 5550 5551 num_vectors = pci_alloc_irq_vectors(pci_dev, 5552 msi_config->total_vectors, 5553 msi_config->total_vectors, 5554 PCI_IRQ_MSI | PCI_IRQ_MSIX); 5555 if ((num_vectors != msi_config->total_vectors) && 5556 !cnss_pci_fallback_one_msi(pci_priv, &num_vectors)) { 5557 cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d", 5558 msi_config->total_vectors, num_vectors); 5559 if (num_vectors >= 0) 5560 ret = -EINVAL; 5561 goto reset_msi_config; 5562 } 5563 5564 /* With VT-d disabled on x86 platform, only one pci irq vector is 5565 * allocated. Once suspend the irq may be migrated to CPU0 if it was 5566 * affine to other CPU with one new msi vector re-allocated. 5567 * The observation cause the issue about no irq handler for vector 5568 * once resume. 5569 * The fix is to set irq vector affinity to CPU0 before calling 5570 * request_irq to avoid the irq migration. 
 */
	if (cnss_pci_is_one_msi(pci_priv)) {
		ret = cnss_pci_irq_set_affinity_hint(pci_priv,
						     0,
						     cpumask_of(0));
		if (ret) {
			cnss_pr_err("Failed to affinize irq vector to CPU0\n");
			goto free_msi_vector;
		}
	}

	if (cnss_pci_config_msi_addr(pci_priv)) {
		ret = -EINVAL;
		goto free_msi_vector;
	}

	if (cnss_pci_config_msi_data(pci_priv)) {
		ret = -EINVAL;
		goto free_msi_vector;
	}

	return 0;

free_msi_vector:
	/* Drop the CPU0 affinity hint applied above in the one-MSI case */
	if (cnss_pci_is_one_msi(pci_priv))
		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
	pci_free_irq_vectors(pci_priv->pci_dev);
reset_msi_config:
	pci_priv->msi_config = NULL;
out:
	return ret;
}

/*
 * cnss_pci_disable_msi() - Release the MSI vectors allocated by
 * cnss_pci_enable_msi(). No-op for QCA6174, which does not go through
 * the MSI setup path here.
 */
static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return;

	/* Undo the CPU0 affinity hint set for the single-MSI configuration */
	if (cnss_pci_is_one_msi(pci_priv))
		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);

	pci_free_irq_vectors(pci_priv->pci_dev);
}

/*
 * cnss_get_user_msi_assignment() - Look up the MSI vector range assigned to
 * a named MSI user (e.g. "MHI", "WAKE").
 * @dev: PCI device
 * @user_name: name of the MSI user to look up in msi_config->users[]
 * @num_vectors: out, number of vectors assigned to this user
 * @user_base_data: out, user's base vector plus the endpoint's MSI base data
 * @base_vector: out, user's base vector index
 *
 * Returns 0 on success, -ENODEV/-EINVAL on failure.
 */
int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
				 int *num_vectors, u32 *user_base_data,
				 u32 *base_vector)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
	struct cnss_msi_config *msi_config;
	int idx;

	if (!pci_priv)
		return -ENODEV;

	msi_config = pci_priv->msi_config;
	if (!msi_config) {
		cnss_pr_err("MSI is not supported.\n");
		return -EINVAL;
	}

	for (idx = 0; idx < msi_config->total_users; idx++) {
		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
			*num_vectors = msi_config->users[idx].num_vectors;
			*user_base_data = msi_config->users[idx].base_vector
				+ pci_priv->msi_ep_base_data;
			*base_vector = msi_config->users[idx].base_vector;
			/* Log only once per user to avoid spam */
			if (print_optimize.msi_log_chk[idx]++)
				goto skip_print;

			cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
				    user_name, *num_vectors, *user_base_data,
				    *base_vector);
skip_print:
			return 0;
		}
	}

	cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}
EXPORT_SYMBOL(cnss_get_user_msi_assignment);

/* Map an MSI vector index to its Linux IRQ number via pci_irq_vector(). */
int cnss_get_msi_irq(struct device *dev, unsigned int vector)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	int irq_num;

	irq_num = pci_irq_vector(pci_dev, vector);
	cnss_pr_dbg("Get IRQ number %d for vector index %d\n", irq_num, vector);

	return irq_num;
}
EXPORT_SYMBOL(cnss_get_msi_irq);

/* Return true if the device operates with a single shared MSI vector. */
bool cnss_is_one_msi(struct device *dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));

	if (!pci_priv)
		return false;

	return cnss_pci_is_one_msi(pci_priv);
}
EXPORT_SYMBOL(cnss_is_one_msi);

/*
 * cnss_get_msi_address() - Report the MSI (or MSI-X) doorbell address.
 * @dev: PCI device
 * @msi_addr_low: out, low 32 bits of the message address
 * @msi_addr_high: out, high 32 bits (0 unless the device advertises
 *                 64-bit MSI capability)
 *
 * For MSI-X the cached pci_priv->msix_addr is returned; for MSI the
 * address is read from the MSI capability in config space.
 */
void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
			  u32 *msi_addr_high)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv;
	u16 control;

	if (!pci_dev)
		return;

	pci_priv = cnss_get_pci_priv(pci_dev);
	if (!pci_priv)
		return;

	if (pci_dev->msix_enabled) {
		*msi_addr_low = pci_priv->msix_addr;
		*msi_addr_high = 0;
		if (!print_optimize.msi_addr_chk++)
			cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
				    *msi_addr_low, *msi_addr_high);
		return;
	}

	pci_read_config_word(pci_dev, pci_dev->msi_cap + PCI_MSI_FLAGS,
			     &control);
	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_low);
	/* Return MSI high address only when device supports 64-bit MSI */
	if (control & PCI_MSI_FLAGS_64BIT)
		pci_read_config_dword(pci_dev,
				      pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
				      msi_addr_high);
	else
		*msi_addr_high = 0;
	/* Add
only single print as the address is constant*/ 5714 if (!print_optimize.msi_addr_chk++) 5715 cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n", 5716 *msi_addr_low, *msi_addr_high); 5717 } 5718 EXPORT_SYMBOL(cnss_get_msi_address); 5719 5720 u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv) 5721 { 5722 int ret, num_vectors; 5723 u32 user_base_data, base_vector; 5724 5725 if (!pci_priv) 5726 return -ENODEV; 5727 5728 ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev, 5729 WAKE_MSI_NAME, &num_vectors, 5730 &user_base_data, &base_vector); 5731 if (ret) { 5732 cnss_pr_err("WAKE MSI is not valid\n"); 5733 return 0; 5734 } 5735 5736 return user_base_data; 5737 } 5738 5739 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) 5740 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask) 5741 { 5742 return dma_set_mask(&pci_dev->dev, mask); 5743 } 5744 5745 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev, 5746 u64 mask) 5747 { 5748 return dma_set_coherent_mask(&pci_dev->dev, mask); 5749 } 5750 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */ 5751 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask) 5752 { 5753 return pci_set_dma_mask(pci_dev, mask); 5754 } 5755 5756 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev, 5757 u64 mask) 5758 { 5759 return pci_set_consistent_dma_mask(pci_dev, mask); 5760 } 5761 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */ 5762 5763 static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv) 5764 { 5765 int ret = 0; 5766 struct pci_dev *pci_dev = pci_priv->pci_dev; 5767 u16 device_id; 5768 5769 pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id); 5770 if (device_id != pci_priv->pci_device_id->device) { 5771 cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n", 5772 device_id, pci_priv->pci_device_id->device); 5773 ret = -EIO; 5774 goto out; 5775 } 5776 5777 ret = 
pci_assign_resource(pci_dev, PCI_BAR_NUM); 5778 if (ret) { 5779 pr_err("Failed to assign PCI resource, err = %d\n", ret); 5780 goto out; 5781 } 5782 5783 ret = pci_enable_device(pci_dev); 5784 if (ret) { 5785 cnss_pr_err("Failed to enable PCI device, err = %d\n", ret); 5786 goto out; 5787 } 5788 5789 ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss"); 5790 if (ret) { 5791 cnss_pr_err("Failed to request PCI region, err = %d\n", ret); 5792 goto disable_device; 5793 } 5794 5795 switch (device_id) { 5796 case QCA6174_DEVICE_ID: 5797 case QCN7605_DEVICE_ID: 5798 pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT; 5799 break; 5800 case QCA6390_DEVICE_ID: 5801 case QCA6490_DEVICE_ID: 5802 case KIWI_DEVICE_ID: 5803 case MANGO_DEVICE_ID: 5804 case PEACH_DEVICE_ID: 5805 pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT; 5806 break; 5807 default: 5808 pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT; 5809 break; 5810 } 5811 5812 cnss_pr_dbg("Set PCI DMA MASK (0x%llx)\n", pci_priv->dma_bit_mask); 5813 5814 ret = cnss_pci_set_dma_mask(pci_dev, pci_priv->dma_bit_mask); 5815 if (ret) { 5816 cnss_pr_err("Failed to set PCI DMA mask, err = %d\n", ret); 5817 goto release_region; 5818 } 5819 5820 ret = cnss_pci_set_coherent_dma_mask(pci_dev, pci_priv->dma_bit_mask); 5821 if (ret) { 5822 cnss_pr_err("Failed to set PCI coherent DMA mask, err = %d\n", 5823 ret); 5824 goto release_region; 5825 } 5826 5827 pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0); 5828 if (!pci_priv->bar) { 5829 cnss_pr_err("Failed to do PCI IO map!\n"); 5830 ret = -EIO; 5831 goto release_region; 5832 } 5833 5834 /* Save default config space without BME enabled */ 5835 pci_save_state(pci_dev); 5836 pci_priv->default_state = pci_store_saved_state(pci_dev); 5837 5838 pci_set_master(pci_dev); 5839 5840 return 0; 5841 5842 release_region: 5843 pci_release_region(pci_dev, PCI_BAR_NUM); 5844 disable_device: 5845 pci_disable_device(pci_dev); 5846 out: 5847 return ret; 5848 } 5849 5850 static void cnss_pci_disable_bus(struct 
cnss_pci_data *pci_priv) 5851 { 5852 struct pci_dev *pci_dev = pci_priv->pci_dev; 5853 5854 pci_clear_master(pci_dev); 5855 pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state); 5856 pci_load_and_free_saved_state(pci_dev, &pci_priv->default_state); 5857 5858 if (pci_priv->bar) { 5859 pci_iounmap(pci_dev, pci_priv->bar); 5860 pci_priv->bar = NULL; 5861 } 5862 5863 pci_release_region(pci_dev, PCI_BAR_NUM); 5864 if (pci_is_enabled(pci_dev)) 5865 pci_disable_device(pci_dev); 5866 } 5867 5868 static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv) 5869 { 5870 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 5871 int i, array_size = ARRAY_SIZE(qdss_csr) - 1; 5872 gfp_t gfp = GFP_KERNEL; 5873 u32 reg_offset; 5874 5875 if (in_interrupt() || irqs_disabled()) 5876 gfp = GFP_ATOMIC; 5877 5878 if (!plat_priv->qdss_reg) { 5879 plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev, 5880 sizeof(*plat_priv->qdss_reg) 5881 * array_size, gfp); 5882 if (!plat_priv->qdss_reg) 5883 return; 5884 } 5885 5886 cnss_pr_dbg("Start to dump qdss registers\n"); 5887 5888 for (i = 0; qdss_csr[i].name; i++) { 5889 reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset; 5890 if (cnss_pci_reg_read(pci_priv, reg_offset, 5891 &plat_priv->qdss_reg[i])) 5892 return; 5893 cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset, 5894 plat_priv->qdss_reg[i]); 5895 } 5896 } 5897 5898 static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv, 5899 enum cnss_ce_index ce) 5900 { 5901 int i; 5902 u32 ce_base = ce * CE_REG_INTERVAL; 5903 u32 reg_offset, src_ring_base, dst_ring_base, cmn_base, val; 5904 5905 switch (pci_priv->device_id) { 5906 case QCA6390_DEVICE_ID: 5907 src_ring_base = QCA6390_CE_SRC_RING_REG_BASE; 5908 dst_ring_base = QCA6390_CE_DST_RING_REG_BASE; 5909 cmn_base = QCA6390_CE_COMMON_REG_BASE; 5910 break; 5911 case QCA6490_DEVICE_ID: 5912 src_ring_base = QCA6490_CE_SRC_RING_REG_BASE; 5913 dst_ring_base = QCA6490_CE_DST_RING_REG_BASE; 5914 cmn_base = 
QCA6490_CE_COMMON_REG_BASE; 5915 break; 5916 default: 5917 return; 5918 } 5919 5920 switch (ce) { 5921 case CNSS_CE_09: 5922 case CNSS_CE_10: 5923 for (i = 0; ce_src[i].name; i++) { 5924 reg_offset = src_ring_base + ce_base + ce_src[i].offset; 5925 if (cnss_pci_reg_read(pci_priv, reg_offset, &val)) 5926 return; 5927 cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n", 5928 ce, ce_src[i].name, reg_offset, val); 5929 } 5930 5931 for (i = 0; ce_dst[i].name; i++) { 5932 reg_offset = dst_ring_base + ce_base + ce_dst[i].offset; 5933 if (cnss_pci_reg_read(pci_priv, reg_offset, &val)) 5934 return; 5935 cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n", 5936 ce, ce_dst[i].name, reg_offset, val); 5937 } 5938 break; 5939 case CNSS_CE_COMMON: 5940 for (i = 0; ce_cmn[i].name; i++) { 5941 reg_offset = cmn_base + ce_cmn[i].offset; 5942 if (cnss_pci_reg_read(pci_priv, reg_offset, &val)) 5943 return; 5944 cnss_pr_dbg("CE_COMMON_%s[0x%x] = 0x%x\n", 5945 ce_cmn[i].name, reg_offset, val); 5946 } 5947 break; 5948 default: 5949 cnss_pr_err("Unsupported CE[%d] registers dump\n", ce); 5950 } 5951 } 5952 5953 static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv) 5954 { 5955 if (cnss_pci_check_link_status(pci_priv)) 5956 return; 5957 5958 cnss_pr_dbg("Start to dump debug registers\n"); 5959 5960 cnss_mhi_debug_reg_dump(pci_priv); 5961 cnss_pci_bhi_debug_reg_dump(pci_priv); 5962 cnss_pci_soc_scratch_reg_dump(pci_priv); 5963 cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON); 5964 cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09); 5965 cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10); 5966 } 5967 5968 static int cnss_pci_assert_host_sol(struct cnss_pci_data *pci_priv) 5969 { 5970 int ret; 5971 5972 ret = cnss_get_host_sol_value(pci_priv->plat_priv); 5973 if (ret) { 5974 if (ret < 0) { 5975 cnss_pr_dbg("Host SOL functionality is not enabled\n"); 5976 return ret; 5977 } else { 5978 cnss_pr_dbg("Host SOL is already high\n"); 5979 /* 5980 * Return success if HOST SOL is already high. 
5981 * This will indicate caller that a HOST SOL is 5982 * already asserted from some other thread and 5983 * no further action required from the caller. 5984 */ 5985 return 0; 5986 } 5987 } 5988 5989 cnss_pr_dbg("Assert host SOL GPIO to retry RDDM, expecting link down\n"); 5990 cnss_set_host_sol_value(pci_priv->plat_priv, 1); 5991 5992 return 0; 5993 } 5994 5995 static void cnss_pci_mhi_reg_dump(struct cnss_pci_data *pci_priv) 5996 { 5997 if (!cnss_pci_check_link_status(pci_priv)) 5998 cnss_mhi_debug_reg_dump(pci_priv); 5999 6000 cnss_pci_bhi_debug_reg_dump(pci_priv); 6001 cnss_pci_soc_scratch_reg_dump(pci_priv); 6002 cnss_pci_dump_misc_reg(pci_priv); 6003 cnss_pci_dump_shadow_reg(pci_priv); 6004 } 6005 6006 int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv) 6007 { 6008 int ret; 6009 int retry = 0; 6010 enum mhi_ee_type mhi_ee; 6011 6012 switch (pci_priv->device_id) { 6013 case QCA6390_DEVICE_ID: 6014 case QCA6490_DEVICE_ID: 6015 case KIWI_DEVICE_ID: 6016 case MANGO_DEVICE_ID: 6017 case PEACH_DEVICE_ID: 6018 break; 6019 default: 6020 return -EOPNOTSUPP; 6021 } 6022 6023 /* Always wait here to avoid missing WAKE assert for RDDM 6024 * before link recovery 6025 */ 6026 ret = wait_for_completion_timeout(&pci_priv->wake_event_complete, 6027 msecs_to_jiffies(WAKE_EVENT_TIMEOUT)); 6028 if (!ret) 6029 cnss_pr_err("Timeout waiting for wake event after link down\n"); 6030 6031 ret = cnss_suspend_pci_link(pci_priv); 6032 if (ret) 6033 cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret); 6034 6035 ret = cnss_resume_pci_link(pci_priv); 6036 if (ret) { 6037 cnss_pr_err("Failed to resume PCI link, err = %d\n", ret); 6038 del_timer(&pci_priv->dev_rddm_timer); 6039 return ret; 6040 } 6041 6042 retry: 6043 /* 6044 * After PCIe link resumes, 20 to 400 ms delay is observerved 6045 * before device moves to RDDM. 
6046 */ 6047 msleep(RDDM_LINK_RECOVERY_RETRY_DELAY_MS); 6048 mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl); 6049 if (mhi_ee == MHI_EE_RDDM) { 6050 del_timer(&pci_priv->dev_rddm_timer); 6051 cnss_pr_info("Device in RDDM after link recovery, try to collect dump\n"); 6052 cnss_schedule_recovery(&pci_priv->pci_dev->dev, 6053 CNSS_REASON_RDDM); 6054 return 0; 6055 } else if (retry++ < RDDM_LINK_RECOVERY_RETRY) { 6056 cnss_pr_dbg("Wait for RDDM after link recovery, retry #%d, Device EE: %d\n", 6057 retry, mhi_ee); 6058 goto retry; 6059 } 6060 6061 if (!cnss_pci_assert_host_sol(pci_priv)) 6062 return 0; 6063 cnss_mhi_debug_reg_dump(pci_priv); 6064 cnss_pci_bhi_debug_reg_dump(pci_priv); 6065 cnss_pci_soc_scratch_reg_dump(pci_priv); 6066 cnss_schedule_recovery(&pci_priv->pci_dev->dev, 6067 CNSS_REASON_TIMEOUT); 6068 return 0; 6069 } 6070 6071 int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv) 6072 { 6073 int ret; 6074 struct cnss_plat_data *plat_priv; 6075 6076 if (!pci_priv) 6077 return -ENODEV; 6078 6079 plat_priv = pci_priv->plat_priv; 6080 if (!plat_priv) 6081 return -ENODEV; 6082 6083 if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) || 6084 test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) 6085 return -EINVAL; 6086 /* 6087 * Call pm_runtime_get_sync insteat of auto_resume to get 6088 * reference and make sure runtime_suspend wont get called. 6089 */ 6090 ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS); 6091 if (ret < 0) 6092 goto runtime_pm_put; 6093 /* 6094 * In some scenarios, cnss_pci_pm_runtime_get_sync 6095 * might not resume PCI bus. For those cases do auto resume. 
6096 */ 6097 cnss_auto_resume(&pci_priv->pci_dev->dev); 6098 6099 if (!pci_priv->is_smmu_fault) 6100 cnss_pci_mhi_reg_dump(pci_priv); 6101 6102 /* If link is still down here, directly trigger link down recovery */ 6103 ret = cnss_pci_check_link_status(pci_priv); 6104 if (ret) { 6105 cnss_pci_link_down(&pci_priv->pci_dev->dev); 6106 cnss_pci_pm_runtime_mark_last_busy(pci_priv); 6107 cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS); 6108 return 0; 6109 } 6110 6111 /* 6112 * Fist try MHI SYS_ERR, if fails try HOST SOL and return. 6113 * If SOL is not enabled try HOST Reset Rquest after MHI 6114 * SYS_ERRR fails. 6115 */ 6116 ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM); 6117 if (ret) { 6118 if (pci_priv->is_smmu_fault) { 6119 cnss_pci_mhi_reg_dump(pci_priv); 6120 pci_priv->is_smmu_fault = false; 6121 } 6122 if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) || 6123 test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) { 6124 cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n"); 6125 cnss_pci_pm_runtime_mark_last_busy(pci_priv); 6126 cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS); 6127 return 0; 6128 } 6129 cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret); 6130 if (!cnss_pci_assert_host_sol(pci_priv)) { 6131 cnss_pci_pm_runtime_mark_last_busy(pci_priv); 6132 cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS); 6133 return 0; 6134 } 6135 6136 cnss_pr_dbg("Sending Host Reset Req\n"); 6137 if (!cnss_mhi_force_reset(pci_priv)) { 6138 ret = 0; 6139 goto mhi_reg_dump; 6140 } 6141 6142 cnss_pci_dump_debug_reg(pci_priv); 6143 cnss_schedule_recovery(&pci_priv->pci_dev->dev, 6144 CNSS_REASON_DEFAULT); 6145 ret = 0; 6146 goto runtime_pm_put; 6147 } 6148 6149 mhi_reg_dump: 6150 if (pci_priv->is_smmu_fault) { 6151 cnss_pci_mhi_reg_dump(pci_priv); 6152 pci_priv->is_smmu_fault = false; 6153 } 6154 6155 if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) { 6156 mod_timer(&pci_priv->dev_rddm_timer, 6157 
jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT)); 6158 } 6159 6160 runtime_pm_put: 6161 cnss_pci_pm_runtime_mark_last_busy(pci_priv); 6162 cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS); 6163 return ret; 6164 } 6165 6166 static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv, 6167 struct cnss_dump_seg *dump_seg, 6168 enum cnss_fw_dump_type type, int seg_no, 6169 void *va, dma_addr_t dma, size_t size) 6170 { 6171 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 6172 struct device *dev = &pci_priv->pci_dev->dev; 6173 phys_addr_t pa; 6174 6175 dump_seg->address = dma; 6176 dump_seg->v_address = va; 6177 dump_seg->size = size; 6178 dump_seg->type = type; 6179 6180 cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n", 6181 seg_no, va, &dma, size); 6182 6183 if (type == CNSS_FW_CAL || cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS)) 6184 return; 6185 6186 cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size); 6187 } 6188 6189 static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv, 6190 struct cnss_dump_seg *dump_seg, 6191 enum cnss_fw_dump_type type, int seg_no, 6192 void *va, dma_addr_t dma, size_t size) 6193 { 6194 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 6195 struct device *dev = &pci_priv->pci_dev->dev; 6196 phys_addr_t pa; 6197 6198 cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS); 6199 cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size); 6200 } 6201 6202 int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv, 6203 enum cnss_driver_status status, void *data) 6204 { 6205 struct cnss_uevent_data uevent_data; 6206 struct cnss_wlan_driver *driver_ops; 6207 6208 driver_ops = pci_priv->driver_ops; 6209 if (!driver_ops || !driver_ops->update_event) { 6210 cnss_pr_dbg("Hang event driver ops is NULL\n"); 6211 return -EINVAL; 6212 } 6213 6214 cnss_pr_dbg("Calling driver uevent: %d\n", status); 6215 6216 uevent_data.status = status; 6217 
uevent_data.data = data; 6218 6219 return driver_ops->update_event(pci_priv->pci_dev, &uevent_data); 6220 } 6221 6222 static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv) 6223 { 6224 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 6225 struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; 6226 struct cnss_hang_event hang_event; 6227 void *hang_data_va = NULL; 6228 u64 offset = 0; 6229 u16 length = 0; 6230 int i = 0; 6231 6232 if (!fw_mem || !plat_priv->fw_mem_seg_len) 6233 return; 6234 6235 memset(&hang_event, 0, sizeof(hang_event)); 6236 switch (pci_priv->device_id) { 6237 case QCA6390_DEVICE_ID: 6238 offset = HST_HANG_DATA_OFFSET; 6239 length = HANG_DATA_LENGTH; 6240 break; 6241 case QCA6490_DEVICE_ID: 6242 /* Fallback to hard-coded values if hang event params not 6243 * present in QMI. Once all the firmware branches have the 6244 * fix to send params over QMI, this can be removed. 6245 */ 6246 if (plat_priv->hang_event_data_len) { 6247 offset = plat_priv->hang_data_addr_offset; 6248 length = plat_priv->hang_event_data_len; 6249 } else { 6250 offset = HSP_HANG_DATA_OFFSET; 6251 length = HANG_DATA_LENGTH; 6252 } 6253 break; 6254 case KIWI_DEVICE_ID: 6255 case MANGO_DEVICE_ID: 6256 case PEACH_DEVICE_ID: 6257 offset = plat_priv->hang_data_addr_offset; 6258 length = plat_priv->hang_event_data_len; 6259 break; 6260 case QCN7605_DEVICE_ID: 6261 offset = GNO_HANG_DATA_OFFSET; 6262 length = HANG_DATA_LENGTH; 6263 break; 6264 default: 6265 cnss_pr_err("Skip Hang Event Data as unsupported Device ID received: %d\n", 6266 pci_priv->device_id); 6267 return; 6268 } 6269 6270 for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { 6271 if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 && 6272 fw_mem[i].va) { 6273 /* The offset must be < (fw_mem size- hangdata length) */ 6274 if (!(offset <= fw_mem[i].size - length)) 6275 goto exit; 6276 6277 hang_data_va = fw_mem[i].va + offset; 6278 hang_event.hang_event_data = kmemdup(hang_data_va, 6279 length, 6280 GFP_ATOMIC); 
6281 if (!hang_event.hang_event_data) { 6282 cnss_pr_dbg("Hang data memory alloc failed\n"); 6283 return; 6284 } 6285 hang_event.hang_event_data_len = length; 6286 break; 6287 } 6288 } 6289 6290 cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event); 6291 6292 kfree(hang_event.hang_event_data); 6293 hang_event.hang_event_data = NULL; 6294 return; 6295 exit: 6296 cnss_pr_dbg("Invalid hang event params, offset:0x%x, length:0x%x\n", 6297 plat_priv->hang_data_addr_offset, 6298 plat_priv->hang_event_data_len); 6299 } 6300 6301 #ifdef CONFIG_CNSS2_SSR_DRIVER_DUMP 6302 void cnss_pci_collect_host_dump_info(struct cnss_pci_data *pci_priv) 6303 { 6304 struct cnss_ssr_driver_dump_entry *ssr_entry; 6305 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 6306 size_t num_entries_loaded = 0; 6307 int x; 6308 int ret = -1; 6309 6310 ssr_entry = kmalloc(sizeof(*ssr_entry) * CNSS_HOST_DUMP_TYPE_MAX, GFP_KERNEL); 6311 if (!ssr_entry) { 6312 cnss_pr_err("ssr_entry malloc failed"); 6313 return; 6314 } 6315 6316 if (pci_priv->driver_ops && 6317 pci_priv->driver_ops->collect_driver_dump) { 6318 ret = pci_priv->driver_ops->collect_driver_dump(pci_priv->pci_dev, 6319 ssr_entry, 6320 &num_entries_loaded); 6321 } 6322 6323 if (!ret) { 6324 for (x = 0; x < num_entries_loaded; x++) { 6325 cnss_pr_info("Idx:%d, ptr: %p, name: %s, size: %d\n", 6326 x, ssr_entry[x].buffer_pointer, 6327 ssr_entry[x].region_name, 6328 ssr_entry[x].buffer_size); 6329 } 6330 6331 cnss_do_host_ramdump(plat_priv, ssr_entry, num_entries_loaded); 6332 } else { 6333 cnss_pr_info("Host SSR elf dump collection feature disabled\n"); 6334 } 6335 6336 kfree(ssr_entry); 6337 } 6338 #endif 6339 6340 void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic) 6341 { 6342 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 6343 struct cnss_dump_data *dump_data = 6344 &plat_priv->ramdump_info_v2.dump_data; 6345 struct cnss_dump_seg *dump_seg = 6346 plat_priv->ramdump_info_v2.dump_data_vaddr; 
6347 struct image_info *fw_image, *rddm_image; 6348 struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; 6349 int ret, i, j; 6350 6351 if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) && 6352 !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) 6353 cnss_pci_send_hang_event(pci_priv); 6354 6355 if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) { 6356 cnss_pr_dbg("RAM dump is already collected, skip\n"); 6357 return; 6358 } 6359 6360 if (!cnss_is_device_powered_on(plat_priv)) { 6361 cnss_pr_dbg("Device is already powered off, skip\n"); 6362 return; 6363 } 6364 6365 if (!in_panic) { 6366 mutex_lock(&pci_priv->bus_lock); 6367 ret = cnss_pci_check_link_status(pci_priv); 6368 if (ret) { 6369 if (ret != -EACCES) { 6370 mutex_unlock(&pci_priv->bus_lock); 6371 return; 6372 } 6373 if (cnss_pci_resume_bus(pci_priv)) { 6374 mutex_unlock(&pci_priv->bus_lock); 6375 return; 6376 } 6377 } 6378 mutex_unlock(&pci_priv->bus_lock); 6379 } else { 6380 if (cnss_pci_check_link_status(pci_priv)) 6381 return; 6382 /* Inside panic handler, reduce timeout for RDDM to avoid 6383 * unnecessary hypervisor watchdog bite. 
6384 */ 6385 pci_priv->mhi_ctrl->timeout_ms /= 2; 6386 } 6387 6388 cnss_mhi_debug_reg_dump(pci_priv); 6389 cnss_pci_bhi_debug_reg_dump(pci_priv); 6390 cnss_pci_soc_scratch_reg_dump(pci_priv); 6391 cnss_pci_dump_misc_reg(pci_priv); 6392 cnss_rddm_trigger_debug(pci_priv); 6393 ret = mhi_download_rddm_image(pci_priv->mhi_ctrl, in_panic); 6394 if (ret) { 6395 cnss_fatal_err("Failed to download RDDM image, err = %d\n", 6396 ret); 6397 if (!cnss_pci_assert_host_sol(pci_priv)) 6398 return; 6399 cnss_rddm_trigger_check(pci_priv); 6400 cnss_pci_dump_debug_reg(pci_priv); 6401 return; 6402 } 6403 cnss_rddm_trigger_check(pci_priv); 6404 fw_image = pci_priv->mhi_ctrl->fbc_image; 6405 rddm_image = pci_priv->mhi_ctrl->rddm_image; 6406 dump_data->nentries = 0; 6407 6408 if (plat_priv->qdss_mem_seg_len) 6409 cnss_pci_dump_qdss_reg(pci_priv); 6410 cnss_mhi_dump_sfr(pci_priv); 6411 6412 if (!dump_seg) { 6413 cnss_pr_warn("FW image dump collection not setup"); 6414 goto skip_dump; 6415 } 6416 6417 cnss_pr_dbg("Collect FW image dump segment, nentries %d\n", 6418 fw_image->entries); 6419 6420 for (i = 0; i < fw_image->entries; i++) { 6421 cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i, 6422 fw_image->mhi_buf[i].buf, 6423 fw_image->mhi_buf[i].dma_addr, 6424 fw_image->mhi_buf[i].len); 6425 dump_seg++; 6426 } 6427 6428 dump_data->nentries += fw_image->entries; 6429 6430 cnss_pr_dbg("Collect RDDM image dump segment, nentries %d\n", 6431 rddm_image->entries); 6432 6433 for (i = 0; i < rddm_image->entries; i++) { 6434 cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i, 6435 rddm_image->mhi_buf[i].buf, 6436 rddm_image->mhi_buf[i].dma_addr, 6437 rddm_image->mhi_buf[i].len); 6438 dump_seg++; 6439 } 6440 6441 dump_data->nentries += rddm_image->entries; 6442 6443 for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) { 6444 if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) { 6445 if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) { 6446 cnss_pr_dbg("Collect remote heap dump segment\n"); 
6447 cnss_pci_add_dump_seg(pci_priv, dump_seg, 6448 CNSS_FW_REMOTE_HEAP, j, 6449 fw_mem[i].va, 6450 fw_mem[i].pa, 6451 fw_mem[i].size); 6452 dump_seg++; 6453 dump_data->nentries++; 6454 j++; 6455 } else { 6456 cnss_pr_dbg("Skip remote heap dumps as it is non-contiguous\n"); 6457 } 6458 } else if (fw_mem[i].type == CNSS_MEM_CAL_V01) { 6459 cnss_pr_dbg("Collect CAL memory dump segment\n"); 6460 cnss_pci_add_dump_seg(pci_priv, dump_seg, 6461 CNSS_FW_CAL, j, 6462 fw_mem[i].va, 6463 fw_mem[i].pa, 6464 fw_mem[i].size); 6465 dump_seg++; 6466 dump_data->nentries++; 6467 j++; 6468 } 6469 } 6470 6471 if (dump_data->nentries > 0) 6472 plat_priv->ramdump_info_v2.dump_data_valid = true; 6473 6474 cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE); 6475 6476 skip_dump: 6477 complete(&plat_priv->rddm_complete); 6478 } 6479 6480 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv) 6481 { 6482 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 6483 struct cnss_dump_seg *dump_seg = 6484 plat_priv->ramdump_info_v2.dump_data_vaddr; 6485 struct image_info *fw_image, *rddm_image; 6486 struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; 6487 int i, j; 6488 6489 if (!dump_seg) 6490 return; 6491 6492 fw_image = pci_priv->mhi_ctrl->fbc_image; 6493 rddm_image = pci_priv->mhi_ctrl->rddm_image; 6494 6495 for (i = 0; i < fw_image->entries; i++) { 6496 cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i, 6497 fw_image->mhi_buf[i].buf, 6498 fw_image->mhi_buf[i].dma_addr, 6499 fw_image->mhi_buf[i].len); 6500 dump_seg++; 6501 } 6502 6503 for (i = 0; i < rddm_image->entries; i++) { 6504 cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i, 6505 rddm_image->mhi_buf[i].buf, 6506 rddm_image->mhi_buf[i].dma_addr, 6507 rddm_image->mhi_buf[i].len); 6508 dump_seg++; 6509 } 6510 6511 for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) { 6512 if (fw_mem[i].type == CNSS_MEM_TYPE_DDR && 6513 (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) { 6514 
cnss_pci_remove_dump_seg(pci_priv, dump_seg, 6515 CNSS_FW_REMOTE_HEAP, j, 6516 fw_mem[i].va, fw_mem[i].pa, 6517 fw_mem[i].size); 6518 dump_seg++; 6519 j++; 6520 } else if (fw_mem[i].type == CNSS_MEM_CAL_V01) { 6521 cnss_pci_remove_dump_seg(pci_priv, dump_seg, 6522 CNSS_FW_CAL, j, 6523 fw_mem[i].va, fw_mem[i].pa, 6524 fw_mem[i].size); 6525 dump_seg++; 6526 j++; 6527 } 6528 } 6529 6530 plat_priv->ramdump_info_v2.dump_data.nentries = 0; 6531 plat_priv->ramdump_info_v2.dump_data_valid = false; 6532 } 6533 6534 void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv) 6535 { 6536 struct cnss_plat_data *plat_priv; 6537 6538 if (!pci_priv) { 6539 cnss_pr_err("pci_priv is NULL\n"); 6540 return; 6541 } 6542 6543 plat_priv = pci_priv->plat_priv; 6544 if (!plat_priv) { 6545 cnss_pr_err("plat_priv is NULL\n"); 6546 return; 6547 } 6548 6549 if (plat_priv->recovery_enabled) 6550 cnss_pci_collect_host_dump_info(pci_priv); 6551 6552 /* Call recovery handler in the DRIVER_RECOVERY event context 6553 * instead of scheduling work. In that way complete recovery 6554 * will be done as part of DRIVER_RECOVERY event and get 6555 * serialized with other events. 
6556 */ 6557 cnss_recovery_handler(plat_priv); 6558 } 6559 6560 static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl) 6561 { 6562 struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev); 6563 6564 return cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_MHI); 6565 } 6566 6567 static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl) 6568 { 6569 struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev); 6570 6571 cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_MHI); 6572 } 6573 6574 void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv, 6575 char *prefix_name, char *name) 6576 { 6577 struct cnss_plat_data *plat_priv; 6578 6579 if (!pci_priv) 6580 return; 6581 6582 plat_priv = pci_priv->plat_priv; 6583 6584 if (!plat_priv->use_fw_path_with_prefix) { 6585 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name); 6586 return; 6587 } 6588 6589 switch (pci_priv->device_id) { 6590 case QCN7605_DEVICE_ID: 6591 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, 6592 QCN7605_PATH_PREFIX "%s", name); 6593 break; 6594 case QCA6390_DEVICE_ID: 6595 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, 6596 QCA6390_PATH_PREFIX "%s", name); 6597 break; 6598 case QCA6490_DEVICE_ID: 6599 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, 6600 QCA6490_PATH_PREFIX "%s", name); 6601 break; 6602 case KIWI_DEVICE_ID: 6603 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, 6604 KIWI_PATH_PREFIX "%s", name); 6605 break; 6606 case MANGO_DEVICE_ID: 6607 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, 6608 MANGO_PATH_PREFIX "%s", name); 6609 break; 6610 case PEACH_DEVICE_ID: 6611 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, 6612 PEACH_PATH_PREFIX "%s", name); 6613 break; 6614 default: 6615 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name); 6616 break; 6617 } 6618 6619 cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name); 6620 } 6621 6622 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv) 6623 { 6624 struct cnss_plat_data 
*plat_priv = pci_priv->plat_priv;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		/* Only FW v2+ is supported on QCA6390 */
		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
				    pci_priv->device_id,
				    plat_priv->device_version.major_version);
			return -EINVAL;
		}
		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
					    FW_V2_FILE_NAME);
		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
			 FW_V2_FILE_NAME);
		break;
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		switch (plat_priv->device_version.major_version) {
		case FW_V2_NUMBER:
			cnss_pci_add_fw_prefix_name(pci_priv,
						    plat_priv->firmware_name,
						    FW_V2_FILE_NAME);
			snprintf(plat_priv->fw_fallback_name,
				 MAX_FIRMWARE_NAME_LEN,
				 FW_V2_FILE_NAME);
			break;
		default:
			cnss_pci_add_fw_prefix_name(pci_priv,
						    plat_priv->firmware_name,
						    DEFAULT_FW_FILE_NAME);
			snprintf(plat_priv->fw_fallback_name,
				 MAX_FIRMWARE_NAME_LEN,
				 DEFAULT_FW_FILE_NAME);
			break;
		}
		break;
	default:
		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
					    DEFAULT_FW_FILE_NAME);
		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
			 DEFAULT_FW_FILE_NAME);
		break;
	}

	cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
		    plat_priv->firmware_name, plat_priv->fw_fallback_name);

	return 0;
}

/* Map an MHI callback reason to a printable string for logging. */
static char *cnss_mhi_notify_status_to_str(enum mhi_callback status)
{
	switch (status) {
	case MHI_CB_IDLE:
		return "IDLE";
	case MHI_CB_EE_RDDM:
		return "RDDM";
	case MHI_CB_SYS_ERROR:
		return "SYS_ERROR";
	case MHI_CB_FATAL_ERROR:
		return "FATAL_ERROR";
	case MHI_CB_EE_MISSION_MODE:
		return "MISSION_MODE";
#if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
	(LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
	case MHI_CB_FALLBACK_IMG:
		return "FW_FALLBACK";
#endif
	default:
		return "UNKNOWN";
	}
}; /* NOTE(review): stray semicolon after function body — harmless */

/*
 * cnss_dev_rddm_timeout_hdlr() - dev_rddm_timer callback: the device did
 * not signal RDDM in time. If it is actually in RDDM, schedule RDDM
 * recovery; otherwise try host SOL and fall back to timeout recovery
 * with register dumps.
 */
static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
{
	struct cnss_pci_data *pci_priv =
		from_timer(pci_priv, t, dev_rddm_timer);
	enum mhi_ee_type mhi_ee;

	if (!pci_priv)
		return;

	cnss_fatal_err("Timeout waiting for RDDM notification\n");

	mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	if (mhi_ee == MHI_EE_PBL)
		cnss_pr_err("Device MHI EE is PBL, unable to collect dump\n");

	if (mhi_ee == MHI_EE_RDDM) {
		cnss_pr_info("Device MHI EE is RDDM, try to collect dump\n");
		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
				       CNSS_REASON_RDDM);
	} else {
		if (!cnss_pci_assert_host_sol(pci_priv))
			return;
		cnss_mhi_debug_reg_dump(pci_priv);
		cnss_pci_bhi_debug_reg_dump(pci_priv);
		cnss_pci_soc_scratch_reg_dump(pci_priv);
		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
				       CNSS_REASON_TIMEOUT);
	}
}

/*
 * cnss_boot_debug_timeout_hdlr() - boot_debug_timer callback: while MHI
 * power-on is still pending, periodically dump MHI/PBL/SBL debug data
 * and re-arm. Stops when the link is down, the device is down, MHI has
 * powered on, or an RDDM cookie is found.
 */
static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
{
	struct cnss_pci_data *pci_priv =
		from_timer(pci_priv, t, boot_debug_timer);

	if (!pci_priv)
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
		return;

	if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
		return;

	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE))
		return;

	cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
		    BOOT_DEBUG_TIMEOUT_MS / 1000);
	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_bhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);
	cnss_pci_dump_bl_sram_mem(pci_priv);

	mod_timer(&pci_priv->boot_debug_timer,
		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
}
6759 6760 static int cnss_pci_handle_mhi_sys_err(struct cnss_pci_data *pci_priv) 6761 { 6762 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 6763 6764 cnss_ignore_qmi_failure(true); 6765 set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state); 6766 del_timer(&plat_priv->fw_boot_timer); 6767 reinit_completion(&pci_priv->wake_event_complete); 6768 mod_timer(&pci_priv->dev_rddm_timer, 6769 jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT)); 6770 cnss_pci_update_status(pci_priv, CNSS_FW_DOWN); 6771 6772 return 0; 6773 } 6774 6775 int cnss_pci_handle_dev_sol_irq(struct cnss_pci_data *pci_priv) 6776 { 6777 return cnss_pci_handle_mhi_sys_err(pci_priv); 6778 } 6779 6780 static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl, 6781 enum mhi_callback reason) 6782 { 6783 struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev); 6784 struct cnss_plat_data *plat_priv; 6785 enum cnss_recovery_reason cnss_reason; 6786 6787 if (!pci_priv) { 6788 cnss_pr_err("pci_priv is NULL"); 6789 return; 6790 } 6791 6792 plat_priv = pci_priv->plat_priv; 6793 6794 if (reason != MHI_CB_IDLE) 6795 cnss_pr_dbg("MHI status cb is called with reason %s(%d)\n", 6796 cnss_mhi_notify_status_to_str(reason), reason); 6797 6798 switch (reason) { 6799 case MHI_CB_IDLE: 6800 case MHI_CB_EE_MISSION_MODE: 6801 return; 6802 case MHI_CB_FATAL_ERROR: 6803 cnss_ignore_qmi_failure(true); 6804 set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state); 6805 del_timer(&plat_priv->fw_boot_timer); 6806 cnss_pci_update_status(pci_priv, CNSS_FW_DOWN); 6807 cnss_reason = CNSS_REASON_DEFAULT; 6808 break; 6809 case MHI_CB_SYS_ERROR: 6810 cnss_pci_handle_mhi_sys_err(pci_priv); 6811 return; 6812 case MHI_CB_EE_RDDM: 6813 cnss_ignore_qmi_failure(true); 6814 set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state); 6815 del_timer(&plat_priv->fw_boot_timer); 6816 del_timer(&pci_priv->dev_rddm_timer); 6817 cnss_pci_update_status(pci_priv, CNSS_FW_DOWN); 6818 cnss_reason = CNSS_REASON_RDDM; 6819 break; 6820 #if 
IS_ENABLED(CONFIG_MHI_BUS_MISC) && \ 6821 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)) 6822 case MHI_CB_FALLBACK_IMG: 6823 plat_priv->use_fw_path_with_prefix = false; 6824 cnss_pci_update_fw_name(pci_priv); 6825 return; 6826 #endif 6827 6828 default: 6829 cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason); 6830 return; 6831 } 6832 6833 cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason); 6834 } 6835 6836 static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv) 6837 { 6838 int ret, num_vectors, i; 6839 u32 user_base_data, base_vector; 6840 int *irq; 6841 unsigned int msi_data; 6842 bool is_one_msi = false; 6843 6844 ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev, 6845 MHI_MSI_NAME, &num_vectors, 6846 &user_base_data, &base_vector); 6847 if (ret) 6848 return ret; 6849 6850 if (cnss_pci_is_one_msi(pci_priv)) { 6851 is_one_msi = true; 6852 num_vectors = cnss_pci_get_one_msi_mhi_irq_array_size(pci_priv); 6853 } 6854 cnss_pr_dbg("Number of assigned MSI for MHI is %d, base vector is %d\n", 6855 num_vectors, base_vector); 6856 6857 irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL); 6858 if (!irq) 6859 return -ENOMEM; 6860 6861 for (i = 0; i < num_vectors; i++) { 6862 msi_data = base_vector; 6863 if (!is_one_msi) 6864 msi_data += i; 6865 irq[i] = cnss_get_msi_irq(&pci_priv->pci_dev->dev, msi_data); 6866 } 6867 6868 pci_priv->mhi_ctrl->irq = irq; 6869 pci_priv->mhi_ctrl->nr_irqs = num_vectors; 6870 6871 return 0; 6872 } 6873 6874 static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl, 6875 struct mhi_link_info *link_info) 6876 { 6877 struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev); 6878 struct cnss_plat_data *plat_priv = pci_priv->plat_priv; 6879 int ret = 0; 6880 6881 cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n", 6882 link_info->target_link_speed, 6883 link_info->target_link_width); 6884 6885 /* It has to set target link speed here before setting link bandwidth 6886 * when device requests link 
	 * speed change. This can avoid setting link
	 * bandwidth getting rejected if requested link speed is higher than
	 * current one.
	 */
	ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num,
					  link_info->target_link_speed);
	if (ret)
		cnss_pr_err("Failed to set target link speed to 0x%x, err = %d\n",
			    link_info->target_link_speed, ret);

	ret = cnss_pci_set_link_bandwidth(pci_priv,
					  link_info->target_link_speed,
					  link_info->target_link_width);

	if (ret) {
		cnss_pr_err("Failed to set link bandwidth, err = %d\n", ret);
		return ret;
	}

	pci_priv->def_link_speed = link_info->target_link_speed;
	pci_priv->def_link_width = link_info->target_link_width;

	return 0;
}

/* MHI register read hook: a value of all-ones may mean the PCIe link dropped,
 * so confirm link status before reporting -EIO.
 */
static int cnss_mhi_read_reg(struct mhi_controller *mhi_ctrl,
			     void __iomem *addr, u32 *out)
{
	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);

	u32 tmp = readl_relaxed(addr);

	/* Unexpected value, query the link status */
	if (PCI_INVALID_READ(tmp) &&
	    cnss_pci_check_link_status(pci_priv))
		return -EIO;

	*out = tmp;

	return 0;
}

/* MHI register write hook; relaxed MMIO write, no ordering barrier. */
static void cnss_mhi_write_reg(struct mhi_controller *mhi_ctrl,
			       void __iomem *addr, u32 val)
{
	writel_relaxed(val, addr);
}

#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
/**
 * __cnss_get_mhi_soc_info - Get SoC info before registering mhi controller
 * @mhi_ctrl: MHI controller
 *
 * Return: 0 for success, error code on failure
 */
static inline int __cnss_get_mhi_soc_info(struct mhi_controller *mhi_ctrl)
{
	return mhi_get_soc_info(mhi_ctrl);
}
#else
/* Fallback when mhi_get_soc_info() is unavailable: decode the SoC HW version
 * register directly through the controller's read_reg hook.
 */
#define SOC_HW_VERSION_OFFS		(0x224)
#define SOC_HW_VERSION_FAM_NUM_BMSK	(0xF0000000)
#define SOC_HW_VERSION_FAM_NUM_SHFT	(28)
#define SOC_HW_VERSION_DEV_NUM_BMSK	(0x0FFF0000)
#define SOC_HW_VERSION_DEV_NUM_SHFT	(16)
#define SOC_HW_VERSION_MAJOR_VER_BMSK	(0x0000FF00)
#define SOC_HW_VERSION_MAJOR_VER_SHFT	(8)
#define SOC_HW_VERSION_MINOR_VER_BMSK	(0x000000FF)
#define SOC_HW_VERSION_MINOR_VER_SHFT	(0)

static int __cnss_get_mhi_soc_info(struct mhi_controller *mhi_ctrl)
{
	u32 soc_info;
	int ret;

	ret = mhi_ctrl->read_reg(mhi_ctrl,
				 mhi_ctrl->regs + SOC_HW_VERSION_OFFS,
				 &soc_info);
	if (ret)
		return ret;

	mhi_ctrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
		SOC_HW_VERSION_FAM_NUM_SHFT;
	mhi_ctrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
		SOC_HW_VERSION_DEV_NUM_SHFT;
	mhi_ctrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
		SOC_HW_VERSION_MAJOR_VER_SHFT;
	mhi_ctrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
		SOC_HW_VERSION_MINOR_VER_SHFT;
	return 0;
}
#endif

/* Copy SoC version fields from the MHI controller into plat_priv's
 * device_version, masking the major version down to its low 4 bits.
 */
static int cnss_get_mhi_soc_info(struct cnss_plat_data *plat_priv,
				 struct mhi_controller *mhi_ctrl)
{
	int ret = 0;

	ret = __cnss_get_mhi_soc_info(mhi_ctrl);
	if (ret) {
		cnss_pr_err("failed to get mhi soc info, ret %d\n", ret);
		goto exit;
	}

	plat_priv->device_version.family_number = mhi_ctrl->family_number;
	plat_priv->device_version.device_number = mhi_ctrl->device_number;
	plat_priv->device_version.major_version = mhi_ctrl->major_version;
	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;

	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
		    plat_priv->device_version.family_number,
		    plat_priv->device_version.device_number,
		    plat_priv->device_version.major_version,
		    plat_priv->device_version.minor_version);

	/* Only keep lower 4 bits as real device major version */
	plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK;

exit:
	return ret;
}

/* TME (firmware patch) images are only used on PEACH in this driver. */
static bool cnss_is_tme_supported(struct cnss_pci_data *pci_priv)
{
	if (!pci_priv) {
		cnss_pr_dbg("pci_priv is NULL");
		return false;
	}

	switch (pci_priv->device_id) {
	case PEACH_DEVICE_ID:
		return true;
	default:
		return false;
	}
}

/* Allocate and configure an MHI controller for this device and register it
 * with the MHI bus. QCA6174 has no MHI and returns early. On failure the
 * goto ladder unwinds in reverse order of acquisition.
 */
static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct mhi_controller *mhi_ctrl;
	phys_addr_t bar_start;
	const struct mhi_controller_config *cnss_mhi_config =
		&cnss_mhi_config_default;

	ret = cnss_qmi_init(plat_priv);
	if (ret)
		return -EINVAL;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	mhi_ctrl = mhi_alloc_controller();
	if (!mhi_ctrl) {
		cnss_pr_err("Invalid MHI controller context\n");
		return -EINVAL;
	}

	pci_priv->mhi_ctrl = mhi_ctrl;
	mhi_ctrl->cntrl_dev = &pci_dev->dev;

	mhi_ctrl->fw_image = plat_priv->firmware_name;
#if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
	(LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
	mhi_ctrl->fallback_fw_image = plat_priv->fw_fallback_name;
#endif

	mhi_ctrl->regs = pci_priv->bar;
	mhi_ctrl->reg_len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
	bar_start = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
	cnss_pr_dbg("BAR starts at %pa, length is %x\n",
		    &bar_start, mhi_ctrl->reg_len);

	ret = cnss_pci_get_mhi_msi(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
		goto free_mhi_ctrl;
	}

	if (cnss_pci_is_one_msi(pci_priv))
		mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;

	if (pci_priv->smmu_s1_enable) {
		mhi_ctrl->iova_start = pci_priv->smmu_iova_start;
		mhi_ctrl->iova_stop = pci_priv->smmu_iova_start +
			      pci_priv->smmu_iova_len;
	} else {
		/* No SMMU S1: DMA addresses are limited only by the DMA mask */
		mhi_ctrl->iova_start = 0;
		mhi_ctrl->iova_stop = pci_priv->dma_bit_mask;
	}

	mhi_ctrl->status_cb = cnss_mhi_notify_status;
	mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
	mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
	mhi_ctrl->read_reg = cnss_mhi_read_reg;
	mhi_ctrl->write_reg = cnss_mhi_write_reg;

	mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
	if (!mhi_ctrl->rddm_size)
		mhi_ctrl->rddm_size = RAMDUMP_SIZE_DEFAULT;

	if (plat_priv->device_id == QCN7605_DEVICE_ID)
		mhi_ctrl->sbl_size = SZ_256K;
	else
		mhi_ctrl->sbl_size = SZ_512K;

	mhi_ctrl->seg_len = SZ_512K;
	mhi_ctrl->fbc_download = true;

	ret = cnss_get_mhi_soc_info(plat_priv, mhi_ctrl);
	if (ret)
		goto free_mhi_irq;

	/* Satellite config only supported on KIWI V2 and later chipset */
	if (plat_priv->device_id <= QCA6490_DEVICE_ID ||
	    (plat_priv->device_id == KIWI_DEVICE_ID &&
	     plat_priv->device_version.major_version == 1)) {
		if (plat_priv->device_id == QCN7605_DEVICE_ID)
			cnss_mhi_config = &cnss_mhi_config_genoa;
		else
			cnss_mhi_config = &cnss_mhi_config_no_satellite;
	}

	/* DIAG no longer supported on PEACH and later chipset */
	if (plat_priv->device_id >= PEACH_DEVICE_ID) {
		cnss_mhi_config = &cnss_mhi_config_no_diag;
	}

	mhi_ctrl->tme_supported_image = cnss_is_tme_supported(pci_priv);

	ret = mhi_register_controller(mhi_ctrl, cnss_mhi_config);
	if (ret) {
		cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
		goto free_mhi_irq;
	}

	/* MHI satellite driver only needs to connect when DRV is supported */
	if (cnss_pci_get_drv_supported(pci_priv))
		cnss_mhi_controller_set_base(pci_priv, bar_start);

	cnss_get_bwscal_info(plat_priv);
	cnss_pr_dbg("no_bwscale: %d\n", plat_priv->no_bwscale);

	/* BW scale CB needs to be set after registering MHI per requirement */
	if (!plat_priv->no_bwscale)
		cnss_mhi_controller_set_bw_scale_cb(pci_priv,
						    cnss_mhi_bw_scale);

	ret = cnss_pci_update_fw_name(pci_priv);
	if (ret)
		goto unreg_mhi;

	return 0;

unreg_mhi:
	mhi_unregister_controller(mhi_ctrl);
free_mhi_irq:
	kfree(mhi_ctrl->irq);
free_mhi_ctrl:
	mhi_free_controller(mhi_ctrl);

	return ret;
}

/* Undo cnss_pci_register_mhi(): unregister from the MHI bus and release the
 * IRQ array and controller. No-op for QCA6174 which never registered.
 */
static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
{
	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return;

	mhi_unregister_controller(mhi_ctrl);
	kfree(mhi_ctrl->irq);
	mhi_ctrl->irq = NULL;
	mhi_free_controller(mhi_ctrl);
	pci_priv->mhi_ctrl = NULL;
}

/* Select the per-chip debug register access sequences used by the misc
 * register dump paths. Devices not listed keep the fields unset.
 */
static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
{
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6390;
		pci_priv->wcss_reg = wcss_reg_access_seq;
		pci_priv->pcie_reg = pcie_reg_access_seq;
		pci_priv->wlaon_reg = wlaon_reg_access_seq;
		pci_priv->syspm_reg = syspm_reg_access_seq;

		/* Configure WDOG register with specific value so that we can
		 * know if HW is in the process of WDOG reset recovery or not
		 * when reading the registers.
		 */
		cnss_pci_reg_write
		(pci_priv,
		 QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
		 QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
		break;
	case QCA6490_DEVICE_ID:
		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6490;
		pci_priv->wlaon_reg = wlaon_reg_access_seq;
		break;
	default:
		return;
	}
}

#if !IS_ENABLED(CONFIG_ARCH_QCOM)
/* Non-QCOM platforms: no reserved-memory region to attach, nothing to do. */
static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
{
	return 0;
}

/* IRQ handler for the WLAN PCI wake GPIO: abort any in-flight system suspend
 * and, if the device is runtime suspended (or suspending), request resume.
 */
static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
{
	struct cnss_pci_data *pci_priv = data;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	enum rpm_status status;
	struct device *dev;

	pci_priv->wake_counter++;
	cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
		    pci_priv->wake_irq, pci_priv->wake_counter);

	/* Make sure abort current suspend */
	cnss_pm_stay_awake(plat_priv);
	cnss_pm_relax(plat_priv);
	/* Above two pm* API calls will abort system suspend only when
	 * plat_dev->dev->ws is initiated by device_init_wakeup() API, and
	 * calling pm_system_wakeup() is just to guarantee system suspend
	 * can be aborted if it is not initiated in any case.
	 */
	pm_system_wakeup();

	dev = &pci_priv->pci_dev->dev;
	status = dev->power.runtime_status;

	if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
	     cnss_pci_get_auto_suspended(pci_priv)) ||
	    (status == RPM_SUSPENDING || status == RPM_SUSPENDED)) {
		cnss_pci_set_monitor_wake_intr(pci_priv, false);
		cnss_pci_pm_request_resume(pci_priv);
	}

	return IRQ_HANDLED;
}

/**
 * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
 * @pci_priv: driver PCI bus context pointer
 *
 * This function initializes WLAN PCI wake GPIO and corresponding
 * interrupt. It should be used in non-MSM platforms whose PCIe
 * root complex driver doesn't handle the GPIO.
 *
 * Return: 0 for success or skip, negative value for error
 */
static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &plat_priv->plat_dev->dev;
	int ret = 0;

	pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
						"wlan-pci-wake-gpio", 0);
	/* GPIO absent in DT is not an error: skip with ret == 0 */
	if (pci_priv->wake_gpio < 0)
		goto out;

	cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
		    pci_priv->wake_gpio);

	ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
	if (ret) {
		cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
			    ret);
		goto out;
	}

	gpio_direction_input(pci_priv->wake_gpio);
	pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);

	ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
			  IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
	if (ret) {
		cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
		goto free_gpio;
	}

	ret = enable_irq_wake(pci_priv->wake_irq);
	if (ret) {
		cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
		goto free_irq;
	}

	return 0;

free_irq:
	free_irq(pci_priv->wake_irq, pci_priv);
free_gpio:
	gpio_free(pci_priv->wake_gpio);
out:
	return ret;
}

/* Release wake IRQ/GPIO acquired by cnss_pci_wake_gpio_init(); safe to call
 * when the GPIO was never found (wake_gpio < 0).
 */
static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->wake_gpio < 0)
		return;

	disable_irq_wake(pci_priv->wake_irq);
	free_irq(pci_priv->wake_irq, pci_priv);
	gpio_free(pci_priv->wake_gpio);
}
#endif

#ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
/* Dual-card builds: mark enumeration finished, then suspend the link of the
 * already-probed card (QCA6390 only) now that it is safe to do so.
 */
static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
{
	int ret = 0;

	/* in the dual wlan card case, if call pci_register_driver after
	 * finishing the first pcie device enumeration, it will cause
	 * the cnss_pci_probe called in advance with the second wlan card,
	 * and the sequence like this:
	 * enter msm_pcie_enumerate -> pci_bus_add_devices -> cnss_pci_probe
	 * -> exit msm_pcie_enumerate.
	 * But the correct sequence we expected is like this:
	 * enter msm_pcie_enumerate -> pci_bus_add_devices ->
	 * exit msm_pcie_enumerate -> cnss_pci_probe.
	 * And this unexpected sequence will make the second wlan card do
	 * pcie link suspend while the pcie enumeration not finished.
	 * So need to add below logical to avoid doing pcie link suspend
	 * if the enumeration has not finish.
	 */
	plat_priv->enumerate_done = true;

	/* Now enumeration is finished, try to suspend PCIe link */
	if (plat_priv->bus_priv) {
		struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
		struct pci_dev *pci_dev = pci_priv->pci_dev;

		switch (pci_dev->device) {
		case QCA6390_DEVICE_ID:
			cnss_pci_set_wlaon_pwr_ctrl(pci_priv,
						    false,
						    true,
						    false);

			cnss_pci_suspend_pwroff(pci_dev);
			break;
		default:
			cnss_pr_err("Unknown PCI device found: 0x%x\n",
				    pci_dev->device);
			ret = -ENODEV;
		}
	}

	return ret;
}
#else
/* Single-card builds: nothing to defer, always succeed. */
static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
{
	return 0;
}
#endif

/* Setting to use this cnss_pm_domain ops will let PM framework override the
 * ops from dev->bus->pm which is pci_dev_pm_ops from pci-driver.c. This ops
 * has to take care everything device driver needed which is currently done
 * from pci_dev_pm_ops.
 */
static struct dev_pm_domain cnss_pm_domain = {
	.ops = {
		SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
					      cnss_pci_resume_noirq)
		SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend,
				   cnss_pci_runtime_resume,
				   cnss_pci_runtime_idle)
	}
};

/* For multi-chip-exchange DT layouts, find the "chip_cfg" child node whose
 * "supported-ids" list contains this device's PCI device id and cache it in
 * plat_priv->dev_node. Returns -EINVAL when no matching node exists.
 */
static int cnss_pci_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
{
	struct device_node *child;
	u32 id, i;
	int id_n, ret;

	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG)
		return 0;

	if (!plat_priv->device_id) {
		cnss_pr_err("Invalid device id\n");
		return -EINVAL;
	}

	for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
					 child) {
		if (strcmp(child->name, "chip_cfg"))
			continue;

		id_n = of_property_count_u32_elems(child, "supported-ids");
		if (id_n <= 0) {
			cnss_pr_err("Device id is NOT set\n");
			return -EINVAL;
		}

		for (i = 0; i < id_n; i++) {
			ret = of_property_read_u32_index(child,
							 "supported-ids",
							 i, &id);
			if (ret) {
				cnss_pr_err("Failed to read supported ids\n");
				return -EINVAL;
			}

			if (id == plat_priv->device_id) {
				plat_priv->dev_node = child;
				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
					    child->name, i, id);
				return 0;
			}
		}
	}

	return -EINVAL;
}

#ifdef CONFIG_CNSS2_CONDITIONAL_POWEROFF
/* Conditional power-off builds keep QCA6390/QCA6490 powered after probe. */
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
{
	bool suspend_pwroff;

	switch (pci_dev->device) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
		suspend_pwroff = false;
		break;
	default:
		suspend_pwroff = true;
	}

	return suspend_pwroff;
}
#else
/* Default: always suspend the link and power the device off after probe. */
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
{
	return true;
}
#endif

#ifdef CONFIG_CNSS2_ENUM_WITH_LOW_SPEED
/* Cap the RC at Gen1 before enumeration (low-speed enumeration builds). */
static void
cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
{
	int ret;

	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
					  PCI_EXP_LNKSTA_CLS_2_5GB);
	if (ret)
		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen1, err = %d\n",
			    rc_num, ret);
}

static void
cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	/* if not Genoa, do not restore rc speed */
	if (pci_priv->device_id != QCN7605_DEVICE_ID) {
		/* The request 0 will reset maximum GEN speed to default */
		ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num, 0);
		if (ret)
			cnss_pr_err("Failed to reset max PCIe RC%x link speed to default, err = %d\n",
				    plat_priv->rc_num, ret);
	}
}

static void
cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
{
	int ret;

	/* suspend/resume will trigger retain to re-establish link speed */
	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	ret = cnss_resume_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);

	cnss_pci_get_link_status(pci_priv);
}
#else
/* Stubs when low-speed enumeration support is compiled out. */
static void
cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
{
}

static void
cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
{
}

static void
cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
{
}
#endif

/* After probe, either suspend the PCIe link and power the device off, or —
 * when conditional power-off keeps it on — retrain the link instead.
 */
static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	int rc_num = pci_dev->bus->domain_nr;
	struct cnss_plat_data *plat_priv;
	int ret = 0;
	bool suspend_pwroff =
			      cnss_should_suspend_pwroff(pci_dev);

	plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);

	if (suspend_pwroff) {
		ret = cnss_suspend_pci_link(pci_priv);
		if (ret)
			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
				    ret);
		cnss_power_off_device(plat_priv);
	} else {
		cnss_pr_dbg("bus suspend and dev power off disabled for device [0x%x]\n",
			    pci_dev->device);
		cnss_pci_link_retrain_trigger(pci_priv);
	}
}

/* PCI probe: bind the enumerated WLAN device to its platform context, bring
 * up SMMU/bus/MSI/MHI, do per-chip setup, then (normally) suspend the link
 * and power off until a driver registers. Error paths unwind in reverse
 * order via the goto ladder.
 */
static int cnss_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	int ret = 0;
	struct cnss_pci_data *pci_priv;
	struct device *dev = &pci_dev->dev;
	int rc_num = pci_dev->bus->domain_nr;
	struct cnss_plat_data *plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);

	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x rc_num %d\n",
		    id->vendor, pci_dev->device, rc_num);
	if (!plat_priv) {
		cnss_pr_err("Find match plat_priv with rc number failure\n");
		ret = -ENODEV;
		goto out;
	}

	pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
	if (!pci_priv) {
		ret = -ENOMEM;
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;
	pci_priv->plat_priv = plat_priv;
	pci_priv->pci_dev = pci_dev;
	pci_priv->pci_device_id = id;
	pci_priv->device_id = pci_dev->device;
	cnss_set_pci_priv(pci_dev, pci_priv);
	plat_priv->device_id = pci_dev->device;
	plat_priv->bus_priv = pci_priv;
	mutex_init(&pci_priv->bus_lock);
	if (plat_priv->use_pm_domain)
		dev->pm_domain = &cnss_pm_domain;

	cnss_pci_restore_rc_speed(pci_priv);

	ret = cnss_pci_get_dev_cfg_node(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
		goto reset_ctx;
	}

	cnss_get_sleep_clk_supported(plat_priv);

	ret = cnss_dev_specific_power_on(plat_priv);
	if (ret < 0)
		goto reset_ctx;

	cnss_pci_of_reserved_mem_device_init(pci_priv);

	ret = cnss_register_subsys(plat_priv);
	if (ret)
		goto reset_ctx;

	ret = cnss_register_ramdump(plat_priv);
	if (ret)
		goto unregister_subsys;

	ret = cnss_pci_init_smmu(pci_priv);
	if (ret)
		goto unregister_ramdump;

	/* update drv support flag */
	cnss_pci_update_drv_supported(pci_priv);

	cnss_update_supported_link_info(pci_priv);

	init_completion(&pci_priv->wake_event_complete);

	ret = cnss_reg_pci_event(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
		goto deinit_smmu;
	}

	ret = cnss_pci_enable_bus(pci_priv);
	if (ret)
		goto dereg_pci_event;

	ret = cnss_pci_enable_msi(pci_priv);
	if (ret)
		goto disable_bus;

	ret = cnss_pci_register_mhi(pci_priv);
	if (ret)
		goto disable_msi;

	switch (pci_dev->device) {
	case QCA6174_DEVICE_ID:
		pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
				     &pci_priv->revision_id);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		/* In dual-card mode only touch WLAON power control once
		 * enumeration has completed (see cnss_try_suspend()).
		 */
		if ((cnss_is_dual_wlan_enabled() &&
		     plat_priv->enumerate_done) || !cnss_is_dual_wlan_enabled())
			cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false,
						    false);

		timer_setup(&pci_priv->dev_rddm_timer,
			    cnss_dev_rddm_timeout_hdlr, 0);
		timer_setup(&pci_priv->boot_debug_timer,
			    cnss_boot_debug_timeout_hdlr, 0);
		INIT_DELAYED_WORK(&pci_priv->time_sync_work,
				  cnss_pci_time_sync_work_hdlr);
		cnss_pci_get_link_status(pci_priv);
		cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false);
		cnss_pci_wake_gpio_init(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown PCI device found: 0x%x\n",
			    pci_dev->device);
		ret = -ENODEV;
		goto unreg_mhi;
	}

	cnss_pci_config_regs(pci_priv);
	/* NOTE(review): emulation returns success here without setting
	 * CNSS_PCI_PROBE_DONE — looks intentional for emulation HW; confirm.
	 */
	if (EMULATION_HW)
		goto out;
	if (cnss_is_dual_wlan_enabled() && !plat_priv->enumerate_done)
		goto probe_done;
	cnss_pci_suspend_pwroff(pci_dev);

probe_done:
	set_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);

	return 0;

unreg_mhi:
	cnss_pci_unregister_mhi(pci_priv);
disable_msi:
	cnss_pci_disable_msi(pci_priv);
disable_bus:
	cnss_pci_disable_bus(pci_priv);
dereg_pci_event:
	cnss_dereg_pci_event(pci_priv);
deinit_smmu:
	cnss_pci_deinit_smmu(pci_priv);
unregister_ramdump:
	cnss_unregister_ramdump(plat_priv);
unregister_subsys:
	cnss_unregister_subsys(plat_priv);
reset_ctx:
	plat_priv->bus_priv = NULL;
out:
	return ret;
}

/* PCI remove: release firmware memory, per-chip timers/GPIO, MHI, MSI, bus,
 * PCI event registration and SMMU in reverse order of probe.
 */
static void cnss_pci_remove(struct pci_dev *pci_dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv =
		cnss_bus_dev_to_plat_priv(&pci_dev->dev);

	/* NOTE(review): plat_priv is dereferenced here, yet checked for NULL
	 * further below before unregistering ramdump/subsys — inconsistent;
	 * confirm whether cnss_bus_dev_to_plat_priv() can return NULL.
	 */
	clear_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
	cnss_pci_unregister_driver_hdlr(pci_priv);
	cnss_pci_free_aux_mem(pci_priv);
	cnss_pci_free_tme_lite_mem(pci_priv);
	cnss_pci_free_tme_opt_file_mem(pci_priv);
	cnss_pci_free_m3_mem(pci_priv);
	cnss_pci_free_fw_mem(pci_priv);
	cnss_pci_free_qdss_mem(pci_priv);

	switch (pci_dev->device) {
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		cnss_pci_wake_gpio_deinit(pci_priv);
		del_timer(&pci_priv->boot_debug_timer);
		del_timer(&pci_priv->dev_rddm_timer);
		break;
	default:
		break;
	}

	cnss_pci_unregister_mhi(pci_priv);
	cnss_pci_disable_msi(pci_priv);
	cnss_pci_disable_bus(pci_priv);
	cnss_dereg_pci_event(pci_priv);
	cnss_pci_deinit_smmu(pci_priv);
	if (plat_priv) {
		cnss_unregister_ramdump(plat_priv);
		cnss_unregister_subsys(plat_priv);
		plat_priv->bus_priv = NULL;
	} else {
		cnss_pr_err("Plat_priv is null, Unable to unregister ramdump,subsys\n");
	}
}

static const struct pci_device_id cnss_pci_id_table[] = {
	{ QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCN7605_VENDOR_ID, QCN7605_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ KIWI_VENDOR_ID, KIWI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ MANGO_VENDOR_ID, MANGO_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ PEACH_VENDOR_ID, PEACH_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);

static const struct dev_pm_ops cnss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
				      cnss_pci_resume_noirq)
	SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
			   cnss_pci_runtime_idle)
};

static struct pci_driver cnss_pci_driver = {
	.name     = "cnss_pci",
	.id_table = cnss_pci_id_table,
	.probe    = cnss_pci_probe,
	.remove   = cnss_pci_remove,
	.driver = {
		.pm = &cnss_pm_ops,
	},
};

/* Enumerate the WLAN device on a given root complex, retrying link training
 * up to LINK_TRAINING_RETRY_MAX_TIMES. -EPROBE_DEFER from the RC driver is
 * propagated without retrying.
 */
static int cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
{
	int ret, retry = 0;

	/* Always set initial target PCIe link speed to Gen2 for QCA6490 device
	 * since there may be link issues if it boots up with Gen3 link speed.
	 * Device is able to change it later at any time. It will be rejected
	 * if requested speed is higher than the one specified in PCIe DT.
	 */
	if (plat_priv->device_id == QCA6490_DEVICE_ID) {
		ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
						  PCI_EXP_LNKSTA_CLS_5_0GB);
		if (ret && ret != -EPROBE_DEFER)
			cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen2, err = %d\n",
				    rc_num, ret);
	} else {
		cnss_pci_downgrade_rc_speed(plat_priv, rc_num);
	}

	cnss_pr_dbg("Trying to enumerate with PCIe RC%x\n", rc_num);
retry:
	ret = _cnss_pci_enumerate(plat_priv, rc_num);
	if (ret) {
		if (ret == -EPROBE_DEFER) {
			cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
			goto out;
		}
		cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
			    rc_num, ret);
		if (retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
			goto retry;
		} else {
			goto out;
		}
	}

	plat_priv->rc_num = rc_num;

out:
	return ret;
}

/* Bus-level init: enumerate each RC listed in the "qcom,wlan-rc-num" DT
 * property, run the dual-card suspend fixup, then register the PCI driver
 * (once) and verify a device actually probed.
 */
int cnss_pci_init(struct cnss_plat_data *plat_priv)
{
	struct device *dev = &plat_priv->plat_dev->dev;
	const __be32 *prop;
	int ret = 0, prop_len = 0, rc_count, i;

	prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
	/* NOTE(review): ret is still 0 here, so a missing DT property logs an
	 * error but returns success — confirm this is intentional.
	 */
	if (!prop || !prop_len) {
		cnss_pr_err("Failed to get PCIe RC number from DT\n");
		goto out;
	}

	rc_count = prop_len / sizeof(__be32);
	for (i = 0; i < rc_count; i++) {
		ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
		if (!ret)
			break;
		else if (ret == -EPROBE_DEFER || (ret && i == rc_count - 1))
			goto out;
	}

	ret = cnss_try_suspend(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to suspend, ret: %d\n", ret);
		goto out;
	}

	if (!cnss_driver_registered) {
		ret = pci_register_driver(&cnss_pci_driver);
		if (ret) {
			cnss_pr_err("Failed to register to PCI framework, err = %d\n",
				    ret);
			goto out;
		}
		/* Synchronous probe is expected to have filled bus_priv */
		if (!plat_priv->bus_priv) {
			cnss_pr_err("Failed to probe PCI driver\n");
			ret = -ENODEV;
			goto unreg_pci;
		}
		cnss_driver_registered = true;
	}

	return 0;

unreg_pci:
	pci_unregister_driver(&cnss_pci_driver);
out:
	return ret;
}

/* Bus-level teardown: unregister the PCI driver if cnss_pci_init()
 * registered it.
 */
void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
{
	if (cnss_driver_registered) {
		pci_unregister_driver(&cnss_pci_driver);
		cnss_driver_registered = false;
	}
}