1 /* 2 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <linux/pci.h> 20 #include <linux/slab.h> 21 #include <linux/interrupt.h> 22 #include <linux/if_arp.h> 23 #ifdef CONFIG_PCI_MSM 24 #include <linux/msm_pcie.h> 25 #endif 26 #include "hif_io32.h" 27 #include "if_pci.h" 28 #include "hif.h" 29 #include "target_type.h" 30 #include "hif_main.h" 31 #include "ce_main.h" 32 #include "ce_api.h" 33 #include "ce_internal.h" 34 #include "ce_reg.h" 35 #include "ce_bmi.h" 36 #include "regtable.h" 37 #include "hif_hw_version.h" 38 #include <linux/debugfs.h> 39 #include <linux/seq_file.h> 40 #include "qdf_status.h" 41 #include "qdf_atomic.h" 42 #include "pld_common.h" 43 #include "mp_dev.h" 44 #include "hif_debug.h" 45 46 #include "if_pci_internal.h" 47 #include "ce_tasklet.h" 48 #include "targaddrs.h" 49 #include "hif_exec.h" 50 51 #include "pci_api.h" 52 #include "ahb_api.h" 53 54 /* Maximum ms timeout for host to wake up target */ 55 #define PCIE_WAKE_TIMEOUT 1000 56 #define RAMDUMP_EVENT_TIMEOUT 2500 57 58 /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent 59 * PCIe data bus error 60 * As workaround for this issue - changing the reset sequence to 61 * use TargetCPU warm 
reset * instead of SOC_GLOBAL_RESET
 */
#define CPU_WARM_RESET_WAR

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
struct ce_irq_reg_table {
	uint32_t irq_enable;	/* interrupt enable register offset */
	uint32_t irq_status;	/* interrupt status register offset */
};

#ifndef QCA_WIFI_3_0_ADRASTEA
/* Non-Adrastea targets have no Q6 interrupt routing; stub it out. */
static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
}
#else
/**
 * hif_pci_route_adrastea_interrupt() - route a shared interrupt to the Q6
 * @sc: PCI HIF context
 *
 * If any cause bit that is both enabled and pending targets the Q6,
 * mask both Q6 enable registers and notify the Q6 through the platform
 * driver (only when @scn->notice_send is set).
 */
static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	unsigned int target_enable0, target_enable1;
	unsigned int target_cause0, target_cause1;

	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);

	if ((target_enable0 & target_cause0) ||
	    (target_enable1 & target_cause1)) {
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);

		if (scn->notice_send)
			pld_intr_notify_q6(sc->dev);
	}
}
#endif


/**
 * pci_dispatch_interrupt() - dispatch pending CE interrupts to tasklets
 * @scn: scn
 *
 * Reads the CE interrupt summary and dispatches the tasklet of every CE
 * with a pending bit; when nothing is pending, re-enables the host
 * group-0 interrupts (unless the target is in reset or the link is
 * suspended).
 *
 * Return: N/A
 */
static void pci_dispatch_interrupt(struct hif_softc *scn)
{
	uint32_t intr_summary;
	int id;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done != true)
		return;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(scn);

	if (intr_summary == 0) {
		/* No pending CE interrupt: re-arm host interrupts unless
		 * the target is resetting or the PCIe link is suspended.
		 */
		if ((scn->target_status != TARGET_STATUS_RESET) &&
		    (!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn, scn->mem +
				       (SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS),
				       HOST_GROUP0_MASK);

			/* read back to flush the posted write buffer */
			hif_read32_mb(scn, scn->mem +
				      (SOC_CORE_BASE_ADDRESS |
				       PCIE_INTR_ENABLE_ADDRESS));
		}
		Q_TARGET_ACCESS_END(scn);
		return;
	}
	Q_TARGET_ACCESS_END(scn);

	scn->ce_irq_summary = intr_summary;
	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
		if (intr_summary & (1 << id)) {
			intr_summary &= ~(1 << id);
			ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
		}
	}
}

/**
 * hif_pci_legacy_ce_interrupt_handler() - legacy (INTx) interrupt handler
 * @irq: irq number
 * @arg: hif_pci_softc context
 *
 * Acknowledges and clears the legacy PCI line interrupt, checks the
 * firmware indicator for a pending firmware (SSR) event, and either
 * schedules the wlan tasklet (firmware event) or dispatches pending CE
 * interrupts.
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);

	volatile int tmp;
	uint16_t val = 0;
	uint32_t bar0 = 0;
	uint32_t fw_indicator_address, fw_indicator;
	bool ssr_irq = false;
	unsigned int host_cause, host_enable;

	if (LEGACY_INTERRUPTS(sc)) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return IRQ_HANDLED;

		if (ADRASTEA_BU) {
			host_enable = hif_read32_mb(sc, sc->mem +
						    PCIE_INTR_ENABLE_ADDRESS);
			host_cause = hif_read32_mb(sc, sc->mem +
						   PCIE_INTR_CAUSE_ADDRESS);
			if (!(host_enable & host_cause)) {
				/* Not ours; may belong to the Q6 */
				hif_pci_route_adrastea_interrupt(sc);
				return IRQ_HANDLED;
			}
		}

		/* Clear Legacy PCI line interrupts
		 * IMPORTANT: INTR_CLR register has to be set
		 * after INTR_ENABLE is set to 0,
		 * otherwise interrupt can not be really cleared
		 */
		hif_write32_mb(sc, sc->mem +
			       (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS), 0);

		hif_write32_mb(sc, sc->mem +
			       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
			       ADRASTEA_BU ?
			       (host_enable & host_cause) :
			       HOST_GROUP0_MASK);

		if (ADRASTEA_BU)
			hif_write32_mb(sc, sc->mem + 0x2f100c,
				       (host_cause >> 1));

		/* IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer
		 */
		if (!ADRASTEA_BU) {
			tmp =
				hif_read32_mb(sc, sc->mem +
					      (SOC_CORE_BASE_ADDRESS |
					       PCIE_INTR_ENABLE_ADDRESS));

			if (tmp == 0xdeadbeef) {
				/* Bogus readback: PCIe link is likely down.
				 * Dump config space and key registers to aid
				 * debugging, then BUG out.
				 */
				HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
					  __func__);

				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
				HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
				HIF_ERROR("%s: PCI Device ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
				HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
					  val);

				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
				HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
					  val);

				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
						      &bar0);
				HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
					  bar0);

				HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ RTC_STATE_ADDRESS));
				HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ PCIE_SOC_WAKE_ADDRESS));
				HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80008),
					  hif_read32_mb(sc, sc->mem + 0x8000c));
				HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80010),
					  hif_read32_mb(sc, sc->mem + 0x80014));
				HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80018),
					  hif_read32_mb(sc, sc->mem + 0x8001c));
				QDF_BUG(0);
			}

			PCI_CLR_CAUSE0_REGISTER(sc);
		}

		if (HAS_FW_INDICATOR) {
			fw_indicator_address = hif_state->fw_indicator_address;
			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
			/* ~0 readback means the register access failed */
			if ((fw_indicator != ~0) &&
			    (fw_indicator & FW_IND_EVENT_PENDING))
				ssr_irq = true;
		}

		if (Q_TARGET_ACCESS_END(scn) < 0)
			return IRQ_HANDLED;
	}
	/* TBDXXX: Add support for WMAC */

	if (ssr_irq) {
		/* Firmware event pending: hand off to the wlan tasklet */
		sc->irq_event = irq;
		qdf_atomic_set(&scn->tasklet_from_intr, 1);

		qdf_atomic_inc(&scn->active_tasklet_cnt);
		tasklet_schedule(&sc->intr_tq);
	} else {
		pci_dispatch_interrupt(scn);
	}

	return IRQ_HANDLED;
}

/**
 * hif_pci_targ_is_present() - check whether the target is present
 * @scn: hif context
 * @mem: mapped target register space
 *
 * Return: always 1 (presence detection not implemented)
 */
bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
{
	return 1;		/* FIX THIS */
}

/**
 * hif_get_irq_num() - return the irq number(s) in use
 * @scn: hif opaque context
 * @irq: caller-provided array to fill with irq numbers
 * @size: number of entries available in @irq
 *
 * Return: number of irqs filled in, or -EINVAL on bad args / too-small
 *	   buffer
 */
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	int i = 0;

	if (!irq || !size) {
		return -EINVAL;
	}

	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
		/* legacy interrupt or a single MSI */
		irq[0] = sc->irq;
		return 1;
	}

	if (sc->num_msi_intrs > size) {
		qdf_print("Not enough space in irq buffer to return irqs");
		return -EINVAL;
	}

	for (i = 0; i < sc->num_msi_intrs; i++) {
		irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;
	}

	return sc->num_msi_intrs;
}


/**
 * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
 * @scn: hif_softc
 *
 * Return: void
 */
#if CONFIG_ATH_PCIE_MAX_PERF == 0
void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	/*
	 * If the deferred sleep timer is running cancel it
	 * and put the soc into sleep.
	 */
	if (hif_state->fake_sleep == true) {
		qdf_timer_stop(&hif_state->sleep_timer);
		if (hif_state->verified_awake == false) {
			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
				       PCIE_SOC_WAKE_ADDRESS,
				       PCIE_SOC_WAKE_RESET);
		}
		hif_state->fake_sleep = false;
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#else
inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
}
#endif

/* Read/write a register in the PCIe-local register window */
#define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
	hif_read32_mb(sc, (char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
	hif_write32_mb(sc, ((char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))

#ifdef QCA_WIFI_3_0
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @hif_ctx: hif context
 * @mem: mapped target register space (unused)
 *
 * emulation never goes to sleep
 *
 * Return: true if target is awake
 */
static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
{
	return true;
}
#else
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @scn: hif context
 * @mem: mapped target register space
 *
 * Return: true if the targets clocks are on
 */
static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
{
	uint32_t val;

	if (scn->recovery)
		return false;
	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
			    + RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
}
#endif

#define ATH_PCI_RESET_WAIT_MAX 10       /* Ms */
/**
 * hif_pci_device_reset() - cold (SOC_GLOBAL) reset of the target
 * @sc: PCI HIF context
 *
 * Wakes the target, asserts the global reset bit, waits for the cold
 * reset state to latch, then deasserts it and lets the SoC sleep again.
 */
static void hif_pci_device_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn->hostdef)
		return;

	/* NB: Don't check resetok here. This form of reset
	 * is integral to correct operation.
	 */

	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	HIF_ERROR("%s: Reset Device", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;

		qdf_mdelay(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;

		qdf_mdelay(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!
		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		     RTC_STATE_COLD_RESET_MASK))
			break;

		qdf_mdelay(1);
	}

	/* Allow the SoC to go back to sleep */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_RESET);
}

/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS - so Target CPU initializes FW
 *    correctly on WARM reset
 * 3. Clear TARGET CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	uint32_t fw_indicator;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/* NB: Don't check resetok here. This form of reset is
	 * integral to correct operation.
	 */

	if (!mem)
		return;

	HIF_INFO_MED("%s: Target Warm Reset", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;
		qdf_mdelay(1);
	}

	/*
	 * Disable Pending interrupts
	 */
	val =
		hif_read32_mb(sc, mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_CAUSE_ADDRESS));
	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
		     (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
	/* Target CPU Intr Cause */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);

	val =
		hif_read32_mb(sc, mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc, (mem +
			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
	hif_write32_mb(sc, (mem +
			    (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
		       HOST_GROUP0_MASK);

	qdf_mdelay(100);

	/* Clear FW_INDICATOR_ADDRESS */
	if (HAS_FW_INDICATOR) {
		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
	}

	/* Clear Target LF Timer interrupts */
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS +
			       SOC_LF_TIMER_CONTROL0_ADDRESS));
	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
		     (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
	hif_write32_mb(sc, mem +
		       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
		       val);

	/* Reset CE */
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(sc, (mem +
			    (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
		       val);
	/* read back to post the write before the delay */
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* CE unreset */
	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
				  SOC_RESET_CONTROL_ADDRESS), val);
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* Read Target CPU Intr Cause */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
		     __func__, val);

	/* CPU warm RESET */
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
				  SOC_RESET_CONTROL_ADDRESS), val);
	val =
		hif_read32_mb(sc, mem +
			      (RTC_SOC_BASE_ADDRESS |
			       SOC_RESET_CONTROL_ADDRESS));
	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
		     __func__, val);

	qdf_mdelay(100);
	HIF_INFO_MED("%s: Target Warm reset complete", __func__);

}

#ifndef QCA_WIFI_3_0
/* only applicable to legacy ce */
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;
	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		return ATH_ISR_SCHED;

	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);

	/* 0 when the fw helper event is set, 1 otherwise */
	if (val & FW_IND_HELPER)
		return 0;

	return 1;
}
#endif

int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 615 uint16_t device_id = 0; 616 uint32_t val; 617 uint16_t timeout_count = 0; 618 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 619 620 /* Check device ID from PCIe configuration space for link status */ 621 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id); 622 if (device_id != sc->devid) { 623 HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)", 624 __func__, device_id, sc->devid); 625 return -EACCES; 626 } 627 628 /* Check PCIe local register for bar/memory access */ 629 val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 630 RTC_STATE_ADDRESS); 631 HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val); 632 633 /* Try to wake up taget if it sleeps */ 634 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 635 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 636 HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__, 637 hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 638 PCIE_SOC_WAKE_ADDRESS)); 639 640 /* Check if taget can be woken up */ 641 while (!hif_targ_is_awake(scn, sc->mem)) { 642 if (timeout_count >= PCIE_WAKE_TIMEOUT) { 643 HIF_ERROR("%s: wake up timeout, %08x, %08x", 644 __func__, 645 hif_read32_mb(sc, sc->mem + 646 PCIE_LOCAL_BASE_ADDRESS + 647 RTC_STATE_ADDRESS), 648 hif_read32_mb(sc, sc->mem + 649 PCIE_LOCAL_BASE_ADDRESS + 650 PCIE_SOC_WAKE_ADDRESS)); 651 return -EACCES; 652 } 653 654 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 655 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 656 657 qdf_mdelay(100); 658 timeout_count += 100; 659 } 660 661 /* Check Power register for SoC internal bus issues */ 662 val = 663 hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS + 664 SOC_POWER_REG_OFFSET); 665 HIF_INFO_MED("%s: Power register is %08x", __func__, val); 666 667 return 0; 668 } 669 670 /** 671 * __hif_pci_dump_registers(): dump other PCI debug registers 672 * @scn: struct hif_softc 673 * 674 * This function dumps pci debug registers. 
The parrent function 675 * dumps the copy engine registers before calling this function. 676 * 677 * Return: void 678 */ 679 static void __hif_pci_dump_registers(struct hif_softc *scn) 680 { 681 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 682 void __iomem *mem = sc->mem; 683 uint32_t val, i, j; 684 uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; 685 uint32_t ce_base; 686 687 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 688 return; 689 690 /* DEBUG_INPUT_SEL_SRC = 0x6 */ 691 val = 692 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 693 WLAN_DEBUG_INPUT_SEL_OFFSET); 694 val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK; 695 val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6); 696 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 697 WLAN_DEBUG_INPUT_SEL_OFFSET, val); 698 699 /* DEBUG_CONTROL_ENABLE = 0x1 */ 700 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 701 WLAN_DEBUG_CONTROL_OFFSET); 702 val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK; 703 val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1); 704 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 705 WLAN_DEBUG_CONTROL_OFFSET, val); 706 707 HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__, 708 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 709 WLAN_DEBUG_INPUT_SEL_OFFSET), 710 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 711 WLAN_DEBUG_CONTROL_OFFSET)); 712 713 HIF_INFO_MED("%s: Debug CE", __func__); 714 /* Loop CE debug output */ 715 /* AMBA_DEBUG_BUS_SEL = 0xc */ 716 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 717 AMBA_DEBUG_BUS_OFFSET); 718 val &= ~AMBA_DEBUG_BUS_SEL_MASK; 719 val |= AMBA_DEBUG_BUS_SEL_SET(0xc); 720 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, 721 val); 722 723 for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) { 724 /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */ 725 val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS + 726 CE_WRAPPER_DEBUG_OFFSET); 727 val &= ~CE_WRAPPER_DEBUG_SEL_MASK; 728 val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]); 729 hif_write32_mb(sc, mem + 
CE_WRAPPER_BASE_ADDRESS + 730 CE_WRAPPER_DEBUG_OFFSET, val); 731 732 HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x", 733 __func__, wrapper_idx[i], 734 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 735 AMBA_DEBUG_BUS_OFFSET), 736 hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS + 737 CE_WRAPPER_DEBUG_OFFSET)); 738 739 if (wrapper_idx[i] <= 7) { 740 for (j = 0; j <= 5; j++) { 741 ce_base = CE_BASE_ADDRESS(wrapper_idx[i]); 742 /* For (j=0~5) write CE_DEBUG_SEL = j */ 743 val = 744 hif_read32_mb(sc, mem + ce_base + 745 CE_DEBUG_OFFSET); 746 val &= ~CE_DEBUG_SEL_MASK; 747 val |= CE_DEBUG_SEL_SET(j); 748 hif_write32_mb(sc, mem + ce_base + 749 CE_DEBUG_OFFSET, val); 750 751 /* read (@gpio_athr_wlan_reg) 752 * WLAN_DEBUG_OUT_DATA 753 */ 754 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS 755 + WLAN_DEBUG_OUT_OFFSET); 756 val = WLAN_DEBUG_OUT_DATA_GET(val); 757 758 HIF_INFO_MED("%s: module%d: cedbg: %x out: %x", 759 __func__, j, 760 hif_read32_mb(sc, mem + ce_base + 761 CE_DEBUG_OFFSET), val); 762 } 763 } else { 764 /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */ 765 val = 766 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 767 WLAN_DEBUG_OUT_OFFSET); 768 val = WLAN_DEBUG_OUT_DATA_GET(val); 769 770 HIF_INFO_MED("%s: out: %x", __func__, val); 771 } 772 } 773 774 HIF_INFO_MED("%s: Debug PCIe:", __func__); 775 /* Loop PCIe debug output */ 776 /* Write AMBA_DEBUG_BUS_SEL = 0x1c */ 777 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 778 AMBA_DEBUG_BUS_OFFSET); 779 val &= ~AMBA_DEBUG_BUS_SEL_MASK; 780 val |= AMBA_DEBUG_BUS_SEL_SET(0x1c); 781 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 782 AMBA_DEBUG_BUS_OFFSET, val); 783 784 for (i = 0; i <= 8; i++) { 785 /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */ 786 val = 787 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 788 AMBA_DEBUG_BUS_OFFSET); 789 val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK; 790 val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i); 791 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 792 AMBA_DEBUG_BUS_OFFSET, 
val); 793 794 /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */ 795 val = 796 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 797 WLAN_DEBUG_OUT_OFFSET); 798 val = WLAN_DEBUG_OUT_DATA_GET(val); 799 800 HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__, 801 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 802 WLAN_DEBUG_OUT_OFFSET), val, 803 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 804 WLAN_DEBUG_OUT_OFFSET)); 805 } 806 807 Q_TARGET_ACCESS_END(scn); 808 } 809 810 /** 811 * hif_dump_registers(): dump bus debug registers 812 * @scn: struct hif_opaque_softc 813 * 814 * This function dumps hif bus debug registers 815 * 816 * Return: 0 for success or error code 817 */ 818 int hif_pci_dump_registers(struct hif_softc *hif_ctx) 819 { 820 int status; 821 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 822 823 status = hif_dump_ce_registers(scn); 824 825 if (status) 826 HIF_ERROR("%s: Dump CE Registers Failed", __func__); 827 828 /* dump non copy engine pci registers */ 829 __hif_pci_dump_registers(scn); 830 831 return 0; 832 } 833 834 #ifdef HIF_CONFIG_SLUB_DEBUG_ON 835 836 /* worker thread to schedule wlan_tasklet in SLUB debug build */ 837 static void reschedule_tasklet_work_handler(void *arg) 838 { 839 struct hif_pci_softc *sc = arg; 840 struct hif_softc *scn = HIF_GET_SOFTC(sc); 841 842 if (!scn) { 843 HIF_ERROR("%s: hif_softc is NULL\n", __func__); 844 return; 845 } 846 847 if (scn->hif_init_done == false) { 848 HIF_ERROR("%s: wlan driver is unloaded", __func__); 849 return; 850 } 851 852 tasklet_schedule(&sc->intr_tq); 853 } 854 855 /** 856 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet 857 * work 858 * @sc: HIF PCI Context 859 * 860 * Return: void 861 */ 862 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) 863 { 864 qdf_create_work(0, &sc->reschedule_tasklet_work, 865 reschedule_tasklet_work_handler, NULL); 866 } 867 #else 868 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { } 869 #endif /* 
HIF_CONFIG_SLUB_DEBUG_ON */

/**
 * wlan_tasklet() - bottom half for firmware events on the legacy path
 * @data: hif_pci_softc context (cast from unsigned long)
 *
 * Runs the firmware interrupt handler (non-Adrastea only) and then
 * clears the from-interrupt flag and the active tasklet count.
 */
void wlan_tasklet(unsigned long data)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (scn->hif_init_done == false)
		goto end;

	if (qdf_atomic_read(&scn->link_suspended))
		goto end;

	if (!ADRASTEA_BU) {
		hif_fw_interrupt_handler(sc->irq_event, scn);
		if (scn->target_status == TARGET_STATUS_RESET)
			goto end;
	}

end:
	qdf_atomic_set(&scn->tasklet_from_intr, 0);
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_pm_runtime_state_to_string() - human readable runtime pm state
 * @state: HIF_PM_RUNTIME_STATE_* value
 *
 * Return: constant string name for @state
 */
static const char *hif_pm_runtime_state_to_string(uint32_t state)
{
	switch (state) {
	case HIF_PM_RUNTIME_STATE_NONE:
		return "INIT_STATE";
	case HIF_PM_RUNTIME_STATE_ON:
		return "ON";
	case HIF_PM_RUNTIME_STATE_RESUMING:
		return "RESUMING";
	case HIF_PM_RUNTIME_STATE_SUSPENDING:
		return "SUSPENDING";
	case HIF_PM_RUNTIME_STATE_SUSPENDED:
		return "SUSPENDED";
	default:
		return "INVALID STATE";
	}
}

#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @sc: hif_pci_softc context
 * @msg: log message
 *
 * log runtime pm stats when something seems off.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
{
	struct hif_pm_runtime_lock *ctx;

	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
		  msg, atomic_read(&sc->dev->power.usage_count),
		  hif_pm_runtime_state_to_string(
			  atomic_read(&sc->pm_state)),
		  sc->prevent_suspend_cnt);

	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
		  sc->dev->power.runtime_status,
		  sc->dev->power.runtime_error,
		  sc->dev->power.disable_depth,
		  sc->dev->power.autosuspend_delay);

	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
		  sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
		  sc->pm_stats.request_resume);

	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
		  sc->pm_stats.allow_suspend,
		  sc->pm_stats.prevent_suspend);

	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
		  sc->pm_stats.prevent_suspend_timeout,
		  sc->pm_stats.allow_suspend_timeout);

	HIF_ERROR("Suspended: %u, resumed: %u count",
		  sc->pm_stats.suspended,
		  sc->pm_stats.resumed);

	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
		  sc->pm_stats.suspend_err,
		  sc->pm_stats.runtime_get_err);

	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");

	/* NOTE(review): list walked without sc->runtime_lock held here -
	 * presumably acceptable for a warn path; confirm against callers.
	 */
	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
	}

	WARN_ON(1);
}

/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_pci_softc *sc = s->private;
	/* order must match the HIF_PM_RUNTIME_STATE_* enum values */
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
		"SUSPENDING", "SUSPENDED"};
	unsigned int msecs_age;
	qdf_time_t usecs_age;
	int pm_state = atomic_read(&sc->pm_state);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *ctx;

	seq_printf(s, "%30s: %s\n", "Runtime PM state",
		   autopm_state[pm_state]);
	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
		   sc->pm_stats.last_resume_caller);
	seq_printf(s, "%30s: %pf\n", "Last Busy Marker",
		   sc->pm_stats.last_busy_marker);

	usecs_age = qdf_get_log_timestamp_usecs() -
		sc->pm_stats.last_busy_timestamp;
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
		   sc->pm_stats.last_busy_timestamp / 1000000,
		   sc->pm_stats.last_busy_timestamp % 1000000);
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
		   usecs_age / 1000000, usecs_age % 1000000);

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(jiffies -
					     sc->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
			   msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
		   atomic_read(&sc->dev->power.usage_count));

	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
		   sc->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);

	timer_expires = sc->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires -
					     jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
			   msecs_age / 1000, msecs_age % 1000);
	}

	spin_lock_bh(&sc->runtime_lock);
	if (list_empty(&sc->prevent_suspend_list)) {
		spin_unlock_bh(&sc->runtime_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_bh(&sc->runtime_lock);

	return 0;
}
#undef HIF_PCI_RUNTIME_PM_STATS

/**
 * hif_pci_runtime_pm_open() - open a debug fs file to access the runtime pm stats
 * @inode: debugfs inode (holds the hif_pci_softc in i_private)
 * @file: file being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			   inode->i_private);
}

static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner		= THIS_MODULE,
	.open		= hif_pci_runtime_pm_open,
	.release	= single_release,
	.read		= seq_read,
	.llseek		= seq_lseek,
};

/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @sc: pci context
 *
 * creates a debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
					    0400, NULL, sc,
					    &hif_pci_runtime_pm_fops);
}

/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @sc: pci context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
	debugfs_remove(sc->pm_dentry);
}

/**
 * hif_runtime_init() - enable runtime pm with autosuspend on @dev
 * @dev: device
 * @delay: autosuspend delay in ms
 */
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}

/**
 * hif_runtime_exit() - undo hif_runtime_init(): hold @dev active again
 * @dev: device
 */
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
}

static void hif_pm_runtime_lock_timeout_fn(void *data);

/**
 * hif_pm_runtime_start(): start the runtime pm
 * @sc: pci context
 *
 * After this call, runtime pm will be active.
 */
static void hif_pm_runtime_start(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm) {
		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
		return;
	}

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE) {
		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
			 __func__);
		return;
	}

	qdf_timer_init(NULL, &sc->runtime_timer,
		       hif_pm_runtime_lock_timeout_fn,
		       sc, QDF_TIMER_TYPE_WAKE_APPS);

	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
		 ol_sc->hif_config.runtime_pm_delay);

	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
	hif_runtime_pm_debugfs_create(sc);
}

/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @sc: pci context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_runtime_pm_start().
1159 */ 1160 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) 1161 { 1162 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); 1163 uint32_t mode = hif_get_conparam(ol_sc); 1164 1165 if (!ol_sc->hif_config.enable_runtime_pm) 1166 return; 1167 1168 if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) || 1169 mode == QDF_GLOBAL_MONITOR_MODE) 1170 return; 1171 1172 hif_runtime_exit(sc->dev); 1173 hif_pm_runtime_resume(sc->dev); 1174 1175 qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE); 1176 1177 hif_runtime_pm_debugfs_remove(sc); 1178 qdf_timer_free(&sc->runtime_timer); 1179 /* doesn't wait for penting trafic unlike cld-2.0 */ 1180 } 1181 1182 /** 1183 * hif_pm_runtime_open(): initialize runtime pm 1184 * @sc: pci data structure 1185 * 1186 * Early initialization 1187 */ 1188 static void hif_pm_runtime_open(struct hif_pci_softc *sc) 1189 { 1190 spin_lock_init(&sc->runtime_lock); 1191 1192 qdf_atomic_init(&sc->pm_state); 1193 qdf_runtime_lock_init(&sc->prevent_linkdown_lock); 1194 qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE); 1195 INIT_LIST_HEAD(&sc->prevent_suspend_list); 1196 } 1197 1198 /** 1199 * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state 1200 * @sc: pci context 1201 * 1202 * Ensure we have only one vote against runtime suspend before closing 1203 * the runtime suspend feature. 1204 * 1205 * all gets by the wlan driver should have been returned 1206 * one vote should remain as part of cnss_runtime_exit 1207 * 1208 * needs to be revisited if we share the root complex. 
1209 */ 1210 static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc) 1211 { 1212 struct hif_pm_runtime_lock *ctx, *tmp; 1213 1214 if (atomic_read(&sc->dev->power.usage_count) != 1) 1215 hif_pci_runtime_pm_warn(sc, "Driver UnLoaded"); 1216 else 1217 return; 1218 1219 spin_lock_bh(&sc->runtime_lock); 1220 list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) { 1221 spin_unlock_bh(&sc->runtime_lock); 1222 hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx); 1223 spin_lock_bh(&sc->runtime_lock); 1224 } 1225 spin_unlock_bh(&sc->runtime_lock); 1226 1227 /* ensure 1 and only 1 usage count so that when the wlan 1228 * driver is re-insmodded runtime pm won't be 1229 * disabled also ensures runtime pm doesn't get 1230 * broken on by being less than 1. 1231 */ 1232 if (atomic_read(&sc->dev->power.usage_count) <= 0) 1233 atomic_set(&sc->dev->power.usage_count, 1); 1234 while (atomic_read(&sc->dev->power.usage_count) > 1) 1235 hif_pm_runtime_put_auto(sc->dev); 1236 } 1237 1238 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc, 1239 struct hif_pm_runtime_lock *lock); 1240 1241 /** 1242 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR 1243 * @sc: PCIe Context 1244 * 1245 * API is used to empty the runtime pm prevent suspend list. 
1246 * 1247 * Return: void 1248 */ 1249 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc) 1250 { 1251 struct hif_pm_runtime_lock *ctx, *tmp; 1252 1253 spin_lock_bh(&sc->runtime_lock); 1254 list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) { 1255 __hif_pm_runtime_allow_suspend(sc, ctx); 1256 } 1257 spin_unlock_bh(&sc->runtime_lock); 1258 } 1259 1260 /** 1261 * hif_pm_runtime_close(): close runtime pm 1262 * @sc: pci bus handle 1263 * 1264 * ensure runtime_pm is stopped before closing the driver 1265 */ 1266 static void hif_pm_runtime_close(struct hif_pci_softc *sc) 1267 { 1268 struct hif_softc *scn = HIF_GET_SOFTC(sc); 1269 1270 qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock); 1271 if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE) 1272 return; 1273 1274 hif_pm_runtime_stop(sc); 1275 1276 hif_is_recovery_in_progress(scn) ? 1277 hif_pm_runtime_sanitize_on_ssr_exit(sc) : 1278 hif_pm_runtime_sanitize_on_exit(sc); 1279 } 1280 #else 1281 static void hif_pm_runtime_close(struct hif_pci_softc *sc) {} 1282 static void hif_pm_runtime_open(struct hif_pci_softc *sc) {} 1283 static void hif_pm_runtime_start(struct hif_pci_softc *sc) {} 1284 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {} 1285 #endif 1286 1287 /** 1288 * hif_disable_power_gating() - disable HW power gating 1289 * @hif_ctx: hif context 1290 * 1291 * disables pcie L1 power states 1292 */ 1293 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx) 1294 { 1295 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1296 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 1297 1298 if (!scn) { 1299 HIF_ERROR("%s: Could not disable ASPM scn is null", 1300 __func__); 1301 return; 1302 } 1303 1304 /* Disable ASPM when pkt log is enabled */ 1305 pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val); 1306 pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00)); 1307 } 1308 1309 /** 1310 * hif_enable_power_gating() - enable HW 
power gating 1311 * @hif_ctx: hif context 1312 * 1313 * enables pcie L1 power states 1314 */ 1315 static void hif_enable_power_gating(struct hif_pci_softc *sc) 1316 { 1317 if (!sc) { 1318 HIF_ERROR("%s: Could not disable ASPM scn is null", 1319 __func__); 1320 return; 1321 } 1322 1323 /* Re-enable ASPM after firmware/OTP download is complete */ 1324 pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val); 1325 } 1326 1327 /** 1328 * hif_enable_power_management() - enable power management 1329 * @hif_ctx: hif context 1330 * 1331 * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling 1332 * soc-sleep after driver load (hif_pci_target_sleep_state_adjust). 1333 * 1334 * note: epping mode does not call this function as it does not 1335 * care about saving power. 1336 */ 1337 void hif_pci_enable_power_management(struct hif_softc *hif_sc, 1338 bool is_packet_log_enabled) 1339 { 1340 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc); 1341 uint32_t mode; 1342 1343 if (!pci_ctx) { 1344 HIF_ERROR("%s, hif_ctx null", __func__); 1345 return; 1346 } 1347 1348 mode = hif_get_conparam(hif_sc); 1349 if (mode == QDF_GLOBAL_FTM_MODE) { 1350 HIF_INFO("%s: Enable power gating for FTM mode", __func__); 1351 hif_enable_power_gating(pci_ctx); 1352 return; 1353 } 1354 1355 hif_pm_runtime_start(pci_ctx); 1356 1357 if (!is_packet_log_enabled) 1358 hif_enable_power_gating(pci_ctx); 1359 1360 if (!CONFIG_ATH_PCIE_MAX_PERF && 1361 CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD && 1362 !ce_srng_based(hif_sc)) { 1363 /* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */ 1364 if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0) 1365 HIF_ERROR("%s, failed to set target to sleep", 1366 __func__); 1367 } 1368 } 1369 1370 /** 1371 * hif_disable_power_management() - disable power management 1372 * @hif_ctx: hif context 1373 * 1374 * Currently disables runtime pm. Should be updated to behave 1375 * if runtime pm is not started. 
Should be updated to take care 1376 * of aspm and soc sleep for driver load. 1377 */ 1378 void hif_pci_disable_power_management(struct hif_softc *hif_ctx) 1379 { 1380 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); 1381 1382 if (!pci_ctx) { 1383 HIF_ERROR("%s, hif_ctx null", __func__); 1384 return; 1385 } 1386 1387 hif_pm_runtime_stop(pci_ctx); 1388 } 1389 1390 void hif_pci_display_stats(struct hif_softc *hif_ctx) 1391 { 1392 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); 1393 1394 if (!pci_ctx) { 1395 HIF_ERROR("%s, hif_ctx null", __func__); 1396 return; 1397 } 1398 hif_display_ce_stats(&pci_ctx->ce_sc); 1399 1400 hif_print_pci_stats(pci_ctx); 1401 } 1402 1403 void hif_pci_clear_stats(struct hif_softc *hif_ctx) 1404 { 1405 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); 1406 1407 if (!pci_ctx) { 1408 HIF_ERROR("%s, hif_ctx null", __func__); 1409 return; 1410 } 1411 hif_clear_ce_stats(&pci_ctx->ce_sc); 1412 } 1413 1414 #define ATH_PCI_PROBE_RETRY_MAX 3 1415 /** 1416 * hif_bus_open(): hif_bus_open 1417 * @scn: scn 1418 * @bus_type: bus type 1419 * 1420 * Return: n/a 1421 */ 1422 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type) 1423 { 1424 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 1425 1426 hif_ctx->bus_type = bus_type; 1427 hif_pm_runtime_open(sc); 1428 1429 qdf_spinlock_create(&sc->irq_lock); 1430 1431 return hif_ce_open(hif_ctx); 1432 } 1433 1434 /** 1435 * hif_wake_target_cpu() - wake the target's cpu 1436 * @scn: hif context 1437 * 1438 * Send an interrupt to the device to wake up the Target CPU 1439 * so it has an opportunity to notice any changed state. 
1440 */ 1441 static void hif_wake_target_cpu(struct hif_softc *scn) 1442 { 1443 QDF_STATUS rv; 1444 uint32_t core_ctrl; 1445 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 1446 1447 rv = hif_diag_read_access(hif_hdl, 1448 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS, 1449 &core_ctrl); 1450 QDF_ASSERT(rv == QDF_STATUS_SUCCESS); 1451 /* A_INUM_FIRMWARE interrupt to Target CPU */ 1452 core_ctrl |= CORE_CTRL_CPU_INTR_MASK; 1453 1454 rv = hif_diag_write_access(hif_hdl, 1455 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS, 1456 core_ctrl); 1457 QDF_ASSERT(rv == QDF_STATUS_SUCCESS); 1458 } 1459 1460 /** 1461 * soc_wake_reset() - allow the target to go to sleep 1462 * @scn: hif_softc 1463 * 1464 * Clear the force wake register. This is done by 1465 * hif_sleep_entry and cancel defered timer sleep. 1466 */ 1467 static void soc_wake_reset(struct hif_softc *scn) 1468 { 1469 hif_write32_mb(scn, scn->mem + 1470 PCIE_LOCAL_BASE_ADDRESS + 1471 PCIE_SOC_WAKE_ADDRESS, 1472 PCIE_SOC_WAKE_RESET); 1473 } 1474 1475 /** 1476 * hif_sleep_entry() - gate target sleep 1477 * @arg: hif context 1478 * 1479 * This function is the callback for the sleep timer. 1480 * Check if last force awake critical section was at least 1481 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was, 1482 * allow the target to go to sleep and cancel the sleep timer. 1483 * otherwise reschedule the sleep timer. 
1484 */ 1485 static void hif_sleep_entry(void *arg) 1486 { 1487 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg; 1488 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 1489 uint32_t idle_ms; 1490 1491 if (scn->recovery) 1492 return; 1493 1494 if (hif_is_driver_unloading(scn)) 1495 return; 1496 1497 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); 1498 if (hif_state->fake_sleep) { 1499 idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks() 1500 - hif_state->sleep_ticks); 1501 if (!hif_state->verified_awake && 1502 idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) { 1503 if (!qdf_atomic_read(&scn->link_suspended)) { 1504 soc_wake_reset(scn); 1505 hif_state->fake_sleep = false; 1506 } 1507 } else { 1508 qdf_timer_stop(&hif_state->sleep_timer); 1509 qdf_timer_start(&hif_state->sleep_timer, 1510 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS); 1511 } 1512 } 1513 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); 1514 } 1515 1516 #define HIF_HIA_MAX_POLL_LOOP 1000000 1517 #define HIF_HIA_POLLING_DELAY_MS 10 1518 1519 #ifdef QCA_HIF_HIA_EXTND 1520 1521 static void hif_set_hia_extnd(struct hif_softc *scn) 1522 { 1523 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 1524 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 1525 uint32_t target_type = tgt_info->target_type; 1526 1527 HIF_TRACE("%s: E", __func__); 1528 1529 if ((target_type == TARGET_TYPE_AR900B) || 1530 target_type == TARGET_TYPE_QCA9984 || 1531 target_type == TARGET_TYPE_QCA9888) { 1532 /* CHIP revision is 8-11 bits of the CHIP_ID register 0xec 1533 * in RTC space 1534 */ 1535 tgt_info->target_revision 1536 = CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem 1537 + CHIP_ID_ADDRESS)); 1538 qdf_print("chip_id 0x%x chip_revision 0x%x", 1539 target_type, tgt_info->target_revision); 1540 } 1541 1542 { 1543 uint32_t flag2_value = 0; 1544 uint32_t flag2_targ_addr = 1545 host_interest_item_address(target_type, 1546 offsetof(struct host_interest_s, hi_skip_clock_init)); 1547 1548 
if ((ar900b_20_targ_clk != -1) && 1549 (frac != -1) && (intval != -1)) { 1550 hif_diag_read_access(hif_hdl, flag2_targ_addr, 1551 &flag2_value); 1552 qdf_print("\n Setting clk_override"); 1553 flag2_value |= CLOCK_OVERRIDE; 1554 1555 hif_diag_write_access(hif_hdl, flag2_targ_addr, 1556 flag2_value); 1557 qdf_print("\n CLOCK PLL val set %d", flag2_value); 1558 } else { 1559 qdf_print("\n CLOCK PLL skipped"); 1560 } 1561 } 1562 1563 if (target_type == TARGET_TYPE_AR900B 1564 || target_type == TARGET_TYPE_QCA9984 1565 || target_type == TARGET_TYPE_QCA9888) { 1566 1567 /* for AR9980_2.0, 300 mhz clock is used, right now we assume 1568 * this would be supplied through module parameters, 1569 * if not supplied assumed default or same behavior as 1.0. 1570 * Assume 1.0 clock can't be tuned, reset to defaults 1571 */ 1572 1573 qdf_print(KERN_INFO 1574 "%s: setting the target pll frac %x intval %x", 1575 __func__, frac, intval); 1576 1577 /* do not touch frac, and int val, let them be default -1, 1578 * if desired, host can supply these through module params 1579 */ 1580 if (frac != -1 || intval != -1) { 1581 uint32_t flag2_value = 0; 1582 uint32_t flag2_targ_addr; 1583 1584 flag2_targ_addr = 1585 host_interest_item_address(target_type, 1586 offsetof(struct host_interest_s, 1587 hi_clock_info)); 1588 hif_diag_read_access(hif_hdl, 1589 flag2_targ_addr, &flag2_value); 1590 qdf_print("\n ====> FRAC Val %x Address %x", frac, 1591 flag2_value); 1592 hif_diag_write_access(hif_hdl, flag2_value, frac); 1593 qdf_print("\n INT Val %x Address %x", 1594 intval, flag2_value + 4); 1595 hif_diag_write_access(hif_hdl, 1596 flag2_value + 4, intval); 1597 } else { 1598 qdf_print(KERN_INFO 1599 "%s: no frac provided, skipping pre-configuring PLL", 1600 __func__); 1601 } 1602 1603 /* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */ 1604 if ((target_type == TARGET_TYPE_AR900B) 1605 && (tgt_info->target_revision == AR900B_REV_2) 1606 && ar900b_20_targ_clk != -1) { 1607 uint32_t flag2_value 
= 0; 1608 uint32_t flag2_targ_addr; 1609 1610 flag2_targ_addr 1611 = host_interest_item_address(target_type, 1612 offsetof(struct host_interest_s, 1613 hi_desired_cpu_speed_hz)); 1614 hif_diag_read_access(hif_hdl, flag2_targ_addr, 1615 &flag2_value); 1616 qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x", 1617 flag2_value); 1618 hif_diag_write_access(hif_hdl, flag2_value, 1619 ar900b_20_targ_clk/*300000000u*/); 1620 } else if (target_type == TARGET_TYPE_QCA9888) { 1621 uint32_t flag2_targ_addr; 1622 1623 if (200000000u != qca9888_20_targ_clk) { 1624 qca9888_20_targ_clk = 300000000u; 1625 /* Setting the target clock speed to 300 mhz */ 1626 } 1627 1628 flag2_targ_addr 1629 = host_interest_item_address(target_type, 1630 offsetof(struct host_interest_s, 1631 hi_desired_cpu_speed_hz)); 1632 hif_diag_write_access(hif_hdl, flag2_targ_addr, 1633 qca9888_20_targ_clk); 1634 } else { 1635 qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL", 1636 __func__); 1637 } 1638 } else { 1639 if (frac != -1 || intval != -1) { 1640 uint32_t flag2_value = 0; 1641 uint32_t flag2_targ_addr = 1642 host_interest_item_address(target_type, 1643 offsetof(struct host_interest_s, 1644 hi_clock_info)); 1645 hif_diag_read_access(hif_hdl, flag2_targ_addr, 1646 &flag2_value); 1647 qdf_print("\n ====> FRAC Val %x Address %x", frac, 1648 flag2_value); 1649 hif_diag_write_access(hif_hdl, flag2_value, frac); 1650 qdf_print("\n INT Val %x Address %x", intval, 1651 flag2_value + 4); 1652 hif_diag_write_access(hif_hdl, flag2_value + 4, 1653 intval); 1654 } 1655 } 1656 } 1657 1658 #else 1659 1660 static void hif_set_hia_extnd(struct hif_softc *scn) 1661 { 1662 } 1663 1664 #endif 1665 1666 /** 1667 * hif_set_hia() - fill out the host interest area 1668 * @scn: hif context 1669 * 1670 * This is replaced by hif_wlan_enable for integrated targets. 1671 * This fills out the host interest area. 
The firmware will 1672 * process these memory addresses when it is first brought out 1673 * of reset. 1674 * 1675 * Return: 0 for success. 1676 */ 1677 static int hif_set_hia(struct hif_softc *scn) 1678 { 1679 QDF_STATUS rv; 1680 uint32_t interconnect_targ_addr = 0; 1681 uint32_t pcie_state_targ_addr = 0; 1682 uint32_t pipe_cfg_targ_addr = 0; 1683 uint32_t svc_to_pipe_map = 0; 1684 uint32_t pcie_config_flags = 0; 1685 uint32_t flag2_value = 0; 1686 uint32_t flag2_targ_addr = 0; 1687 #ifdef QCA_WIFI_3_0 1688 uint32_t host_interest_area = 0; 1689 uint8_t i; 1690 #else 1691 uint32_t ealloc_value = 0; 1692 uint32_t ealloc_targ_addr = 0; 1693 uint8_t banks_switched = 1; 1694 uint32_t chip_id; 1695 #endif 1696 uint32_t pipe_cfg_addr; 1697 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 1698 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 1699 uint32_t target_type = tgt_info->target_type; 1700 uint32_t target_ce_config_sz, target_service_to_ce_map_sz; 1701 static struct CE_pipe_config *target_ce_config; 1702 struct service_to_pipe *target_service_to_ce_map; 1703 1704 HIF_TRACE("%s: E", __func__); 1705 1706 hif_get_target_ce_config(scn, 1707 &target_ce_config, &target_ce_config_sz, 1708 &target_service_to_ce_map, 1709 &target_service_to_ce_map_sz, 1710 NULL, NULL); 1711 1712 if (ADRASTEA_BU) 1713 return QDF_STATUS_SUCCESS; 1714 1715 #ifdef QCA_WIFI_3_0 1716 i = 0; 1717 while (i < HIF_HIA_MAX_POLL_LOOP) { 1718 host_interest_area = hif_read32_mb(scn, scn->mem + 1719 A_SOC_CORE_SCRATCH_0_ADDRESS); 1720 if ((host_interest_area & 0x01) == 0) { 1721 qdf_mdelay(HIF_HIA_POLLING_DELAY_MS); 1722 host_interest_area = 0; 1723 i++; 1724 if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0)) 1725 HIF_ERROR("%s: poll timeout(%d)", __func__, i); 1726 } else { 1727 host_interest_area &= (~0x01); 1728 hif_write32_mb(scn, scn->mem + 0x113014, 0); 1729 break; 1730 } 1731 } 1732 1733 if (i >= HIF_HIA_MAX_POLL_LOOP) { 1734 HIF_ERROR("%s: hia polling timeout", 
__func__); 1735 return -EIO; 1736 } 1737 1738 if (host_interest_area == 0) { 1739 HIF_ERROR("%s: host_interest_area = 0", __func__); 1740 return -EIO; 1741 } 1742 1743 interconnect_targ_addr = host_interest_area + 1744 offsetof(struct host_interest_area_t, 1745 hi_interconnect_state); 1746 1747 flag2_targ_addr = host_interest_area + 1748 offsetof(struct host_interest_area_t, hi_option_flag2); 1749 1750 #else 1751 interconnect_targ_addr = hif_hia_item_address(target_type, 1752 offsetof(struct host_interest_s, hi_interconnect_state)); 1753 ealloc_targ_addr = hif_hia_item_address(target_type, 1754 offsetof(struct host_interest_s, hi_early_alloc)); 1755 flag2_targ_addr = hif_hia_item_address(target_type, 1756 offsetof(struct host_interest_s, hi_option_flag2)); 1757 #endif 1758 /* Supply Target-side CE configuration */ 1759 rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr, 1760 &pcie_state_targ_addr); 1761 if (rv != QDF_STATUS_SUCCESS) { 1762 HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d", 1763 __func__, interconnect_targ_addr, rv); 1764 goto done; 1765 } 1766 if (pcie_state_targ_addr == 0) { 1767 rv = QDF_STATUS_E_FAILURE; 1768 HIF_ERROR("%s: pcie state addr is 0", __func__); 1769 goto done; 1770 } 1771 pipe_cfg_addr = pcie_state_targ_addr + 1772 offsetof(struct pcie_state_s, 1773 pipe_cfg_addr); 1774 rv = hif_diag_read_access(hif_hdl, 1775 pipe_cfg_addr, 1776 &pipe_cfg_targ_addr); 1777 if (rv != QDF_STATUS_SUCCESS) { 1778 HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d", 1779 __func__, pipe_cfg_addr, rv); 1780 goto done; 1781 } 1782 if (pipe_cfg_targ_addr == 0) { 1783 rv = QDF_STATUS_E_FAILURE; 1784 HIF_ERROR("%s: pipe cfg addr is 0", __func__); 1785 goto done; 1786 } 1787 1788 rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr, 1789 (uint8_t *) target_ce_config, 1790 target_ce_config_sz); 1791 1792 if (rv != QDF_STATUS_SUCCESS) { 1793 HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv); 1794 goto done; 1795 } 1796 1797 rv = 
hif_diag_read_access(hif_hdl, 1798 pcie_state_targ_addr + 1799 offsetof(struct pcie_state_s, 1800 svc_to_pipe_map), 1801 &svc_to_pipe_map); 1802 if (rv != QDF_STATUS_SUCCESS) { 1803 HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv); 1804 goto done; 1805 } 1806 if (svc_to_pipe_map == 0) { 1807 rv = QDF_STATUS_E_FAILURE; 1808 HIF_ERROR("%s: svc_to_pipe map is 0", __func__); 1809 goto done; 1810 } 1811 1812 rv = hif_diag_write_mem(hif_hdl, 1813 svc_to_pipe_map, 1814 (uint8_t *) target_service_to_ce_map, 1815 target_service_to_ce_map_sz); 1816 if (rv != QDF_STATUS_SUCCESS) { 1817 HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv); 1818 goto done; 1819 } 1820 1821 rv = hif_diag_read_access(hif_hdl, 1822 pcie_state_targ_addr + 1823 offsetof(struct pcie_state_s, 1824 config_flags), 1825 &pcie_config_flags); 1826 if (rv != QDF_STATUS_SUCCESS) { 1827 HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv); 1828 goto done; 1829 } 1830 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE) 1831 pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1; 1832 #else 1833 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; 1834 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */ 1835 pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT; 1836 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE) 1837 pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE; 1838 #endif 1839 rv = hif_diag_write_mem(hif_hdl, 1840 pcie_state_targ_addr + 1841 offsetof(struct pcie_state_s, 1842 config_flags), 1843 (uint8_t *) &pcie_config_flags, 1844 sizeof(pcie_config_flags)); 1845 if (rv != QDF_STATUS_SUCCESS) { 1846 HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv); 1847 goto done; 1848 } 1849 1850 #ifndef QCA_WIFI_3_0 1851 /* configure early allocation */ 1852 ealloc_targ_addr = hif_hia_item_address(target_type, 1853 offsetof( 1854 struct host_interest_s, 1855 hi_early_alloc)); 1856 1857 rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr, 1858 &ealloc_value); 1859 if (rv != QDF_STATUS_SUCCESS) { 1860 HIF_ERROR("%s: get early alloc 
val (%d)", __func__, rv); 1861 goto done; 1862 } 1863 1864 /* 1 bank is switched to IRAM, except ROME 1.0 */ 1865 ealloc_value |= 1866 ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & 1867 HI_EARLY_ALLOC_MAGIC_MASK); 1868 1869 rv = hif_diag_read_access(hif_hdl, 1870 CHIP_ID_ADDRESS | 1871 RTC_SOC_BASE_ADDRESS, &chip_id); 1872 if (rv != QDF_STATUS_SUCCESS) { 1873 HIF_ERROR("%s: get chip id val (%d)", __func__, rv); 1874 goto done; 1875 } 1876 if (CHIP_ID_VERSION_GET(chip_id) == 0xD) { 1877 tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id); 1878 switch (CHIP_ID_REVISION_GET(chip_id)) { 1879 case 0x2: /* ROME 1.3 */ 1880 /* 2 banks are switched to IRAM */ 1881 banks_switched = 2; 1882 break; 1883 case 0x4: /* ROME 2.1 */ 1884 case 0x5: /* ROME 2.2 */ 1885 banks_switched = 6; 1886 break; 1887 case 0x8: /* ROME 3.0 */ 1888 case 0x9: /* ROME 3.1 */ 1889 case 0xA: /* ROME 3.2 */ 1890 banks_switched = 9; 1891 break; 1892 case 0x0: /* ROME 1.0 */ 1893 case 0x1: /* ROME 1.1 */ 1894 default: 1895 /* 3 banks are switched to IRAM */ 1896 banks_switched = 3; 1897 break; 1898 } 1899 } 1900 1901 ealloc_value |= 1902 ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) 1903 & HI_EARLY_ALLOC_IRAM_BANKS_MASK); 1904 1905 rv = hif_diag_write_access(hif_hdl, 1906 ealloc_targ_addr, 1907 ealloc_value); 1908 if (rv != QDF_STATUS_SUCCESS) { 1909 HIF_ERROR("%s: set early alloc val (%d)", __func__, rv); 1910 goto done; 1911 } 1912 #endif 1913 if ((target_type == TARGET_TYPE_AR900B) 1914 || (target_type == TARGET_TYPE_QCA9984) 1915 || (target_type == TARGET_TYPE_QCA9888) 1916 || (target_type == TARGET_TYPE_AR9888)) { 1917 hif_set_hia_extnd(scn); 1918 } 1919 1920 /* Tell Target to proceed with initialization */ 1921 flag2_targ_addr = hif_hia_item_address(target_type, 1922 offsetof( 1923 struct host_interest_s, 1924 hi_option_flag2)); 1925 1926 rv = hif_diag_read_access(hif_hdl, flag2_targ_addr, 1927 &flag2_value); 1928 if (rv != QDF_STATUS_SUCCESS) { 1929 HIF_ERROR("%s: get 
option val (%d)", __func__, rv); 1930 goto done; 1931 } 1932 1933 flag2_value |= HI_OPTION_EARLY_CFG_DONE; 1934 rv = hif_diag_write_access(hif_hdl, flag2_targ_addr, 1935 flag2_value); 1936 if (rv != QDF_STATUS_SUCCESS) { 1937 HIF_ERROR("%s: set option val (%d)", __func__, rv); 1938 goto done; 1939 } 1940 1941 hif_wake_target_cpu(scn); 1942 1943 done: 1944 1945 return rv; 1946 } 1947 1948 /** 1949 * hif_bus_configure() - configure the pcie bus 1950 * @hif_sc: pointer to the hif context. 1951 * 1952 * return: 0 for success. nonzero for failure. 1953 */ 1954 int hif_pci_bus_configure(struct hif_softc *hif_sc) 1955 { 1956 int status = 0; 1957 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 1958 struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc); 1959 1960 hif_ce_prepare_config(hif_sc); 1961 1962 /* initialize sleep state adjust variables */ 1963 hif_state->sleep_timer_init = true; 1964 hif_state->keep_awake_count = 0; 1965 hif_state->fake_sleep = false; 1966 hif_state->sleep_ticks = 0; 1967 1968 qdf_timer_init(NULL, &hif_state->sleep_timer, 1969 hif_sleep_entry, (void *)hif_state, 1970 QDF_TIMER_TYPE_WAKE_APPS); 1971 hif_state->sleep_timer_init = true; 1972 1973 status = hif_wlan_enable(hif_sc); 1974 if (status) { 1975 HIF_ERROR("%s: hif_wlan_enable error = %d", 1976 __func__, status); 1977 goto timer_free; 1978 } 1979 1980 A_TARGET_ACCESS_LIKELY(hif_sc); 1981 1982 if ((CONFIG_ATH_PCIE_MAX_PERF || 1983 CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) && 1984 !ce_srng_based(hif_sc)) { 1985 /* 1986 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature 1987 * prevent sleep when we want to keep firmware always awake 1988 * note: when we want to keep firmware always awake, 1989 * hif_target_sleep_state_adjust will point to a dummy 1990 * function, and hif_pci_target_sleep_state_adjust must 1991 * be called instead. 1992 * note: bus type check is here because AHB bus is reusing 1993 * hif_pci_bus_configure code. 
1994 */ 1995 if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) { 1996 if (hif_pci_target_sleep_state_adjust(hif_sc, 1997 false, true) < 0) { 1998 status = -EACCES; 1999 goto disable_wlan; 2000 } 2001 } 2002 } 2003 2004 /* todo: consider replacing this with an srng field */ 2005 if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) || 2006 (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) || 2007 (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) && 2008 (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) { 2009 hif_sc->per_ce_irq = true; 2010 } 2011 2012 status = hif_config_ce(hif_sc); 2013 if (status) 2014 goto disable_wlan; 2015 2016 /* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */ 2017 if (hif_needs_bmi(hif_osc)) { 2018 status = hif_set_hia(hif_sc); 2019 if (status) 2020 goto unconfig_ce; 2021 2022 HIF_INFO_MED("%s: hif_set_hia done", __func__); 2023 2024 } 2025 2026 if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) || 2027 (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) || 2028 (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) && 2029 (hif_sc->bus_type == QDF_BUS_TYPE_PCI)) 2030 HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target", 2031 __func__); 2032 else { 2033 status = hif_configure_irq(hif_sc); 2034 if (status < 0) 2035 goto unconfig_ce; 2036 } 2037 2038 A_TARGET_ACCESS_UNLIKELY(hif_sc); 2039 2040 return status; 2041 2042 unconfig_ce: 2043 hif_unconfig_ce(hif_sc); 2044 disable_wlan: 2045 A_TARGET_ACCESS_UNLIKELY(hif_sc); 2046 hif_wlan_disable(hif_sc); 2047 2048 timer_free: 2049 qdf_timer_stop(&hif_state->sleep_timer); 2050 qdf_timer_free(&hif_state->sleep_timer); 2051 hif_state->sleep_timer_init = false; 2052 2053 HIF_ERROR("%s: failed, status = %d", __func__, status); 2054 return status; 2055 } 2056 2057 /** 2058 * hif_bus_close(): hif_bus_close 2059 * 2060 * Return: n/a 2061 */ 2062 void hif_pci_close(struct hif_softc *hif_sc) 2063 { 2064 struct hif_pci_softc *hif_pci_sc = 
HIF_GET_PCI_SOFTC(hif_sc); 2065 2066 hif_pm_runtime_close(hif_pci_sc); 2067 hif_ce_close(hif_sc); 2068 } 2069 2070 #define BAR_NUM 0 2071 2072 static int hif_enable_pci_nopld(struct hif_pci_softc *sc, 2073 struct pci_dev *pdev, 2074 const struct pci_device_id *id) 2075 { 2076 void __iomem *mem; 2077 int ret = 0; 2078 uint16_t device_id = 0; 2079 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); 2080 2081 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); 2082 if (device_id != id->device) { 2083 HIF_ERROR( 2084 "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x", 2085 __func__, device_id, id->device); 2086 /* pci link is down, so returing with error code */ 2087 return -EIO; 2088 } 2089 2090 /* FIXME: temp. commenting out assign_resource 2091 * call for dev_attach to work on 2.6.38 kernel 2092 */ 2093 #if (!defined(__LINUX_ARM_ARCH__)) 2094 if (pci_assign_resource(pdev, BAR_NUM)) { 2095 HIF_ERROR("%s: pci_assign_resource error", __func__); 2096 return -EIO; 2097 } 2098 #endif 2099 if (pci_enable_device(pdev)) { 2100 HIF_ERROR("%s: pci_enable_device error", 2101 __func__); 2102 return -EIO; 2103 } 2104 2105 /* Request MMIO resources */ 2106 ret = pci_request_region(pdev, BAR_NUM, "ath"); 2107 if (ret) { 2108 HIF_ERROR("%s: PCI MMIO reservation error", __func__); 2109 ret = -EIO; 2110 goto err_region; 2111 } 2112 2113 #ifdef CONFIG_ARM_LPAE 2114 /* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask 2115 * for 32 bits device also. 
2116 */ 2117 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2118 if (ret) { 2119 HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__); 2120 goto err_dma; 2121 } 2122 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2123 if (ret) { 2124 HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__); 2125 goto err_dma; 2126 } 2127 #else 2128 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2129 if (ret) { 2130 HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__); 2131 goto err_dma; 2132 } 2133 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2134 if (ret) { 2135 HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!", 2136 __func__); 2137 goto err_dma; 2138 } 2139 #endif 2140 2141 PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188); 2142 2143 /* Set bus master bit in PCI_COMMAND to enable DMA */ 2144 pci_set_master(pdev); 2145 2146 /* Arrange for access to Target SoC registers. */ 2147 mem = pci_iomap(pdev, BAR_NUM, 0); 2148 if (!mem) { 2149 HIF_ERROR("%s: PCI iomap error", __func__); 2150 ret = -EIO; 2151 goto err_iomap; 2152 } 2153 2154 HIF_INFO("*****BAR is %pK\n", (void *)mem); 2155 2156 sc->mem = mem; 2157 2158 /* Hawkeye emulation specific change */ 2159 if ((device_id == RUMIM2M_DEVICE_ID_NODE0) || 2160 (device_id == RUMIM2M_DEVICE_ID_NODE1) || 2161 (device_id == RUMIM2M_DEVICE_ID_NODE2) || 2162 (device_id == RUMIM2M_DEVICE_ID_NODE3) || 2163 (device_id == RUMIM2M_DEVICE_ID_NODE4) || 2164 (device_id == RUMIM2M_DEVICE_ID_NODE5)) { 2165 mem = mem + 0x0c000000; 2166 sc->mem = mem; 2167 HIF_INFO("%s: Changing PCI mem base to %pK\n", 2168 __func__, sc->mem); 2169 } 2170 2171 sc->mem_len = pci_resource_len(pdev, BAR_NUM); 2172 ol_sc->mem = mem; 2173 ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM); 2174 sc->pci_enabled = true; 2175 return ret; 2176 2177 err_iomap: 2178 pci_clear_master(pdev); 2179 err_dma: 2180 pci_release_region(pdev, BAR_NUM); 2181 err_region: 2182 pci_disable_device(pdev); 2183 return ret; 2184 } 2185 2186 static int hif_enable_pci_pld(struct 
hif_pci_softc *sc, 2187 struct pci_dev *pdev, 2188 const struct pci_device_id *id) 2189 { 2190 PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188); 2191 sc->pci_enabled = true; 2192 return 0; 2193 } 2194 2195 2196 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc) 2197 { 2198 pci_disable_msi(sc->pdev); 2199 pci_iounmap(sc->pdev, sc->mem); 2200 pci_clear_master(sc->pdev); 2201 pci_release_region(sc->pdev, BAR_NUM); 2202 pci_disable_device(sc->pdev); 2203 } 2204 2205 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {} 2206 2207 static void hif_disable_pci(struct hif_pci_softc *sc) 2208 { 2209 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); 2210 2211 if (!ol_sc) { 2212 HIF_ERROR("%s: ol_sc = NULL", __func__); 2213 return; 2214 } 2215 hif_pci_device_reset(sc); 2216 sc->hif_pci_deinit(sc); 2217 2218 sc->mem = NULL; 2219 ol_sc->mem = NULL; 2220 } 2221 2222 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc) 2223 { 2224 int ret = 0; 2225 int targ_awake_limit = 500; 2226 #ifndef QCA_WIFI_3_0 2227 uint32_t fw_indicator; 2228 #endif 2229 struct hif_softc *scn = HIF_GET_SOFTC(sc); 2230 2231 /* 2232 * Verify that the Target was started cleanly.* 2233 * The case where this is most likely is with an AUX-powered 2234 * Target and a Host in WoW mode. If the Host crashes, 2235 * loses power, or is restarted (without unloading the driver) 2236 * then the Target is left (aux) powered and running. On a 2237 * subsequent driver load, the Target is in an unexpected state. 2238 * We try to catch that here in order to reset the Target and 2239 * retry the probe. 
2240 */ 2241 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 2242 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 2243 while (!hif_targ_is_awake(scn, sc->mem)) { 2244 if (0 == targ_awake_limit) { 2245 HIF_ERROR("%s: target awake timeout", __func__); 2246 ret = -EAGAIN; 2247 goto end; 2248 } 2249 qdf_mdelay(1); 2250 targ_awake_limit--; 2251 } 2252 2253 #if PCIE_BAR0_READY_CHECKING 2254 { 2255 int wait_limit = 200; 2256 /* Synchronization point: wait the BAR0 is configured */ 2257 while (wait_limit-- && 2258 !(hif_read32_mb(sc, c->mem + 2259 PCIE_LOCAL_BASE_ADDRESS + 2260 PCIE_SOC_RDY_STATUS_ADDRESS) 2261 & PCIE_SOC_RDY_STATUS_BAR_MASK)) { 2262 qdf_mdelay(10); 2263 } 2264 if (wait_limit < 0) { 2265 /* AR6320v1 doesn't support checking of BAR0 2266 * configuration, takes one sec to wait BAR0 ready 2267 */ 2268 HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0", 2269 __func__); 2270 } 2271 } 2272 #endif 2273 2274 #ifndef QCA_WIFI_3_0 2275 fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS); 2276 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 2277 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); 2278 2279 if (fw_indicator & FW_IND_INITIALIZED) { 2280 HIF_ERROR("%s: Target is in an unknown state. 
EAGAIN", 2281 __func__); 2282 ret = -EAGAIN; 2283 goto end; 2284 } 2285 #endif 2286 2287 end: 2288 return ret; 2289 } 2290 2291 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc) 2292 { 2293 int ret = 0; 2294 struct hif_softc *scn = HIF_GET_SOFTC(sc); 2295 uint32_t target_type = scn->target_info.target_type; 2296 2297 HIF_TRACE("%s: E", __func__); 2298 2299 /* do notn support MSI or MSI IRQ failed */ 2300 tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc); 2301 ret = request_irq(sc->pdev->irq, 2302 hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED, 2303 "wlan_pci", sc); 2304 if (ret) { 2305 HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret); 2306 goto end; 2307 } 2308 scn->wake_irq = sc->pdev->irq; 2309 /* Use sc->irq instead of sc->pdev-irq 2310 * platform_device pdev doesn't have an irq field 2311 */ 2312 sc->irq = sc->pdev->irq; 2313 /* Use Legacy PCI Interrupts */ 2314 hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS | 2315 PCIE_INTR_ENABLE_ADDRESS), 2316 HOST_GROUP0_MASK); 2317 hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS | 2318 PCIE_INTR_ENABLE_ADDRESS)); 2319 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 2320 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); 2321 2322 if ((target_type == TARGET_TYPE_IPQ4019) || 2323 (target_type == TARGET_TYPE_AR900B) || 2324 (target_type == TARGET_TYPE_QCA9984) || 2325 (target_type == TARGET_TYPE_AR9888) || 2326 (target_type == TARGET_TYPE_QCA9888) || 2327 (target_type == TARGET_TYPE_AR6320V1) || 2328 (target_type == TARGET_TYPE_AR6320V2) || 2329 (target_type == TARGET_TYPE_AR6320V3)) { 2330 hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS + 2331 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 2332 } 2333 end: 2334 QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, 2335 "%s: X, ret = %d", __func__, ret); 2336 return ret; 2337 } 2338 2339 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn) 2340 { 2341 int ret; 2342 int ce_id, irq; 2343 uint32_t msi_data_start; 2344 
uint32_t msi_data_count; 2345 uint32_t msi_irq_start; 2346 struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn); 2347 struct CE_attr *host_ce_conf = ce_sc->host_ce_config; 2348 2349 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 2350 &msi_data_count, &msi_data_start, 2351 &msi_irq_start); 2352 if (ret) 2353 return ret; 2354 2355 /* needs to match the ce_id -> irq data mapping 2356 * used in the srng parameter configuration 2357 */ 2358 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 2359 unsigned int msi_data; 2360 2361 if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR) 2362 continue; 2363 2364 if (!ce_sc->tasklets[ce_id].inited) 2365 continue; 2366 2367 msi_data = (ce_id % msi_data_count) + msi_irq_start; 2368 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); 2369 2370 hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__, 2371 ce_id, msi_data, irq); 2372 2373 free_irq(irq, &ce_sc->tasklets[ce_id]); 2374 } 2375 2376 return ret; 2377 } 2378 2379 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn) 2380 { 2381 int i, j, irq; 2382 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2383 struct hif_exec_context *hif_ext_group; 2384 2385 for (i = 0; i < hif_state->hif_num_extgroup; i++) { 2386 hif_ext_group = hif_state->hif_ext_group[i]; 2387 if (hif_ext_group->irq_requested) { 2388 hif_ext_group->irq_requested = false; 2389 for (j = 0; j < hif_ext_group->numirq; j++) { 2390 irq = hif_ext_group->os_irq[j]; 2391 free_irq(irq, hif_ext_group); 2392 } 2393 hif_ext_group->numirq = 0; 2394 } 2395 } 2396 } 2397 2398 /** 2399 * hif_nointrs(): disable IRQ 2400 * 2401 * This function stops interrupt(s) 2402 * 2403 * @scn: struct hif_softc 2404 * 2405 * Return: none 2406 */ 2407 void hif_pci_nointrs(struct hif_softc *scn) 2408 { 2409 int i, ret; 2410 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 2411 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2412 2413 ce_unregister_irq(hif_state, CE_ALL_BITMAP); 2414 2415 if 
(scn->request_irq_done == false) 2416 return; 2417 2418 hif_pci_deconfigure_grp_irq(scn); 2419 2420 ret = hif_ce_srng_msi_free_irq(scn); 2421 if (ret != -EINVAL) { 2422 /* ce irqs freed in hif_ce_srng_msi_free_irq */ 2423 2424 if (scn->wake_irq) 2425 free_irq(scn->wake_irq, scn); 2426 scn->wake_irq = 0; 2427 } else if (sc->num_msi_intrs > 0) { 2428 /* MSI interrupt(s) */ 2429 for (i = 0; i < sc->num_msi_intrs; i++) 2430 free_irq(sc->irq + i, sc); 2431 sc->num_msi_intrs = 0; 2432 } else { 2433 /* Legacy PCI line interrupt 2434 * Use sc->irq instead of sc->pdev-irq 2435 * platform_device pdev doesn't have an irq field 2436 */ 2437 free_irq(sc->irq, sc); 2438 } 2439 scn->request_irq_done = false; 2440 } 2441 2442 /** 2443 * hif_disable_bus(): hif_disable_bus 2444 * 2445 * This function disables the bus 2446 * 2447 * @bdev: bus dev 2448 * 2449 * Return: none 2450 */ 2451 void hif_pci_disable_bus(struct hif_softc *scn) 2452 { 2453 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 2454 struct pci_dev *pdev; 2455 void __iomem *mem; 2456 struct hif_target_info *tgt_info = &scn->target_info; 2457 2458 /* Attach did not succeed, all resources have been 2459 * freed in error handler 2460 */ 2461 if (!sc) 2462 return; 2463 2464 pdev = sc->pdev; 2465 if (ADRASTEA_BU) { 2466 hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn)); 2467 2468 hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0); 2469 hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS, 2470 HOST_GROUP0_MASK); 2471 } 2472 2473 #if defined(CPU_WARM_RESET_WAR) 2474 /* Currently CPU warm reset sequence is tested only for AR9888_REV2 2475 * Need to enable for AR9888_REV1 once CPU warm reset sequence is 2476 * verified for AR9888_REV1 2477 */ 2478 if ((tgt_info->target_version == AR9888_REV2_VERSION) || 2479 (tgt_info->target_version == AR9887_REV1_VERSION)) 2480 hif_pci_device_warm_reset(sc); 2481 else 2482 hif_pci_device_reset(sc); 2483 #else 2484 hif_pci_device_reset(sc); 2485 #endif 2486 mem = (void __iomem *)sc->mem; 2487 
if (mem) { 2488 hif_dump_pipe_debug_count(scn); 2489 if (scn->athdiag_procfs_inited) { 2490 athdiag_procfs_remove(); 2491 scn->athdiag_procfs_inited = false; 2492 } 2493 sc->hif_pci_deinit(sc); 2494 scn->mem = NULL; 2495 } 2496 HIF_INFO("%s: X", __func__); 2497 } 2498 2499 #define OL_ATH_PCI_PM_CONTROL 0x44 2500 2501 #ifdef FEATURE_RUNTIME_PM 2502 /** 2503 * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring 2504 * @scn: hif context 2505 * @flag: prevent linkdown if true otherwise allow 2506 * 2507 * this api should only be called as part of bus prevent linkdown 2508 */ 2509 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag) 2510 { 2511 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 2512 2513 if (flag) 2514 qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock); 2515 else 2516 qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock); 2517 } 2518 #else 2519 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag) 2520 { 2521 } 2522 #endif 2523 2524 #if defined(CONFIG_PCI_MSM) 2525 /** 2526 * hif_bus_prevent_linkdown(): allow or permit linkdown 2527 * @flag: true prevents linkdown, false allows 2528 * 2529 * Calls into the platform driver to vote against taking down the 2530 * pcie link. 2531 * 2532 * Return: n/a 2533 */ 2534 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag) 2535 { 2536 int errno; 2537 2538 HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable"); 2539 hif_runtime_prevent_linkdown(scn, flag); 2540 2541 errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag); 2542 if (errno) 2543 HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d", 2544 __func__, errno); 2545 } 2546 #else 2547 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag) 2548 { 2549 HIF_INFO("wlan: %s pcie power collapse", (flag ? 
"disable" : "enable")); 2550 hif_runtime_prevent_linkdown(scn, flag); 2551 } 2552 #endif 2553 2554 /** 2555 * hif_pci_bus_suspend(): prepare hif for suspend 2556 * 2557 * Return: Errno 2558 */ 2559 int hif_pci_bus_suspend(struct hif_softc *scn) 2560 { 2561 hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn)); 2562 2563 if (hif_drain_tasklets(scn)) { 2564 hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn)); 2565 return -EBUSY; 2566 } 2567 2568 /* Stop the HIF Sleep Timer */ 2569 hif_cancel_deferred_target_sleep(scn); 2570 2571 return 0; 2572 } 2573 2574 /** 2575 * __hif_check_link_status() - API to check if PCIe link is active/not 2576 * @scn: HIF Context 2577 * 2578 * API reads the PCIe config space to verify if PCIe link training is 2579 * successful or not. 2580 * 2581 * Return: Success/Failure 2582 */ 2583 static int __hif_check_link_status(struct hif_softc *scn) 2584 { 2585 uint16_t dev_id = 0; 2586 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 2587 struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); 2588 2589 if (!sc) { 2590 HIF_ERROR("%s: HIF Bus Context is Invalid", __func__); 2591 return -EINVAL; 2592 } 2593 2594 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id); 2595 2596 if (dev_id == sc->devid) 2597 return 0; 2598 2599 HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x", 2600 __func__, dev_id); 2601 2602 scn->recovery = true; 2603 2604 if (cbk && cbk->set_recovery_in_progress) 2605 cbk->set_recovery_in_progress(cbk->context, true); 2606 else 2607 HIF_ERROR("%s: Driver Global Recovery is not set", __func__); 2608 2609 pld_is_pci_link_down(sc->dev); 2610 return -EACCES; 2611 } 2612 2613 /** 2614 * hif_pci_bus_resume(): prepare hif for resume 2615 * 2616 * Return: Errno 2617 */ 2618 int hif_pci_bus_resume(struct hif_softc *scn) 2619 { 2620 int errno; 2621 2622 errno = __hif_check_link_status(scn); 2623 if (errno) 2624 return errno; 2625 2626 hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn)); 2627 2628 return 0; 2629 } 2630 
2631 /** 2632 * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions 2633 * @scn: hif context 2634 * 2635 * Ensure that if we received the wakeup message before the irq 2636 * was disabled that the message is pocessed before suspending. 2637 * 2638 * Return: -EBUSY if we fail to flush the tasklets. 2639 */ 2640 int hif_pci_bus_suspend_noirq(struct hif_softc *scn) 2641 { 2642 if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn))) 2643 qdf_atomic_set(&scn->link_suspended, 1); 2644 2645 hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn)); 2646 2647 return 0; 2648 } 2649 2650 /** 2651 * hif_pci_bus_resume_noirq() - ensure there are no pending transactions 2652 * @scn: hif context 2653 * 2654 * Ensure that if we received the wakeup message before the irq 2655 * was disabled that the message is pocessed before suspending. 2656 * 2657 * Return: -EBUSY if we fail to flush the tasklets. 2658 */ 2659 int hif_pci_bus_resume_noirq(struct hif_softc *scn) 2660 { 2661 hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn)); 2662 2663 if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn))) 2664 qdf_atomic_set(&scn->link_suspended, 0); 2665 2666 return 0; 2667 } 2668 2669 #ifdef FEATURE_RUNTIME_PM 2670 /** 2671 * __hif_runtime_pm_set_state(): utility function 2672 * @state: state to set 2673 * 2674 * indexes into the runtime pm state and sets it. 
2675 */ 2676 static void __hif_runtime_pm_set_state(struct hif_softc *scn, 2677 enum hif_pm_runtime_state state) 2678 { 2679 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 2680 2681 if (!sc) { 2682 HIF_ERROR("%s: HIF_CTX not initialized", 2683 __func__); 2684 return; 2685 } 2686 2687 qdf_atomic_set(&sc->pm_state, state); 2688 } 2689 2690 /** 2691 * hif_runtime_pm_set_state_on(): adjust runtime pm state 2692 * 2693 * Notify hif that a the runtime pm state should be on 2694 */ 2695 static void hif_runtime_pm_set_state_on(struct hif_softc *scn) 2696 { 2697 __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON); 2698 } 2699 2700 /** 2701 * hif_runtime_pm_set_state_resuming(): adjust runtime pm state 2702 * 2703 * Notify hif that a runtime pm resuming has started 2704 */ 2705 static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn) 2706 { 2707 __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING); 2708 } 2709 2710 /** 2711 * hif_runtime_pm_set_state_suspending(): adjust runtime pm state 2712 * 2713 * Notify hif that a runtime pm suspend has started 2714 */ 2715 static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn) 2716 { 2717 __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING); 2718 } 2719 2720 /** 2721 * hif_runtime_pm_set_state_suspended(): adjust runtime pm state 2722 * 2723 * Notify hif that a runtime suspend attempt has been completed successfully 2724 */ 2725 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn) 2726 { 2727 __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED); 2728 } 2729 2730 /** 2731 * hif_log_runtime_suspend_success() - log a successful runtime suspend 2732 */ 2733 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx) 2734 { 2735 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 2736 2737 if (!sc) 2738 return; 2739 2740 sc->pm_stats.suspended++; 2741 sc->pm_stats.suspend_jiffies = jiffies; 2742 } 2743 2744 /** 2745 * 
hif_log_runtime_suspend_failure() - log a failed runtime suspend 2746 * 2747 * log a failed runtime suspend 2748 * mark last busy to prevent immediate runtime suspend 2749 */ 2750 static void hif_log_runtime_suspend_failure(void *hif_ctx) 2751 { 2752 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 2753 2754 if (!sc) 2755 return; 2756 2757 sc->pm_stats.suspend_err++; 2758 } 2759 2760 /** 2761 * hif_log_runtime_resume_success() - log a successful runtime resume 2762 * 2763 * log a successful runtime resume 2764 * mark last busy to prevent immediate runtime suspend 2765 */ 2766 static void hif_log_runtime_resume_success(void *hif_ctx) 2767 { 2768 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 2769 2770 if (!sc) 2771 return; 2772 2773 sc->pm_stats.resumed++; 2774 } 2775 2776 /** 2777 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure 2778 * 2779 * Record the failure. 2780 * mark last busy to delay a retry. 2781 * adjust the runtime_pm state. 2782 */ 2783 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx) 2784 { 2785 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2786 2787 hif_log_runtime_suspend_failure(hif_ctx); 2788 hif_pm_runtime_mark_last_busy(hif_ctx); 2789 hif_runtime_pm_set_state_on(scn); 2790 } 2791 2792 /** 2793 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend 2794 * 2795 * Makes sure that the pci link will be taken down by the suspend opperation. 2796 * If the hif layer is configured to leave the bus on, runtime suspend will 2797 * not save any power. 2798 * 2799 * Set the runtime suspend state to in progress. 2800 * 2801 * return -EINVAL if the bus won't go down. 
otherwise return 0 2802 */ 2803 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx) 2804 { 2805 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2806 2807 if (!hif_can_suspend_link(hif_ctx)) { 2808 HIF_ERROR("Runtime PM not supported for link up suspend"); 2809 return -EINVAL; 2810 } 2811 2812 hif_runtime_pm_set_state_suspending(scn); 2813 return 0; 2814 } 2815 2816 /** 2817 * hif_process_runtime_suspend_success() - bookkeeping of suspend success 2818 * 2819 * Record the success. 2820 * adjust the runtime_pm state 2821 */ 2822 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx) 2823 { 2824 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2825 2826 hif_runtime_pm_set_state_suspended(scn); 2827 hif_log_runtime_suspend_success(scn); 2828 } 2829 2830 /** 2831 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume 2832 * 2833 * update the runtime pm state. 2834 */ 2835 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx) 2836 { 2837 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2838 2839 hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0); 2840 hif_runtime_pm_set_state_resuming(scn); 2841 } 2842 2843 /** 2844 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume 2845 * 2846 * record the success. 
2847 * adjust the runtime_pm state 2848 */ 2849 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx) 2850 { 2851 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2852 2853 hif_log_runtime_resume_success(hif_ctx); 2854 hif_pm_runtime_mark_last_busy(hif_ctx); 2855 hif_runtime_pm_set_state_on(scn); 2856 } 2857 2858 /** 2859 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend 2860 * 2861 * Return: 0 for success and non-zero error code for failure 2862 */ 2863 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx) 2864 { 2865 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 2866 int errno; 2867 2868 errno = hif_bus_suspend(hif_ctx); 2869 if (errno) { 2870 HIF_ERROR("%s: failed bus suspend: %d", __func__, errno); 2871 return errno; 2872 } 2873 2874 hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1); 2875 2876 errno = hif_bus_suspend_noirq(hif_ctx); 2877 if (errno) { 2878 HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno); 2879 hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0); 2880 goto bus_resume; 2881 } 2882 2883 qdf_atomic_set(&sc->pm_dp_rx_busy, 0); 2884 2885 return 0; 2886 2887 bus_resume: 2888 QDF_BUG(!hif_bus_resume(hif_ctx)); 2889 2890 return errno; 2891 } 2892 2893 /** 2894 * hif_fastpath_resume() - resume fastpath for runtimepm 2895 * 2896 * ensure that the fastpath write index register is up to date 2897 * since runtime pm may cause ce_send_fast to skip the register 2898 * write. 
2899 * 2900 * fastpath only applicable to legacy copy engine 2901 */ 2902 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) 2903 { 2904 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2905 struct CE_state *ce_state; 2906 2907 if (!scn) 2908 return; 2909 2910 if (scn->fastpath_mode_on) { 2911 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 2912 return; 2913 2914 ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG]; 2915 qdf_spin_lock_bh(&ce_state->ce_index_lock); 2916 2917 /*war_ce_src_ring_write_idx_set */ 2918 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr, 2919 ce_state->src_ring->write_index); 2920 qdf_spin_unlock_bh(&ce_state->ce_index_lock); 2921 Q_TARGET_ACCESS_END(scn); 2922 } 2923 } 2924 2925 /** 2926 * hif_runtime_resume() - do the bus resume part of a runtime resume 2927 * 2928 * Return: 0 for success and non-zero error code for failure 2929 */ 2930 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx) 2931 { 2932 QDF_BUG(!hif_bus_resume_noirq(hif_ctx)); 2933 QDF_BUG(!hif_bus_resume(hif_ctx)); 2934 return 0; 2935 } 2936 #endif /* #ifdef FEATURE_RUNTIME_PM */ 2937 2938 #if CONFIG_PCIE_64BIT_MSI 2939 static void hif_free_msi_ctx(struct hif_softc *scn) 2940 { 2941 struct hif_pci_softc *sc = scn->hif_sc; 2942 struct hif_msi_info *info = &sc->msi_info; 2943 struct device *dev = scn->qdf_dev->dev; 2944 2945 OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma, 2946 OS_GET_DMA_MEM_CONTEXT(scn, dmacontext)); 2947 info->magic = NULL; 2948 info->magic_dma = 0; 2949 } 2950 #else 2951 static void hif_free_msi_ctx(struct hif_softc *scn) 2952 { 2953 } 2954 #endif 2955 2956 void hif_pci_disable_isr(struct hif_softc *scn) 2957 { 2958 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 2959 2960 hif_exec_kill(&scn->osc); 2961 hif_nointrs(scn); 2962 hif_free_msi_ctx(scn); 2963 /* Cancel the pending tasklet */ 2964 ce_tasklet_kill(scn); 2965 tasklet_kill(&sc->intr_tq); 2966 qdf_atomic_set(&scn->active_tasklet_cnt, 0); 2967 qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0); 
2968 } 2969 2970 /* Function to reset SoC */ 2971 void hif_pci_reset_soc(struct hif_softc *hif_sc) 2972 { 2973 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc); 2974 struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc); 2975 struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc); 2976 2977 #if defined(CPU_WARM_RESET_WAR) 2978 /* Currently CPU warm reset sequence is tested only for AR9888_REV2 2979 * Need to enable for AR9888_REV1 once CPU warm reset sequence is 2980 * verified for AR9888_REV1 2981 */ 2982 if (tgt_info->target_version == AR9888_REV2_VERSION) 2983 hif_pci_device_warm_reset(sc); 2984 else 2985 hif_pci_device_reset(sc); 2986 #else 2987 hif_pci_device_reset(sc); 2988 #endif 2989 } 2990 2991 #ifdef CONFIG_PCI_MSM 2992 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) 2993 { 2994 msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0); 2995 msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0); 2996 } 2997 #else 2998 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}; 2999 #endif 3000 3001 /** 3002 * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info 3003 * @sc: HIF PCIe Context 3004 * 3005 * API to log PCIe Config space and SOC info when SOC wakeup timeout happens 3006 * 3007 * Return: Failure to caller 3008 */ 3009 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc) 3010 { 3011 uint16_t val = 0; 3012 uint32_t bar = 0; 3013 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc); 3014 struct hif_softc *scn = HIF_GET_SOFTC(sc); 3015 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc); 3016 struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl); 3017 struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); 3018 A_target_id_t pci_addr = scn->mem; 3019 3020 HIF_ERROR("%s: keep_awake_count = %d", 3021 __func__, hif_state->keep_awake_count); 3022 3023 pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val); 3024 3025 HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val); 
3026 3027 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val); 3028 3029 HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val); 3030 3031 pci_read_config_word(sc->pdev, PCI_COMMAND, &val); 3032 3033 HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val); 3034 3035 pci_read_config_word(sc->pdev, PCI_STATUS, &val); 3036 3037 HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val); 3038 3039 pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar); 3040 3041 HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar); 3042 3043 HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__, 3044 hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + 3045 PCIE_SOC_WAKE_ADDRESS)); 3046 3047 HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__, 3048 hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + 3049 RTC_STATE_ADDRESS)); 3050 3051 HIF_ERROR("%s:error, wakeup target", __func__); 3052 hif_msm_pcie_debug_info(sc); 3053 3054 if (!cfg->enable_self_recovery) 3055 QDF_BUG(0); 3056 3057 scn->recovery = true; 3058 3059 if (cbk->set_recovery_in_progress) 3060 cbk->set_recovery_in_progress(cbk->context, true); 3061 3062 pld_is_pci_link_down(sc->dev); 3063 return -EACCES; 3064 } 3065 3066 /* 3067 * For now, we use simple on-demand sleep/wake. 3068 * Some possible improvements: 3069 * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay 3070 * (or perhaps spin/delay for a short while, then convert to sleep/interrupt) 3071 * Careful, though, these functions may be used by 3072 * interrupt handlers ("atomic") 3073 * -Don't use host_reg_table for this code; instead use values directly 3074 * -Use a separate timer to track activity and allow Target to sleep only 3075 * if it hasn't done anything for a while; may even want to delay some 3076 * processing for a short while in order to "batch" (e.g.) transmit 3077 * requests with completion processing into "windows of up time". Costs 3078 * some performance, but improves power utilization. 
3079 * -On some platforms, it might be possible to eliminate explicit 3080 * sleep/wakeup. Instead, take a chance that each access works OK. If not, 3081 * recover from the failure by forcing the Target awake. 3082 * -Change keep_awake_count to an atomic_t in order to avoid spin lock 3083 * overhead in some cases. Perhaps this makes more sense when 3084 * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is 3085 * disabled. 3086 * -It is possible to compile this code out and simply force the Target 3087 * to remain awake. That would yield optimal performance at the cost of 3088 * increased power. See CONFIG_ATH_PCIE_MAX_PERF. 3089 * 3090 * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0). 3091 */ 3092 /** 3093 * hif_target_sleep_state_adjust() - on-demand sleep/wake 3094 * @scn: hif_softc pointer. 3095 * @sleep_ok: bool 3096 * @wait_for_it: bool 3097 * 3098 * Output the pipe error counts of each pipe to log file 3099 * 3100 * Return: int 3101 */ 3102 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn, 3103 bool sleep_ok, bool wait_for_it) 3104 { 3105 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3106 A_target_id_t pci_addr = scn->mem; 3107 static int max_delay; 3108 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 3109 static int debug; 3110 if (scn->recovery) 3111 return -EACCES; 3112 3113 if (qdf_atomic_read(&scn->link_suspended)) { 3114 HIF_ERROR("%s:invalid access, PCIe link is down", __func__); 3115 debug = true; 3116 QDF_ASSERT(0); 3117 return -EACCES; 3118 } 3119 3120 if (debug) { 3121 wait_for_it = true; 3122 HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended", 3123 __func__); 3124 QDF_ASSERT(0); 3125 } 3126 3127 if (sleep_ok) { 3128 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); 3129 hif_state->keep_awake_count--; 3130 if (hif_state->keep_awake_count == 0) { 3131 /* Allow sleep */ 3132 hif_state->verified_awake = false; 3133 hif_state->sleep_ticks = qdf_system_ticks(); 
3134 } 3135 if (hif_state->fake_sleep == false) { 3136 /* Set the Fake Sleep */ 3137 hif_state->fake_sleep = true; 3138 3139 /* Start the Sleep Timer */ 3140 qdf_timer_stop(&hif_state->sleep_timer); 3141 qdf_timer_start(&hif_state->sleep_timer, 3142 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS); 3143 } 3144 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); 3145 } else { 3146 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); 3147 3148 if (hif_state->fake_sleep) { 3149 hif_state->verified_awake = true; 3150 } else { 3151 if (hif_state->keep_awake_count == 0) { 3152 /* Force AWAKE */ 3153 hif_write32_mb(sc, pci_addr + 3154 PCIE_LOCAL_BASE_ADDRESS + 3155 PCIE_SOC_WAKE_ADDRESS, 3156 PCIE_SOC_WAKE_V_MASK); 3157 } 3158 } 3159 hif_state->keep_awake_count++; 3160 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); 3161 3162 if (wait_for_it && !hif_state->verified_awake) { 3163 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8Ms */ 3164 int tot_delay = 0; 3165 int curr_delay = 5; 3166 3167 for (;; ) { 3168 if (hif_targ_is_awake(scn, pci_addr)) { 3169 hif_state->verified_awake = true; 3170 break; 3171 } 3172 if (!hif_pci_targ_is_present(scn, pci_addr)) 3173 break; 3174 if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT) 3175 return hif_log_soc_wakeup_timeout(sc); 3176 3177 OS_DELAY(curr_delay); 3178 tot_delay += curr_delay; 3179 3180 if (curr_delay < 50) 3181 curr_delay += 5; 3182 } 3183 3184 /* 3185 * NB: If Target has to come out of Deep Sleep, 3186 * this may take a few Msecs. Typically, though 3187 * this delay should be <30us. 
3188 */ 3189 if (tot_delay > max_delay) 3190 max_delay = tot_delay; 3191 } 3192 } 3193 3194 if (debug && hif_state->verified_awake) { 3195 debug = 0; 3196 HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x", 3197 __func__, 3198 hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS + 3199 PCIE_INTR_ENABLE_ADDRESS), 3200 hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS + 3201 PCIE_INTR_CAUSE_ADDRESS), 3202 hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS + 3203 CPU_INTR_ADDRESS), 3204 hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS + 3205 PCIE_INTR_CLR_ADDRESS), 3206 hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS + 3207 CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)); 3208 } 3209 3210 return 0; 3211 } 3212 3213 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG 3214 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset) 3215 { 3216 uint32_t value; 3217 void *addr; 3218 3219 addr = scn->mem + offset; 3220 value = hif_read32_mb(scn, addr); 3221 3222 { 3223 unsigned long irq_flags; 3224 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM; 3225 3226 spin_lock_irqsave(&pcie_access_log_lock, irq_flags); 3227 pcie_access_log[idx].seqnum = pcie_access_log_seqnum; 3228 pcie_access_log[idx].is_write = false; 3229 pcie_access_log[idx].addr = addr; 3230 pcie_access_log[idx].value = value; 3231 pcie_access_log_seqnum++; 3232 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags); 3233 } 3234 3235 return value; 3236 } 3237 3238 void 3239 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value) 3240 { 3241 void *addr; 3242 3243 addr = scn->mem + (offset); 3244 hif_write32_mb(scn, addr, value); 3245 3246 { 3247 unsigned long irq_flags; 3248 int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM; 3249 3250 spin_lock_irqsave(&pcie_access_log_lock, irq_flags); 3251 pcie_access_log[idx].seqnum = pcie_access_log_seqnum; 3252 pcie_access_log[idx].is_write = true; 3253 
pcie_access_log[idx].addr = addr; 3254 pcie_access_log[idx].value = value; 3255 pcie_access_log_seqnum++; 3256 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags); 3257 } 3258 } 3259 3260 /** 3261 * hif_target_dump_access_log() - dump access log 3262 * 3263 * dump access log 3264 * 3265 * Return: n/a 3266 */ 3267 void hif_target_dump_access_log(void) 3268 { 3269 int idx, len, start_idx, cur_idx; 3270 unsigned long irq_flags; 3271 3272 spin_lock_irqsave(&pcie_access_log_lock, irq_flags); 3273 if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) { 3274 len = PCIE_ACCESS_LOG_NUM; 3275 start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM; 3276 } else { 3277 len = pcie_access_log_seqnum; 3278 start_idx = 0; 3279 } 3280 3281 for (idx = 0; idx < len; idx++) { 3282 cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM; 3283 HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.", 3284 __func__, idx, 3285 pcie_access_log[cur_idx].seqnum, 3286 pcie_access_log[cur_idx].is_write, 3287 pcie_access_log[cur_idx].addr, 3288 pcie_access_log[cur_idx].value); 3289 } 3290 3291 pcie_access_log_seqnum = 0; 3292 spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags); 3293 } 3294 #endif 3295 3296 #ifndef HIF_AHB 3297 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc) 3298 { 3299 QDF_BUG(0); 3300 return -EINVAL; 3301 } 3302 3303 int hif_ahb_configure_irq(struct hif_pci_softc *sc) 3304 { 3305 QDF_BUG(0); 3306 return -EINVAL; 3307 } 3308 #endif 3309 3310 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context) 3311 { 3312 struct ce_tasklet_entry *tasklet_entry = context; 3313 return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry); 3314 } 3315 extern const char *ce_name[]; 3316 3317 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id) 3318 { 3319 struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); 3320 3321 return pci_scn->ce_msi_irq_num[ce_id]; 3322 } 3323 3324 /* hif_srng_msi_irq_disable() - disable the irq for msi 3325 * @hif_sc: hif 
context 3326 * @ce_id: which ce to disable copy complete interrupts for 3327 * 3328 * since MSI interrupts are not level based, the system can function 3329 * without disabling these interrupts. Interrupt mitigation can be 3330 * added here for better system performance. 3331 */ 3332 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id) 3333 { 3334 disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); 3335 } 3336 3337 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id) 3338 { 3339 enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); 3340 } 3341 3342 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id) 3343 { 3344 disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); 3345 } 3346 3347 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id) 3348 { 3349 enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); 3350 } 3351 3352 static int hif_ce_msi_configure_irq(struct hif_softc *scn) 3353 { 3354 int ret; 3355 int ce_id, irq; 3356 uint32_t msi_data_start; 3357 uint32_t msi_data_count; 3358 uint32_t msi_irq_start; 3359 struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn); 3360 struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn); 3361 struct CE_attr *host_ce_conf = ce_sc->host_ce_config; 3362 3363 /* do wake irq assignment */ 3364 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE", 3365 &msi_data_count, &msi_data_start, 3366 &msi_irq_start); 3367 if (ret) 3368 return ret; 3369 3370 scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start); 3371 ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 3372 IRQF_NO_SUSPEND, "wlan_wake_irq", scn); 3373 if (ret) 3374 return ret; 3375 3376 /* do ce irq assignments */ 3377 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 3378 &msi_data_count, &msi_data_start, 3379 &msi_irq_start); 3380 if (ret) 3381 goto free_wake_irq; 3382 3383 if (ce_srng_based(scn)) { 3384 scn->bus_ops.hif_irq_disable = 
&hif_ce_srng_msi_irq_disable; 3385 scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable; 3386 } else { 3387 scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable; 3388 scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable; 3389 } 3390 3391 scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq; 3392 3393 /* needs to match the ce_id -> irq data mapping 3394 * used in the srng parameter configuration 3395 */ 3396 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 3397 unsigned int msi_data = (ce_id % msi_data_count) + 3398 msi_irq_start; 3399 if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR) 3400 continue; 3401 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); 3402 HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)", 3403 __func__, ce_id, msi_data, irq, 3404 &ce_sc->tasklets[ce_id]); 3405 3406 /* implies the ce is also initialized */ 3407 if (!ce_sc->tasklets[ce_id].inited) 3408 continue; 3409 3410 pci_sc->ce_msi_irq_num[ce_id] = irq; 3411 ret = request_irq(irq, hif_ce_interrupt_handler, 3412 IRQF_SHARED, 3413 ce_name[ce_id], 3414 &ce_sc->tasklets[ce_id]); 3415 if (ret) 3416 goto free_irq; 3417 } 3418 3419 return ret; 3420 3421 free_irq: 3422 /* the request_irq for the last ce_id failed so skip it. 
*/ 3423 while (ce_id > 0 && ce_id < scn->ce_count) { 3424 unsigned int msi_data; 3425 3426 ce_id--; 3427 msi_data = (ce_id % msi_data_count) + msi_irq_start; 3428 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); 3429 free_irq(irq, &ce_sc->tasklets[ce_id]); 3430 } 3431 3432 free_wake_irq: 3433 free_irq(scn->wake_irq, scn->qdf_dev->dev); 3434 scn->wake_irq = 0; 3435 3436 return ret; 3437 } 3438 3439 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group) 3440 { 3441 int i; 3442 3443 for (i = 0; i < hif_ext_group->numirq; i++) 3444 disable_irq_nosync(hif_ext_group->os_irq[i]); 3445 } 3446 3447 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group) 3448 { 3449 int i; 3450 3451 for (i = 0; i < hif_ext_group->numirq; i++) 3452 enable_irq(hif_ext_group->os_irq[i]); 3453 } 3454 3455 /** 3456 * hif_pci_get_irq_name() - get irqname 3457 * This function gives irqnumber to irqname 3458 * mapping. 3459 * 3460 * @irq_no: irq number 3461 * 3462 * Return: irq name 3463 */ 3464 const char *hif_pci_get_irq_name(int irq_no) 3465 { 3466 return "pci-dummy"; 3467 } 3468 3469 int hif_pci_configure_grp_irq(struct hif_softc *scn, 3470 struct hif_exec_context *hif_ext_group) 3471 { 3472 int ret = 0; 3473 int irq = 0; 3474 int j; 3475 3476 hif_ext_group->irq_enable = &hif_exec_grp_irq_enable; 3477 hif_ext_group->irq_disable = &hif_exec_grp_irq_disable; 3478 hif_ext_group->irq_name = &hif_pci_get_irq_name; 3479 hif_ext_group->work_complete = &hif_dummy_grp_done; 3480 3481 for (j = 0; j < hif_ext_group->numirq; j++) { 3482 irq = hif_ext_group->irq[j]; 3483 3484 hif_info("request_irq = %d for grp %d", 3485 irq, hif_ext_group->grp_id); 3486 ret = request_irq(irq, 3487 hif_ext_group_interrupt_handler, 3488 IRQF_SHARED | IRQF_NO_SUSPEND, 3489 "wlan_EXT_GRP", 3490 hif_ext_group); 3491 if (ret) { 3492 HIF_ERROR("%s: request_irq failed ret = %d", 3493 __func__, ret); 3494 return -EFAULT; 3495 } 3496 hif_ext_group->os_irq[j] = irq; 3497 } 3498 
	hif_ext_group->irq_requested = true;
	return 0;
}

/**
 * hif_configure_irq() - configure interrupt
 *
 * This function configures interrupt(s)
 *
 * @sc: PCIe control struct
 * @hif_hdl: struct HIF_CE_state
 *
 * Return: 0 - for success
 */
int hif_configure_irq(struct hif_softc *scn)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	HIF_TRACE("%s: E", __func__);

	/* Polled mode needs no irq at all. */
	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
		scn->request_irq_done = false;
		return 0;
	}

	hif_init_reschedule_tasklet_work(sc);

	/* Prefer MSI; fall back to target-specific legacy wiring below. */
	ret = hif_ce_msi_configure_irq(scn);
	if (ret == 0) {
		goto end;
	}

	switch (scn->target_info.target_type) {
	case TARGET_TYPE_IPQ4019:
		ret = hif_ahb_configure_legacy_irq(sc);
		break;
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		ret = hif_ahb_configure_irq(sc);
		break;
	default:
		ret = hif_pci_configure_legacy_irq(sc);
		break;
	}
	if (ret < 0) {
		/* NOTE(review): message names the PCI path even when an AHB
		 * configure path failed above.
		 */
		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
			  __func__, ret);
		return ret;
	}
end:
	scn->request_irq_done = true;
	return 0;
}

/**
 * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
 * @scn: hif control structure
 *
 * Sets IRQ bit in LF Timer Status Address to awake peregrine/swift
 * stuck at a polling loop in pcie_address_config in FW
 *
 * Return: none
 */
static void hif_trigger_timer_irq(struct hif_softc *scn)
{
	int tmp;
	/* Trigger IRQ on Peregrine/Swift by setting
	 * IRQ Bit of LF_TIMER 0
	 */
	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
					     SOC_LF_TIMER_STATUS0_ADDRESS));
	/* Set Raw IRQ Bit */
	tmp |= 1;
	/* SOC_LF_TIMER_STATUS0 */
	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
			SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
}

/**
 * hif_target_sync() : ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: none
 */
static void hif_target_sync(struct hif_softc *scn)
{
	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
			    PCIE_INTR_ENABLE_ADDRESS),
		       PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* read to flush pcie write */
	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
			    PCIE_INTR_ENABLE_ADDRESS));

	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS,
		       PCIE_SOC_WAKE_V_MASK);
	/* Busy-wait (no timeout) for the target to report awake. */
	while (!hif_targ_is_awake(scn, scn->mem))
		;

	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;
		int fw_ind = 0;
		int retry_count = 0;
		uint32_t target_type = scn->target_info.target_type;
fw_retry:
		HIF_TRACE("%s: Loop checking FW signal", __func__);
		/* Poll FW_IND_INITIALIZED every 10 ms, re-asserting the
		 * interrupt enables each pass, for up to ~5 s.
		 */
		while (1) {
			fw_ind = hif_read32_mb(scn, scn->mem +
					       FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
				       PCIE_INTR_ENABLE_ADDRESS),
				       PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
			/* read to flush pcie write */
			(void)hif_read32_mb(scn, scn->mem +
				(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));

			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			/* AR9888 FW can get stuck in pcie_address_config;
			 * kick it with the LF timer irq and retry twice.
			 */
			if (target_type == TARGET_TYPE_AR9888 &&
			    retry_count++ < 2) {
				hif_trigger_timer_irq(scn);
				wait_limit = 500;
				goto fw_retry;
			}
			HIF_TRACE("%s: FW signal timed out",
				  __func__);
			qdf_assert_always(0);
		} else {
			HIF_TRACE("%s: Got FW signal, retries = %x",
				  __func__, 500-wait_limit);
		}
	}
	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}

/* Fetch BAR mapping info from the platform driver layer. */
static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
				     struct device *dev)
{
	struct pld_soc_info info;

	pld_get_soc_info(dev, &info);
	sc->mem = info.v_addr;
	sc->ce_sc.ol_sc.mem = info.v_addr;
	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
}

/* No-op: without PLD the BAR is mapped elsewhere in the enable path. */
static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
				       struct device *dev)
{}

/* True if this device id is managed through the PLD platform driver. */
static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
				    int device_id)
{
	if (!pld_have_platform_driver_support(sc->dev))
		return false;

	switch (device_id) {
	case QCA6290_DEVICE_ID:
	case QCN9000_DEVICE_ID:
	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case AR6320_DEVICE_ID:
	case QCN7605_DEVICE_ID:
		return true;
	}
	return false;
}

/* Select PLD vs non-PLD enable/deinit/soc-info callbacks. */
static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
					   int device_id)
{
	if (hif_is_pld_based_target(sc, device_id)) {
		sc->hif_enable_pci = hif_enable_pci_pld;
		sc->hif_pci_deinit = hif_pci_deinit_pld;
		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
	} else {
		sc->hif_enable_pci = hif_enable_pci_nopld;
		sc->hif_pci_deinit = hif_pci_deinit_nopld;
		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
	}
}

#ifdef HIF_REG_WINDOW_SUPPORT
/* Enable register windowing only for targets that need it (QCN7605). */
static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
					       u32 target_type)
{
	switch (target_type) {
	case TARGET_TYPE_QCN7605:
		sc->use_register_windowing = true;
		qdf_spinlock_create(&sc->register_access_lock);
		sc->register_window = 0;
		break;
	default:
		sc->use_register_windowing = false;
	}
}
#else
static void
hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
				   u32 target_type)
{
	sc->use_register_windowing = false;
}
#endif

/**
 * hif_enable_bus(): enable bus
 *
 * This function enables the bus
 *
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer
 * bid: bus id pointer
 * type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 * Return: QDF_STATUS
 */
QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
			      struct device *dev, void *bdev,
			      const struct hif_bus_id *bid,
			      enum hif_enable_type type)
{
	int ret = 0;
	uint32_t hif_type, target_type;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
	uint16_t revision_id = 0;
	int probe_again = 0;
	struct pci_dev *pdev = bdev;
	const struct pci_device_id *id = (const struct pci_device_id *)bid;
	struct hif_target_info *tgt_info;

	/* NOTE(review): ol_sc is passed to the HIF_GET_* macros above
	 * before this NULL check; presumably those are pure pointer
	 * arithmetic — confirm.
	 */
	if (!ol_sc) {
		HIF_ERROR("%s: hif_ctx is NULL", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
		  __func__, hif_get_conparam(ol_sc), id->device);

	sc->pdev = pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->device;
	sc->cacheline_sz = dma_get_cache_alignment();
	tgt_info = hif_get_target_info_handle(hif_hdl);
	hif_pci_init_deinit_ops_attach(sc, id->device);
	sc->hif_pci_get_soc_info(sc, dev);
again:
	ret = sc->hif_enable_pci(sc, pdev, id);
	if (ret < 0) {
		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
			  __func__, ret);
		goto err_enable_pci;
	}
	HIF_TRACE("%s: hif_enable_pci done", __func__);

	/* Temporary FIX: disable ASPM on peregrine.
 * Will be removed after the OTP is programmed
 */
	hif_disable_power_gating(hif_hdl);

	device_disable_async_suspend(&pdev->dev);
	/* 0x08 = PCI_CLASS_REVISION config offset; low byte is revision */
	pci_read_config_word(pdev, 0x08, &revision_id);

	ret = hif_get_device_type(id->device, revision_id,
				  &hif_type, &target_type);
	if (ret < 0) {
		HIF_ERROR("%s: invalid device id/revision_id", __func__);
		goto err_tgtstate;
	}
	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
		  __func__, hif_type, target_type);

	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);

	hif_pci_init_reg_windowing_support(sc, target_type);

	tgt_info->target_type = target_type;

	if (ce_srng_based(ol_sc)) {
		HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__);
	} else {
		ret = hif_pci_probe_tgt_wakeup(sc);
		if (ret < 0) {
			HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
				  __func__, ret);
			if (ret == -EAGAIN)
				probe_again++;
			goto err_tgtstate;
		}
		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
	}

	if (!ol_sc->mem_pa) {
		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
		ret = -EIO;
		goto err_tgtstate;
	}

	if (!ce_srng_based(ol_sc)) {
		hif_target_sync(ol_sc);

		if (ADRASTEA_BU)
			hif_vote_link_up(hif_hdl);
	}

	return 0;

err_tgtstate:
	hif_disable_pci(sc);
	sc->pci_enabled = false;
	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
	return QDF_STATUS_E_ABORTED;

err_enable_pci:
	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
		int delay_time;

		HIF_INFO("%s: pci reprobe", __func__);
		/* max(100, 10 * probe_again^2) ms: 100, 100, 100, 160, ...
		 * NOTE(review): an older comment said "10, 40, 90, ..." which
		 * would match min(); confirm max() is the intended backoff.
		 */
		delay_time = max(100, 10 * (probe_again * probe_again));
		qdf_mdelay(delay_time);
		goto again;
	}
	return ret;
}

/**
 * hif_pci_irq_enable() - ce_irq_enable
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Clears the CE's bit in ce_irq_summary and, once all CEs have been
 * serviced, re-enables the legacy PCI line interrupt group.
 *
 * Return: void
 */
void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t tmp = 1 << ce_id;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	qdf_spin_lock_irqsave(&sc->irq_lock);
	scn->ce_irq_summary &= ~tmp;
	if (scn->ce_irq_summary == 0) {
		/* Enable Legacy PCI line interrupts */
		if (LEGACY_INTERRUPTS(sc) &&
		    (scn->target_status != TARGET_STATUS_RESET) &&
		    (!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn, scn->mem +
				       (SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS),
				       HOST_GROUP0_MASK);

			/* read back to flush the posted pcie write */
			hif_read32_mb(scn, scn->mem +
				      (SOC_CORE_BASE_ADDRESS |
				       PCIE_INTR_ENABLE_ADDRESS));
		}
	}
	if (scn->hif_init_done == true)
		Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_irqrestore(&sc->irq_lock);

	/* check for missed firmware crash */
	hif_fw_interrupt_handler(0, scn);
}

/**
 * hif_pci_irq_disable() - ce_irq_disable
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * only applicable to legacy copy engine...
 *
 * Return: void
 */
void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
{
	/* For Rome only need to wake up target */
	/* target access is maintained until interrupts are re-enabled */
	Q_TARGET_ACCESS_BEGIN(scn);
}

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_pm_runtime_get_sync() - do a get operation with sync resume
 *
 * A get operation will prevent a runtime suspend until a corresponding
 * put is done.
Unlike hif_pm_runtime_get(), this API will do a sync
 * resume instead of requesting a resume if it is runtime PM suspended
 * so it can only be called in non-atomic context.
 *
 * @hif_ctx: pointer of HIF context
 *
 * Return: 0 if it is runtime PM resumed otherwise an error code.
 */
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int pm_state;
	int ret;

	if (!sc)
		return -EINVAL;

	if (!pm_runtime_enabled(sc->dev))
		return 0;

	pm_state = qdf_atomic_read(&sc->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);

	sc->pm_stats.runtime_get++;
	ret = pm_runtime_get_sync(sc->dev);

	/* Get can return 1 if the device is already active, just return
	 * success in that case.
	 */
	if (ret > 0)
		ret = 0;

	if (ret) {
		/* pm_runtime_get_sync() bumps the usage count even on
		 * failure, so drop it again with a put.
		 */
		sc->pm_stats.runtime_get_err++;
		hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
			qdf_atomic_read(&sc->pm_state), ret);
		hif_pm_runtime_put(hif_ctx);
	}

	return ret;
}

/**
 * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
 *
 * This API will do a runtime put operation followed by a sync suspend if usage
 * count is 0 so it can only be called in non-atomic context.
 *
 * @hif_ctx: pointer of HIF context
 *
 * Return: 0 for success otherwise an error code
 */
int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int usage_count, pm_state;
	char *err = NULL;

	if (!sc)
		return -EINVAL;

	if (!pm_runtime_enabled(sc->dev))
		return 0;

	/* Sanity-check against unbalanced puts before touching the count. */
	usage_count = atomic_read(&sc->dev->power.usage_count);
	if (usage_count == 1) {
		pm_state = qdf_atomic_read(&sc->pm_state);
		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
			err = "Ignore unexpected Put as runtime PM is disabled";
	} else if (usage_count == 0) {
		err = "Put without a Get Operation";
	}

	if (err) {
		hif_pci_runtime_pm_warn(sc, err);
		return -EINVAL;
	}

	sc->pm_stats.runtime_put++;
	return pm_runtime_put_sync_suspend(sc->dev);
}

/* Request (asynchronously) a runtime resume; bookkeeping only here. */
int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int pm_state;

	if (!sc)
		return -EINVAL;

	if (!pm_runtime_enabled(sc->dev))
		return 0;

	pm_state = qdf_atomic_read(&sc->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		HIF_INFO("Runtime PM resume is requested by %ps",
			 (void *)_RET_IP_);

	sc->pm_stats.request_resume++;
	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;

	return hif_pm_request_resume(sc->dev);
}

/* Record caller/timestamp and refresh the autosuspend idle timer. */
void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	sc->pm_stats.last_busy_marker = (void *)_RET_IP_;
	sc->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();

	/* NOTE(review): "return <void expr>" from a void function is a
	 * C constraint violation (GNU/C++ extension) — confirm toolchain.
	 */
	return pm_runtime_mark_last_busy(sc->dev);
}

/* Take a usage-count reference without triggering a resume. */
void
hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	if (!pm_runtime_enabled(sc->dev))
		return;

	sc->pm_stats.runtime_get++;
	pm_runtime_get_noresume(sc->dev);
}

/**
 * hif_pm_runtime_get() - do a get opperation on the device
 *
 * A get opperation will prevent a runtime suspend until a
 * corresponding put is done. This api should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
 *
 * return: success if the bus is up and a get has been issued
 *   otherwise an error code.
 */
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int ret;
	int pm_state;

	if (!scn) {
		hif_err("Could not do runtime get, scn is null");
		return -EFAULT;
	}

	if (!pm_runtime_enabled(sc->dev))
		return 0;

	pm_state = qdf_atomic_read(&sc->pm_state);

	if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
	    pm_state == HIF_PM_RUNTIME_STATE_NONE) {
		sc->pm_stats.runtime_get++;
		ret = __hif_pm_runtime_get(sc->dev);

		/* Get can return 1 if the device is already active, just return
		 * success in that case
		 */
		if (ret > 0)
			ret = 0;

		if (ret)
			hif_pm_runtime_put(hif_ctx);

		if (ret && ret != -EINPROGRESS) {
			sc->pm_stats.runtime_get_err++;
			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
				qdf_atomic_read(&sc->pm_state), ret);
		}

		return ret;
	}

	/* Suspended / suspending: only request a resume, no get. */
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);
		ret = -EAGAIN;
	} else {
		ret = -EBUSY;
	}

	sc->pm_stats.request_resume++;
	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
	hif_pm_request_resume(sc->dev);

	return ret;
}

/**
 * hif_pm_runtime_put() - do a put opperation on the device
 *
 * A put opperation will allow a runtime suspend after a corresponding
 * get was done. This api should be used when sending data.
 *
 * This api will return a failure if runtime pm is stopped
 * This api will return failure if it would decrement the usage count below 0.
 *
 * return: QDF_STATUS_SUCCESS if the put is performed
 */
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int pm_state, usage_count;
	char *error = NULL;

	if (!scn) {
		HIF_ERROR("%s: Could not do runtime put, scn is null",
			  __func__);
		return -EFAULT;
	}

	if (!pm_runtime_enabled(sc->dev))
		return 0;

	usage_count = atomic_read(&sc->dev->power.usage_count);

	if (usage_count == 1) {
		pm_state = qdf_atomic_read(&sc->pm_state);

		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
			error = "Ignoring unexpected put when runtime pm is disabled";

	} else if (usage_count == 0) {
		error = "PUT Without a Get Operation";
	}

	if (error) {
		hif_pci_runtime_pm_warn(sc, error);
		return -EINVAL;
	}

	sc->pm_stats.runtime_put++;

	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_pm_runtime_put_auto(sc->dev);

	return 0;
}


/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 * reason
 * @hif_sc: pci context
 * @lock: runtime_pm lock being acquired
 *
 * Return 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
		*hif_sc, struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	/*
	 * We shouldn't be setting context->timeout to zero here when
	 * context is active as we will have a case where Timeout API's
	 * for the same context called back to back.
	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
	 * API to ensure the timeout version is no more active and
	 * list entry of this context will be deleted during allow suspend.
	 */
	if (lock->active)
		return 0;

	ret = __hif_pm_runtime_get(hif_sc->dev);

	/*
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do a runtime_put here as at a later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */

	if (ret < 0 && ret != -EINPROGRESS) {
		hif_sc->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(hif_sc,
					"Prevent Suspend Runtime PM Error");
	}

	/* Even on a get error the lock is recorded as held so that the
	 * matching allow_suspend keeps the accounting balanced.
	 */
	hif_sc->prevent_suspend_cnt++;

	lock->active = true;

	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);

	hif_sc->pm_stats.prevent_suspend++;

	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&hif_sc->pm_state)),
		  ret);

	return ret;
}

/* Release one prevent-suspend lock: drop it from the list, decrement the
 * count, and put the usage-count reference taken by prevent_suspend.
 * Caller holds hif_sc->runtime_lock.
 */
static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
					  struct hif_pm_runtime_lock *lock)
{
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(hif_sc);
	int ret = 0;
	int usage_count;

	if (hif_sc->prevent_suspend_cnt == 0)
		return ret;

	if (!lock->active)
		return ret;

	usage_count = atomic_read(&hif_sc->dev->power.usage_count);

	/*
	 * During Driver unload, platform driver increments the usage
	 * count to prevent any runtime suspend getting called.
	 * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the
	 * usage_count should be one. Ideally this shouldn't happen as
	 * context->active should be active for allow suspend to happen
	 * Handling this case here to prevent any failures.
	 */
	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
	     && usage_count == 1) || usage_count == 0) {
		hif_pci_runtime_pm_warn(hif_sc,
					"Allow without a prevent suspend");
		return -EINVAL;
	}

	list_del(&lock->list);

	hif_sc->prevent_suspend_cnt--;

	lock->active = false;
	lock->timeout = 0;

	hif_pm_runtime_mark_last_busy(hif_ctx);
	ret = hif_pm_runtime_put_auto(hif_sc->dev);

	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&hif_sc->pm_state)),
		  ret);

	hif_sc->pm_stats.allow_suspend++;
	return ret;
}

/**
 * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout
 * @data: calback data that is the pci context
 *
 * if runtime locks are acquired with a timeout, this function releases
 * the locks when the last runtime lock expires.
 *
 * dummy implementation until lock acquisition is implemented.
 */
static void hif_pm_runtime_lock_timeout_fn(void *data)
{
	struct hif_pci_softc *hif_sc = data;
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;

	spin_lock_bh(&hif_sc->runtime_lock);

	timer_expires = hif_sc->runtime_timer_expires;

	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                         CPU1 (timeout function)
	 * ----                         ----------------------
	 * spin_lock_irq
	 *                              timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                              spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		hif_sc->runtime_timer_expires = 0;
		/* Release every lock that was taken with a timeout. */
		list_for_each_entry_safe(context, temp,
				&hif_sc->prevent_suspend_list, list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(hif_sc, context);
				hif_sc->pm_stats.allow_suspend_timeout++;
			}
		}
	}

	spin_unlock_bh(&hif_sc->runtime_lock);
}

/* Public wrapper: take a (non-timeout) prevent-suspend lock. */
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *data)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_pm_runtime_lock *context = data;

	if (!sc->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&hif_sc->runtime_lock);
	/* Clear any timeout left by a previous timed acquisition. */
	context->timeout = 0;
	__hif_pm_runtime_prevent_suspend(hif_sc, context);
	spin_unlock_bh(&hif_sc->runtime_lock);

	return 0;
}

/* Public wrapper: release a prevent-suspend lock and tear down the
 * shared timeout timer once no timed lock remains.
 */
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *data)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_pm_runtime_lock *context = data;

	if (!sc->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&hif_sc->runtime_lock);

	__hif_pm_runtime_allow_suspend(hif_sc, context);

	/* The list can be empty as well in cases where
	 * we have one context in the list and the allow
	 * suspend came before the timer expires and we delete
	 * context above from the list.
	 * When list is empty prevent_suspend count will be zero.
	 */
	if (hif_sc->prevent_suspend_cnt == 0 &&
	    hif_sc->runtime_timer_expires > 0) {
		qdf_timer_free(&hif_sc->runtime_timer);
		hif_sc->runtime_timer_expires = 0;
	}

	spin_unlock_bh(&hif_sc->runtime_lock);

	return 0;
}

/**
 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
 * @ol_sc: HIF context
 * @lock: which lock is being acquired
 * @delay: Timeout in milliseconds
 *
 * Prevent runtime suspend with a timeout after which runtime suspend would be
 * allowed. This API uses a single timer to allow the suspend and timer is
 * modified if the timeout is changed before timer fires.
 * If the timeout is less than autosuspend_delay then use mark_last_busy instead
 * of starting the timer.
 *
 * It is wise to try not to use this API and correct the design if possible.
 *
 * Return: 0 on success and negative error code on failure
 */
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);

	int ret = 0;
	unsigned long expires;
	struct hif_pm_runtime_lock *context = lock;

	if (hif_is_load_or_unload_in_progress(sc)) {
		HIF_ERROR("%s: Load/unload in progress, ignore!",
			  __func__);
		return -EINVAL;
	}

	if (hif_is_recovery_in_progress(sc)) {
		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
		return -EINVAL;
	}

	if (!sc->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	/*
	 * Don't use internal timer if the timeout is less than auto suspend
	 * delay.
	 */
	if (delay <= hif_sc->dev->power.autosuspend_delay) {
		hif_pm_request_resume(hif_sc->dev);
		hif_pm_runtime_mark_last_busy(ol_sc);
		return ret;
	}

	expires = jiffies + msecs_to_jiffies(delay);
	/* jiffies value 0 is reserved as "timer disarmed"; bump past it */
	expires += !expires;

	spin_lock_bh(&hif_sc->runtime_lock);

	context->timeout = delay;
	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
	hif_sc->pm_stats.prevent_suspend_timeout++;

	/* Modify the timer only if new timeout is after already configured
	 * timeout
	 */
	if (time_after(expires, hif_sc->runtime_timer_expires)) {
		qdf_timer_mod(&hif_sc->runtime_timer, delay);
		hif_sc->runtime_timer_expires = expires;
	}

	spin_unlock_bh(&hif_sc->runtime_lock);

	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&hif_sc->pm_state)),
		  delay, ret);

	return ret;
}

/**
 * hif_runtime_lock_init() - API to initialize Runtime PM context
 * @lock: QDF lock wrapper that receives the new context
 * @name: Context name
 *
 * This API initializes the Runtime PM context of the caller and
 * stores the pointer in @lock.
 *
 * Return: 0 on success, -ENOMEM if the context cannot be allocated
 */
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{
	struct hif_pm_runtime_lock *context;

	HIF_INFO("Initializing Runtime PM wakelock %s", name);

	context = qdf_mem_malloc(sizeof(*context));
	if (!context)
		return -ENOMEM;

	context->name = name ? name : "Default";
	lock->lock = context;

	return 0;
}

/**
 * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
 * @hif_ctx: HIF context
 * @data: Runtime PM context
 *
 * Return: void
 */
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *data)
{
	struct hif_pm_runtime_lock *context = data;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!context) {
		HIF_ERROR("Runtime PM wakelock context is NULL");
		return;
	}

	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);

	/*
	 * Ensure to delete the context list entry and reduce the usage count
	 * before freeing the context if context is active.
	 * NOTE(review): if sc is NULL an active context is freed without
	 * being unlinked from the prevent_suspend list — confirm callers
	 * never hit this with an active lock.
	 */
	if (sc) {
		spin_lock_bh(&sc->runtime_lock);
		__hif_pm_runtime_allow_suspend(sc, context);
		spin_unlock_bh(&sc->runtime_lock);
	}

	qdf_mem_free(context);
}

/**
 * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
 * @hif_ctx: HIF context
 *
 * Return: true for runtime suspended, otherwise false
 */
bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	return qdf_atomic_read(&sc->pm_state) ==
		HIF_PM_RUNTIME_STATE_SUSPENDED;
}

/**
 * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
 * @hif_ctx: HIF context
 *
 * monitor_wake_intr variable can be used to indicate if driver expects wake
 * MSI for runtime PM
 *
 * Return: monitor_wake_intr variable
 */
int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	return qdf_atomic_read(&sc->monitor_wake_intr);
}

/**
 * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
 * @hif_ctx: HIF context
 * @val: value to set
 *
 * monitor_wake_intr variable can be used to indicate if driver expects wake
 * MSI for runtime PM
 *
 * Return: void
 */
void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
					  int val)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	qdf_atomic_set(&sc->monitor_wake_intr, val);
}

/**
 * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark my data path
 * @hif_ctx: HIF context
 *
 * Return: void
 */
void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

qdf_atomic_set(&sc->pm_dp_rx_busy, 1); 4570 sc->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs(); 4571 4572 hif_pm_runtime_mark_last_busy(hif_ctx); 4573 } 4574 4575 /** 4576 * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy by dp rx 4577 * @hif_ctx: HIF context 4578 * 4579 * Return: dp rx busy set value 4580 */ 4581 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx) 4582 { 4583 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4584 4585 if (!sc) 4586 return 0; 4587 4588 return qdf_atomic_read(&sc->pm_dp_rx_busy); 4589 } 4590 4591 /** 4592 * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp 4593 * @hif_ctx: HIF context 4594 * 4595 * Return: timestamp of last mark busy by dp rx 4596 */ 4597 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx) 4598 { 4599 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4600 4601 if (!sc) 4602 return 0; 4603 4604 return sc->dp_last_busy_timestamp; 4605 } 4606 4607 #endif /* FEATURE_RUNTIME_PM */ 4608 4609 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id) 4610 { 4611 struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); 4612 4613 /* legacy case only has one irq */ 4614 return pci_scn->irq; 4615 } 4616 4617 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset) 4618 { 4619 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 4620 struct hif_target_info *tgt_info; 4621 4622 tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn)); 4623 4624 if (tgt_info->target_type == TARGET_TYPE_QCA6290 || 4625 tgt_info->target_type == TARGET_TYPE_QCA6390 || 4626 tgt_info->target_type == TARGET_TYPE_QCA6490 || 4627 tgt_info->target_type == TARGET_TYPE_QCA8074) { 4628 /* 4629 * Need to consider offset's memtype for QCA6290/QCA8074, 4630 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be 4631 * well initialized/defined. 
4632 */ 4633 return 0; 4634 } 4635 4636 if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) 4637 || (offset + sizeof(unsigned int) <= sc->mem_len)) { 4638 return 0; 4639 } 4640 4641 HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n", 4642 offset, (uint32_t)(offset + sizeof(unsigned int)), 4643 sc->mem_len); 4644 4645 return -EINVAL; 4646 } 4647 4648 /** 4649 * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver 4650 * @scn: hif context 4651 * 4652 * Return: true if soc needs driver bmi otherwise false 4653 */ 4654 bool hif_pci_needs_bmi(struct hif_softc *scn) 4655 { 4656 return !ce_srng_based(scn); 4657 } 4658 4659 #ifdef FORCE_WAKE 4660 int hif_force_wake_request(struct hif_opaque_softc *hif_handle) 4661 { 4662 uint32_t timeout = 0, value; 4663 struct hif_softc *scn = (struct hif_softc *)hif_handle; 4664 struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); 4665 4666 if (pld_force_wake_request(scn->qdf_dev->dev)) { 4667 hif_err("force wake request send failed"); 4668 return -EINVAL; 4669 } 4670 4671 HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1); 4672 while (!pld_is_device_awake(scn->qdf_dev->dev) && 4673 timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) { 4674 qdf_mdelay(FORCE_WAKE_DELAY_MS); 4675 timeout += FORCE_WAKE_DELAY_MS; 4676 } 4677 4678 if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) { 4679 hif_err("Unable to wake up mhi"); 4680 HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1); 4681 return -EINVAL; 4682 } 4683 HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1); 4684 hif_write32_mb(scn, 4685 scn->mem + 4686 PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG, 4687 0); 4688 hif_write32_mb(scn, 4689 scn->mem + 4690 PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, 4691 1); 4692 4693 HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1); 4694 /* 4695 * do not reset the timeout 4696 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms 4697 */ 4698 do { 4699 value = 4700 hif_read32_mb(scn, 
4701 scn->mem + 4702 PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG); 4703 if (value) 4704 break; 4705 qdf_mdelay(FORCE_WAKE_DELAY_MS); 4706 timeout += FORCE_WAKE_DELAY_MS; 4707 } while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS); 4708 4709 if (!value) { 4710 hif_err("failed handshake mechanism"); 4711 HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1); 4712 return -ETIMEDOUT; 4713 } 4714 4715 HIF_STATS_INC(pci_scn, soc_force_wake_success, 1); 4716 4717 return 0; 4718 } 4719 4720 int hif_force_wake_release(struct hif_opaque_softc *hif_handle) 4721 { 4722 int ret; 4723 struct hif_softc *scn = (struct hif_softc *)hif_handle; 4724 struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); 4725 4726 ret = pld_force_wake_release(scn->qdf_dev->dev); 4727 if (ret) { 4728 hif_err("force wake release failure"); 4729 HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1); 4730 return ret; 4731 } 4732 4733 HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1); 4734 hif_write32_mb(scn, 4735 scn->mem + 4736 PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, 4737 0); 4738 HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1); 4739 return 0; 4740 } 4741 4742 void hif_print_pci_stats(struct hif_pci_softc *pci_handle) 4743 { 4744 hif_debug("mhi_force_wake_request_vote: %d", 4745 pci_handle->stats.mhi_force_wake_request_vote); 4746 hif_debug("mhi_force_wake_failure: %d", 4747 pci_handle->stats.mhi_force_wake_failure); 4748 hif_debug("mhi_force_wake_success: %d", 4749 pci_handle->stats.mhi_force_wake_success); 4750 hif_debug("soc_force_wake_register_write_success: %d", 4751 pci_handle->stats.soc_force_wake_register_write_success); 4752 hif_debug("soc_force_wake_failure: %d", 4753 pci_handle->stats.soc_force_wake_failure); 4754 hif_debug("soc_force_wake_success: %d", 4755 pci_handle->stats.soc_force_wake_success); 4756 hif_debug("mhi_force_wake_release_failure: %d", 4757 pci_handle->stats.mhi_force_wake_release_failure); 4758 hif_debug("mhi_force_wake_release_success: %d", 4759 
pci_handle->stats.mhi_force_wake_release_success); 4760 hif_debug("oc_force_wake_release_success: %d", 4761 pci_handle->stats.soc_force_wake_release_success); 4762 } 4763 #endif /* FORCE_WAKE */ 4764 4765