1 /* 2 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <linux/pci.h> 20 #include <linux/slab.h> 21 #include <linux/interrupt.h> 22 #include <linux/if_arp.h> 23 #ifdef CONFIG_PCI_MSM 24 #include <linux/msm_pcie.h> 25 #endif 26 #include "hif_io32.h" 27 #include "if_pci.h" 28 #include "hif.h" 29 #include "target_type.h" 30 #include "hif_main.h" 31 #include "ce_main.h" 32 #include "ce_api.h" 33 #include "ce_internal.h" 34 #include "ce_reg.h" 35 #include "ce_bmi.h" 36 #include "regtable.h" 37 #include "hif_hw_version.h" 38 #include <linux/debugfs.h> 39 #include <linux/seq_file.h> 40 #include "qdf_status.h" 41 #include "qdf_atomic.h" 42 #include "pld_common.h" 43 #include "mp_dev.h" 44 #include "hif_debug.h" 45 46 #include "if_pci_internal.h" 47 #include "ce_tasklet.h" 48 #include "targaddrs.h" 49 #include "hif_exec.h" 50 51 #include "pci_api.h" 52 #include "ahb_api.h" 53 54 /* Maximum ms timeout for host to wake up target */ 55 #define PCIE_WAKE_TIMEOUT 1000 56 #define RAMDUMP_EVENT_TIMEOUT 2500 57 58 /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent 59 * PCIe data bus error 60 * As workaround for this issue - changing the reset sequence to 61 * use TargetCPU warm 
reset * instead of SOC_GLOBAL_RESET 62 */ 63 #define CPU_WARM_RESET_WAR 64 65 /* 66 * Top-level interrupt handler for all PCI interrupts from a Target. 67 * When a block of MSI interrupts is allocated, this top-level handler 68 * is not used; instead, we directly call the correct sub-handler. 69 */ 70 struct ce_irq_reg_table { 71 uint32_t irq_enable; 72 uint32_t irq_status; 73 }; 74 75 #ifndef QCA_WIFI_3_0_ADRASTEA 76 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc) 77 { 78 } 79 #else 80 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc) 81 { 82 struct hif_softc *scn = HIF_GET_SOFTC(sc); 83 unsigned int target_enable0, target_enable1; 84 unsigned int target_cause0, target_cause1; 85 86 target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0); 87 target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1); 88 target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0); 89 target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1); 90 91 if ((target_enable0 & target_cause0) || 92 (target_enable1 & target_cause1)) { 93 hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0); 94 hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0); 95 96 if (scn->notice_send) 97 pld_intr_notify_q6(sc->dev); 98 } 99 } 100 #endif 101 102 103 /** 104 * pci_dispatch_ce_irq() - pci_dispatch_ce_irq 105 * @scn: scn 106 * 107 * Return: N/A 108 */ 109 static void pci_dispatch_interrupt(struct hif_softc *scn) 110 { 111 uint32_t intr_summary; 112 int id; 113 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 114 115 if (scn->hif_init_done != true) 116 return; 117 118 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 119 return; 120 121 intr_summary = CE_INTERRUPT_SUMMARY(scn); 122 123 if (intr_summary == 0) { 124 if ((scn->target_status != TARGET_STATUS_RESET) && 125 (!qdf_atomic_read(&scn->link_suspended))) { 126 127 hif_write32_mb(scn, scn->mem + 128 (SOC_CORE_BASE_ADDRESS | 129 PCIE_INTR_ENABLE_ADDRESS), 130 HOST_GROUP0_MASK); 
131 132 hif_read32_mb(scn, scn->mem + 133 (SOC_CORE_BASE_ADDRESS | 134 PCIE_INTR_ENABLE_ADDRESS)); 135 } 136 Q_TARGET_ACCESS_END(scn); 137 return; 138 } 139 Q_TARGET_ACCESS_END(scn); 140 141 scn->ce_irq_summary = intr_summary; 142 for (id = 0; intr_summary && (id < scn->ce_count); id++) { 143 if (intr_summary & (1 << id)) { 144 intr_summary &= ~(1 << id); 145 ce_dispatch_interrupt(id, &hif_state->tasklets[id]); 146 } 147 } 148 } 149 150 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg) 151 { 152 struct hif_pci_softc *sc = (struct hif_pci_softc *)arg; 153 struct hif_softc *scn = HIF_GET_SOFTC(sc); 154 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg); 155 156 volatile int tmp; 157 uint16_t val = 0; 158 uint32_t bar0 = 0; 159 uint32_t fw_indicator_address, fw_indicator; 160 bool ssr_irq = false; 161 unsigned int host_cause, host_enable; 162 163 if (LEGACY_INTERRUPTS(sc)) { 164 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 165 return IRQ_HANDLED; 166 167 if (ADRASTEA_BU) { 168 host_enable = hif_read32_mb(sc, sc->mem + 169 PCIE_INTR_ENABLE_ADDRESS); 170 host_cause = hif_read32_mb(sc, sc->mem + 171 PCIE_INTR_CAUSE_ADDRESS); 172 if (!(host_enable & host_cause)) { 173 hif_pci_route_adrastea_interrupt(sc); 174 return IRQ_HANDLED; 175 } 176 } 177 178 /* Clear Legacy PCI line interrupts 179 * IMPORTANT: INTR_CLR regiser has to be set 180 * after INTR_ENABLE is set to 0, 181 * otherwise interrupt can not be really cleared 182 */ 183 hif_write32_mb(sc, sc->mem + 184 (SOC_CORE_BASE_ADDRESS | 185 PCIE_INTR_ENABLE_ADDRESS), 0); 186 187 hif_write32_mb(sc, sc->mem + 188 (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS), 189 ADRASTEA_BU ? 
190 (host_enable & host_cause) : 191 HOST_GROUP0_MASK); 192 193 if (ADRASTEA_BU) 194 hif_write32_mb(sc, sc->mem + 0x2f100c, 195 (host_cause >> 1)); 196 197 /* IMPORTANT: this extra read transaction is required to 198 * flush the posted write buffer 199 */ 200 if (!ADRASTEA_BU) { 201 tmp = 202 hif_read32_mb(sc, sc->mem + 203 (SOC_CORE_BASE_ADDRESS | 204 PCIE_INTR_ENABLE_ADDRESS)); 205 206 if (tmp == 0xdeadbeef) { 207 HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!", 208 __func__); 209 210 pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val); 211 HIF_ERROR("%s: PCI Vendor ID = 0x%04x", 212 __func__, val); 213 214 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val); 215 HIF_ERROR("%s: PCI Device ID = 0x%04x", 216 __func__, val); 217 218 pci_read_config_word(sc->pdev, PCI_COMMAND, &val); 219 HIF_ERROR("%s: PCI Command = 0x%04x", __func__, 220 val); 221 222 pci_read_config_word(sc->pdev, PCI_STATUS, &val); 223 HIF_ERROR("%s: PCI Status = 0x%04x", __func__, 224 val); 225 226 pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, 227 &bar0); 228 HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__, 229 bar0); 230 231 HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x", 232 __func__, 233 hif_read32_mb(sc, sc->mem + 234 PCIE_LOCAL_BASE_ADDRESS 235 + RTC_STATE_ADDRESS)); 236 HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x", 237 __func__, 238 hif_read32_mb(sc, sc->mem + 239 PCIE_LOCAL_BASE_ADDRESS 240 + PCIE_SOC_WAKE_ADDRESS)); 241 HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x", 242 __func__, 243 hif_read32_mb(sc, sc->mem + 0x80008), 244 hif_read32_mb(sc, sc->mem + 0x8000c)); 245 HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x", 246 __func__, 247 hif_read32_mb(sc, sc->mem + 0x80010), 248 hif_read32_mb(sc, sc->mem + 0x80014)); 249 HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x", 250 __func__, 251 hif_read32_mb(sc, sc->mem + 0x80018), 252 hif_read32_mb(sc, sc->mem + 0x8001c)); 253 QDF_BUG(0); 254 } 255 256 PCI_CLR_CAUSE0_REGISTER(sc); 257 } 258 259 if (HAS_FW_INDICATOR) { 260 
fw_indicator_address = hif_state->fw_indicator_address; 261 fw_indicator = A_TARGET_READ(scn, fw_indicator_address); 262 if ((fw_indicator != ~0) && 263 (fw_indicator & FW_IND_EVENT_PENDING)) 264 ssr_irq = true; 265 } 266 267 if (Q_TARGET_ACCESS_END(scn) < 0) 268 return IRQ_HANDLED; 269 } 270 /* TBDXXX: Add support for WMAC */ 271 272 if (ssr_irq) { 273 sc->irq_event = irq; 274 qdf_atomic_set(&scn->tasklet_from_intr, 1); 275 276 qdf_atomic_inc(&scn->active_tasklet_cnt); 277 tasklet_schedule(&sc->intr_tq); 278 } else { 279 pci_dispatch_interrupt(scn); 280 } 281 282 return IRQ_HANDLED; 283 } 284 285 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem) 286 { 287 return 1; /* FIX THIS */ 288 } 289 290 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size) 291 { 292 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 293 int i = 0; 294 295 if (!irq || !size) { 296 return -EINVAL; 297 } 298 299 if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) { 300 irq[0] = sc->irq; 301 return 1; 302 } 303 304 if (sc->num_msi_intrs > size) { 305 qdf_print("Not enough space in irq buffer to return irqs"); 306 return -EINVAL; 307 } 308 309 for (i = 0; i < sc->num_msi_intrs; i++) { 310 irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL; 311 } 312 313 return sc->num_msi_intrs; 314 } 315 316 317 /** 318 * hif_pci_cancel_deferred_target_sleep() - cancels the defered target sleep 319 * @scn: hif_softc 320 * 321 * Return: void 322 */ 323 #if CONFIG_ATH_PCIE_MAX_PERF == 0 324 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn) 325 { 326 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 327 A_target_id_t pci_addr = scn->mem; 328 329 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); 330 /* 331 * If the deferred sleep timer is running cancel it 332 * and put the soc into sleep. 
333 */ 334 if (hif_state->fake_sleep == true) { 335 qdf_timer_stop(&hif_state->sleep_timer); 336 if (hif_state->verified_awake == false) { 337 hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + 338 PCIE_SOC_WAKE_ADDRESS, 339 PCIE_SOC_WAKE_RESET); 340 } 341 hif_state->fake_sleep = false; 342 } 343 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); 344 } 345 #else 346 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn) 347 { 348 } 349 #endif 350 351 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \ 352 hif_read32_mb(sc, (char *)(mem) + \ 353 PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)) 354 355 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \ 356 hif_write32_mb(sc, ((char *)(mem) + \ 357 PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val)) 358 359 #ifdef QCA_WIFI_3_0 360 /** 361 * hif_targ_is_awake() - check to see if the target is awake 362 * @hif_ctx: hif context 363 * 364 * emulation never goes to sleep 365 * 366 * Return: true if target is awake 367 */ 368 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem) 369 { 370 return true; 371 } 372 #else 373 /** 374 * hif_targ_is_awake() - check to see if the target is awake 375 * @hif_ctx: hif context 376 * 377 * Return: true if the targets clocks are on 378 */ 379 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem) 380 { 381 uint32_t val; 382 383 if (scn->recovery) 384 return false; 385 val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS 386 + RTC_STATE_ADDRESS); 387 return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON; 388 } 389 #endif 390 391 #define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */ 392 static void hif_pci_device_reset(struct hif_pci_softc *sc) 393 { 394 void __iomem *mem = sc->mem; 395 int i; 396 uint32_t val; 397 struct hif_softc *scn = HIF_GET_SOFTC(sc); 398 399 if (!scn->hostdef) 400 return; 401 402 /* NB: Don't check resetok here. This form of reset 403 * is integral to correct operation. 
404 */ 405 406 if (!SOC_GLOBAL_RESET_ADDRESS) 407 return; 408 409 if (!mem) 410 return; 411 412 HIF_ERROR("%s: Reset Device", __func__); 413 414 /* 415 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first 416 * writing WAKE_V, the Target may scribble over Host memory! 417 */ 418 A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS, 419 PCIE_SOC_WAKE_V_MASK); 420 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 421 if (hif_targ_is_awake(scn, mem)) 422 break; 423 424 qdf_mdelay(1); 425 } 426 427 /* Put Target, including PCIe, into RESET. */ 428 val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS); 429 val |= 1; 430 A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val); 431 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 432 if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) & 433 RTC_STATE_COLD_RESET_MASK) 434 break; 435 436 qdf_mdelay(1); 437 } 438 439 /* Pull Target, including PCIe, out of RESET. */ 440 val &= ~1; 441 A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val); 442 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 443 if (! 444 (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) & 445 RTC_STATE_COLD_RESET_MASK)) 446 break; 447 448 qdf_mdelay(1); 449 } 450 451 A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS, 452 PCIE_SOC_WAKE_RESET); 453 } 454 455 /* CPU warm reset function 456 * Steps: 457 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset 458 * 2. Clear the FW_INDICATOR_ADDRESS -so Traget CPU initializes FW 459 * correctly on WARM reset 460 * 3. Clear TARGET CPU LF timer interrupt 461 * 4. Reset all CEs to clear any pending CE tarnsactions 462 * 5. Warm reset CPU 463 */ 464 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc) 465 { 466 void __iomem *mem = sc->mem; 467 int i; 468 uint32_t val; 469 uint32_t fw_indicator; 470 struct hif_softc *scn = HIF_GET_SOFTC(sc); 471 472 /* NB: Don't check resetok here. This form of reset is 473 * integral to correct operation. 
474 */ 475 476 if (!mem) 477 return; 478 479 HIF_INFO_MED("%s: Target Warm Reset", __func__); 480 481 /* 482 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first 483 * writing WAKE_V, the Target may scribble over Host memory! 484 */ 485 A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS, 486 PCIE_SOC_WAKE_V_MASK); 487 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 488 if (hif_targ_is_awake(scn, mem)) 489 break; 490 qdf_mdelay(1); 491 } 492 493 /* 494 * Disable Pending interrupts 495 */ 496 val = 497 hif_read32_mb(sc, mem + 498 (SOC_CORE_BASE_ADDRESS | 499 PCIE_INTR_CAUSE_ADDRESS)); 500 HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__, 501 (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val); 502 /* Target CPU Intr Cause */ 503 val = hif_read32_mb(sc, mem + 504 (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS)); 505 HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val); 506 507 val = 508 hif_read32_mb(sc, mem + 509 (SOC_CORE_BASE_ADDRESS | 510 PCIE_INTR_ENABLE_ADDRESS)); 511 hif_write32_mb(sc, (mem + 512 (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0); 513 hif_write32_mb(sc, (mem + 514 (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)), 515 HOST_GROUP0_MASK); 516 517 qdf_mdelay(100); 518 519 /* Clear FW_INDICATOR_ADDRESS */ 520 if (HAS_FW_INDICATOR) { 521 fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS); 522 hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0); 523 } 524 525 /* Clear Target LF Timer interrupts */ 526 val = 527 hif_read32_mb(sc, mem + 528 (RTC_SOC_BASE_ADDRESS + 529 SOC_LF_TIMER_CONTROL0_ADDRESS)); 530 HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__, 531 (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val); 532 val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK; 533 hif_write32_mb(sc, mem + 534 (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), 535 val); 536 537 /* Reset CE */ 538 val = 539 hif_read32_mb(sc, mem + 540 (RTC_SOC_BASE_ADDRESS | 541 SOC_RESET_CONTROL_ADDRESS)); 542 val |= 
SOC_RESET_CONTROL_CE_RST_MASK; 543 hif_write32_mb(sc, (mem + 544 (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)), 545 val); 546 val = 547 hif_read32_mb(sc, mem + 548 (RTC_SOC_BASE_ADDRESS | 549 SOC_RESET_CONTROL_ADDRESS)); 550 qdf_mdelay(10); 551 552 /* CE unreset */ 553 val &= ~SOC_RESET_CONTROL_CE_RST_MASK; 554 hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS | 555 SOC_RESET_CONTROL_ADDRESS), val); 556 val = 557 hif_read32_mb(sc, mem + 558 (RTC_SOC_BASE_ADDRESS | 559 SOC_RESET_CONTROL_ADDRESS)); 560 qdf_mdelay(10); 561 562 /* Read Target CPU Intr Cause */ 563 val = hif_read32_mb(sc, mem + 564 (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS)); 565 HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x", 566 __func__, val); 567 568 /* CPU warm RESET */ 569 val = 570 hif_read32_mb(sc, mem + 571 (RTC_SOC_BASE_ADDRESS | 572 SOC_RESET_CONTROL_ADDRESS)); 573 val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK; 574 hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS | 575 SOC_RESET_CONTROL_ADDRESS), val); 576 val = 577 hif_read32_mb(sc, mem + 578 (RTC_SOC_BASE_ADDRESS | 579 SOC_RESET_CONTROL_ADDRESS)); 580 HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x", 581 __func__, val); 582 583 qdf_mdelay(100); 584 HIF_INFO_MED("%s: Target Warm reset complete", __func__); 585 586 } 587 588 #ifndef QCA_WIFI_3_0 589 /* only applicable to legacy ce */ 590 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx) 591 { 592 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 593 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 594 void __iomem *mem = sc->mem; 595 uint32_t val; 596 597 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 598 return ATH_ISR_NOSCHED; 599 val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS); 600 if (Q_TARGET_ACCESS_END(scn) < 0) 601 return ATH_ISR_SCHED; 602 603 HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val); 604 605 if (val & FW_IND_HELPER) 606 return 0; 607 608 return 1; 609 } 610 #endif 611 612 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx) 613 { 614 
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 615 uint16_t device_id = 0; 616 uint32_t val; 617 uint16_t timeout_count = 0; 618 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 619 620 /* Check device ID from PCIe configuration space for link status */ 621 pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id); 622 if (device_id != sc->devid) { 623 HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)", 624 __func__, device_id, sc->devid); 625 return -EACCES; 626 } 627 628 /* Check PCIe local register for bar/memory access */ 629 val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 630 RTC_STATE_ADDRESS); 631 HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val); 632 633 /* Try to wake up taget if it sleeps */ 634 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 635 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 636 HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__, 637 hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 638 PCIE_SOC_WAKE_ADDRESS)); 639 640 /* Check if taget can be woken up */ 641 while (!hif_targ_is_awake(scn, sc->mem)) { 642 if (timeout_count >= PCIE_WAKE_TIMEOUT) { 643 HIF_ERROR("%s: wake up timeout, %08x, %08x", 644 __func__, 645 hif_read32_mb(sc, sc->mem + 646 PCIE_LOCAL_BASE_ADDRESS + 647 RTC_STATE_ADDRESS), 648 hif_read32_mb(sc, sc->mem + 649 PCIE_LOCAL_BASE_ADDRESS + 650 PCIE_SOC_WAKE_ADDRESS)); 651 return -EACCES; 652 } 653 654 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 655 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 656 657 qdf_mdelay(100); 658 timeout_count += 100; 659 } 660 661 /* Check Power register for SoC internal bus issues */ 662 val = 663 hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS + 664 SOC_POWER_REG_OFFSET); 665 HIF_INFO_MED("%s: Power register is %08x", __func__, val); 666 667 return 0; 668 } 669 670 /** 671 * __hif_pci_dump_registers(): dump other PCI debug registers 672 * @scn: struct hif_softc 673 * 674 * This function dumps pci debug registers. 
The parrent function 675 * dumps the copy engine registers before calling this function. 676 * 677 * Return: void 678 */ 679 static void __hif_pci_dump_registers(struct hif_softc *scn) 680 { 681 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 682 void __iomem *mem = sc->mem; 683 uint32_t val, i, j; 684 uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; 685 uint32_t ce_base; 686 687 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 688 return; 689 690 /* DEBUG_INPUT_SEL_SRC = 0x6 */ 691 val = 692 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 693 WLAN_DEBUG_INPUT_SEL_OFFSET); 694 val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK; 695 val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6); 696 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 697 WLAN_DEBUG_INPUT_SEL_OFFSET, val); 698 699 /* DEBUG_CONTROL_ENABLE = 0x1 */ 700 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 701 WLAN_DEBUG_CONTROL_OFFSET); 702 val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK; 703 val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1); 704 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 705 WLAN_DEBUG_CONTROL_OFFSET, val); 706 707 HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__, 708 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 709 WLAN_DEBUG_INPUT_SEL_OFFSET), 710 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 711 WLAN_DEBUG_CONTROL_OFFSET)); 712 713 HIF_INFO_MED("%s: Debug CE", __func__); 714 /* Loop CE debug output */ 715 /* AMBA_DEBUG_BUS_SEL = 0xc */ 716 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 717 AMBA_DEBUG_BUS_OFFSET); 718 val &= ~AMBA_DEBUG_BUS_SEL_MASK; 719 val |= AMBA_DEBUG_BUS_SEL_SET(0xc); 720 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, 721 val); 722 723 for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) { 724 /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */ 725 val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS + 726 CE_WRAPPER_DEBUG_OFFSET); 727 val &= ~CE_WRAPPER_DEBUG_SEL_MASK; 728 val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]); 729 hif_write32_mb(sc, mem + 
CE_WRAPPER_BASE_ADDRESS + 730 CE_WRAPPER_DEBUG_OFFSET, val); 731 732 HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x", 733 __func__, wrapper_idx[i], 734 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 735 AMBA_DEBUG_BUS_OFFSET), 736 hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS + 737 CE_WRAPPER_DEBUG_OFFSET)); 738 739 if (wrapper_idx[i] <= 7) { 740 for (j = 0; j <= 5; j++) { 741 ce_base = CE_BASE_ADDRESS(wrapper_idx[i]); 742 /* For (j=0~5) write CE_DEBUG_SEL = j */ 743 val = 744 hif_read32_mb(sc, mem + ce_base + 745 CE_DEBUG_OFFSET); 746 val &= ~CE_DEBUG_SEL_MASK; 747 val |= CE_DEBUG_SEL_SET(j); 748 hif_write32_mb(sc, mem + ce_base + 749 CE_DEBUG_OFFSET, val); 750 751 /* read (@gpio_athr_wlan_reg) 752 * WLAN_DEBUG_OUT_DATA 753 */ 754 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS 755 + WLAN_DEBUG_OUT_OFFSET); 756 val = WLAN_DEBUG_OUT_DATA_GET(val); 757 758 HIF_INFO_MED("%s: module%d: cedbg: %x out: %x", 759 __func__, j, 760 hif_read32_mb(sc, mem + ce_base + 761 CE_DEBUG_OFFSET), val); 762 } 763 } else { 764 /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */ 765 val = 766 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 767 WLAN_DEBUG_OUT_OFFSET); 768 val = WLAN_DEBUG_OUT_DATA_GET(val); 769 770 HIF_INFO_MED("%s: out: %x", __func__, val); 771 } 772 } 773 774 HIF_INFO_MED("%s: Debug PCIe:", __func__); 775 /* Loop PCIe debug output */ 776 /* Write AMBA_DEBUG_BUS_SEL = 0x1c */ 777 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 778 AMBA_DEBUG_BUS_OFFSET); 779 val &= ~AMBA_DEBUG_BUS_SEL_MASK; 780 val |= AMBA_DEBUG_BUS_SEL_SET(0x1c); 781 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 782 AMBA_DEBUG_BUS_OFFSET, val); 783 784 for (i = 0; i <= 8; i++) { 785 /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */ 786 val = 787 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 788 AMBA_DEBUG_BUS_OFFSET); 789 val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK; 790 val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i); 791 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 792 AMBA_DEBUG_BUS_OFFSET, 
val); 793 794 /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */ 795 val = 796 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 797 WLAN_DEBUG_OUT_OFFSET); 798 val = WLAN_DEBUG_OUT_DATA_GET(val); 799 800 HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__, 801 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 802 WLAN_DEBUG_OUT_OFFSET), val, 803 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 804 WLAN_DEBUG_OUT_OFFSET)); 805 } 806 807 Q_TARGET_ACCESS_END(scn); 808 } 809 810 /** 811 * hif_dump_registers(): dump bus debug registers 812 * @scn: struct hif_opaque_softc 813 * 814 * This function dumps hif bus debug registers 815 * 816 * Return: 0 for success or error code 817 */ 818 int hif_pci_dump_registers(struct hif_softc *hif_ctx) 819 { 820 int status; 821 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 822 823 status = hif_dump_ce_registers(scn); 824 825 if (status) 826 HIF_ERROR("%s: Dump CE Registers Failed", __func__); 827 828 /* dump non copy engine pci registers */ 829 __hif_pci_dump_registers(scn); 830 831 return 0; 832 } 833 834 #ifdef HIF_CONFIG_SLUB_DEBUG_ON 835 836 /* worker thread to schedule wlan_tasklet in SLUB debug build */ 837 static void reschedule_tasklet_work_handler(void *arg) 838 { 839 struct hif_pci_softc *sc = arg; 840 struct hif_softc *scn = HIF_GET_SOFTC(sc); 841 842 if (!scn) { 843 HIF_ERROR("%s: hif_softc is NULL\n", __func__); 844 return; 845 } 846 847 if (scn->hif_init_done == false) { 848 HIF_ERROR("%s: wlan driver is unloaded", __func__); 849 return; 850 } 851 852 tasklet_schedule(&sc->intr_tq); 853 } 854 855 /** 856 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet 857 * work 858 * @sc: HIF PCI Context 859 * 860 * Return: void 861 */ 862 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) 863 { 864 qdf_create_work(0, &sc->reschedule_tasklet_work, 865 reschedule_tasklet_work_handler, NULL); 866 } 867 #else 868 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { } 869 #endif /* 
HIF_CONFIG_SLUB_DEBUG_ON */ 870 871 void wlan_tasklet(unsigned long data) 872 { 873 struct hif_pci_softc *sc = (struct hif_pci_softc *)data; 874 struct hif_softc *scn = HIF_GET_SOFTC(sc); 875 876 if (scn->hif_init_done == false) 877 goto end; 878 879 if (qdf_atomic_read(&scn->link_suspended)) 880 goto end; 881 882 if (!ADRASTEA_BU) { 883 hif_fw_interrupt_handler(sc->irq_event, scn); 884 if (scn->target_status == TARGET_STATUS_RESET) 885 goto end; 886 } 887 888 end: 889 qdf_atomic_set(&scn->tasklet_from_intr, 0); 890 qdf_atomic_dec(&scn->active_tasklet_cnt); 891 } 892 893 #ifdef FEATURE_RUNTIME_PM 894 static const char *hif_pm_runtime_state_to_string(uint32_t state) 895 { 896 switch (state) { 897 case HIF_PM_RUNTIME_STATE_NONE: 898 return "INIT_STATE"; 899 case HIF_PM_RUNTIME_STATE_ON: 900 return "ON"; 901 case HIF_PM_RUNTIME_STATE_RESUMING: 902 return "RESUMING"; 903 case HIF_PM_RUNTIME_STATE_SUSPENDING: 904 return "SUSPENDING"; 905 case HIF_PM_RUNTIME_STATE_SUSPENDED: 906 return "SUSPENDED"; 907 default: 908 return "INVALID STATE"; 909 } 910 } 911 912 #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \ 913 seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name) 914 /** 915 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API 916 * @sc: hif_pci_softc context 917 * @msg: log message 918 * 919 * log runtime pm stats when something seems off. 
920 * 921 * Return: void 922 */ 923 static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg) 924 { 925 struct hif_pm_runtime_lock *ctx; 926 927 HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d", 928 msg, atomic_read(&sc->dev->power.usage_count), 929 hif_pm_runtime_state_to_string( 930 atomic_read(&sc->pm_state)), 931 sc->prevent_suspend_cnt); 932 933 HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d", 934 sc->dev->power.runtime_status, 935 sc->dev->power.runtime_error, 936 sc->dev->power.disable_depth, 937 sc->dev->power.autosuspend_delay); 938 939 HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u", 940 sc->pm_stats.runtime_get, sc->pm_stats.runtime_put, 941 sc->pm_stats.request_resume); 942 943 HIF_ERROR("allow_suspend: %u, prevent_suspend: %u", 944 sc->pm_stats.allow_suspend, 945 sc->pm_stats.prevent_suspend); 946 947 HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u", 948 sc->pm_stats.prevent_suspend_timeout, 949 sc->pm_stats.allow_suspend_timeout); 950 951 HIF_ERROR("Suspended: %u, resumed: %u count", 952 sc->pm_stats.suspended, 953 sc->pm_stats.resumed); 954 955 HIF_ERROR("suspend_err: %u, runtime_get_err: %u", 956 sc->pm_stats.suspend_err, 957 sc->pm_stats.runtime_get_err); 958 959 HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: "); 960 961 list_for_each_entry(ctx, &sc->prevent_suspend_list, list) { 962 HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout); 963 } 964 965 WARN_ON(1); 966 } 967 968 /** 969 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm 970 * @s: file to print to 971 * @data: unused 972 * 973 * debugging tool added to the debug fs for displaying runtimepm stats 974 * 975 * Return: 0 976 */ 977 static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data) 978 { 979 struct hif_pci_softc *sc = s->private; 980 static const char * const autopm_state[] = {"NONE", "ON", "RESUMING", 981 
"SUSPENDING", "SUSPENDED"}; 982 unsigned int msecs_age; 983 qdf_time_t usecs_age; 984 int pm_state = atomic_read(&sc->pm_state); 985 unsigned long timer_expires; 986 struct hif_pm_runtime_lock *ctx; 987 988 seq_printf(s, "%30s: %s\n", "Runtime PM state", 989 autopm_state[pm_state]); 990 seq_printf(s, "%30s: %pf\n", "Last Resume Caller", 991 sc->pm_stats.last_resume_caller); 992 seq_printf(s, "%30s: %pf\n", "Last Busy Marker", 993 sc->pm_stats.last_busy_marker); 994 995 usecs_age = qdf_get_log_timestamp_usecs() - 996 sc->pm_stats.last_busy_timestamp; 997 seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp", 998 sc->pm_stats.last_busy_timestamp / 1000000, 999 sc->pm_stats.last_busy_timestamp % 1000000); 1000 seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since", 1001 usecs_age / 1000000, usecs_age % 1000000); 1002 1003 if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) { 1004 msecs_age = jiffies_to_msecs(jiffies - 1005 sc->pm_stats.suspend_jiffies); 1006 seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since", 1007 msecs_age / 1000, msecs_age % 1000); 1008 } 1009 1010 seq_printf(s, "%30s: %d\n", "PM Usage count", 1011 atomic_read(&sc->dev->power.usage_count)); 1012 1013 seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt", 1014 sc->prevent_suspend_cnt); 1015 1016 HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended); 1017 HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err); 1018 HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed); 1019 HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get); 1020 HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put); 1021 HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume); 1022 HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend); 1023 HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend); 1024 HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout); 1025 HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout); 1026 HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err); 1027 1028 timer_expires = sc->runtime_timer_expires; 1029 if (timer_expires > 0) { 1030 msecs_age = jiffies_to_msecs(timer_expires - 
jiffies); 1031 seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout", 1032 msecs_age / 1000, msecs_age % 1000); 1033 } 1034 1035 spin_lock_bh(&sc->runtime_lock); 1036 if (list_empty(&sc->prevent_suspend_list)) { 1037 spin_unlock_bh(&sc->runtime_lock); 1038 return 0; 1039 } 1040 1041 seq_printf(s, "%30s: ", "Active Wakeup_Sources"); 1042 list_for_each_entry(ctx, &sc->prevent_suspend_list, list) { 1043 seq_printf(s, "%s", ctx->name); 1044 if (ctx->timeout) 1045 seq_printf(s, "(%d ms)", ctx->timeout); 1046 seq_puts(s, " "); 1047 } 1048 seq_puts(s, "\n"); 1049 spin_unlock_bh(&sc->runtime_lock); 1050 1051 return 0; 1052 } 1053 #undef HIF_PCI_RUNTIME_PM_STATS 1054 1055 /** 1056 * hif_pci_autopm_open() - open a debug fs file to access the runtime pm stats 1057 * @inode 1058 * @file 1059 * 1060 * Return: linux error code of single_open. 1061 */ 1062 static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file) 1063 { 1064 return single_open(file, hif_pci_pm_runtime_debugfs_show, 1065 inode->i_private); 1066 } 1067 1068 static const struct file_operations hif_pci_runtime_pm_fops = { 1069 .owner = THIS_MODULE, 1070 .open = hif_pci_runtime_pm_open, 1071 .release = single_release, 1072 .read = seq_read, 1073 .llseek = seq_lseek, 1074 }; 1075 1076 /** 1077 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry 1078 * @sc: pci context 1079 * 1080 * creates a debugfs entry to debug the runtime pm feature. 1081 */ 1082 static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc) 1083 { 1084 sc->pm_dentry = debugfs_create_file("cnss_runtime_pm", 1085 0400, NULL, sc, 1086 &hif_pci_runtime_pm_fops); 1087 } 1088 1089 /** 1090 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry 1091 * @sc: pci context 1092 * 1093 * removes the debugfs entry to debug the runtime pm feature. 
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
	debugfs_remove(sc->pm_dentry);
}

/**
 * hif_runtime_init() - hand the device over to runtime-pm control
 * @dev: device to enable runtime pm on
 * @delay: autosuspend delay in milliseconds
 *
 * Enables autosuspend and drops the usage-count reference
 * (pm_runtime_put_noidle) so the device can runtime-suspend once idle.
 */
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}

/**
 * hif_runtime_exit() - take the device back from runtime-pm control
 * @dev: device
 *
 * Re-takes the usage-count reference (balancing the put_noidle in
 * hif_runtime_init()) and marks the device active.
 */
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
}

/* timer callback registered on sc->runtime_timer below; defined later */
static void hif_pm_runtime_lock_timeout_fn(void *data);

/**
 * hif_pm_runtime_start(): start the runtime pm
 * @sc: pci context
 *
 * After this call, runtime pm will be active.
 */
static void hif_pm_runtime_start(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm) {
		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
		return;
	}

	/* runtime PM is not used in FTM/EPPING/MONITOR modes */
	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE) {
		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
			 __func__);
		return;
	}

	qdf_timer_init(NULL, &sc->runtime_timer,
		       hif_pm_runtime_lock_timeout_fn,
		       sc, QDF_TIMER_TYPE_WAKE_APPS);

	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
		 ol_sc->hif_config.runtime_pm_delay);

	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
	hif_runtime_pm_debugfs_create(sc);
}

/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @sc: pci context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_runtime_pm_start().
 */
static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm)
		return;

	/* mirror the mode checks in hif_pm_runtime_start(): nothing was
	 * started for FTM/EPPING/MONITOR so nothing must be stopped
	 */
	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE)
		return;

	/* take back the usage count, then force a resume so the device
	 * is awake before teardown proceeds
	 */
	hif_runtime_exit(sc->dev);
	hif_pm_runtime_resume(sc->dev);

	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(sc);
	qdf_timer_free(&sc->runtime_timer);
	/* doesn't wait for pending traffic unlike cld-2.0 */
}

/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @sc: pci data structure
 *
 * Early initialization: sets up the lock, pm_state atomic and the
 * prevent-suspend list; no runtime PM is enabled yet.
 */
static void hif_pm_runtime_open(struct hif_pci_softc *sc)
{
	spin_lock_init(&sc->runtime_lock);

	qdf_atomic_init(&sc->pm_state);
	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
	INIT_LIST_HEAD(&sc->prevent_suspend_list);
}

/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
 * @sc: pci context
 *
 * Ensure we have only one vote against runtime suspend before closing
 * the runtime suspend feature.
 *
 * all gets by the wlan driver should have been returned
 * one vote should remain as part of cnss_runtime_exit
 *
 * needs to be revisited if we share the root complex.
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	/* usage_count == 1 is the expected steady state at unload;
	 * anything else means leftover votes to clean up below
	 */
	if (atomic_read(&sc->dev->power.usage_count) != 1)
		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
	else
		return;

	spin_lock_bh(&sc->runtime_lock);
	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
		/* runtime_lock is dropped around the deinit call.
		 * NOTE(review): the _safe cursor (tmp) can go stale while
		 * the lock is released — confirm hif_runtime_lock_deinit()
		 * unlinks ctx before the lock is retaken.
		 */
		spin_unlock_bh(&sc->runtime_lock);
		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
		spin_lock_bh(&sc->runtime_lock);
	}
	spin_unlock_bh(&sc->runtime_lock);

	/* ensure 1 and only 1 usage count so that when the wlan
	 * driver is re-insmodded runtime pm won't be
	 * disabled also ensures runtime pm doesn't get
	 * broken on by being less than 1.
	 */
	if (atomic_read(&sc->dev->power.usage_count) <= 0)
		atomic_set(&sc->dev->power.usage_count, 1);
	while (atomic_read(&sc->dev->power.usage_count) > 1)
		hif_pm_runtime_put_auto(sc->dev);
}

/* releases a single prevent-suspend vote; defined later in this file */
static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
					  struct hif_pm_runtime_lock *lock);

/**
 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
 * @sc: PCIe Context
 *
 * API is used to empty the runtime pm prevent suspend list.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	spin_lock_bh(&sc->runtime_lock);
	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
		__hif_pm_runtime_allow_suspend(sc, ctx);
	}
	spin_unlock_bh(&sc->runtime_lock);
}

/**
 * hif_pm_runtime_close(): close runtime pm
 * @sc: pci bus handle
 *
 * ensure runtime_pm is stopped before closing the driver
 */
static void hif_pm_runtime_close(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
		return;

	hif_pm_runtime_stop(sc);

	/* SSR (recovery) exit only drains the prevent-suspend list;
	 * a normal unload additionally sanitizes the usage count
	 */
	hif_is_recovery_in_progress(scn) ?
		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
		hif_pm_runtime_sanitize_on_exit(sc);
}
#else
/* no-op stubs when runtime PM support is compiled out */
static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
#endif

/**
 * hif_disable_power_gating() - disable HW power gating
 * @hif_ctx: hif context
 *
 * disables pcie L1 power states by clearing the low byte of the link
 * control register at config offset 0x80; the original value is saved
 * in sc->lcr_val for hif_enable_power_gating() to restore.
 */
static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: Could not disable ASPM scn is null",
		       __func__);
		return;
	}

	/* Disable ASPM when pkt log is enabled */
	pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
	pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
}

/**
 * hif_enable_power_gating() - enable HW
power gating
 * @sc: pci context
 *
 * enables pcie L1 power states by restoring the link control register
 * value saved by hif_disable_power_gating().
 */
static void hif_enable_power_gating(struct hif_pci_softc *sc)
{
	if (!sc) {
		/* NOTE(review): message says "disable" but this is the
		 * enable path — looks copy-pasted from
		 * hif_disable_power_gating()
		 */
		HIF_ERROR("%s: Could not disable ASPM scn is null",
		       __func__);
		return;
	}

	/* Re-enable ASPM after firmware/OTP download is complete */
	pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
}

/**
 * hif_pci_enable_power_management() - enable power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: ASPM stays disabled while pkt log is on
 *
 * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling
 * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
 *
 * note: epping mode does not call this function as it does not
 * care about saving power.
 */
void hif_pci_enable_power_management(struct hif_softc *hif_sc,
				 bool is_packet_log_enabled)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
	uint32_t mode;

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}

	mode = hif_get_conparam(hif_sc);
	if (mode == QDF_GLOBAL_FTM_MODE) {
		/* FTM mode: only ASPM is re-enabled, no runtime PM */
		HIF_INFO("%s: Enable power gating for FTM mode", __func__);
		hif_enable_power_gating(pci_ctx);
		return;
	}

	hif_pm_runtime_start(pci_ctx);

	if (!is_packet_log_enabled)
		hif_enable_power_gating(pci_ctx);

	if (!CONFIG_ATH_PCIE_MAX_PERF &&
	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
	    !ce_srng_based(hif_sc)) {
		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
			HIF_ERROR("%s, failed to set target to sleep",
				  __func__);
	}
}

/**
 * hif_pci_disable_power_management() - disable power management
 * @hif_ctx: hif context
 *
 * Currently disables runtime pm. Should be updated to behave
 * if runtime pm is not started.
Should be updated to take care 1376 * of aspm and soc sleep for driver load. 1377 */ 1378 void hif_pci_disable_power_management(struct hif_softc *hif_ctx) 1379 { 1380 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); 1381 1382 if (!pci_ctx) { 1383 HIF_ERROR("%s, hif_ctx null", __func__); 1384 return; 1385 } 1386 1387 hif_pm_runtime_stop(pci_ctx); 1388 } 1389 1390 void hif_pci_display_stats(struct hif_softc *hif_ctx) 1391 { 1392 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); 1393 1394 if (!pci_ctx) { 1395 HIF_ERROR("%s, hif_ctx null", __func__); 1396 return; 1397 } 1398 hif_display_ce_stats(hif_ctx); 1399 1400 hif_print_pci_stats(pci_ctx); 1401 } 1402 1403 void hif_pci_clear_stats(struct hif_softc *hif_ctx) 1404 { 1405 struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); 1406 1407 if (!pci_ctx) { 1408 HIF_ERROR("%s, hif_ctx null", __func__); 1409 return; 1410 } 1411 hif_clear_ce_stats(&pci_ctx->ce_sc); 1412 } 1413 1414 #define ATH_PCI_PROBE_RETRY_MAX 3 1415 /** 1416 * hif_bus_open(): hif_bus_open 1417 * @scn: scn 1418 * @bus_type: bus type 1419 * 1420 * Return: n/a 1421 */ 1422 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type) 1423 { 1424 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 1425 1426 hif_ctx->bus_type = bus_type; 1427 hif_pm_runtime_open(sc); 1428 1429 qdf_spinlock_create(&sc->irq_lock); 1430 1431 return hif_ce_open(hif_ctx); 1432 } 1433 1434 /** 1435 * hif_wake_target_cpu() - wake the target's cpu 1436 * @scn: hif context 1437 * 1438 * Send an interrupt to the device to wake up the Target CPU 1439 * so it has an opportunity to notice any changed state. 
 */
static void hif_wake_target_cpu(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t core_ctrl;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	/* read-modify-write of CORE_CTRL via the diag window */
	rv = hif_diag_read_access(hif_hdl,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	rv = hif_diag_write_access(hif_hdl,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
}

/**
 * soc_wake_reset() - allow the target to go to sleep
 * @scn: hif_softc
 *
 * Clear the force wake register. This is done by
 * hif_sleep_entry and cancel defered timer sleep.
 */
static void soc_wake_reset(struct hif_softc *scn)
{
	hif_write32_mb(scn, scn->mem +
		PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS,
		PCIE_SOC_WAKE_RESET);
}

/**
 * hif_sleep_entry() - gate target sleep
 * @arg: hif context
 *
 * This function is the callback for the sleep timer.
 * Check if last force awake critical section was at least
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was,
 * allow the target to go to sleep and cancel the sleep timer.
 * otherwise reschedule the sleep timer.
 */
static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	uint32_t idle_ms;

	if (scn->recovery)
		return;

	if (hif_is_driver_unloading(scn))
		return;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->fake_sleep) {
		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (!hif_state->verified_awake &&
		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			/* idle long enough: let the target sleep, unless
			 * the link is suspended
			 */
			if (!qdf_atomic_read(&scn->link_suspended)) {
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			/* still active: rearm the sleep timer */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}

#define HIF_HIA_MAX_POLL_LOOP    1000000
#define HIF_HIA_POLLING_DELAY_MS 10

#ifdef QCA_HIF_HIA_EXTND

/**
 * hif_set_hia_extnd() - extended host-interest-area setup
 * @scn: hif context
 *
 * Programs clock override and PLL frac/intval settings into the host
 * interest area for AR900B/QCA9984/QCA9888 class targets. The values
 * come from the module parameters ar900b_20_targ_clk, frac and intval
 * (-1 means "not supplied, keep target defaults").
 */
static void hif_set_hia_extnd(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;

	HIF_TRACE("%s: E", __func__);

	if ((target_type == TARGET_TYPE_AR900B) ||
	    target_type == TARGET_TYPE_QCA9984 ||
	    target_type == TARGET_TYPE_QCA9888) {
		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
		 * in RTC space
		 */
		tgt_info->target_revision
			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
							+ CHIP_ID_ADDRESS));
		qdf_print("chip_id 0x%x chip_revision 0x%x",
			  target_type, tgt_info->target_revision);
	}

	{
		uint32_t flag2_value = 0;
		uint32_t flag2_targ_addr =
			host_interest_item_address(target_type,
				offsetof(struct host_interest_s,
					 hi_skip_clock_init));

		/* only set the clock override when all three module
		 * parameters were supplied
		 */
		if ((ar900b_20_targ_clk != -1) &&
		    (frac != -1) && (intval != -1)) {
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
					     &flag2_value);
			qdf_print("\n Setting clk_override");
			flag2_value |= CLOCK_OVERRIDE;

			hif_diag_write_access(hif_hdl, flag2_targ_addr,
					      flag2_value);
			qdf_print("\n CLOCK PLL val set %d", flag2_value);
		} else {
			qdf_print("\n CLOCK PLL skipped");
		}
	}

	if (target_type == TARGET_TYPE_AR900B
	    || target_type == TARGET_TYPE_QCA9984
	    || target_type == TARGET_TYPE_QCA9888) {

		/* for AR9980_2.0, 300 mhz clock is used, right now we assume
		 * this would be supplied through module parameters,
		 * if not supplied assumed default or same behavior as 1.0.
		 * Assume 1.0 clock can't be tuned, reset to defaults
		 */

		qdf_print(KERN_INFO
			  "%s: setting the target pll frac %x intval %x",
			  __func__, frac, intval);

		/* do not touch frac, and int val, let them be default -1,
		 * if desired, host can supply these through module params
		 */
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr =
				host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_clock_info));
			hif_diag_read_access(hif_hdl,
					     flag2_targ_addr, &flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			/* NOTE(review): flag2_value (read from
			 * hi_clock_info) is used as the write ADDRESS here —
			 * presumably hi_clock_info holds a target-side
			 * pointer to the clock table; confirm.
			 */
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x Address %x",
				  intval, flag2_value + 4);
			hif_diag_write_access(hif_hdl,
					      flag2_value + 4, intval);
		} else {
			qdf_print(KERN_INFO
				  "%s: no frac provided, skipping pre-configuring PLL",
				  __func__);
		}

		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
		if ((target_type == TARGET_TYPE_AR900B)
		    && (tgt_info->target_revision == AR900B_REV_2)
		    && ar900b_20_targ_clk != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_desired_cpu_speed_hz));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
					     &flag2_value);
			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value,
					      ar900b_20_targ_clk/*300000000u*/);
		} else if (target_type == TARGET_TYPE_QCA9888) {
			uint32_t flag2_targ_addr;

			if (200000000u != qca9888_20_targ_clk) {
				qca9888_20_targ_clk = 300000000u;
				/* Setting the target clock speed to 300 mhz */
			}

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_desired_cpu_speed_hz));
			hif_diag_write_access(hif_hdl, flag2_targ_addr,
					      qca9888_20_targ_clk);
		} else {
			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
				  __func__);
		}
	} else {
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr =
				host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_clock_info));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
					     &flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x Address %x", intval,
				  flag2_value + 4);
			hif_diag_write_access(hif_hdl, flag2_value + 4,
					      intval);
		}
	}
}

#else

/* stub when the extended HIA programming is not compiled in */
static void hif_set_hia_extnd(struct hif_softc *scn)
{
}

#endif

/**
 * hif_set_hia() - fill out the host interest area
 * @scn: hif context
 *
 * This is replaced by hif_wlan_enable for integrated targets.
 * This fills out the host interest area.
The firmware will
 * process these memory addresses when it is first brought out
 * of reset.
 *
 * Return: 0 for success.
 */
static int hif_set_hia(struct hif_softc *scn)
{
	/* NOTE(review): rv is QDF_STATUS but the QCA_WIFI_3_0 polling
	 * paths return -EIO directly — callers see a mix of QDF_STATUS
	 * and negative errno values; confirm callers only test non-zero.
	 */
	QDF_STATUS rv;
	uint32_t interconnect_targ_addr = 0;
	uint32_t pcie_state_targ_addr = 0;
	uint32_t pipe_cfg_targ_addr = 0;
	uint32_t svc_to_pipe_map = 0;
	uint32_t pcie_config_flags = 0;
	uint32_t flag2_value = 0;
	uint32_t flag2_targ_addr = 0;
#ifdef QCA_WIFI_3_0
	uint32_t host_interest_area = 0;
	uint8_t i;
#else
	uint32_t ealloc_value = 0;
	uint32_t ealloc_targ_addr = 0;
	uint8_t banks_switched = 1;
	uint32_t chip_id;
#endif
	uint32_t pipe_cfg_addr;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;
	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
	static struct CE_pipe_config *target_ce_config;
	struct service_to_pipe *target_service_to_ce_map;

	HIF_TRACE("%s: E", __func__);

	hif_get_target_ce_config(scn,
				 &target_ce_config, &target_ce_config_sz,
				 &target_service_to_ce_map,
				 &target_service_to_ce_map_sz,
				 NULL, NULL);

	/* integrated (Adrastea) targets do not use the HIA mechanism */
	if (ADRASTEA_BU)
		return QDF_STATUS_SUCCESS;

#ifdef QCA_WIFI_3_0
	/* poll SCRATCH_0 until the firmware publishes the HIA address
	 * (bit0 clear means not ready yet)
	 */
	i = 0;
	while (i < HIF_HIA_MAX_POLL_LOOP) {
		host_interest_area = hif_read32_mb(scn, scn->mem +
						A_SOC_CORE_SCRATCH_0_ADDRESS);
		if ((host_interest_area & 0x01) == 0) {
			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
			host_interest_area = 0;
			i++;
			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
		} else {
			host_interest_area &= (~0x01);
			hif_write32_mb(scn, scn->mem + 0x113014, 0);
			break;
		}
	}

	if (i >= HIF_HIA_MAX_POLL_LOOP) {
		HIF_ERROR("%s: hia polling timeout", __func__);
		return -EIO;
	}

	if (host_interest_area == 0) {
		HIF_ERROR("%s: host_interest_area = 0", __func__);
		return -EIO;
	}

	interconnect_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t,
				 hi_interconnect_state);

	flag2_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t, hi_option_flag2);

#else
	interconnect_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_interconnect_state));
	ealloc_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_early_alloc));
	flag2_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_option_flag2));
#endif
	/* Supply Target-side CE configuration */
	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
				  &pcie_state_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
			  __func__, interconnect_targ_addr, rv);
		goto done;
	}
	if (pcie_state_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: pcie state addr is 0", __func__);
		goto done;
	}
	pipe_cfg_addr = pcie_state_targ_addr +
			  offsetof(struct pcie_state_s,
				   pipe_cfg_addr);
	rv = hif_diag_read_access(hif_hdl,
				  pipe_cfg_addr,
				  &pipe_cfg_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
			  __func__, pipe_cfg_addr, rv);
		goto done;
	}
	if (pipe_cfg_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
				(uint8_t *) target_ce_config,
				target_ce_config_sz);

	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
				  pcie_state_targ_addr +
				  offsetof(struct pcie_state_s,
					   svc_to_pipe_map),
				  &svc_to_pipe_map);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
		goto done;
	}
	if (svc_to_pipe_map == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl,
				svc_to_pipe_map,
				(uint8_t *) target_service_to_ce_map,
				target_service_to_ce_map_sz);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
				  pcie_state_targ_addr +
				  offsetof(struct pcie_state_s,
					   config_flags),
				  &pcie_config_flags);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
		goto done;
	}
#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
#else
	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
#endif
	rv = hif_diag_write_mem(hif_hdl,
				pcie_state_targ_addr +
				offsetof(struct pcie_state_s,
					 config_flags),
				(uint8_t *) &pcie_config_flags,
				sizeof(pcie_config_flags));
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
		goto done;
	}

#ifndef QCA_WIFI_3_0
	/* configure early allocation */
	ealloc_targ_addr = hif_hia_item_address(target_type,
						offsetof(
							struct host_interest_s,
							hi_early_alloc));

	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
				  &ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
		goto done;
	}

	/* 1 bank is switched to IRAM, except ROME 1.0 */
	ealloc_value |=
		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
		 HI_EARLY_ALLOC_MAGIC_MASK);

	rv = hif_diag_read_access(hif_hdl,
				  CHIP_ID_ADDRESS |
				  RTC_SOC_BASE_ADDRESS, &chip_id);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
		goto done;
	}
	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
		/* number of IRAM banks depends on the ROME revision */
		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
		switch (CHIP_ID_REVISION_GET(chip_id)) {
		case 0x2: /* ROME 1.3 */
			/* 2 banks are switched to IRAM */
			banks_switched = 2;
			break;
		case 0x4: /* ROME 2.1 */
		case 0x5: /* ROME 2.2 */
			banks_switched = 6;
			break;
		case 0x8: /* ROME 3.0 */
		case 0x9: /* ROME 3.1 */
		case 0xA: /* ROME 3.2 */
			banks_switched = 9;
			break;
		case 0x0: /* ROME 1.0 */
		case 0x1: /* ROME 1.1 */
		default:
			/* 3 banks are switched to IRAM */
			banks_switched = 3;
			break;
		}
	}

	ealloc_value |=
		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	rv = hif_diag_write_access(hif_hdl,
				   ealloc_targ_addr,
				   ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
		goto done;
	}
#endif
	if ((target_type == TARGET_TYPE_AR900B)
	    || (target_type == TARGET_TYPE_QCA9984)
	    || (target_type == TARGET_TYPE_QCA9888)
	    || (target_type == TARGET_TYPE_AR9888)) {
		hif_set_hia_extnd(scn);
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = hif_hia_item_address(target_type,
					       offsetof(
						       struct host_interest_s,
						       hi_option_flag2));

	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
				  &flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get option val (%d)", __func__, rv);
		goto done;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
				   flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: set option val (%d)", __func__, rv);
		goto done;
	}

	hif_wake_target_cpu(scn);

done:

	return rv;
}

/**
 * hif_pci_bus_configure() - configure the pcie bus
 * @hif_sc: pointer to the hif context.
 *
 * return: 0 for success. nonzero for failure.
 */
int hif_pci_bus_configure(struct hif_softc *hif_sc)
{
	int status = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);

	hif_ce_prepare_config(hif_sc);

	/* initialize sleep state adjust variables */
	hif_state->sleep_timer_init = true;
	hif_state->keep_awake_count = 0;
	hif_state->fake_sleep = false;
	hif_state->sleep_ticks = 0;

	qdf_timer_init(NULL, &hif_state->sleep_timer,
		       hif_sleep_entry, (void *)hif_state,
		       QDF_TIMER_TYPE_WAKE_APPS);
	/* NOTE(review): sleep_timer_init is already set to true a few
	 * lines above; this second assignment looks redundant.
	 */
	hif_state->sleep_timer_init = true;

	status = hif_wlan_enable(hif_sc);
	if (status) {
		HIF_ERROR("%s: hif_wlan_enable error = %d",
			  __func__, status);
		goto timer_free;
	}

	A_TARGET_ACCESS_LIKELY(hif_sc);

	if ((CONFIG_ATH_PCIE_MAX_PERF ||
	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
	    !ce_srng_based(hif_sc)) {
		/*
		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
		 * prevent sleep when we want to keep firmware always awake
		 * note: when we want to keep firmware always awake,
		 * hif_target_sleep_state_adjust will point to a dummy
		 * function, and hif_pci_target_sleep_state_adjust must
		 * be called instead.
		 * note: bus type check is here because AHB bus is reusing
		 * hif_pci_bus_configure code.
		 */
		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
			if (hif_pci_target_sleep_state_adjust(hif_sc,
					false, true) < 0) {
				status = -EACCES;
				goto disable_wlan;
			}
		}
	}

	/* todo: consider replacing this with an srng field */
	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
		hif_sc->per_ce_irq = true;
	}

	status = hif_config_ce(hif_sc);
	if (status)
		goto disable_wlan;

	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
	if (hif_needs_bmi(hif_osc)) {
		status = hif_set_hia(hif_sc);
		if (status)
			goto unconfig_ce;

		HIF_INFO_MED("%s: hif_set_hia done", __func__);

	}

	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
			     __func__);
	else {
		status = hif_configure_irq(hif_sc);
		if (status < 0)
			goto unconfig_ce;
	}

	A_TARGET_ACCESS_UNLIKELY(hif_sc);

	return status;

unconfig_ce:
	hif_unconfig_ce(hif_sc);
disable_wlan:
	A_TARGET_ACCESS_UNLIKELY(hif_sc);
	hif_wlan_disable(hif_sc);

timer_free:
	qdf_timer_stop(&hif_state->sleep_timer);
	qdf_timer_free(&hif_state->sleep_timer);
	hif_state->sleep_timer_init = false;

	HIF_ERROR("%s: failed, status = %d", __func__, status);
	return status;
}

/**
 * hif_pci_close(): hif_bus_close
 * @hif_sc: hif context
 *
 * Return: n/a
 */
void hif_pci_close(struct hif_softc *hif_sc)
{
	struct hif_pci_softc *hif_pci_sc =
HIF_GET_PCI_SOFTC(hif_sc); 2065 2066 hif_pm_runtime_close(hif_pci_sc); 2067 hif_ce_close(hif_sc); 2068 } 2069 2070 #define BAR_NUM 0 2071 2072 static int hif_enable_pci_nopld(struct hif_pci_softc *sc, 2073 struct pci_dev *pdev, 2074 const struct pci_device_id *id) 2075 { 2076 void __iomem *mem; 2077 int ret = 0; 2078 uint16_t device_id = 0; 2079 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); 2080 2081 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); 2082 if (device_id != id->device) { 2083 HIF_ERROR( 2084 "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x", 2085 __func__, device_id, id->device); 2086 /* pci link is down, so returing with error code */ 2087 return -EIO; 2088 } 2089 2090 /* FIXME: temp. commenting out assign_resource 2091 * call for dev_attach to work on 2.6.38 kernel 2092 */ 2093 #if (!defined(__LINUX_ARM_ARCH__)) 2094 if (pci_assign_resource(pdev, BAR_NUM)) { 2095 HIF_ERROR("%s: pci_assign_resource error", __func__); 2096 return -EIO; 2097 } 2098 #endif 2099 if (pci_enable_device(pdev)) { 2100 HIF_ERROR("%s: pci_enable_device error", 2101 __func__); 2102 return -EIO; 2103 } 2104 2105 /* Request MMIO resources */ 2106 ret = pci_request_region(pdev, BAR_NUM, "ath"); 2107 if (ret) { 2108 HIF_ERROR("%s: PCI MMIO reservation error", __func__); 2109 ret = -EIO; 2110 goto err_region; 2111 } 2112 2113 #ifdef CONFIG_ARM_LPAE 2114 /* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask 2115 * for 32 bits device also. 
2116 */ 2117 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2118 if (ret) { 2119 HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__); 2120 goto err_dma; 2121 } 2122 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2123 if (ret) { 2124 HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__); 2125 goto err_dma; 2126 } 2127 #else 2128 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2129 if (ret) { 2130 HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__); 2131 goto err_dma; 2132 } 2133 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2134 if (ret) { 2135 HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!", 2136 __func__); 2137 goto err_dma; 2138 } 2139 #endif 2140 2141 PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188); 2142 2143 /* Set bus master bit in PCI_COMMAND to enable DMA */ 2144 pci_set_master(pdev); 2145 2146 /* Arrange for access to Target SoC registers. */ 2147 mem = pci_iomap(pdev, BAR_NUM, 0); 2148 if (!mem) { 2149 HIF_ERROR("%s: PCI iomap error", __func__); 2150 ret = -EIO; 2151 goto err_iomap; 2152 } 2153 2154 HIF_INFO("*****BAR is %pK\n", (void *)mem); 2155 2156 sc->mem = mem; 2157 2158 /* Hawkeye emulation specific change */ 2159 if ((device_id == RUMIM2M_DEVICE_ID_NODE0) || 2160 (device_id == RUMIM2M_DEVICE_ID_NODE1) || 2161 (device_id == RUMIM2M_DEVICE_ID_NODE2) || 2162 (device_id == RUMIM2M_DEVICE_ID_NODE3) || 2163 (device_id == RUMIM2M_DEVICE_ID_NODE4) || 2164 (device_id == RUMIM2M_DEVICE_ID_NODE5)) { 2165 mem = mem + 0x0c000000; 2166 sc->mem = mem; 2167 HIF_INFO("%s: Changing PCI mem base to %pK\n", 2168 __func__, sc->mem); 2169 } 2170 2171 sc->mem_len = pci_resource_len(pdev, BAR_NUM); 2172 ol_sc->mem = mem; 2173 ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM); 2174 sc->pci_enabled = true; 2175 return ret; 2176 2177 err_iomap: 2178 pci_clear_master(pdev); 2179 err_dma: 2180 pci_release_region(pdev, BAR_NUM); 2181 err_region: 2182 pci_disable_device(pdev); 2183 return ret; 2184 } 2185 2186 static int hif_enable_pci_pld(struct 
hif_pci_softc *sc,
			      struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	/* Under PLD, the platform driver has already enabled the device;
	 * only the L1SS workaround and bookkeeping are needed here.
	 */
	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
	sc->pci_enabled = true;
	return 0;
}


/**
 * hif_pci_deinit_nopld() - undo hif_enable_pci_nopld() resource setup
 * @sc: HIF PCI context
 *
 * Releases MSI, the BAR mapping, bus mastering, the BAR region and
 * finally disables the PCI device.
 */
static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
{
	pci_disable_msi(sc->pdev);
	pci_iounmap(sc->pdev, sc->mem);
	pci_clear_master(sc->pdev);
	pci_release_region(sc->pdev, BAR_NUM);
	pci_disable_device(sc->pdev);
}

/* PLD owns the device; nothing to release here. */
static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}

/**
 * hif_disable_pci() - reset the target and tear down PCI resources
 * @sc: HIF PCI context
 */
static void hif_disable_pci(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);

	if (!ol_sc) {
		HIF_ERROR("%s: ol_sc = NULL", __func__);
		return;
	}
	hif_pci_device_reset(sc);
	sc->hif_pci_deinit(sc);

	sc->mem = NULL;
	ol_sc->mem = NULL;
}

/**
 * hif_pci_probe_tgt_wakeup() - wake the target and verify a clean start
 * @sc: HIF PCI context
 *
 * Return: 0 on success, -EAGAIN if the target does not wake up or is in
 *         an unexpected (already initialized) state.
 */
static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
{
	int ret = 0;
	int targ_awake_limit = 500;
#ifndef QCA_WIFI_3_0
	uint32_t fw_indicator;
#endif
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/*
	 * Verify that the Target was started cleanly.
	 * The case where this is most likely is with an AUX-powered
	 * Target and a Host in WoW mode. If the Host crashes,
	 * loses power, or is restarted (without unloading the driver)
	 * then the Target is left (aux) powered and running. On a
	 * subsequent driver load, the Target is in an unexpected state.
	 * We try to catch that here in order to reset the Target and
	 * retry the probe.
2240 */ 2241 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 2242 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 2243 while (!hif_targ_is_awake(scn, sc->mem)) { 2244 if (0 == targ_awake_limit) { 2245 HIF_ERROR("%s: target awake timeout", __func__); 2246 ret = -EAGAIN; 2247 goto end; 2248 } 2249 qdf_mdelay(1); 2250 targ_awake_limit--; 2251 } 2252 2253 #if PCIE_BAR0_READY_CHECKING 2254 { 2255 int wait_limit = 200; 2256 /* Synchronization point: wait the BAR0 is configured */ 2257 while (wait_limit-- && 2258 !(hif_read32_mb(sc, c->mem + 2259 PCIE_LOCAL_BASE_ADDRESS + 2260 PCIE_SOC_RDY_STATUS_ADDRESS) 2261 & PCIE_SOC_RDY_STATUS_BAR_MASK)) { 2262 qdf_mdelay(10); 2263 } 2264 if (wait_limit < 0) { 2265 /* AR6320v1 doesn't support checking of BAR0 2266 * configuration, takes one sec to wait BAR0 ready 2267 */ 2268 HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0", 2269 __func__); 2270 } 2271 } 2272 #endif 2273 2274 #ifndef QCA_WIFI_3_0 2275 fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS); 2276 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 2277 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); 2278 2279 if (fw_indicator & FW_IND_INITIALIZED) { 2280 HIF_ERROR("%s: Target is in an unknown state. 
EAGAIN",
			  __func__);
		ret = -EAGAIN;
		goto end;
	}
#endif

end:
	return ret;
}

/**
 * hif_pci_configure_legacy_irq() - set up legacy (INTx) interrupt handling
 * @sc: HIF PCI context
 *
 * Registers a shared legacy interrupt handler, enables the host group 0
 * interrupt sources in the target and releases the SOC wake vote.
 *
 * Return: 0 on success, errno from request_irq() on failure
 */
static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	uint32_t target_type = scn->target_info.target_type;

	HIF_TRACE("%s: E", __func__);

	/* does not support MSI or MSI IRQ failed */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	ret = request_irq(sc->pdev->irq,
			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
			  "wlan_pci", sc);
	if (ret) {
		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
		goto end;
	}
	scn->wake_irq = sc->pdev->irq;
	/* Use sc->irq instead of sc->pdev->irq
	 * platform_device pdev doesn't have an irq field
	 */
	sc->irq = sc->pdev->irq;
	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
		       PCIE_INTR_ENABLE_ADDRESS),
		       HOST_GROUP0_MASK);
	/* read back to flush the posted write before dropping the wake vote */
	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
		      PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);

	if ((target_type == TARGET_TYPE_IPQ4019) ||
	    (target_type == TARGET_TYPE_AR900B) ||
	    (target_type == TARGET_TYPE_QCA9984) ||
	    (target_type == TARGET_TYPE_AR9888) ||
	    (target_type == TARGET_TYPE_QCA9888) ||
	    (target_type == TARGET_TYPE_AR6320V1) ||
	    (target_type == TARGET_TYPE_AR6320V2) ||
	    (target_type == TARGET_TYPE_AR6320V3)) {
		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	}
end:
	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
		  "%s: X, ret = %d", __func__, ret);
	return ret;
}

/**
 * hif_ce_srng_msi_free_irq() - free the per-CE MSI irqs
 * @scn: hif context
 *
 * Return: 0 on success, errno from pld_get_user_msi_assignment() otherwise
 */
static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
{
	int ret;
	int ce_id, irq;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return ret;

	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		unsigned int msi_data;

		/* CEs with interrupts disabled never requested an irq */
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;

		if (!ce_sc->tasklets[ce_id].inited)
			continue;

		msi_data = (ce_id % msi_data_count) + msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);

		hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
			  ce_id, msi_data, irq);

		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
	}

	return ret;
}

/**
 * hif_pci_deconfigure_grp_irq() - free the irqs of every ext interrupt group
 * @scn: hif context
 */
static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
{
	int i, j, irq;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested) {
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				pfrm_free_irq(scn->qdf_dev->dev,
					      irq, hif_ext_group);
			}
			hif_ext_group->numirq = 0;
		}
	}
}

/**
 * hif_pci_nointrs(): disable IRQ
 *
 * This function stops interrupt(s)
 *
 * @scn: struct hif_softc
 *
 * Return: none
 */
void hif_pci_nointrs(struct hif_softc *scn)
{
	int i, ret;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	if (scn->request_irq_done == false)
		return;

	hif_pci_deconfigure_grp_irq(scn);

	ret = hif_ce_srng_msi_free_irq(scn);
	if (ret != -EINVAL) {
		/* ce irqs freed in hif_ce_srng_msi_free_irq */

		if (scn->wake_irq)
			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
		scn->wake_irq = 0;
	} else if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++)
			free_irq(sc->irq + i, sc);
		sc->num_msi_intrs = 0;
	} else {
		/* Legacy PCI line interrupt
		 * Use sc->irq instead of sc->pdev->irq
		 * platform_device pdev doesn't have an irq field
		 */
		free_irq(sc->irq, sc);
	}
	scn->request_irq_done = false;
}

/**
 * hif_pci_disable_bus(): disable the PCI bus
 *
 * This function disables the bus: it resets the target and tears down
 * the PCI resources via the deinit callback.
 *
 * @scn: hif context
 *
 * Return: none
 */
void hif_pci_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct pci_dev *pdev;
	void __iomem *mem;
	struct hif_target_info *tgt_info = &scn->target_info;

	/* Attach did not succeed, all resources have been
	 * freed in error handler
	 */
	if (!sc)
		return;

	pdev = sc->pdev;
	if (ADRASTEA_BU) {
		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));

		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
			       HOST_GROUP0_MASK);
	}

#if defined(CPU_WARM_RESET_WAR)
	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
	 * verified for AR9888_REV1
	 */
	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
	    (tgt_info->target_version == AR9887_REV1_VERSION))
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
	mem = (void __iomem *)sc->mem;
	if (mem) {
		hif_dump_pipe_debug_count(scn);
		if (scn->athdiag_procfs_inited) {
			athdiag_procfs_remove();
			scn->athdiag_procfs_inited = false;
		}
		sc->hif_pci_deinit(sc);
		scn->mem = NULL;
	}
	HIF_INFO("%s: X", __func__);
}

#define OL_ATH_PCI_PM_CONTROL 0x44

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
 * @scn: hif context
 * @flag: prevent linkdown if true otherwise allow
 *
 * this api should only be called as part of bus prevent linkdown
 */
static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	if (flag)
		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
}
#else
static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
}
#endif

#if defined(CONFIG_PCI_MSM)
/**
 * hif_pci_prevent_linkdown(): allow or prevent linkdown
 * @scn: hif context
 * @flag: true prevents linkdown, false allows
 *
 * Calls into the platform driver to vote against taking down the
 * pcie link.
 *
 * Return: n/a
 */
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	int errno;

	HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
	hif_runtime_prevent_linkdown(scn, flag);

	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
	if (errno)
		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
			  __func__, errno);
}
#else
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	HIF_INFO("wlan: %s pcie power collapse", (flag ?
"disable" : "enable"));
	hif_runtime_prevent_linkdown(scn, flag);
}
#endif

/**
 * hif_pci_bus_suspend(): prepare hif for suspend
 * @scn: hif context
 *
 * Disables the apps-side irqs and drains any in-flight tasklets before
 * stopping the HIF sleep timer.
 *
 * Return: Errno
 */
int hif_pci_bus_suspend(struct hif_softc *scn)
{
	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));

	if (hif_drain_tasklets(scn)) {
		/* tasklets still pending - re-enable irqs and abort suspend */
		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
		return -EBUSY;
	}

	/* Stop the HIF Sleep Timer */
	hif_cancel_deferred_target_sleep(scn);

	return 0;
}

/**
 * __hif_check_link_status() - API to check if PCIe link is active/not
 * @scn: HIF Context
 *
 * API reads the PCIe config space to verify if PCIe link training is
 * successful or not.
 *
 * Return: Success/Failure
 */
static int __hif_check_link_status(struct hif_softc *scn)
{
	uint16_t dev_id = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (!sc) {
		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
		return -EINVAL;
	}

	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);

	/* a live link returns the device id recorded at probe time */
	if (dev_id == sc->devid)
		return 0;

	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
		  __func__, dev_id);

	scn->recovery = true;

	if (cbk && cbk->set_recovery_in_progress)
		cbk->set_recovery_in_progress(cbk->context, true);
	else
		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);

	pld_is_pci_link_down(sc->dev);
	return -EACCES;
}

/**
 * hif_pci_bus_resume(): prepare hif for resume
 * @scn: hif context
 *
 * Verifies the PCIe link is up before re-enabling the apps-side irqs.
 *
 * Return: Errno
 */
int hif_pci_bus_resume(struct hif_softc *scn)
{
	int errno;

	errno = __hif_check_link_status(scn);
	if (errno)
		return errno;

	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));

	return 0;
}

/**
 * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
 * @scn: hif context
 *
 * Ensure that if we received the wakeup message before the irq
 * was disabled that the message is processed before suspending.
 *
 * Return: 0 (always succeeds)
 */
int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
{
	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
		qdf_atomic_set(&scn->link_suspended, 1);

	/* keep the wake irq armed so a target wakeup can abort the suspend */
	hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));

	return 0;
}

/**
 * hif_pci_bus_resume_noirq() - ensure there are no pending transactions
 * @scn: hif context
 *
 * Ensure that if we received the wakeup message before the irq
 * was disabled that the message is processed before resuming.
 *
 * Return: 0 (always succeeds)
 */
int hif_pci_bus_resume_noirq(struct hif_softc *scn)
{
	hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));

	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
		qdf_atomic_set(&scn->link_suspended, 0);

	return 0;
}

#ifdef FEATURE_RUNTIME_PM
/**
 * __hif_runtime_pm_set_state(): utility function
 * @state: state to set
 *
 * indexes into the runtime pm state and sets it.
 */
static void __hif_runtime_pm_set_state(struct hif_softc *scn,
				       enum hif_pm_runtime_state state)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	if (!sc) {
		HIF_ERROR("%s: HIF_CTX not initialized",
			  __func__);
		return;
	}

	qdf_atomic_set(&sc->pm_state, state);
}

/**
 * hif_runtime_pm_set_state_on(): adjust runtime pm state
 *
 * Notify hif that the runtime pm state should be on
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}

/**
 * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
 *
 * Notify hif that a runtime pm resuming has started
 */
static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
}

/**
 * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
 *
 * Notify hif that a runtime pm suspend has started
 */
static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
}

/**
 * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
 *
 * Notify hif that a runtime suspend attempt has been completed successfully
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}

/**
 * hif_log_runtime_suspend_success() - log a successful runtime suspend
 */
static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	sc->pm_stats.suspended++;
	sc->pm_stats.suspend_jiffies = jiffies;
}

/**
 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
 *
 * log a failed runtime suspend
 */
static void hif_log_runtime_suspend_failure(void *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	sc->pm_stats.suspend_err++;
}

/**
 * hif_log_runtime_resume_success() - log a successful runtime resume
 *
 * log a successful runtime resume
 */
static void hif_log_runtime_resume_success(void *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	sc->pm_stats.resumed++;
}

/**
 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
 *
 * Record the failure.
 * mark last busy to delay a retry.
 * adjust the runtime_pm state.
 */
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_suspend_failure(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}

/**
 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
 *
 * Makes sure that the pci link will be taken down by the suspend operation.
 * If the hif layer is configured to leave the bus on, runtime suspend will
 * not save any power.
 *
 * Set the runtime suspend state to in progress.
 *
 * return -EINVAL if the bus won't go down.
otherwise return 0
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_can_suspend_link(hif_ctx)) {
		HIF_ERROR("Runtime PM not supported for link up suspend");
		return -EINVAL;
	}

	hif_runtime_pm_set_state_suspending(scn);
	return 0;
}

/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 *
 * Record the success.
 * adjust the runtime_pm state
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(scn);
	hif_log_runtime_suspend_success(scn);
}

/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 *
 * update the runtime pm state.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_runtime_pm_set_state_resuming(scn);
}

/**
 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
 *
 * record the success.
 * adjust the runtime_pm state
 */
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_resume_success(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}

/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
		return errno;
	}

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		goto bus_resume;
	}

	qdf_atomic_set(&sc->pm_dp_rx_busy, 0);

	return 0;

bus_resume:
	/* undo the earlier hif_bus_suspend(); failure here is fatal */
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}

/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 *
 * ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
 *
 * fastpath only applicable to legacy copy engine
 */
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/*war_ce_src_ring_write_idx_set */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}

/**
 * hif_runtime_resume() - do the bus resume part of a runtime resume
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
	QDF_BUG(!hif_bus_resume(hif_ctx));
	return 0;
}
#endif /* #ifdef FEATURE_RUNTIME_PM */

#if CONFIG_PCIE_64BIT_MSI
/**
 * hif_free_msi_ctx() - free the MSI magic DMA memory
 * @scn: hif context
 */
static void hif_free_msi_ctx(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = scn->hif_sc;
	struct hif_msi_info *info = &sc->msi_info;
	struct device *dev = scn->qdf_dev->dev;

	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
	info->magic = NULL;
	info->magic_dma = 0;
}
#else
static void hif_free_msi_ctx(struct hif_softc *scn)
{
}
#endif

/**
 * hif_pci_disable_isr() - stop interrupt delivery and kill tasklets
 * @scn: hif context
 */
void hif_pci_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	hif_free_msi_ctx(scn);
	/* Cancel the pending tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2969 } 2970 2971 /* Function to reset SoC */ 2972 void hif_pci_reset_soc(struct hif_softc *hif_sc) 2973 { 2974 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc); 2975 struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc); 2976 struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc); 2977 2978 #if defined(CPU_WARM_RESET_WAR) 2979 /* Currently CPU warm reset sequence is tested only for AR9888_REV2 2980 * Need to enable for AR9888_REV1 once CPU warm reset sequence is 2981 * verified for AR9888_REV1 2982 */ 2983 if (tgt_info->target_version == AR9888_REV2_VERSION) 2984 hif_pci_device_warm_reset(sc); 2985 else 2986 hif_pci_device_reset(sc); 2987 #else 2988 hif_pci_device_reset(sc); 2989 #endif 2990 } 2991 2992 #ifdef CONFIG_PCI_MSM 2993 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) 2994 { 2995 msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0); 2996 msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0); 2997 } 2998 #else 2999 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}; 3000 #endif 3001 3002 /** 3003 * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info 3004 * @sc: HIF PCIe Context 3005 * 3006 * API to log PCIe Config space and SOC info when SOC wakeup timeout happens 3007 * 3008 * Return: Failure to caller 3009 */ 3010 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc) 3011 { 3012 uint16_t val = 0; 3013 uint32_t bar = 0; 3014 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc); 3015 struct hif_softc *scn = HIF_GET_SOFTC(sc); 3016 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc); 3017 struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl); 3018 struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); 3019 A_target_id_t pci_addr = scn->mem; 3020 3021 HIF_ERROR("%s: keep_awake_count = %d", 3022 __func__, hif_state->keep_awake_count); 3023 3024 pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val); 3025 3026 HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val); 
3027 3028 pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val); 3029 3030 HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val); 3031 3032 pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val); 3033 3034 HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val); 3035 3036 pfrm_read_config_word(sc->pdev, PCI_STATUS, &val); 3037 3038 HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val); 3039 3040 pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar); 3041 3042 HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar); 3043 3044 HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__, 3045 hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + 3046 PCIE_SOC_WAKE_ADDRESS)); 3047 3048 HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__, 3049 hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + 3050 RTC_STATE_ADDRESS)); 3051 3052 HIF_ERROR("%s:error, wakeup target", __func__); 3053 hif_msm_pcie_debug_info(sc); 3054 3055 if (!cfg->enable_self_recovery) 3056 QDF_BUG(0); 3057 3058 scn->recovery = true; 3059 3060 if (cbk->set_recovery_in_progress) 3061 cbk->set_recovery_in_progress(cbk->context, true); 3062 3063 pld_is_pci_link_down(sc->dev); 3064 return -EACCES; 3065 } 3066 3067 /* 3068 * For now, we use simple on-demand sleep/wake. 3069 * Some possible improvements: 3070 * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay 3071 * (or perhaps spin/delay for a short while, then convert to sleep/interrupt) 3072 * Careful, though, these functions may be used by 3073 * interrupt handlers ("atomic") 3074 * -Don't use host_reg_table for this code; instead use values directly 3075 * -Use a separate timer to track activity and allow Target to sleep only 3076 * if it hasn't done anything for a while; may even want to delay some 3077 * processing for a short while in order to "batch" (e.g.) transmit 3078 * requests with completion processing into "windows of up time". Costs 3079 * some performance, but improves power utilization. 
 * -On some platforms, it might be possible to eliminate explicit
 * sleep/wakeup. Instead, take a chance that each access works OK. If not,
 * recover from the failure by forcing the Target awake.
 * -Change keep_awake_count to an atomic_t in order to avoid spin lock
 * overhead in some cases. Perhaps this makes more sense when
 * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
 * disabled.
 * -It is possible to compile this code out and simply force the Target
 * to remain awake. That would yield optimal performance at the cost of
 * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
 *
 * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
 */
/**
 * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
 * @scn: hif_softc pointer.
 * @sleep_ok: allow the target to go to sleep (drop a keep-awake vote)
 * @wait_for_it: when waking, poll until the target is verified awake
 *
 * Adjust the target's sleep state by reference-counting keep-awake votes
 * and programming PCIE_SOC_WAKE_ADDRESS accordingly.
 *
 * Return: 0 on success, -EACCES if recovery is in progress or the link
 *         is down, or the result of hif_log_soc_wakeup_timeout() when
 *         the target fails to wake within the timeout.
 */
int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
				      bool sleep_ok, bool wait_for_it)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;
	static int max_delay;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	static int debug;
	if (scn->recovery)
		return -EACCES;

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
		debug = true;
		QDF_ASSERT(0);
		return -EACCES;
	}

	if (debug) {
		wait_for_it = true;
		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
			  __func__);
		QDF_ASSERT(0);
	}

	if (sleep_ok) {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
		hif_state->keep_awake_count--;
		if (hif_state->keep_awake_count == 0) {
			/* Allow sleep */
			hif_state->verified_awake = false;
			hif_state->sleep_ticks = qdf_system_ticks();
		}
		if (hif_state->fake_sleep == false) {
			/* Set the Fake Sleep */
			hif_state->fake_sleep = true;

			/* Start the Sleep Timer */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
	} else {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);

		if (hif_state->fake_sleep) {
			hif_state->verified_awake = true;
		} else {
			if (hif_state->keep_awake_count == 0) {
				/* Force AWAKE */
				hif_write32_mb(sc, pci_addr +
					       PCIE_LOCAL_BASE_ADDRESS +
					       PCIE_SOC_WAKE_ADDRESS,
					       PCIE_SOC_WAKE_V_MASK);
			}
		}
		hif_state->keep_awake_count++;
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);

		if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8Ms */
			int tot_delay = 0;
			int curr_delay = 5;

			/* poll with a slowly growing delay (5us..50us) */
			for (;; ) {
				if (hif_targ_is_awake(scn, pci_addr)) {
					hif_state->verified_awake = true;
					break;
				}
				if (!hif_pci_targ_is_present(scn, pci_addr))
					break;
				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
					return hif_log_soc_wakeup_timeout(sc);

				OS_DELAY(curr_delay);
				tot_delay += curr_delay;

				if (curr_delay < 50)
					curr_delay += 5;
			}

			/*
			 * NB: If Target has to come out of Deep Sleep,
			 * this may take a few Msecs. Typically, though
			 * this delay should be <30us.
			 */
			if (tot_delay > max_delay)
				max_delay = tot_delay;
		}
	}

	if (debug && hif_state->verified_awake) {
		debug = 0;
		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
			  __func__,
			  hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
					PCIE_INTR_ENABLE_ADDRESS),
			  hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
					PCIE_INTR_CAUSE_ADDRESS),
			  hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
					CPU_INTR_ADDRESS),
			  hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
					PCIE_INTR_CLR_ADDRESS),
			  hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
					CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
	}

	return 0;
}

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* read a target register and record the access in the pcie access log */
uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
{
	uint32_t value;
	void *addr;

	addr = scn->mem + offset;
	value = hif_read32_mb(scn, addr);

	{
		unsigned long irq_flags;
		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;

		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
		pcie_access_log[idx].is_write = false;
		pcie_access_log[idx].addr = addr;
		pcie_access_log[idx].value = value;
		pcie_access_log_seqnum++;
		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
	}

	return value;
}

/* write a target register and record the access in the pcie access log */
void
hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
{
	void *addr;

	addr = scn->mem + (offset);
	hif_write32_mb(scn, addr, value);

	{
		unsigned long irq_flags;
		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;

		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
		pcie_access_log[idx].is_write = true;
		pcie_access_log[idx].addr = addr;
		pcie_access_log[idx].value = value;
		pcie_access_log_seqnum++;
		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
	}
}

/**
 * hif_target_dump_access_log() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
void hif_target_dump_access_log(void)
{
	int idx, len, start_idx, cur_idx;
	unsigned long irq_flags;

	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
	/* once the ring has wrapped, start from the oldest entry */
	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
		len = PCIE_ACCESS_LOG_NUM;
		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
	} else {
		len = pcie_access_log_seqnum;
		start_idx = 0;
	}

	for (idx = 0; idx < len; idx++) {
		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
			  __func__, idx,
			  pcie_access_log[cur_idx].seqnum,
			  pcie_access_log[cur_idx].is_write,
			  pcie_access_log[cur_idx].addr,
			  pcie_access_log[cur_idx].value);
	}

	pcie_access_log_seqnum = 0;
	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
}
#endif

#ifndef HIF_AHB
/* AHB stubs: must never be reached on a PCI-only build */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}

int hif_ahb_configure_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}
#endif

/* per-CE MSI interrupt handler: dispatch to the owning copy engine */
static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}
extern const char *ce_name[];

/* map a copy engine id to the MSI irq number assigned to it */
static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
{
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	return pci_scn->ce_msi_irq_num[ce_id];
}

/* hif_ce_srng_msi_irq_disable() - disable the irq for msi
 * @hif_sc: hif context
 * @ce_id: which ce to disable copy complete interrupts for
 *
 * since MSI interrupts are not level based, the system can function
 * without disabling these interrupts. Interrupt mitigation can be
 * added here for better system performance.
 */
static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
{
	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}

static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
{
	pfrm_enable_irq(hif_sc->qdf_dev->dev,
			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}

static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
{
	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}

static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
{
	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}

/**
 * hif_ce_msi_configure_irq() - assign and request the wake and per-CE MSI irqs
 * @scn: hif context
 *
 * Return: 0 on success, errno otherwise
 */
static int hif_ce_msi_configure_irq(struct hif_softc *scn)
{
	int ret;
	int ce_id, irq;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;

	if (!scn->disable_wake_irq) {
		/* do wake irq assignment */
		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
						  &msi_data_count,
						  &msi_data_start,
						  &msi_irq_start);
		if (ret)
			return ret;

		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
						msi_irq_start);

		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
				       hif_wake_interrupt_handler,
				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);

		if (ret)
			return ret;
	}

	/* do ce irq assignments */
	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count,
&msi_data_start, 3389 &msi_irq_start); 3390 if (ret) 3391 goto free_wake_irq; 3392 3393 if (ce_srng_based(scn)) { 3394 scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable; 3395 scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable; 3396 } else { 3397 scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable; 3398 scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable; 3399 } 3400 3401 scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq; 3402 3403 /* needs to match the ce_id -> irq data mapping 3404 * used in the srng parameter configuration 3405 */ 3406 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 3407 unsigned int msi_data = (ce_id % msi_data_count) + 3408 msi_irq_start; 3409 if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR) 3410 continue; 3411 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); 3412 HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)", 3413 __func__, ce_id, msi_data, irq, 3414 &ce_sc->tasklets[ce_id]); 3415 3416 /* implies the ce is also initialized */ 3417 if (!ce_sc->tasklets[ce_id].inited) 3418 continue; 3419 3420 pci_sc->ce_msi_irq_num[ce_id] = irq; 3421 ret = pfrm_request_irq(scn->qdf_dev->dev, 3422 irq, hif_ce_interrupt_handler, 3423 IRQF_SHARED, 3424 ce_name[ce_id], 3425 &ce_sc->tasklets[ce_id]); 3426 if (ret) 3427 goto free_irq; 3428 } 3429 3430 return ret; 3431 3432 free_irq: 3433 /* the request_irq for the last ce_id failed so skip it. 
*/ 3434 while (ce_id > 0 && ce_id < scn->ce_count) { 3435 unsigned int msi_data; 3436 3437 ce_id--; 3438 msi_data = (ce_id % msi_data_count) + msi_irq_start; 3439 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); 3440 pfrm_free_irq(scn->qdf_dev->dev, 3441 irq, &ce_sc->tasklets[ce_id]); 3442 } 3443 3444 free_wake_irq: 3445 if (!scn->disable_wake_irq) { 3446 pfrm_free_irq(scn->qdf_dev->dev, 3447 scn->wake_irq, scn->qdf_dev->dev); 3448 scn->wake_irq = 0; 3449 } 3450 3451 return ret; 3452 } 3453 3454 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group) 3455 { 3456 int i; 3457 struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); 3458 3459 for (i = 0; i < hif_ext_group->numirq; i++) 3460 pfrm_disable_irq_nosync(scn->qdf_dev->dev, 3461 hif_ext_group->os_irq[i]); 3462 } 3463 3464 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group) 3465 { 3466 int i; 3467 struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); 3468 3469 for (i = 0; i < hif_ext_group->numirq; i++) 3470 pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]); 3471 } 3472 3473 /** 3474 * hif_pci_get_irq_name() - get irqname 3475 * This function gives irqnumber to irqname 3476 * mapping. 
 *
 * @irq_no: irq number
 *
 * Return: irq name
 */
const char *hif_pci_get_irq_name(int irq_no)
{
	return "pci-dummy";
}

/**
 * hif_pci_configure_grp_irq() - request irqs for an exec-context group
 * @scn: hif context
 * @hif_ext_group: group whose irq[] list should be requested
 *
 * Installs the group enable/disable/name/completion callbacks, then
 * requests each irq in the group with the group itself as the cookie.
 * os_irq[] mirrors irq[] once a request succeeds.
 *
 * Return: 0 on success, -EFAULT if any request_irq fails (previously
 *         requested group irqs are NOT released here).
 */
int hif_pci_configure_grp_irq(struct hif_softc *scn,
			      struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	int irq = 0;
	int j;

	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
	hif_ext_group->irq_name = &hif_pci_get_irq_name;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq = hif_ext_group->irq[j];

		hif_info("request_irq = %d for grp %d",
			 irq, hif_ext_group->grp_id);
		ret = pfrm_request_irq(
				scn->qdf_dev->dev, irq,
				hif_ext_group_interrupt_handler,
				IRQF_SHARED | IRQF_NO_SUSPEND,
				"wlan_EXT_GRP",
				hif_ext_group);
		if (ret) {
			HIF_ERROR("%s: request_irq failed ret = %d",
				  __func__, ret);
			return -EFAULT;
		}
		hif_ext_group->os_irq[j] = irq;
	}
	hif_ext_group->irq_requested = true;
	return 0;
}

/**
 * hif_configure_irq() - configure interrupt
 *
 * This function configures interrupt(s)
 *
 * @sc: PCIe control struct
 * @hif_hdl: struct HIF_CE_state
 *
 * Tries MSI first (hif_ce_msi_configure_irq); only if MSI setup fails
 * does it fall back to target-specific legacy/AHB line interrupts.
 * In polled mode no irq is configured at all.
 *
 * Return: 0 - for success
 */
int hif_configure_irq(struct hif_softc *scn)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	HIF_TRACE("%s: E", __func__);

	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
		scn->request_irq_done = false;
		return 0;
	}

	hif_init_reschedule_tasklet_work(sc);

	ret = hif_ce_msi_configure_irq(scn);
	if (ret == 0) {
		goto end;
	}

	/* MSI unavailable: fall back to line interrupts per target type */
	switch (scn->target_info.target_type) {
	case TARGET_TYPE_IPQ4019:
		ret = hif_ahb_configure_legacy_irq(sc);
		break;
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		ret = hif_ahb_configure_irq(sc);
		break;
	default:
		ret = hif_pci_configure_legacy_irq(sc);
		break;
	}
	/* NOTE(review): this message also fires for the AHB paths above,
	 * not only for hif_pci_configure_legacy_irq().
	 */
	if (ret < 0) {
		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
			  __func__, ret);
		return ret;
	}
end:
	scn->request_irq_done = true;
	return 0;
}

/**
 * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
 * @scn: hif control structure
 *
 * Sets IRQ bit in LF Timer Status Address to awake peregrine/swift
 * stuck at a polling loop in pcie_address_config in FW
 *
 * Return: none
 */
static void hif_trigger_timer_irq(struct hif_softc *scn)
{
	int tmp;
	/* Trigger IRQ on Peregrine/Swift by setting
	 * IRQ Bit of LF_TIMER 0
	 */
	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
					     SOC_LF_TIMER_STATUS0_ADDRESS));
	/* Set Raw IRQ Bit */
	tmp |= 1;
	/* SOC_LF_TIMER_STATUS0 */
	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
					SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
}

/**
 * hif_target_sync() : ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: none
 */
static void hif_target_sync(struct hif_softc *scn)
{
	/* enable fw + all-CE legacy interrupts so fw knows legacy mode is
	 * in use
	 */
	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
			    PCIE_INTR_ENABLE_ADDRESS),
			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* read to flush pcie write */
	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
			    PCIE_INTR_ENABLE_ADDRESS));

	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS,
		       PCIE_SOC_WAKE_V_MASK);
	/* busy-wait until the SoC reports awake */
	while (!hif_targ_is_awake(scn, scn->mem))
		;

	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;
		int fw_ind = 0;
		int retry_count = 0;
		uint32_t target_type = scn->target_info.target_type;
fw_retry:
		HIF_TRACE("%s: Loop checking FW signal", __func__);
		/* poll FW_IND_INITIALIZED, re-asserting the interrupt
		 * enables every 10 ms, for up to ~5 s
		 */
		while (1) {
			fw_ind = hif_read32_mb(scn, scn->mem +
					       FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
			/* read to flush pcie write */
			(void)hif_read32_mb(scn, scn->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));

			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			/* AR9888 fw can be stuck polling; kick it via the
			 * LF timer irq and retry (at most twice)
			 */
			if (target_type == TARGET_TYPE_AR9888 &&
			    retry_count++ < 2) {
				hif_trigger_timer_irq(scn);
				wait_limit = 500;
				goto fw_retry;
			}
			HIF_TRACE("%s: FW signal timed out",
				  __func__);
			qdf_assert_always(0);
		} else {
			HIF_TRACE("%s: Got FW signal, retries = %x",
				  __func__, 500-wait_limit);
		}
	}
	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}

/* Populate BAR info from the platform driver (pld) for pld-based targets */
static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
				     struct device *dev)
{
	struct pld_soc_info info;

	pld_get_soc_info(dev, &info);
	sc->mem = info.v_addr;
	sc->ce_sc.ol_sc.mem = info.v_addr;
	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
}

/* Non-pld targets get their BAR info during hif_enable_pci instead */
static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
				       struct device *dev)
{}

/**
 * hif_is_pld_based_target() - is this device managed by the platform driver
 * @sc: pci context
 * @device_id: PCI device id
 *
 * Return: true only if platform-driver support is present AND the device
 *         id is one of the pld-managed chips.
 */
static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
				    int device_id)
{
	if (!pld_have_platform_driver_support(sc->dev))
		return false;

	switch (device_id) {
	case QCA6290_DEVICE_ID:
	case QCN9000_DEVICE_ID:
	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case AR6320_DEVICE_ID:
	case QCN7605_DEVICE_ID:
		return true;
	}
	return false;
}

/* Select pld vs nopld variants of the enable/deinit/soc-info ops */
static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
					   int device_id)
{
	if (hif_is_pld_based_target(sc, device_id)) {
		sc->hif_enable_pci = hif_enable_pci_pld;
		sc->hif_pci_deinit = hif_pci_deinit_pld;
		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
	} else {
		sc->hif_enable_pci = hif_enable_pci_nopld;
		sc->hif_pci_deinit = hif_pci_deinit_nopld;
		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
	}
}

#ifdef HIF_REG_WINDOW_SUPPORT
/* Register windowing: only QCN7605 needs windowed register access */
static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
					       u32 target_type)
{
	switch (target_type) {
	case TARGET_TYPE_QCN7605:
		sc->use_register_windowing = true;
		qdf_spinlock_create(&sc->register_access_lock);
		sc->register_window = 0;
		break;
	default:
		sc->use_register_windowing = false;
	}
}
#else
static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
					       u32 target_type)
{
	sc->use_register_windowing = false;
}
#endif

/**
 * hif_enable_bus(): enable bus
 *
 * This function enables the bus
 *
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer
 * bid: bus id pointer
 * type: enum hif_enable_type such as
 * HIF_ENABLE_TYPE_PROBE
 * Return: QDF_STATUS
 */
QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
			      struct device *dev, void *bdev,
			      const struct hif_bus_id *bid,
			      enum hif_enable_type type)
{
	int ret = 0;
	uint32_t hif_type, target_type;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
	uint16_t revision_id = 0;
	int probe_again = 0;
	struct pci_dev *pdev = bdev;
	const struct pci_device_id *id = (const struct pci_device_id *)bid;
	struct hif_target_info *tgt_info;

	if (!ol_sc) {
		HIF_ERROR("%s: hif_ctx is NULL", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
		  __func__, hif_get_conparam(ol_sc), id->device);

	sc->pdev = pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->device;
	sc->cacheline_sz = dma_get_cache_alignment();
	tgt_info = hif_get_target_info_handle(hif_hdl);
	hif_pci_init_deinit_ops_attach(sc, id->device);
	sc->hif_pci_get_soc_info(sc, dev);
again:
	ret = sc->hif_enable_pci(sc, pdev, id);
	if (ret < 0) {
		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
			  __func__, ret);
		goto err_enable_pci;
	}
	HIF_TRACE("%s: hif_enable_pci done", __func__);

	/* Temporary FIX: disable ASPM on peregrine.
	 * Will be removed after the OTP is programmed
	 */
	hif_disable_power_gating(hif_hdl);

	device_disable_async_suspend(&pdev->dev);
	/* PCI config offset 0x08: revision id */
	pfrm_read_config_word(pdev, 0x08, &revision_id);

	ret = hif_get_device_type(id->device, revision_id,
				  &hif_type, &target_type);
	if (ret < 0) {
		HIF_ERROR("%s: invalid device id/revision_id", __func__);
		goto err_tgtstate;
	}
	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
		  __func__, hif_type, target_type);

	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);

	hif_pci_init_reg_windowing_support(sc, target_type);

	tgt_info->target_type = target_type;

	if (ce_srng_based(ol_sc)) {
		HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__);
	} else {
		ret = hif_pci_probe_tgt_wakeup(sc);
		if (ret < 0) {
			HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
				  __func__, ret);
			if (ret == -EAGAIN)
				probe_again++;
			goto err_tgtstate;
		}
		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
	}

	if (!ol_sc->mem_pa) {
		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
		ret = -EIO;
		goto err_tgtstate;
	}

	if (!ce_srng_based(ol_sc)) {
		hif_target_sync(ol_sc);

		if (ADRASTEA_BU)
			hif_vote_link_up(hif_hdl);
	}

	return 0;

err_tgtstate:
	hif_disable_pci(sc);
	sc->pci_enabled = false;
	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
	return QDF_STATUS_E_ABORTED;

err_enable_pci:
	/* only -EAGAIN from hif_enable_pci/tgt_wakeup triggers a reprobe */
	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
		int delay_time;

		HIF_INFO("%s: pci reprobe", __func__);
		/* 10, 40, 90, 100, 100, ...
		 * NOTE(review): max() makes the actual delay never less
		 * than 100 ms (100, 100, 100, 160, ...) — confirm whether
		 * min() was intended to match this series.
		 */
		delay_time = max(100, 10 * (probe_again * probe_again));
		qdf_mdelay(delay_time);
		goto again;
	}
	/* NOTE(review): returns a negative errno through a QDF_STATUS
	 * return type on this path — callers appear to treat nonzero as
	 * failure; confirm before changing.
	 */
	return ret;
}

/**
 * hif_pci_irq_enable() - ce_irq_enable
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Clears @ce_id from ce_irq_summary; once all CEs have completed, the
 * legacy PCI line interrupts are re-enabled (unless the target is in
 * reset or the link is suspended).  Pairs with the
 * Q_TARGET_ACCESS_BEGIN done in hif_pci_irq_disable().
 *
 * Return: void
 */
void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t tmp = 1 << ce_id;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	qdf_spin_lock_irqsave(&sc->irq_lock);
	scn->ce_irq_summary &= ~tmp;
	if (scn->ce_irq_summary == 0) {
		/* Enable Legacy PCI line interrupts */
		if (LEGACY_INTERRUPTS(sc) &&
		    (scn->target_status != TARGET_STATUS_RESET) &&
		    (!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn, scn->mem +
				       (SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS),
				       HOST_GROUP0_MASK);

			/* read back to flush the pcie write */
			hif_read32_mb(scn, scn->mem +
				      (SOC_CORE_BASE_ADDRESS |
				       PCIE_INTR_ENABLE_ADDRESS));
		}
	}
	if (scn->hif_init_done == true)
		Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_irqrestore(&sc->irq_lock);

	/* check for missed firmware crash */
	hif_fw_interrupt_handler(0, scn);
}

/**
 * hif_pci_irq_disable() - ce_irq_disable
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * only applicable to legacy copy engine...
 *
 * Return: void
 */
void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
{
	/* For Rome only need to wake up target */
	/* target access is maintained until interrupts are re-enabled */
	Q_TARGET_ACCESS_BEGIN(scn);
}

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_pm_runtime_get_sync() - do a get operation with sync resume
 *
 * A get operation will prevent a runtime suspend until a corresponding
 * put is done.
 * Unlike hif_pm_runtime_get(), this API will do a sync
 * resume instead of requesting a resume if it is runtime PM suspended
 * so it can only be called in non-atomic context.
 *
 * @hif_ctx: pointer of HIF context
 *
 * Return: 0 if it is runtime PM resumed otherwise an error code.
 */
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int pm_state;
	int ret;

	if (!sc)
		return -EINVAL;

	if (!pm_runtime_enabled(sc->dev))
		return 0;

	pm_state = qdf_atomic_read(&sc->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);

	sc->pm_stats.runtime_get++;
	ret = pm_runtime_get_sync(sc->dev);

	/* Get can return 1 if the device is already active, just return
	 * success in that case.
	 */
	if (ret > 0)
		ret = 0;

	if (ret) {
		/* even a failed get incremented the usage count: undo it */
		sc->pm_stats.runtime_get_err++;
		hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
			qdf_atomic_read(&sc->pm_state), ret);
		hif_pm_runtime_put(hif_ctx);
	}

	return ret;
}

/**
 * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
 *
 * This API will do a runtime put operation followed by a sync suspend if usage
 * count is 0 so it can only be called in non-atomic context.
 *
 * @hif_ctx: pointer of HIF context
 *
 * Return: 0 for success otherwise an error code
 */
int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int usage_count, pm_state;
	char *err = NULL;

	if (!sc)
		return -EINVAL;

	if (!pm_runtime_enabled(sc->dev))
		return 0;

	/* sanity-check the usage count before dropping it */
	usage_count = atomic_read(&sc->dev->power.usage_count);
	if (usage_count == 1) {
		pm_state = qdf_atomic_read(&sc->pm_state);
		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
			err = "Ignore unexpected Put as runtime PM is disabled";
	} else if (usage_count == 0) {
		err = "Put without a Get Operation";
	}

	if (err) {
		hif_pci_runtime_pm_warn(sc, err);
		return -EINVAL;
	}

	sc->pm_stats.runtime_put++;
	return pm_runtime_put_sync_suspend(sc->dev);
}

/**
 * hif_pm_runtime_request_resume() - request an async runtime resume
 * @hif_ctx: pointer of HIF context
 *
 * Records the caller in pm_stats and asks the platform layer to resume;
 * does not take a get, so it does not block a later suspend.
 *
 * Return: result of hif_pm_request_resume(), 0 if runtime PM is disabled
 */
int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int pm_state;

	if (!sc)
		return -EINVAL;

	if (!pm_runtime_enabled(sc->dev))
		return 0;

	pm_state = qdf_atomic_read(&sc->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		HIF_INFO("Runtime PM resume is requested by %ps",
			 (void *)_RET_IP_);

	sc->pm_stats.request_resume++;
	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;

	return hif_pm_request_resume(sc->dev);
}

/**
 * hif_pm_runtime_mark_last_busy() - note device activity for autosuspend
 * @hif_ctx: pointer of HIF context
 *
 * Records the caller and a timestamp for debugging, then marks the device
 * busy so the autosuspend timer restarts.
 *
 * Return: void
 */
void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	sc->pm_stats.last_busy_marker = (void *)_RET_IP_;
	sc->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();

	return pm_runtime_mark_last_busy(sc->dev);
}

void
hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	if (!pm_runtime_enabled(sc->dev))
		return;

	/* bump the usage count without waking the device */
	sc->pm_stats.runtime_get++;
	pm_runtime_get_noresume(sc->dev);
}

/**
 * hif_pm_runtime_get() - do a get opperation on the device
 *
 * A get opperation will prevent a runtime suspend until a
 * corresponding put is done. This api should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
 *
 * return: success if the bus is up and a get has been issued
 *         otherwise an error code.
 */
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int ret;
	int pm_state;

	if (!scn) {
		hif_err("Could not do runtime get, scn is null");
		return -EFAULT;
	}

	if (!pm_runtime_enabled(sc->dev))
		return 0;

	pm_state = qdf_atomic_read(&sc->pm_state);

	if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
	    pm_state == HIF_PM_RUNTIME_STATE_NONE) {
		sc->pm_stats.runtime_get++;
		ret = __hif_pm_runtime_get(sc->dev);

		/* Get can return 1 if the device is already active, just return
		 * success in that case
		 */
		if (ret > 0)
			ret = 0;

		/* on any failure the count was still incremented: undo it */
		if (ret)
			hif_pm_runtime_put(hif_ctx);

		if (ret && ret != -EINPROGRESS) {
			sc->pm_stats.runtime_get_err++;
			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
				qdf_atomic_read(&sc->pm_state), ret);
		}

		return ret;
	}

	/* bus is suspended/suspending: only request a resume, no get */
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);
		ret = -EAGAIN;
	} else {
		ret = -EBUSY;
	}

	sc->pm_stats.request_resume++;
	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
	hif_pm_request_resume(sc->dev);

	return ret;
}

/**
 * hif_pm_runtime_put() - do a put opperation on the device
 *
 * A put opperation will allow a runtime suspend after a corresponding
 * get was done. This api should be used when sending data.
 *
 * This api will return a failure if runtime pm is stopped
 * This api will return failure if it would decrement the usage count below 0.
 *
 * return: QDF_STATUS_SUCCESS if the put is performed
 */
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int pm_state, usage_count;
	char *error = NULL;

	if (!scn) {
		HIF_ERROR("%s: Could not do runtime put, scn is null",
			  __func__);
		return -EFAULT;
	}

	if (!pm_runtime_enabled(sc->dev))
		return 0;

	/* refuse a put that would drive the usage count below zero */
	usage_count = atomic_read(&sc->dev->power.usage_count);

	if (usage_count == 1) {
		pm_state = qdf_atomic_read(&sc->pm_state);

		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
			error = "Ignoring unexpected put when runtime pm is disabled";

	} else if (usage_count == 0) {
		error = "PUT Without a Get Operation";
	}

	if (error) {
		hif_pci_runtime_pm_warn(sc, error);
		return -EINVAL;
	}

	sc->pm_stats.runtime_put++;

	/* restart the autosuspend timer before dropping the count */
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_pm_runtime_put_auto(sc->dev);

	return 0;
}


/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 * reason
 * @hif_sc: pci context
 * @lock: runtime_pm lock being acquired
 *
 * Return 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
		*hif_sc, struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	/*
	 * We shouldn't set context->timeout to zero here when the
	 * context is active, as we would otherwise have a case where
	 * Timeout APIs for the same context are called back to back.
	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
	 * context->timeout is set to zero in hif_pm_runtime_prevent_suspend
	 * instead, to ensure the timeout version is no longer active and
	 * the list entry of this context is deleted during allow suspend.
	 */
	if (lock->active)
		return 0;

	ret = __hif_pm_runtime_get(hif_sc->dev);

	/*
	 * ret can be -EINPROGRESS if the runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING.  Any other negative value is an error.
	 * We should not do a runtime_put here: allow-suspend is called
	 * later with this context and decrements the usage count there,
	 * so suspend stays prevented until then.
	 */

	if (ret < 0 && ret != -EINPROGRESS) {
		hif_sc->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(hif_sc,
					"Prevent Suspend Runtime PM Error");
	}

	/* NOTE(review): the lock is marked active and queued even on the
	 * error path above — confirm this is intentional (allow-suspend
	 * will still balance the usage count).
	 */
	hif_sc->prevent_suspend_cnt++;

	lock->active = true;

	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);

	hif_sc->pm_stats.prevent_suspend++;

	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&hif_sc->pm_state)),
		  ret);

	return ret;
}

/* Release one prevent-suspend lock: dequeue it, drop the usage count and
 * rearm the autosuspend timer.  Caller holds hif_sc->runtime_lock.
 */
static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
					  struct hif_pm_runtime_lock *lock)
{
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(hif_sc);
	int ret = 0;
	int usage_count;

	if (hif_sc->prevent_suspend_cnt == 0)
		return ret;

	if (!lock->active)
		return ret;

	usage_count = atomic_read(&hif_sc->dev->power.usage_count);

	/*
	 * During driver unload, the platform driver increments the usage
	 * count to prevent any runtime suspend from being triggered.
	 * So during driver load, in the HIF_PM_RUNTIME_STATE_NONE state,
	 * the usage_count should be one. Ideally this shouldn't happen, as
	 * context->active should be set for allow suspend to proceed.
	 * Handle this case here to prevent any failures.
	 */
	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
	     && usage_count == 1) || usage_count == 0) {
		hif_pci_runtime_pm_warn(hif_sc,
					"Allow without a prevent suspend");
		return -EINVAL;
	}

	list_del(&lock->list);

	hif_sc->prevent_suspend_cnt--;

	lock->active = false;
	lock->timeout = 0;

	hif_pm_runtime_mark_last_busy(hif_ctx);
	ret = hif_pm_runtime_put_auto(hif_sc->dev);

	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&hif_sc->pm_state)),
		  ret);

	hif_sc->pm_stats.allow_suspend++;
	return ret;
}

/**
 * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout
 * @data: callback data that is the pci context
 *
 * if runtime locks are acquired with a timeout, this function releases
 * the locks when the last runtime lock expires.
 *
 * dummy implementation until lock acquisition is implemented.
 */
static void hif_pm_runtime_lock_timeout_fn(void *data)
{
	struct hif_pci_softc *hif_sc = data;
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;

	spin_lock_bh(&hif_sc->runtime_lock);

	timer_expires = hif_sc->runtime_timer_expires;

	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                         CPU1 (timeout function)
	 * ----                         ----------------------
	 * spin_lock_irq
	 *                              timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                              spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		hif_sc->runtime_timer_expires = 0;
		/* release every lock that was taken with a timeout */
		list_for_each_entry_safe(context, temp,
					 &hif_sc->prevent_suspend_list, list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(hif_sc, context);
				hif_sc->pm_stats.allow_suspend_timeout++;
			}
		}
	}

	spin_unlock_bh(&hif_sc->runtime_lock);
}

/**
 * hif_pm_runtime_prevent_suspend() - take a (non-timeout) runtime PM lock
 * @ol_sc: HIF context
 * @data: runtime PM lock returned by hif_runtime_lock_init()
 *
 * Return: 0 on success or when runtime PM is disabled, -EINVAL on bad lock
 */
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *data)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_pm_runtime_lock *context = data;

	if (!sc->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	/* takes a BH spinlock and may sync with PM core: not irq-safe */
	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&hif_sc->runtime_lock);
	/* clear any timeout left over from a previous timed acquisition */
	context->timeout = 0;
	__hif_pm_runtime_prevent_suspend(hif_sc, context);
	spin_unlock_bh(&hif_sc->runtime_lock);

	return 0;
}

/**
 * hif_pm_runtime_allow_suspend() - release a runtime PM lock
 * @ol_sc: HIF context
 * @data: runtime PM lock previously passed to prevent_suspend
 *
 * Also tears down the shared timeout timer once no lock remains.
 *
 * Return: 0 on success or when runtime PM is disabled, -EINVAL on bad lock
 */
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *data)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_pm_runtime_lock *context = data;

	if (!sc->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&hif_sc->runtime_lock);

	__hif_pm_runtime_allow_suspend(hif_sc, context);

	/* The list can be empty as well in cases where
	 * we have one context in the list and the allow
	 * suspend came before the timer expires and we delete
	 * context above from the list.
	 * When list is empty prevent_suspend count will be zero.
	 */
	if (hif_sc->prevent_suspend_cnt == 0 &&
	    hif_sc->runtime_timer_expires > 0) {
		qdf_timer_free(&hif_sc->runtime_timer);
		hif_sc->runtime_timer_expires = 0;
	}

	spin_unlock_bh(&hif_sc->runtime_lock);

	return 0;
}

/**
 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
 * @ol_sc: HIF context
 * @lock: which lock is being acquired
 * @delay: Timeout in milliseconds
 *
 * Prevent runtime suspend with a timeout after which runtime suspend would be
 * allowed. This API uses a single timer to allow the suspend and timer is
 * modified if the timeout is changed before timer fires.
 * If the timeout is less than autosuspend_delay then use mark_last_busy instead
 * of starting the timer.
 *
 * It is wise to try not to use this API and correct the design if possible.
 *
 * Return: 0 on success and negative error code on failure
 */
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);

	int ret = 0;
	unsigned long expires;
	struct hif_pm_runtime_lock *context = lock;

	if (hif_is_load_or_unload_in_progress(sc)) {
		HIF_ERROR("%s: Load/unload in progress, ignore!",
			  __func__);
		return -EINVAL;
	}

	if (hif_is_recovery_in_progress(sc)) {
		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
		return -EINVAL;
	}

	if (!sc->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	/*
	 * Don't use internal timer if the timeout is less than auto suspend
	 * delay.
	 */
	if (delay <= hif_sc->dev->power.autosuspend_delay) {
		hif_pm_request_resume(hif_sc->dev);
		hif_pm_runtime_mark_last_busy(ol_sc);
		return ret;
	}

	expires = jiffies + msecs_to_jiffies(delay);
	/* avoid the 0 sentinel value, which means "timer not armed" */
	expires += !expires;

	spin_lock_bh(&hif_sc->runtime_lock);

	context->timeout = delay;
	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
	hif_sc->pm_stats.prevent_suspend_timeout++;

	/* Modify the timer only if new timeout is after already configured
	 * timeout
	 */
	if (time_after(expires, hif_sc->runtime_timer_expires)) {
		qdf_timer_mod(&hif_sc->runtime_timer, delay);
		hif_sc->runtime_timer_expires = expires;
	}

	spin_unlock_bh(&hif_sc->runtime_lock);

	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&hif_sc->pm_state)),
		  delay, ret);

	return ret;
}

/**
 * hif_runtime_lock_init() - API to
initialize Runtime PM context
 * @lock: caller-owned handle; on success its ->lock field points at the
 *        newly allocated context
 * @name: Context name (may be NULL; "Default" is used in that case)
 *
 * This API initializes the Runtime PM context of the caller and
 * return the pointer.
 *
 * Return: 0 on success, -ENOMEM if the context allocation fails
 */
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{
	struct hif_pm_runtime_lock *context;

	HIF_INFO("Initializing Runtime PM wakelock %s", name);

	context = qdf_mem_malloc(sizeof(*context));
	if (!context)
		return -ENOMEM;

	/* The name string is referenced, not copied: it must outlive the
	 * context (callers typically pass string literals).
	 */
	context->name = name ? name : "Default";
	lock->lock = context;

	return 0;
}

/**
 * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
 * @hif_ctx: HIF context (may be NULL; the list cleanup is then skipped)
 * @data: Runtime PM context
 *
 * Return: void
 */
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *data)
{
	struct hif_pm_runtime_lock *context = data;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!context) {
		HIF_ERROR("Runtime PM wakelock context is NULL");
		return;
	}

	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);

	/*
	 * Ensure to delete the context list entry and reduce the usage count
	 * before freeing the context if context is active.
	 */
	if (sc) {
		spin_lock_bh(&sc->runtime_lock);
		__hif_pm_runtime_allow_suspend(sc, context);
		spin_unlock_bh(&sc->runtime_lock);
	}

	qdf_mem_free(context);
}

/**
 * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
 * @hif_ctx: HIF context
 *
 * Return: true for runtime suspended, otherwise false
 */
bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	/* NOTE(review): no NULL check on sc here, unlike
	 * hif_pm_runtime_mark_dp_rx_busy() below — confirm all callers
	 * guarantee a valid HIF context.
	 */
	return qdf_atomic_read(&sc->pm_state) ==
		HIF_PM_RUNTIME_STATE_SUSPENDED;
}

/**
 * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
 * @hif_ctx: HIF context
 *
 * monitor_wake_intr variable can be used to indicate if driver expects wake
 * MSI for runtime PM
 *
 * Return: monitor_wake_intr variable
 */
int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	return qdf_atomic_read(&sc->monitor_wake_intr);
}

/**
 * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
 * @hif_ctx: HIF context
 * @val: value to set
 *
 * monitor_wake_intr variable can be used to indicate if driver expects wake
 * MSI for runtime PM
 *
 * Return: void
 */
void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
					  int val)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	qdf_atomic_set(&sc->monitor_wake_intr, val);
}

/**
 * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark my data path
 * @hif_ctx: HIF context
 *
 * Return: void
 */
void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	/* Record that the DP RX path was the most recent busy marker,
	 * with a timestamp, then kick the generic last-busy bookkeeping.
	 */
	qdf_atomic_set(&sc->pm_dp_rx_busy, 1);
	sc->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();

	hif_pm_runtime_mark_last_busy(hif_ctx);
}

/**
 * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy by dp rx
 * @hif_ctx: HIF context
 *
 * Return: dp rx busy set value
 */
int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return 0;

	return qdf_atomic_read(&sc->pm_dp_rx_busy);
}

/**
 * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
 * @hif_ctx: HIF context
 *
 * Return: timestamp of last mark busy by dp rx
 */
qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return 0;

	return sc->dp_last_busy_timestamp;
}

#endif /* FEATURE_RUNTIME_PM */

/**
 * hif_pci_legacy_map_ce_to_irq() - map a copy engine to its IRQ number
 * @scn: HIF context
 * @ce_id: copy engine id (unused: legacy interrupts share one IRQ line)
 *
 * Return: the single legacy PCI IRQ number
 */
int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
{
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	/* legacy case only has one irq */
	return pci_scn->irq;
}

/**
 * hif_pci_addr_in_boundary() - validate a target address before diag access
 * @scn: HIF context
 * @offset: target address/offset to be read
 *
 * Return: 0 if the offset is acceptable, -EINVAL otherwise
 */
int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct hif_target_info *tgt_info;

	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));

	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
		/*
		 * Need to consider offset's memtype for QCA6290/QCA8074,
		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
		 * well initialized/defined.
		 */
		return 0;
	}

	/* NOTE(review): the DRAM upper bound allows offset ==
	 * DRAM_BASE_ADDRESS + DRAM_SIZE even though sizeof(unsigned int)
	 * bytes will be read from there — confirm whether the bound should
	 * account for the read width.
	 */
	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
	    || (offset + sizeof(unsigned int) <= sc->mem_len)) {
		return 0;
	}

	HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n",
		  offset, (uint32_t)(offset + sizeof(unsigned int)),
		  sc->mem_len);

	return -EINVAL;
}

/**
 * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
 * @scn: hif context
 *
 * Return: true if soc needs driver bmi otherwise false
 */
bool hif_pci_needs_bmi(struct hif_softc *scn)
{
	return !ce_srng_based(scn);
}

#ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - wake the target via MHI, then complete a
 *	register handshake through the PCIe scratch/wake registers
 * @hif_handle: HIF context
 *
 * Return: 0 on success, -EINVAL on wake failure, -ETIMEDOUT if the
 *	scratch-register handshake does not complete in time
 */
int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
{
	uint32_t timeout = 0, value;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	if (pld_force_wake_request(scn->qdf_dev->dev)) {
		hif_err("force wake request send failed");
		return -EINVAL;
	}

	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
	/* Poll until the device reports awake or the budget is spent. */
	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
		qdf_mdelay(FORCE_WAKE_DELAY_MS);
		timeout += FORCE_WAKE_DELAY_MS;
	}

	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
		hif_err("Unable to wake up mhi");
		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
		return -EINVAL;
	}
	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
	/* Clear the scratch register, then assert SOC_WAKE; the target is
	 * expected to write the scratch register back non-zero as the ack.
	 */
	hif_write32_mb(scn,
		       scn->mem +
		       PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG,
		       0);
	hif_write32_mb(scn,
		       scn->mem +
		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
		       1);

	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
	/*
	 * do not reset the timeout
	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
	 */
	do {
		value =
		hif_read32_mb(scn,
			      scn->mem +
			      PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
		if (value)
			break;
		qdf_mdelay(FORCE_WAKE_DELAY_MS);
		timeout += FORCE_WAKE_DELAY_MS;
	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);

	if (!value) {
		hif_err("failed handshake mechanism");
		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
		return -ETIMEDOUT;
	}

	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);

	return 0;
}

/**
 * hif_force_wake_release() - drop the MHI wake vote and de-assert SOC_WAKE
 * @hif_handle: HIF context
 *
 * Return: 0 on success, or the pld_force_wake_release() error code
 */
int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
{
	int ret;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	ret = pld_force_wake_release(scn->qdf_dev->dev);
	if (ret) {
		hif_err("force wake release failure");
		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
		return ret;
	}

	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
	/* De-assert the SOC_WAKE register set by hif_force_wake_request(). */
	hif_write32_mb(scn,
		       scn->mem +
		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
		       0);
	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
	return 0;
}

/**
 * hif_print_pci_stats() - dump the force-wake statistics counters
 * @pci_handle: PCI-specific HIF context holding the stats
 *
 * Return: void
 */
void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
{
	hif_debug("mhi_force_wake_request_vote: %d",
		  pci_handle->stats.mhi_force_wake_request_vote);
	hif_debug("mhi_force_wake_failure: %d",
		  pci_handle->stats.mhi_force_wake_failure);
	hif_debug("mhi_force_wake_success: %d",
		  pci_handle->stats.mhi_force_wake_success);
	hif_debug("soc_force_wake_register_write_success: %d",
		  pci_handle->stats.soc_force_wake_register_write_success);
	hif_debug("soc_force_wake_failure: %d",
		  pci_handle->stats.soc_force_wake_failure);
	hif_debug("soc_force_wake_success: %d",
		  pci_handle->stats.soc_force_wake_success);
	hif_debug("mhi_force_wake_release_failure: %d",
		  pci_handle->stats.mhi_force_wake_release_failure);
	hif_debug("mhi_force_wake_release_success: %d",
pci_handle->stats.mhi_force_wake_release_success); 4779 hif_debug("oc_force_wake_release_success: %d", 4780 pci_handle->stats.soc_force_wake_release_success); 4781 } 4782 #endif /* FORCE_WAKE */ 4783 4784