1 /* 2 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <linux/pci.h> 20 #include <linux/slab.h> 21 #include <linux/interrupt.h> 22 #include <linux/if_arp.h> 23 #ifdef CONFIG_PCI_MSM 24 #include <linux/msm_pcie.h> 25 #endif 26 #include "hif_io32.h" 27 #include "if_pci.h" 28 #include "hif.h" 29 #include "target_type.h" 30 #include "hif_main.h" 31 #include "ce_main.h" 32 #include "ce_api.h" 33 #include "ce_internal.h" 34 #include "ce_reg.h" 35 #include "ce_bmi.h" 36 #include "regtable.h" 37 #include "hif_hw_version.h" 38 #include <linux/debugfs.h> 39 #include <linux/seq_file.h> 40 #include "qdf_status.h" 41 #include "qdf_atomic.h" 42 #include "pld_common.h" 43 #include "mp_dev.h" 44 #include "hif_debug.h" 45 46 #include "if_pci_internal.h" 47 #include "ce_tasklet.h" 48 #include "targaddrs.h" 49 #include "hif_exec.h" 50 51 #include "pci_api.h" 52 #include "ahb_api.h" 53 54 /* Maximum ms timeout for host to wake up target */ 55 #define PCIE_WAKE_TIMEOUT 1000 56 #define RAMDUMP_EVENT_TIMEOUT 2500 57 58 /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent 59 * PCIe data bus error 60 * As workaround for this issue - changing the reset sequence to 61 * use TargetCPU warm 
reset * instead of SOC_GLOBAL_RESET 62 */ 63 #define CPU_WARM_RESET_WAR 64 65 /* 66 * Top-level interrupt handler for all PCI interrupts from a Target. 67 * When a block of MSI interrupts is allocated, this top-level handler 68 * is not used; instead, we directly call the correct sub-handler. 69 */ 70 struct ce_irq_reg_table { 71 uint32_t irq_enable; 72 uint32_t irq_status; 73 }; 74 75 #ifndef QCA_WIFI_3_0_ADRASTEA 76 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc) 77 { 78 } 79 #else 80 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc) 81 { 82 struct hif_softc *scn = HIF_GET_SOFTC(sc); 83 unsigned int target_enable0, target_enable1; 84 unsigned int target_cause0, target_cause1; 85 86 target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0); 87 target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1); 88 target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0); 89 target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1); 90 91 if ((target_enable0 & target_cause0) || 92 (target_enable1 & target_cause1)) { 93 hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0); 94 hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0); 95 96 if (scn->notice_send) 97 pld_intr_notify_q6(sc->dev); 98 } 99 } 100 #endif 101 102 103 /** 104 * pci_dispatch_ce_irq() - pci_dispatch_ce_irq 105 * @scn: scn 106 * 107 * Return: N/A 108 */ 109 static void pci_dispatch_interrupt(struct hif_softc *scn) 110 { 111 uint32_t intr_summary; 112 int id; 113 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 114 115 if (scn->hif_init_done != true) 116 return; 117 118 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 119 return; 120 121 intr_summary = CE_INTERRUPT_SUMMARY(scn); 122 123 if (intr_summary == 0) { 124 if ((scn->target_status != TARGET_STATUS_RESET) && 125 (!qdf_atomic_read(&scn->link_suspended))) { 126 127 hif_write32_mb(scn, scn->mem + 128 (SOC_CORE_BASE_ADDRESS | 129 PCIE_INTR_ENABLE_ADDRESS), 130 HOST_GROUP0_MASK); 
131 132 hif_read32_mb(scn, scn->mem + 133 (SOC_CORE_BASE_ADDRESS | 134 PCIE_INTR_ENABLE_ADDRESS)); 135 } 136 Q_TARGET_ACCESS_END(scn); 137 return; 138 } 139 Q_TARGET_ACCESS_END(scn); 140 141 scn->ce_irq_summary = intr_summary; 142 for (id = 0; intr_summary && (id < scn->ce_count); id++) { 143 if (intr_summary & (1 << id)) { 144 intr_summary &= ~(1 << id); 145 ce_dispatch_interrupt(id, &hif_state->tasklets[id]); 146 } 147 } 148 } 149 150 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg) 151 { 152 struct hif_pci_softc *sc = (struct hif_pci_softc *)arg; 153 struct hif_softc *scn = HIF_GET_SOFTC(sc); 154 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg); 155 156 volatile int tmp; 157 uint16_t val = 0; 158 uint32_t bar0 = 0; 159 uint32_t fw_indicator_address, fw_indicator; 160 bool ssr_irq = false; 161 unsigned int host_cause, host_enable; 162 163 if (LEGACY_INTERRUPTS(sc)) { 164 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 165 return IRQ_HANDLED; 166 167 if (ADRASTEA_BU) { 168 host_enable = hif_read32_mb(sc, sc->mem + 169 PCIE_INTR_ENABLE_ADDRESS); 170 host_cause = hif_read32_mb(sc, sc->mem + 171 PCIE_INTR_CAUSE_ADDRESS); 172 if (!(host_enable & host_cause)) { 173 hif_pci_route_adrastea_interrupt(sc); 174 return IRQ_HANDLED; 175 } 176 } 177 178 /* Clear Legacy PCI line interrupts 179 * IMPORTANT: INTR_CLR regiser has to be set 180 * after INTR_ENABLE is set to 0, 181 * otherwise interrupt can not be really cleared 182 */ 183 hif_write32_mb(sc, sc->mem + 184 (SOC_CORE_BASE_ADDRESS | 185 PCIE_INTR_ENABLE_ADDRESS), 0); 186 187 hif_write32_mb(sc, sc->mem + 188 (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS), 189 ADRASTEA_BU ? 
190 (host_enable & host_cause) : 191 HOST_GROUP0_MASK); 192 193 if (ADRASTEA_BU) 194 hif_write32_mb(sc, sc->mem + 0x2f100c, 195 (host_cause >> 1)); 196 197 /* IMPORTANT: this extra read transaction is required to 198 * flush the posted write buffer 199 */ 200 if (!ADRASTEA_BU) { 201 tmp = 202 hif_read32_mb(sc, sc->mem + 203 (SOC_CORE_BASE_ADDRESS | 204 PCIE_INTR_ENABLE_ADDRESS)); 205 206 if (tmp == 0xdeadbeef) { 207 HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!", 208 __func__); 209 210 pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val); 211 HIF_ERROR("%s: PCI Vendor ID = 0x%04x", 212 __func__, val); 213 214 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val); 215 HIF_ERROR("%s: PCI Device ID = 0x%04x", 216 __func__, val); 217 218 pci_read_config_word(sc->pdev, PCI_COMMAND, &val); 219 HIF_ERROR("%s: PCI Command = 0x%04x", __func__, 220 val); 221 222 pci_read_config_word(sc->pdev, PCI_STATUS, &val); 223 HIF_ERROR("%s: PCI Status = 0x%04x", __func__, 224 val); 225 226 pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, 227 &bar0); 228 HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__, 229 bar0); 230 231 HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x", 232 __func__, 233 hif_read32_mb(sc, sc->mem + 234 PCIE_LOCAL_BASE_ADDRESS 235 + RTC_STATE_ADDRESS)); 236 HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x", 237 __func__, 238 hif_read32_mb(sc, sc->mem + 239 PCIE_LOCAL_BASE_ADDRESS 240 + PCIE_SOC_WAKE_ADDRESS)); 241 HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x", 242 __func__, 243 hif_read32_mb(sc, sc->mem + 0x80008), 244 hif_read32_mb(sc, sc->mem + 0x8000c)); 245 HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x", 246 __func__, 247 hif_read32_mb(sc, sc->mem + 0x80010), 248 hif_read32_mb(sc, sc->mem + 0x80014)); 249 HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x", 250 __func__, 251 hif_read32_mb(sc, sc->mem + 0x80018), 252 hif_read32_mb(sc, sc->mem + 0x8001c)); 253 QDF_BUG(0); 254 } 255 256 PCI_CLR_CAUSE0_REGISTER(sc); 257 } 258 259 if (HAS_FW_INDICATOR) { 260 
fw_indicator_address = hif_state->fw_indicator_address; 261 fw_indicator = A_TARGET_READ(scn, fw_indicator_address); 262 if ((fw_indicator != ~0) && 263 (fw_indicator & FW_IND_EVENT_PENDING)) 264 ssr_irq = true; 265 } 266 267 if (Q_TARGET_ACCESS_END(scn) < 0) 268 return IRQ_HANDLED; 269 } 270 /* TBDXXX: Add support for WMAC */ 271 272 if (ssr_irq) { 273 sc->irq_event = irq; 274 qdf_atomic_set(&scn->tasklet_from_intr, 1); 275 276 qdf_atomic_inc(&scn->active_tasklet_cnt); 277 tasklet_schedule(&sc->intr_tq); 278 } else { 279 pci_dispatch_interrupt(scn); 280 } 281 282 return IRQ_HANDLED; 283 } 284 285 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem) 286 { 287 return 1; /* FIX THIS */ 288 } 289 290 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size) 291 { 292 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 293 int i = 0; 294 295 if (!irq || !size) { 296 return -EINVAL; 297 } 298 299 if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) { 300 irq[0] = sc->irq; 301 return 1; 302 } 303 304 if (sc->num_msi_intrs > size) { 305 qdf_print("Not enough space in irq buffer to return irqs"); 306 return -EINVAL; 307 } 308 309 for (i = 0; i < sc->num_msi_intrs; i++) { 310 irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL; 311 } 312 313 return sc->num_msi_intrs; 314 } 315 316 317 /** 318 * hif_pci_cancel_deferred_target_sleep() - cancels the defered target sleep 319 * @scn: hif_softc 320 * 321 * Return: void 322 */ 323 #if CONFIG_ATH_PCIE_MAX_PERF == 0 324 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn) 325 { 326 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 327 A_target_id_t pci_addr = scn->mem; 328 329 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); 330 /* 331 * If the deferred sleep timer is running cancel it 332 * and put the soc into sleep. 
333 */ 334 if (hif_state->fake_sleep == true) { 335 qdf_timer_stop(&hif_state->sleep_timer); 336 if (hif_state->verified_awake == false) { 337 hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + 338 PCIE_SOC_WAKE_ADDRESS, 339 PCIE_SOC_WAKE_RESET); 340 } 341 hif_state->fake_sleep = false; 342 } 343 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); 344 } 345 #else 346 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn) 347 { 348 } 349 #endif 350 351 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \ 352 hif_read32_mb(sc, (char *)(mem) + \ 353 PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)) 354 355 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \ 356 hif_write32_mb(sc, ((char *)(mem) + \ 357 PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val)) 358 359 #ifdef QCA_WIFI_3_0 360 /** 361 * hif_targ_is_awake() - check to see if the target is awake 362 * @hif_ctx: hif context 363 * 364 * emulation never goes to sleep 365 * 366 * Return: true if target is awake 367 */ 368 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem) 369 { 370 return true; 371 } 372 #else 373 /** 374 * hif_targ_is_awake() - check to see if the target is awake 375 * @hif_ctx: hif context 376 * 377 * Return: true if the targets clocks are on 378 */ 379 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem) 380 { 381 uint32_t val; 382 383 if (scn->recovery) 384 return false; 385 val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS 386 + RTC_STATE_ADDRESS); 387 return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON; 388 } 389 #endif 390 391 #define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */ 392 static void hif_pci_device_reset(struct hif_pci_softc *sc) 393 { 394 void __iomem *mem = sc->mem; 395 int i; 396 uint32_t val; 397 struct hif_softc *scn = HIF_GET_SOFTC(sc); 398 399 if (!scn->hostdef) 400 return; 401 402 /* NB: Don't check resetok here. This form of reset 403 * is integral to correct operation. 
404 */ 405 406 if (!SOC_GLOBAL_RESET_ADDRESS) 407 return; 408 409 if (!mem) 410 return; 411 412 HIF_ERROR("%s: Reset Device", __func__); 413 414 /* 415 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first 416 * writing WAKE_V, the Target may scribble over Host memory! 417 */ 418 A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS, 419 PCIE_SOC_WAKE_V_MASK); 420 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 421 if (hif_targ_is_awake(scn, mem)) 422 break; 423 424 qdf_mdelay(1); 425 } 426 427 /* Put Target, including PCIe, into RESET. */ 428 val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS); 429 val |= 1; 430 A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val); 431 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 432 if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) & 433 RTC_STATE_COLD_RESET_MASK) 434 break; 435 436 qdf_mdelay(1); 437 } 438 439 /* Pull Target, including PCIe, out of RESET. */ 440 val &= ~1; 441 A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val); 442 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 443 if (! 444 (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) & 445 RTC_STATE_COLD_RESET_MASK)) 446 break; 447 448 qdf_mdelay(1); 449 } 450 451 A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS, 452 PCIE_SOC_WAKE_RESET); 453 } 454 455 /* CPU warm reset function 456 * Steps: 457 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset 458 * 2. Clear the FW_INDICATOR_ADDRESS -so Traget CPU initializes FW 459 * correctly on WARM reset 460 * 3. Clear TARGET CPU LF timer interrupt 461 * 4. Reset all CEs to clear any pending CE tarnsactions 462 * 5. Warm reset CPU 463 */ 464 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc) 465 { 466 void __iomem *mem = sc->mem; 467 int i; 468 uint32_t val; 469 uint32_t fw_indicator; 470 struct hif_softc *scn = HIF_GET_SOFTC(sc); 471 472 /* NB: Don't check resetok here. This form of reset is 473 * integral to correct operation. 
474 */ 475 476 if (!mem) 477 return; 478 479 HIF_INFO_MED("%s: Target Warm Reset", __func__); 480 481 /* 482 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first 483 * writing WAKE_V, the Target may scribble over Host memory! 484 */ 485 A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS, 486 PCIE_SOC_WAKE_V_MASK); 487 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { 488 if (hif_targ_is_awake(scn, mem)) 489 break; 490 qdf_mdelay(1); 491 } 492 493 /* 494 * Disable Pending interrupts 495 */ 496 val = 497 hif_read32_mb(sc, mem + 498 (SOC_CORE_BASE_ADDRESS | 499 PCIE_INTR_CAUSE_ADDRESS)); 500 HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__, 501 (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val); 502 /* Target CPU Intr Cause */ 503 val = hif_read32_mb(sc, mem + 504 (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS)); 505 HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val); 506 507 val = 508 hif_read32_mb(sc, mem + 509 (SOC_CORE_BASE_ADDRESS | 510 PCIE_INTR_ENABLE_ADDRESS)); 511 hif_write32_mb(sc, (mem + 512 (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0); 513 hif_write32_mb(sc, (mem + 514 (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)), 515 HOST_GROUP0_MASK); 516 517 qdf_mdelay(100); 518 519 /* Clear FW_INDICATOR_ADDRESS */ 520 if (HAS_FW_INDICATOR) { 521 fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS); 522 hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0); 523 } 524 525 /* Clear Target LF Timer interrupts */ 526 val = 527 hif_read32_mb(sc, mem + 528 (RTC_SOC_BASE_ADDRESS + 529 SOC_LF_TIMER_CONTROL0_ADDRESS)); 530 HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__, 531 (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val); 532 val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK; 533 hif_write32_mb(sc, mem + 534 (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), 535 val); 536 537 /* Reset CE */ 538 val = 539 hif_read32_mb(sc, mem + 540 (RTC_SOC_BASE_ADDRESS | 541 SOC_RESET_CONTROL_ADDRESS)); 542 val |= 
SOC_RESET_CONTROL_CE_RST_MASK; 543 hif_write32_mb(sc, (mem + 544 (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)), 545 val); 546 val = 547 hif_read32_mb(sc, mem + 548 (RTC_SOC_BASE_ADDRESS | 549 SOC_RESET_CONTROL_ADDRESS)); 550 qdf_mdelay(10); 551 552 /* CE unreset */ 553 val &= ~SOC_RESET_CONTROL_CE_RST_MASK; 554 hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS | 555 SOC_RESET_CONTROL_ADDRESS), val); 556 val = 557 hif_read32_mb(sc, mem + 558 (RTC_SOC_BASE_ADDRESS | 559 SOC_RESET_CONTROL_ADDRESS)); 560 qdf_mdelay(10); 561 562 /* Read Target CPU Intr Cause */ 563 val = hif_read32_mb(sc, mem + 564 (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS)); 565 HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x", 566 __func__, val); 567 568 /* CPU warm RESET */ 569 val = 570 hif_read32_mb(sc, mem + 571 (RTC_SOC_BASE_ADDRESS | 572 SOC_RESET_CONTROL_ADDRESS)); 573 val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK; 574 hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS | 575 SOC_RESET_CONTROL_ADDRESS), val); 576 val = 577 hif_read32_mb(sc, mem + 578 (RTC_SOC_BASE_ADDRESS | 579 SOC_RESET_CONTROL_ADDRESS)); 580 HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x", 581 __func__, val); 582 583 qdf_mdelay(100); 584 HIF_INFO_MED("%s: Target Warm reset complete", __func__); 585 586 } 587 588 #ifndef QCA_WIFI_3_0 589 /* only applicable to legacy ce */ 590 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx) 591 { 592 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 593 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 594 void __iomem *mem = sc->mem; 595 uint32_t val; 596 597 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 598 return ATH_ISR_NOSCHED; 599 val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS); 600 if (Q_TARGET_ACCESS_END(scn) < 0) 601 return ATH_ISR_SCHED; 602 603 HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val); 604 605 if (val & FW_IND_HELPER) 606 return 0; 607 608 return 1; 609 } 610 #endif 611 612 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx) 613 { 614 
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 615 uint16_t device_id = 0; 616 uint32_t val; 617 uint16_t timeout_count = 0; 618 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 619 620 /* Check device ID from PCIe configuration space for link status */ 621 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id); 622 if (device_id != sc->devid) { 623 HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)", 624 __func__, device_id, sc->devid); 625 return -EACCES; 626 } 627 628 /* Check PCIe local register for bar/memory access */ 629 val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 630 RTC_STATE_ADDRESS); 631 HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val); 632 633 /* Try to wake up taget if it sleeps */ 634 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 635 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 636 HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__, 637 hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 638 PCIE_SOC_WAKE_ADDRESS)); 639 640 /* Check if taget can be woken up */ 641 while (!hif_targ_is_awake(scn, sc->mem)) { 642 if (timeout_count >= PCIE_WAKE_TIMEOUT) { 643 HIF_ERROR("%s: wake up timeout, %08x, %08x", 644 __func__, 645 hif_read32_mb(sc, sc->mem + 646 PCIE_LOCAL_BASE_ADDRESS + 647 RTC_STATE_ADDRESS), 648 hif_read32_mb(sc, sc->mem + 649 PCIE_LOCAL_BASE_ADDRESS + 650 PCIE_SOC_WAKE_ADDRESS)); 651 return -EACCES; 652 } 653 654 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 655 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 656 657 qdf_mdelay(100); 658 timeout_count += 100; 659 } 660 661 /* Check Power register for SoC internal bus issues */ 662 val = 663 hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS + 664 SOC_POWER_REG_OFFSET); 665 HIF_INFO_MED("%s: Power register is %08x", __func__, val); 666 667 return 0; 668 } 669 670 /** 671 * __hif_pci_dump_registers(): dump other PCI debug registers 672 * @scn: struct hif_softc 673 * 674 * This function dumps pci debug registers. 
The parrent function 675 * dumps the copy engine registers before calling this function. 676 * 677 * Return: void 678 */ 679 static void __hif_pci_dump_registers(struct hif_softc *scn) 680 { 681 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 682 void __iomem *mem = sc->mem; 683 uint32_t val, i, j; 684 uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; 685 uint32_t ce_base; 686 687 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 688 return; 689 690 /* DEBUG_INPUT_SEL_SRC = 0x6 */ 691 val = 692 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 693 WLAN_DEBUG_INPUT_SEL_OFFSET); 694 val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK; 695 val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6); 696 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 697 WLAN_DEBUG_INPUT_SEL_OFFSET, val); 698 699 /* DEBUG_CONTROL_ENABLE = 0x1 */ 700 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 701 WLAN_DEBUG_CONTROL_OFFSET); 702 val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK; 703 val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1); 704 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 705 WLAN_DEBUG_CONTROL_OFFSET, val); 706 707 HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__, 708 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 709 WLAN_DEBUG_INPUT_SEL_OFFSET), 710 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 711 WLAN_DEBUG_CONTROL_OFFSET)); 712 713 HIF_INFO_MED("%s: Debug CE", __func__); 714 /* Loop CE debug output */ 715 /* AMBA_DEBUG_BUS_SEL = 0xc */ 716 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 717 AMBA_DEBUG_BUS_OFFSET); 718 val &= ~AMBA_DEBUG_BUS_SEL_MASK; 719 val |= AMBA_DEBUG_BUS_SEL_SET(0xc); 720 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, 721 val); 722 723 for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) { 724 /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */ 725 val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS + 726 CE_WRAPPER_DEBUG_OFFSET); 727 val &= ~CE_WRAPPER_DEBUG_SEL_MASK; 728 val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]); 729 hif_write32_mb(sc, mem + 
CE_WRAPPER_BASE_ADDRESS + 730 CE_WRAPPER_DEBUG_OFFSET, val); 731 732 HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x", 733 __func__, wrapper_idx[i], 734 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 735 AMBA_DEBUG_BUS_OFFSET), 736 hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS + 737 CE_WRAPPER_DEBUG_OFFSET)); 738 739 if (wrapper_idx[i] <= 7) { 740 for (j = 0; j <= 5; j++) { 741 ce_base = CE_BASE_ADDRESS(wrapper_idx[i]); 742 /* For (j=0~5) write CE_DEBUG_SEL = j */ 743 val = 744 hif_read32_mb(sc, mem + ce_base + 745 CE_DEBUG_OFFSET); 746 val &= ~CE_DEBUG_SEL_MASK; 747 val |= CE_DEBUG_SEL_SET(j); 748 hif_write32_mb(sc, mem + ce_base + 749 CE_DEBUG_OFFSET, val); 750 751 /* read (@gpio_athr_wlan_reg) 752 * WLAN_DEBUG_OUT_DATA 753 */ 754 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS 755 + WLAN_DEBUG_OUT_OFFSET); 756 val = WLAN_DEBUG_OUT_DATA_GET(val); 757 758 HIF_INFO_MED("%s: module%d: cedbg: %x out: %x", 759 __func__, j, 760 hif_read32_mb(sc, mem + ce_base + 761 CE_DEBUG_OFFSET), val); 762 } 763 } else { 764 /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */ 765 val = 766 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 767 WLAN_DEBUG_OUT_OFFSET); 768 val = WLAN_DEBUG_OUT_DATA_GET(val); 769 770 HIF_INFO_MED("%s: out: %x", __func__, val); 771 } 772 } 773 774 HIF_INFO_MED("%s: Debug PCIe:", __func__); 775 /* Loop PCIe debug output */ 776 /* Write AMBA_DEBUG_BUS_SEL = 0x1c */ 777 val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 778 AMBA_DEBUG_BUS_OFFSET); 779 val &= ~AMBA_DEBUG_BUS_SEL_MASK; 780 val |= AMBA_DEBUG_BUS_SEL_SET(0x1c); 781 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 782 AMBA_DEBUG_BUS_OFFSET, val); 783 784 for (i = 0; i <= 8; i++) { 785 /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */ 786 val = 787 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 788 AMBA_DEBUG_BUS_OFFSET); 789 val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK; 790 val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i); 791 hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + 792 AMBA_DEBUG_BUS_OFFSET, 
val); 793 794 /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */ 795 val = 796 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 797 WLAN_DEBUG_OUT_OFFSET); 798 val = WLAN_DEBUG_OUT_DATA_GET(val); 799 800 HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__, 801 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 802 WLAN_DEBUG_OUT_OFFSET), val, 803 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + 804 WLAN_DEBUG_OUT_OFFSET)); 805 } 806 807 Q_TARGET_ACCESS_END(scn); 808 } 809 810 /** 811 * hif_dump_registers(): dump bus debug registers 812 * @scn: struct hif_opaque_softc 813 * 814 * This function dumps hif bus debug registers 815 * 816 * Return: 0 for success or error code 817 */ 818 int hif_pci_dump_registers(struct hif_softc *hif_ctx) 819 { 820 int status; 821 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 822 823 status = hif_dump_ce_registers(scn); 824 825 if (status) 826 HIF_ERROR("%s: Dump CE Registers Failed", __func__); 827 828 /* dump non copy engine pci registers */ 829 __hif_pci_dump_registers(scn); 830 831 return 0; 832 } 833 834 #ifdef HIF_CONFIG_SLUB_DEBUG_ON 835 836 /* worker thread to schedule wlan_tasklet in SLUB debug build */ 837 static void reschedule_tasklet_work_handler(void *arg) 838 { 839 struct hif_pci_softc *sc = arg; 840 struct hif_softc *scn = HIF_GET_SOFTC(sc); 841 842 if (!scn) { 843 HIF_ERROR("%s: hif_softc is NULL\n", __func__); 844 return; 845 } 846 847 if (scn->hif_init_done == false) { 848 HIF_ERROR("%s: wlan driver is unloaded", __func__); 849 return; 850 } 851 852 tasklet_schedule(&sc->intr_tq); 853 } 854 855 /** 856 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet 857 * work 858 * @sc: HIF PCI Context 859 * 860 * Return: void 861 */ 862 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) 863 { 864 qdf_create_work(0, &sc->reschedule_tasklet_work, 865 reschedule_tasklet_work_handler, NULL); 866 } 867 #else 868 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { } 869 #endif /* 
HIF_CONFIG_SLUB_DEBUG_ON */

/* Bottom-half for the legacy interrupt path: handles FW events and
 * balances the active-tasklet accounting taken in the ISR.
 */
void wlan_tasklet(unsigned long data)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (scn->hif_init_done == false)
		goto end;

	if (qdf_atomic_read(&scn->link_suspended))
		goto end;

	if (!ADRASTEA_BU) {
		(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
		if (scn->target_status == TARGET_STATUS_RESET)
			goto end;
	}

end:
	/* Undo the bookkeeping done when this tasklet was scheduled */
	qdf_atomic_set(&scn->tasklet_from_intr, 0);
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

#ifdef FEATURE_RUNTIME_PM
/* Map a HIF_PM_RUNTIME_STATE_* value to a printable name for logs */
static const char *hif_pm_runtime_state_to_string(uint32_t state)
{
	switch (state) {
	case HIF_PM_RUNTIME_STATE_NONE:
		return "INIT_STATE";
	case HIF_PM_RUNTIME_STATE_ON:
		return "ON";
	case HIF_PM_RUNTIME_STATE_RESUMING:
		return "RESUMING";
	case HIF_PM_RUNTIME_STATE_SUSPENDING:
		return "SUSPENDING";
	case HIF_PM_RUNTIME_STATE_SUSPENDED:
		return "SUSPENDED";
	default:
		return "INVALID STATE";
	}
}

#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @sc: hif_pci_softc context
 * @msg: log message
 *
 * log runtime pm stats when something seems off.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
{
	struct hif_pm_runtime_lock *ctx;

	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
		  msg, atomic_read(&sc->dev->power.usage_count),
		  hif_pm_runtime_state_to_string(
			  atomic_read(&sc->pm_state)),
		  sc->prevent_suspend_cnt);

	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
		  sc->dev->power.runtime_status,
		  sc->dev->power.runtime_error,
		  sc->dev->power.disable_depth,
		  sc->dev->power.autosuspend_delay);

	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
		  sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
		  sc->pm_stats.request_resume);

	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
		  sc->pm_stats.allow_suspend,
		  sc->pm_stats.prevent_suspend);

	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
		  sc->pm_stats.prevent_suspend_timeout,
		  sc->pm_stats.allow_suspend_timeout);

	HIF_ERROR("Suspended: %u, resumed: %u count",
		  sc->pm_stats.suspended,
		  sc->pm_stats.resumed);

	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
		  sc->pm_stats.suspend_err,
		  sc->pm_stats.runtime_get_err);

	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");

	/* NOTE(review): list walked without taking runtime_lock here -
	 * presumably acceptable in this warn/debug path; confirm.
	 */
	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
	}

	WARN_ON(1);
}

/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_pci_softc *sc = s->private;
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
		"SUSPENDING", "SUSPENDED"};
	unsigned int msecs_age;
	qdf_time_t usecs_age;
	int pm_state = atomic_read(&sc->pm_state);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *ctx;

	seq_printf(s, "%30s: %s\n", "Runtime PM state",
		   autopm_state[pm_state]);
	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
		   sc->pm_stats.last_resume_caller);
	seq_printf(s, "%30s: %pf\n", "Last Busy Marker",
		   sc->pm_stats.last_busy_marker);

	usecs_age = qdf_get_log_timestamp_usecs() -
		sc->pm_stats.last_busy_timestamp;
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
		   sc->pm_stats.last_busy_timestamp / 1000000,
		   sc->pm_stats.last_busy_timestamp % 1000000);
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
		   usecs_age / 1000000, usecs_age % 1000000);

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(jiffies -
					     sc->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
			   msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
		   atomic_read(&sc->dev->power.usage_count));

	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
		   sc->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);

	timer_expires = sc->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
			   msecs_age / 1000, msecs_age % 1000);
	}

	spin_lock_bh(&sc->runtime_lock);
	if (list_empty(&sc->prevent_suspend_list)) {
		spin_unlock_bh(&sc->runtime_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_bh(&sc->runtime_lock);

	return 0;
}
#undef HIF_PCI_RUNTIME_PM_STATS

/**
 * hif_pci_runtime_pm_open() - open a debug fs file to access the runtime pm stats
 * @inode: inode of the debugfs entry
 * @file: file handle being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			   inode->i_private);
}

static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_pci_runtime_pm_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};

/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @sc: pci context
 *
 * creates a debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
					    0400, NULL, sc,
					    &hif_pci_runtime_pm_fops);
}

/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @sc: pci context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
	debugfs_remove(sc->pm_dentry);
}

/**
 * hif_runtime_init() - hand the device over to the PM core for runtime pm
 * @dev: device to enable runtime pm on
 * @delay: autosuspend delay in milliseconds
 *
 * Drops the usage count taken at probe (put_noidle) so the PM core is
 * free to runtime-suspend the device after @delay ms of inactivity.
 */
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}

/**
 * hif_runtime_exit() - undo hif_runtime_init()
 * @dev: device to take back from the PM core
 *
 * Re-takes the usage count (get_noresume) and marks the device active
 * so runtime pm no longer suspends it.
 */
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
}

static void hif_pm_runtime_lock_timeout_fn(void *data);

/**
 * hif_pm_runtime_start(): start the runtime pm
 * @sc: pci context
 *
 * After this call, runtime pm will be active.
 */
static void hif_pm_runtime_start(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm) {
		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
		return;
	}

	/* Runtime pm is only meaningful for mission mode */
	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE) {
		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
			 __func__);
		return;
	}

	/* timer used to expire prevent-suspend locks taken with a timeout */
	qdf_timer_init(NULL, &sc->runtime_timer,
		       hif_pm_runtime_lock_timeout_fn,
		       sc, QDF_TIMER_TYPE_WAKE_APPS);

	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
		 ol_sc->hif_config.runtime_pm_delay);

	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
	hif_runtime_pm_debugfs_create(sc);
}

/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @sc: pci context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_runtime_pm_start().
 */
static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm)
		return;

	/* mirror of the mode checks in hif_pm_runtime_start() */
	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE)
		return;

	hif_runtime_exit(sc->dev);
	hif_pm_runtime_resume(sc->dev);

	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(sc);
	qdf_timer_free(&sc->runtime_timer);
	/* doesn't wait for pending traffic unlike cld-2.0 */
}

/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @sc: pci data structure
 *
 * Early initialization
 */
static void hif_pm_runtime_open(struct hif_pci_softc *sc)
{
	spin_lock_init(&sc->runtime_lock);

	qdf_atomic_init(&sc->pm_state);
	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
	INIT_LIST_HEAD(&sc->prevent_suspend_list);
}

/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
 * @sc: pci context
 *
 * Ensure we have only one vote against runtime suspend before closing
 * the runtime suspend feature.
 *
 * all gets by the wlan driver should have been returned
 * one vote should remain as part of cnss_runtime_exit
 *
 * needs to be revisited if we share the root complex.
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	/* a count of exactly 1 means everything was returned cleanly;
	 * anything else is a leak worth warning about
	 */
	if (atomic_read(&sc->dev->power.usage_count) != 1)
		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
	else
		return;

	spin_lock_bh(&sc->runtime_lock);
	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
		/* NOTE(review): the lock is dropped around deinit (which may
		 * sleep); this relies on deinit removing ctx from the list
		 * and on no concurrent mutation during unload — confirm
		 */
		spin_unlock_bh(&sc->runtime_lock);
		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
		spin_lock_bh(&sc->runtime_lock);
	}
	spin_unlock_bh(&sc->runtime_lock);

	/* ensure 1 and only 1 usage count so that when the wlan
	 * driver is re-insmodded runtime pm won't be
	 * disabled also ensures runtime pm doesn't get
	 * broken on by being less than 1.
	 */
	if (atomic_read(&sc->dev->power.usage_count) <= 0)
		atomic_set(&sc->dev->power.usage_count, 1);
	while (atomic_read(&sc->dev->power.usage_count) > 1)
		hif_pm_runtime_put_auto(sc->dev);
}

static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
					  struct hif_pm_runtime_lock *lock);

/**
 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
 * @sc: PCIe Context
 *
 * API is used to empty the runtime pm prevent suspend list.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	spin_lock_bh(&sc->runtime_lock);
	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
		__hif_pm_runtime_allow_suspend(sc, ctx);
	}
	spin_unlock_bh(&sc->runtime_lock);
}

/**
 * hif_pm_runtime_close(): close runtime pm
 * @sc: pci bus handle
 *
 * ensure runtime_pm is stopped before closing the driver
 */
static void hif_pm_runtime_close(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
	/* nothing to stop/sanitize if runtime pm was never started */
	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
		return;

	hif_pm_runtime_stop(sc);

	hif_is_recovery_in_progress(scn) ?
		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
		hif_pm_runtime_sanitize_on_exit(sc);
}
#else
/* Stubs used when runtime pm support is compiled out */
static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
#endif

/**
 * hif_disable_power_gating() - disable HW power gating
 * @hif_ctx: hif context
 *
 * disables pcie L1 power states
 */
static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: Could not disable ASPM scn is null",
		       __func__);
		return;
	}

	/* Disable ASPM when pkt log is enabled */
	/* 0x80: PCIe Link Control register offset for this device; the
	 * original value is saved in lcr_val so hif_enable_power_gating()
	 * can restore it
	 */
	pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
	pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
}

/**
 * hif_enable_power_gating() - enable HW
 * power gating
 * @sc: pci context
 *
 * enables pcie L1 power states
 */
static void hif_enable_power_gating(struct hif_pci_softc *sc)
{
	if (!sc) {
		HIF_ERROR("%s: Could not disable ASPM scn is null",
		       __func__);
		return;
	}

	/* Re-enable ASPM after firmware/OTP download is complete,
	 * restoring the link control value saved by
	 * hif_disable_power_gating()
	 */
	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
}

/**
 * hif_pci_enable_power_management() - enable power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: skip re-enabling ASPM when pkt log is on
 *
 * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling
 * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
 *
 * note: epping mode does not call this function as it does not
 * care about saving power.
 */
void hif_pci_enable_power_management(struct hif_softc *hif_sc,
				 bool is_packet_log_enabled)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
	uint32_t mode;

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}

	mode = hif_get_conparam(hif_sc);
	/* FTM mode: only ASPM is restored; runtime pm stays off */
	if (mode == QDF_GLOBAL_FTM_MODE) {
		HIF_INFO("%s: Enable power gating for FTM mode", __func__);
		hif_enable_power_gating(pci_ctx);
		return;
	}

	hif_pm_runtime_start(pci_ctx);

	if (!is_packet_log_enabled)
		hif_enable_power_gating(pci_ctx);

	if (!CONFIG_ATH_PCIE_MAX_PERF &&
	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
	    !ce_srng_based(hif_sc)) {
		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
			HIF_ERROR("%s, failed to set target to sleep",
				  __func__);
	}
}

/**
 * hif_pci_disable_power_management() - disable power management
 * @hif_ctx: hif context
 *
 * Currently disables runtime pm. Should be updated to behave
 * if runtime pm is not started. Should be updated to take care
 * of aspm and soc sleep for driver load.
 */
void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}

	hif_pm_runtime_stop(pci_ctx);
}

/* Dump copy-engine statistics for this PCI instance */
void hif_pci_display_stats(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}
	hif_display_ce_stats(&pci_ctx->ce_sc);
}

/* Reset copy-engine statistics for this PCI instance */
void hif_pci_clear_stats(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}
	hif_clear_ce_stats(&pci_ctx->ce_sc);
}

#define ATH_PCI_PROBE_RETRY_MAX 3
/**
 * hif_pci_open(): hif_bus_open
 * @hif_ctx: hif context
 * @bus_type: bus type
 *
 * Return: QDF_STATUS from hif_ce_open()
 */
QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	hif_ctx->bus_type = bus_type;
	hif_pm_runtime_open(sc);

	qdf_spinlock_create(&sc->irq_lock);

	return hif_ce_open(hif_ctx);
}

/**
 * hif_wake_target_cpu() - wake the target's cpu
 * @scn: hif context
 *
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static void hif_wake_target_cpu(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t core_ctrl;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	/* read-modify-write of CORE_CTRL via the diagnostic window */
	rv = hif_diag_read_access(hif_hdl,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	rv = hif_diag_write_access(hif_hdl,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
}

/**
 * soc_wake_reset() - allow the target to go to sleep
 * @scn: hif_softc
 *
 * Clear the force wake register. This is done by
 * hif_sleep_entry and cancel defered timer sleep.
 */
static void soc_wake_reset(struct hif_softc *scn)
{
	hif_write32_mb(scn, scn->mem +
		PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS,
		PCIE_SOC_WAKE_RESET);
}

/**
 * hif_sleep_entry() - gate target sleep
 * @arg: hif context
 *
 * This function is the callback for the sleep timer.
 * Check if last force awake critical section was at least
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was,
 * allow the target to go to sleep and cancel the sleep timer.
 * otherwise reschedule the sleep timer.
 */
static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	uint32_t idle_ms;

	/* never gate sleep state while recovering or unloading */
	if (scn->recovery)
		return;

	if (hif_is_driver_unloading(scn))
		return;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->fake_sleep) {
		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (!hif_state->verified_awake &&
		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			/* idle long enough: let the target sleep unless the
			 * pcie link itself is suspended
			 */
			if (!qdf_atomic_read(&scn->link_suspended)) {
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			/* still active: re-arm the timer and check again */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}

#define HIF_HIA_MAX_POLL_LOOP    1000000
#define HIF_HIA_POLLING_DELAY_MS 10

#ifdef QCA_HIF_HIA_EXTND

/*
 * hif_set_hia_extnd() - extended host interest area setup
 * @scn: hif context
 *
 * Programs target clock/PLL related host interest fields for
 * AR900B/QCA9984/QCA9888 class targets using the module parameters
 * frac, intval, ar900b_20_targ_clk and qca9888_20_targ_clk.
 */
static void hif_set_hia_extnd(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;

	HIF_TRACE("%s: E", __func__);

	if ((target_type == TARGET_TYPE_AR900B) ||
			target_type == TARGET_TYPE_QCA9984 ||
			target_type == TARGET_TYPE_QCA9888) {
		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
		 * in RTC space
		 */
		tgt_info->target_revision
			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
					+ CHIP_ID_ADDRESS));
		qdf_print("chip_id 0x%x chip_revision 0x%x",
			  target_type, tgt_info->target_revision);
	}

	{
		uint32_t flag2_value = 0;
		uint32_t flag2_targ_addr =
			host_interest_item_address(target_type,
			offsetof(struct host_interest_s, hi_skip_clock_init));

		/* clock override only applies when all three module params
		 * were supplied
		 */
		if ((ar900b_20_targ_clk != -1) &&
			(frac != -1) && (intval != -1)) {
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
					     &flag2_value);
			qdf_print("\n Setting clk_override");
			flag2_value |= CLOCK_OVERRIDE;

			hif_diag_write_access(hif_hdl, flag2_targ_addr,
					      flag2_value);
			qdf_print("\n CLOCK PLL val set %d", flag2_value);
		} else {
			qdf_print("\n CLOCK PLL skipped");
		}
	}

	if (target_type == TARGET_TYPE_AR900B
			|| target_type == TARGET_TYPE_QCA9984
			|| target_type == TARGET_TYPE_QCA9888) {

		/* for AR9980_2.0, 300 mhz clock is used, right now we assume
		 * this would be supplied through module parameters,
		 * if not supplied assumed default or same behavior as 1.0.
		 * Assume 1.0 clock can't be tuned, reset to defaults
		 */

		qdf_print(KERN_INFO
			  "%s: setting the target pll frac %x intval %x",
			  __func__, frac, intval);

		/* do not touch frac, and int val, let them be default -1,
		 * if desired, host can supply these through module params
		 */
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr =
				host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_clock_info));
			hif_diag_read_access(hif_hdl,
					flag2_targ_addr, &flag2_value);
			/* NOTE(review): hi_clock_info holds a target-side
			 * address; the value read (flag2_value) is then used
			 * as the write address below — confirm intended
			 */
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x Address %x",
				  intval, flag2_value + 4);
			hif_diag_write_access(hif_hdl,
					flag2_value + 4, intval);
		} else {
			qdf_print(KERN_INFO
				  "%s: no frac provided, skipping pre-configuring PLL",
				  __func__);
		}

		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
		if ((target_type == TARGET_TYPE_AR900B)
			&& (tgt_info->target_revision == AR900B_REV_2)
			&& ar900b_20_targ_clk != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_desired_cpu_speed_hz));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
					     &flag2_value);
			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value,
					      ar900b_20_targ_clk/*300000000u*/);
		} else if (target_type == TARGET_TYPE_QCA9888) {
			uint32_t flag2_targ_addr;

			if (200000000u != qca9888_20_targ_clk) {
				qca9888_20_targ_clk = 300000000u;
				/* Setting the target clock speed to 300 mhz */
			}

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_desired_cpu_speed_hz));
			hif_diag_write_access(hif_hdl, flag2_targ_addr,
					      qca9888_20_targ_clk);
		} else {
			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
				  __func__);
		}
	} else {
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr =
				host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_clock_info));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
					     &flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x Address %x", intval,
				  flag2_value + 4);
			hif_diag_write_access(hif_hdl, flag2_value + 4,
					      intval);
		}
	}
}

#else

static void hif_set_hia_extnd(struct hif_softc *scn)
{
}

#endif

/**
 * hif_set_hia() - fill out the host interest area
 * @scn: hif context
 *
 * This is replaced by hif_wlan_enable for integrated targets.
 * This fills out the host interest area.
 * The firmware will
 * process these memory addresses when it is first brought out
 * of reset.
 *
 * Return: 0 for success.
 */
static int hif_set_hia(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t interconnect_targ_addr = 0;
	uint32_t pcie_state_targ_addr = 0;
	uint32_t pipe_cfg_targ_addr = 0;
	uint32_t svc_to_pipe_map = 0;
	uint32_t pcie_config_flags = 0;
	uint32_t flag2_value = 0;
	uint32_t flag2_targ_addr = 0;
#ifdef QCA_WIFI_3_0
	uint32_t host_interest_area = 0;
	uint8_t i;
#else
	uint32_t ealloc_value = 0;
	uint32_t ealloc_targ_addr = 0;
	uint8_t banks_switched = 1;
	uint32_t chip_id;
#endif
	uint32_t pipe_cfg_addr;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;
	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
	static struct CE_pipe_config *target_ce_config;
	struct service_to_pipe *target_service_to_ce_map;

	HIF_TRACE("%s: E", __func__);

	hif_get_target_ce_config(scn,
				 &target_ce_config, &target_ce_config_sz,
				 &target_service_to_ce_map,
				 &target_service_to_ce_map_sz,
				 NULL, NULL);

	/* HIA setup is not needed for ADRASTEA bring up targets.
	 * NOTE(review): function returns int but here a QDF_STATUS is
	 * returned (QDF_STATUS_SUCCESS == 0, so callers see success)
	 */
	if (ADRASTEA_BU)
		return QDF_STATUS_SUCCESS;

#ifdef QCA_WIFI_3_0
	/* poll scratch register until the target publishes the host
	 * interest area address (bit 0 clear means not ready yet)
	 */
	i = 0;
	while (i < HIF_HIA_MAX_POLL_LOOP) {
		host_interest_area = hif_read32_mb(scn, scn->mem +
						A_SOC_CORE_SCRATCH_0_ADDRESS);
		if ((host_interest_area & 0x01) == 0) {
			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
			host_interest_area = 0;
			i++;
			/* NOTE(review): this branch is unreachable inside the
			 * loop since i < HIF_HIA_MAX_POLL_LOOP here
			 */
			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
		} else {
			host_interest_area &= (~0x01);
			hif_write32_mb(scn, scn->mem + 0x113014, 0);
			break;
		}
	}

	if (i >= HIF_HIA_MAX_POLL_LOOP) {
		HIF_ERROR("%s: hia polling timeout", __func__);
		return -EIO;
	}

	if (host_interest_area == 0) {
		HIF_ERROR("%s: host_interest_area = 0", __func__);
		return -EIO;
	}

	interconnect_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t,
			hi_interconnect_state);

	flag2_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t, hi_option_flag2);

#else
	interconnect_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_interconnect_state));
	ealloc_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_early_alloc));
	flag2_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_option_flag2));
#endif
	/* Supply Target-side CE configuration */
	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
			  &pcie_state_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
			  __func__, interconnect_targ_addr, rv);
		goto done;
	}
	if (pcie_state_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: pcie state addr is 0", __func__);
		goto done;
	}
	pipe_cfg_addr = pcie_state_targ_addr +
			  offsetof(struct pcie_state_s,
			  pipe_cfg_addr);
	rv = hif_diag_read_access(hif_hdl,
			  pipe_cfg_addr,
			  &pipe_cfg_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
			__func__, pipe_cfg_addr, rv);
		goto done;
	}
	if (pipe_cfg_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
			(uint8_t *) target_ce_config,
			target_ce_config_sz);

	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
			  pcie_state_targ_addr +
			  offsetof(struct pcie_state_s,
			   svc_to_pipe_map),
			  &svc_to_pipe_map);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
		goto done;
	}
	if (svc_to_pipe_map == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl,
			svc_to_pipe_map,
			(uint8_t *) target_service_to_ce_map,
			target_service_to_ce_map_sz);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
			pcie_state_targ_addr +
			offsetof(struct pcie_state_s,
			config_flags),
			&pcie_config_flags);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
		goto done;
	}
#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
#else
	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
#endif
	rv = hif_diag_write_mem(hif_hdl,
			pcie_state_targ_addr +
			offsetof(struct pcie_state_s,
			config_flags),
			(uint8_t *) &pcie_config_flags,
			sizeof(pcie_config_flags));
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
		goto done;
	}

#ifndef QCA_WIFI_3_0
	/* configure early allocation */
	ealloc_targ_addr = hif_hia_item_address(target_type,
						offsetof(
						struct host_interest_s,
						hi_early_alloc));

	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
			&ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
		goto done;
	}

	/* 1 bank is switched to IRAM, except ROME 1.0 */
	ealloc_value |=
		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
		 HI_EARLY_ALLOC_MAGIC_MASK);

	rv = hif_diag_read_access(hif_hdl,
			  CHIP_ID_ADDRESS |
			  RTC_SOC_BASE_ADDRESS, &chip_id);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
		goto done;
	}
	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
		/* number of IRAM banks depends on the ROME revision */
		switch (CHIP_ID_REVISION_GET(chip_id)) {
		case 0x2:       /* ROME 1.3 */
			/* 2 banks are switched to IRAM */
			banks_switched = 2;
			break;
		case 0x4:       /* ROME 2.1 */
		case 0x5:       /* ROME 2.2 */
			banks_switched = 6;
			break;
		case 0x8:       /* ROME 3.0 */
		case 0x9:       /* ROME 3.1 */
		case 0xA:       /* ROME 3.2 */
			banks_switched = 9;
			break;
		case 0x0:       /* ROME 1.0 */
		case 0x1:       /* ROME 1.1 */
		default:
			/* 3 banks are switched to IRAM */
			banks_switched = 3;
			break;
		}
	}

	ealloc_value |=
		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	rv = hif_diag_write_access(hif_hdl,
				ealloc_targ_addr,
				ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
		goto done;
	}
#endif
	if ((target_type == TARGET_TYPE_AR900B)
			|| (target_type == TARGET_TYPE_QCA9984)
			|| (target_type == TARGET_TYPE_QCA9888)
			|| (target_type == TARGET_TYPE_AR9888)) {
		hif_set_hia_extnd(scn);
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = hif_hia_item_address(target_type,
					       offsetof(
					       struct host_interest_s,
					       hi_option_flag2));

	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
			  &flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get option val (%d)", __func__, rv);
		goto done;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
			   flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: set option val (%d)", __func__, rv);
		goto done;
	}

	hif_wake_target_cpu(scn);

done:

	return rv;
}

/**
 * hif_pci_bus_configure() - configure the pcie bus
 * @hif_sc: pointer to the hif context.
 *
 * return: 0 for success. nonzero for failure.
 */
int hif_pci_bus_configure(struct hif_softc *hif_sc)
{
	int status = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);

	hif_ce_prepare_config(hif_sc);

	/* initialize sleep state adjust variables */
	hif_state->sleep_timer_init = true;
	hif_state->keep_awake_count = 0;
	hif_state->fake_sleep = false;
	hif_state->sleep_ticks = 0;

	qdf_timer_init(NULL, &hif_state->sleep_timer,
		       hif_sleep_entry, (void *)hif_state,
		       QDF_TIMER_TYPE_WAKE_APPS);
	/* NOTE(review): duplicate of the assignment a few lines above */
	hif_state->sleep_timer_init = true;

	status = hif_wlan_enable(hif_sc);
	if (status) {
		HIF_ERROR("%s: hif_wlan_enable error = %d",
			  __func__, status);
		goto timer_free;
	}

	A_TARGET_ACCESS_LIKELY(hif_sc);

	if ((CONFIG_ATH_PCIE_MAX_PERF ||
	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
	    !ce_srng_based(hif_sc)) {
		/*
		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
		 * prevent sleep when we want to keep firmware always awake
		 * note: when we want to keep firmware always awake,
		 * hif_target_sleep_state_adjust will point to a dummy
		 * function, and hif_pci_target_sleep_state_adjust must
		 * be called instead.
		 * note: bus type check is here because AHB bus is reusing
		 * hif_pci_bus_configure code.
		 */
		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
			if (hif_pci_target_sleep_state_adjust(hif_sc,
					false, true) < 0) {
				status = -EACCES;
				goto disable_wlan;
			}
		}
	}

	/* todo: consider replacing this with an srng field */
	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
		hif_sc->per_ce_irq = true;
	}

	status = hif_config_ce(hif_sc);
	if (status)
		goto disable_wlan;

	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
	if (hif_needs_bmi(hif_osc)) {
		status = hif_set_hia(hif_sc);
		if (status)
			goto unconfig_ce;

		HIF_INFO_MED("%s: hif_set_hia done", __func__);

	}

	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
			     __func__);
	else {
		status = hif_configure_irq(hif_sc);
		if (status < 0)
			goto unconfig_ce;
	}

	A_TARGET_ACCESS_UNLIKELY(hif_sc);

	return status;

	/* unwind in reverse order of acquisition */
unconfig_ce:
	hif_unconfig_ce(hif_sc);
disable_wlan:
	A_TARGET_ACCESS_UNLIKELY(hif_sc);
	hif_wlan_disable(hif_sc);

timer_free:
	qdf_timer_stop(&hif_state->sleep_timer);
	qdf_timer_free(&hif_state->sleep_timer);
	hif_state->sleep_timer_init = false;

	HIF_ERROR("%s: failed, status = %d", __func__, status);
	return status;
}

/**
 * hif_pci_close(): hif_bus_close
 * @hif_sc: hif context
 *
 * Return: n/a
 */
void hif_pci_close(struct hif_softc *hif_sc)
{
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);

	hif_pm_runtime_close(hif_pci_sc);
	hif_ce_close(hif_sc);
}

#define BAR_NUM 0

/*
 * hif_enable_pci_nopld() - enable the pci device (non-PLD path)
 * @sc: pci softc
 * @pdev: pci device
 * @id: matched device id entry
 *
 * Enables the device, reserves and maps BAR 0, sets the DMA masks and
 * bus mastering. On success sc->mem/ol_sc->mem point at the mapped BAR.
 *
 * Return: 0 on success, -EIO on failure (resources unwound).
 */
static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
				struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	void __iomem *mem;
	int ret = 0;
	uint16_t device_id = 0;
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);

	/* a config-space readback that disagrees with the probed id
	 * indicates the link is down
	 */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != id->device)  {
		HIF_ERROR(
		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
		   __func__, device_id, id->device);
		/* pci link is down, so returning with error code */
		return -EIO;
	}

	/* FIXME: temp. commenting out assign_resource
	 * call for dev_attach to work on 2.6.38 kernel
	 */
#if (!defined(__LINUX_ARM_ARCH__))
	if (pci_assign_resource(pdev, BAR_NUM)) {
		HIF_ERROR("%s: pci_assign_resource error", __func__);
		return -EIO;
	}
#endif
	if (pci_enable_device(pdev)) {
		HIF_ERROR("%s: pci_enable_device error",
			   __func__);
		return -EIO;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
		ret = -EIO;
		goto err_region;
	}

#ifdef CONFIG_ARM_LPAE
	/* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask
	 * for 32 bits device also.
	 */
	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
		goto err_dma;
	}
	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
		goto err_dma;
	}
#else
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
		goto err_dma;
	}
	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
			   __func__);
		goto err_dma;
	}
#endif

	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		HIF_ERROR("%s: PCI iomap error", __func__);
		ret = -EIO;
		goto err_iomap;
	}

	HIF_INFO("*****BAR is %pK\n", (void *)mem);

	sc->mem = mem;

	/* Hawkeye emulation specific change */
	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
		mem = mem + 0x0c000000;
		sc->mem = mem;
		HIF_INFO("%s: Changing PCI mem base to %pK\n",
			 __func__, sc->mem);
	}

	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
	ol_sc->mem = mem;
	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
	sc->pci_enabled = true;
	return ret;

	/* unwind in reverse order of acquisition */
err_iomap:
	pci_clear_master(pdev);
err_dma:
	pci_release_region(pdev, BAR_NUM);
err_region:
	pci_disable_device(pdev);
	return ret;
}

static int hif_enable_pci_pld(struct
hif_pci_softc *sc, 2185 struct pci_dev *pdev, 2186 const struct pci_device_id *id) 2187 { 2188 PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188); 2189 sc->pci_enabled = true; 2190 return 0; 2191 } 2192 2193 2194 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc) 2195 { 2196 pci_disable_msi(sc->pdev); 2197 pci_iounmap(sc->pdev, sc->mem); 2198 pci_clear_master(sc->pdev); 2199 pci_release_region(sc->pdev, BAR_NUM); 2200 pci_disable_device(sc->pdev); 2201 } 2202 2203 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {} 2204 2205 static void hif_disable_pci(struct hif_pci_softc *sc) 2206 { 2207 struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); 2208 2209 if (!ol_sc) { 2210 HIF_ERROR("%s: ol_sc = NULL", __func__); 2211 return; 2212 } 2213 hif_pci_device_reset(sc); 2214 sc->hif_pci_deinit(sc); 2215 2216 sc->mem = NULL; 2217 ol_sc->mem = NULL; 2218 } 2219 2220 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc) 2221 { 2222 int ret = 0; 2223 int targ_awake_limit = 500; 2224 #ifndef QCA_WIFI_3_0 2225 uint32_t fw_indicator; 2226 #endif 2227 struct hif_softc *scn = HIF_GET_SOFTC(sc); 2228 2229 /* 2230 * Verify that the Target was started cleanly.* 2231 * The case where this is most likely is with an AUX-powered 2232 * Target and a Host in WoW mode. If the Host crashes, 2233 * loses power, or is restarted (without unloading the driver) 2234 * then the Target is left (aux) powered and running. On a 2235 * subsequent driver load, the Target is in an unexpected state. 2236 * We try to catch that here in order to reset the Target and 2237 * retry the probe. 
2238 */ 2239 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 2240 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 2241 while (!hif_targ_is_awake(scn, sc->mem)) { 2242 if (0 == targ_awake_limit) { 2243 HIF_ERROR("%s: target awake timeout", __func__); 2244 ret = -EAGAIN; 2245 goto end; 2246 } 2247 qdf_mdelay(1); 2248 targ_awake_limit--; 2249 } 2250 2251 #if PCIE_BAR0_READY_CHECKING 2252 { 2253 int wait_limit = 200; 2254 /* Synchronization point: wait the BAR0 is configured */ 2255 while (wait_limit-- && 2256 !(hif_read32_mb(sc, c->mem + 2257 PCIE_LOCAL_BASE_ADDRESS + 2258 PCIE_SOC_RDY_STATUS_ADDRESS) 2259 & PCIE_SOC_RDY_STATUS_BAR_MASK)) { 2260 qdf_mdelay(10); 2261 } 2262 if (wait_limit < 0) { 2263 /* AR6320v1 doesn't support checking of BAR0 2264 * configuration, takes one sec to wait BAR0 ready 2265 */ 2266 HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0", 2267 __func__); 2268 } 2269 } 2270 #endif 2271 2272 #ifndef QCA_WIFI_3_0 2273 fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS); 2274 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 2275 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); 2276 2277 if (fw_indicator & FW_IND_INITIALIZED) { 2278 HIF_ERROR("%s: Target is in an unknown state. 
EAGAIN", 2279 __func__); 2280 ret = -EAGAIN; 2281 goto end; 2282 } 2283 #endif 2284 2285 end: 2286 return ret; 2287 } 2288 2289 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc) 2290 { 2291 int ret = 0; 2292 struct hif_softc *scn = HIF_GET_SOFTC(sc); 2293 uint32_t target_type = scn->target_info.target_type; 2294 2295 HIF_TRACE("%s: E", __func__); 2296 2297 /* do notn support MSI or MSI IRQ failed */ 2298 tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc); 2299 ret = request_irq(sc->pdev->irq, 2300 hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED, 2301 "wlan_pci", sc); 2302 if (ret) { 2303 HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret); 2304 goto end; 2305 } 2306 scn->wake_irq = sc->pdev->irq; 2307 /* Use sc->irq instead of sc->pdev-irq 2308 * platform_device pdev doesn't have an irq field 2309 */ 2310 sc->irq = sc->pdev->irq; 2311 /* Use Legacy PCI Interrupts */ 2312 hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS | 2313 PCIE_INTR_ENABLE_ADDRESS), 2314 HOST_GROUP0_MASK); 2315 hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS | 2316 PCIE_INTR_ENABLE_ADDRESS)); 2317 hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + 2318 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); 2319 2320 if ((target_type == TARGET_TYPE_IPQ4019) || 2321 (target_type == TARGET_TYPE_AR900B) || 2322 (target_type == TARGET_TYPE_QCA9984) || 2323 (target_type == TARGET_TYPE_AR9888) || 2324 (target_type == TARGET_TYPE_QCA9888) || 2325 (target_type == TARGET_TYPE_AR6320V1) || 2326 (target_type == TARGET_TYPE_AR6320V2) || 2327 (target_type == TARGET_TYPE_AR6320V3)) { 2328 hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS + 2329 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); 2330 } 2331 end: 2332 QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, 2333 "%s: X, ret = %d", __func__, ret); 2334 return ret; 2335 } 2336 2337 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn) 2338 { 2339 int ret; 2340 int ce_id, irq; 2341 uint32_t msi_data_start; 2342 
uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return ret;

	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		unsigned int msi_data;

		/* only CEs whose tasklet was set up ever requested an irq */
		if (!ce_sc->tasklets[ce_id].inited)
			continue;

		msi_data = (ce_id % msi_data_count) + msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);

		hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
			  ce_id, msi_data, irq);

		free_irq(irq, &ce_sc->tasklets[ce_id]);
	}

	return ret;
}

/* Release every per-group irq that was requested for the ext groups */
static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
{
	int i, j, irq;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested) {
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				free_irq(irq, hif_ext_group);
			}
			hif_ext_group->numirq = 0;
		}
	}
}

/**
 * hif_pci_nointrs(): disable IRQ
 *
 * This function stops interrupt(s)
 *
 * @scn: struct hif_softc
 *
 * Return: none
 */
void hif_pci_nointrs(struct hif_softc *scn)
{
	int i, ret;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	if (scn->request_irq_done == false)
		return;

	hif_pci_deconfigure_grp_irq(scn);

	/* -EINVAL from the free helper means "no MSI assignment", so fall
	 * through to the MSI-block / legacy-line cases below
	 */
	ret = hif_ce_srng_msi_free_irq(scn);
	if (ret != -EINVAL) {
		/* ce irqs freed in hif_ce_srng_msi_free_irq */

		if (scn->wake_irq)
			free_irq(scn->wake_irq, scn);
		scn->wake_irq = 0;
	} else if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++)
			free_irq(sc->irq + i, sc);
		sc->num_msi_intrs = 0;
	} else {
		/* Legacy PCI line interrupt
		 * Use sc->irq instead of sc->pdev-irq
		 * platform_device pdev doesn't have an irq field
		 */
		free_irq(sc->irq, sc);
	}
	scn->request_irq_done = false;
}

/**
 * hif_pci_disable_bus(): disable the PCI bus
 *
 * This function disables the bus and resets the target
 *
 * @scn: struct hif_softc
 *
 * Return: none
 */
void hif_pci_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct pci_dev *pdev;
	void __iomem *mem;
	struct hif_target_info *tgt_info = &scn->target_info;

	/* Attach did not succeed, all resources have been
	 * freed in error handler
	 */
	if (!sc)
		return;

	pdev = sc->pdev;
	if (ADRASTEA_BU) {
		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));

		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
			       HOST_GROUP0_MASK);
	}

#if defined(CPU_WARM_RESET_WAR)
	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
	 * verified for AR9888_REV1
	 */
	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
	    (tgt_info->target_version == AR9887_REV1_VERSION))
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
	mem = (void __iomem *)sc->mem;
	if (mem) {
		hif_dump_pipe_debug_count(scn);
		if (scn->athdiag_procfs_inited) {
			athdiag_procfs_remove();
			scn->athdiag_procfs_inited =
false;
		}
		sc->hif_pci_deinit(sc);
		scn->mem = NULL;
	}
	HIF_INFO("%s: X", __func__);
}

#define OL_ATH_PCI_PM_CONTROL 0x44

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
 * @scn: hif context
 * @flag: prevent linkdown if true otherwise allow
 *
 * this api should only be called as part of bus prevent linkdown
 */
static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	if (flag)
		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
}
#else
static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
}
#endif

#if defined(CONFIG_PCI_MSM)
/**
 * hif_pci_prevent_linkdown(): allow or permit linkdown
 * @scn: hif context
 * @flag: true prevents linkdown, false allows
 *
 * Calls into the platform driver to vote against taking down the
 * pcie link.
 *
 * Return: n/a
 */
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	int errno;

	HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
	hif_runtime_prevent_linkdown(scn, flag);

	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
	if (errno)
		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
			  __func__, errno);
}
#else
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	HIF_INFO("wlan: %s pcie power collapse", (flag ?
			"disable" : "enable"));
	hif_runtime_prevent_linkdown(scn, flag);
}
#endif

/**
 * hif_pci_bus_suspend(): prepare hif for suspend
 *
 * Return: Errno
 */
int hif_pci_bus_suspend(struct hif_softc *scn)
{
	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));

	/* if tasklets cannot be drained, re-enable irqs and abort suspend */
	if (hif_drain_tasklets(scn)) {
		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
		return -EBUSY;
	}

	/* Stop the HIF Sleep Timer */
	hif_cancel_deferred_target_sleep(scn);

	return 0;
}

/**
 * __hif_check_link_status() - API to check if PCIe link is active/not
 * @scn: HIF Context
 *
 * API reads the PCIe config space to verify if PCIe link training is
 * successful or not.
 *
 * Return: Success/Failure
 */
static int __hif_check_link_status(struct hif_softc *scn)
{
	uint16_t dev_id = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (!sc) {
		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
		return -EINVAL;
	}

	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);

	/* device id read back intact means the link is still trained */
	if (dev_id == sc->devid)
		return 0;

	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
		  __func__, dev_id);

	scn->recovery = true;

	if (cbk && cbk->set_recovery_in_progress)
		cbk->set_recovery_in_progress(cbk->context, true);
	else
		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);

	pld_is_pci_link_down(sc->dev);
	return -EACCES;
}

/**
 * hif_pci_bus_resume(): prepare hif for resume
 *
 * Return: Errno
 */
int hif_pci_bus_resume(struct hif_softc *scn)
{
	int errno;

	errno = __hif_check_link_status(scn);
	if (errno)
		return errno;

	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));

	return 0;
}
/**
 * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
 * @scn: hif context
 *
 * Ensure that if we received the wakeup message before the irq
 * was disabled that the message is processed before suspending.
 *
 * Return: -EBUSY if we fail to flush the tasklets.
 */
int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
{
	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
		qdf_atomic_set(&scn->link_suspended, 1);

	hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));

	return 0;
}

/**
 * hif_pci_bus_resume_noirq() - ensure there are no pending transactions
 * @scn: hif context
 *
 * Ensure that if we received the wakeup message before the irq
 * was disabled that the message is processed before resuming.
 *
 * Return: -EBUSY if we fail to flush the tasklets.
 */
int hif_pci_bus_resume_noirq(struct hif_softc *scn)
{
	hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));

	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
		qdf_atomic_set(&scn->link_suspended, 0);

	return 0;
}

#ifdef FEATURE_RUNTIME_PM
/**
 * __hif_runtime_pm_set_state(): utility function
 * @scn: hif context
 * @state: state to set
 *
 * indexes into the runtime pm state and sets it.
 */
static void __hif_runtime_pm_set_state(struct hif_softc *scn,
				       enum hif_pm_runtime_state state)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	if (!sc) {
		HIF_ERROR("%s: HIF_CTX not initialized",
			  __func__);
		return;
	}

	qdf_atomic_set(&sc->pm_state, state);
}

/**
 * hif_runtime_pm_set_state_on(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that the runtime pm state should be on
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}

/**
 * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm resuming has started
 */
static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
}

/**
 * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm suspend has started
 */
static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
}

/**
 * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime suspend attempt has been completed successfully
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}

/**
 * hif_log_runtime_suspend_success() - log a successful runtime suspend
 * @hif_ctx: hif context
 */
static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	sc->pm_stats.suspended++;
	sc->pm_stats.suspend_jiffies = jiffies;
}

/**
 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
 * @hif_ctx: hif context
 *
 * log a failed runtime suspend
 * mark last busy to prevent immediate runtime suspend
 */
static void hif_log_runtime_suspend_failure(void *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	sc->pm_stats.suspend_err++;
}

/**
 * hif_log_runtime_resume_success() - log a successful runtime resume
 * @hif_ctx: hif context
 *
 * log a successful runtime resume
 * mark last busy to prevent immediate runtime suspend
 */
static void hif_log_runtime_resume_success(void *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!sc)
		return;

	sc->pm_stats.resumed++;
}

/**
 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
 * @hif_ctx: hif context
 *
 * Record the failure.
 * mark last busy to delay a retry.
 * adjust the runtime_pm state.
 */
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_suspend_failure(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}

/**
 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
 * @hif_ctx: hif context
 *
 * Makes sure that the pci link will be taken down by the suspend operation.
 * If the hif layer is configured to leave the bus on, runtime suspend will
 * not save any power.
 *
 * Set the runtime suspend state to in progress.
 *
 * return -EINVAL if the bus won't go down. otherwise return 0
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_can_suspend_link(hif_ctx)) {
		HIF_ERROR("Runtime PM not supported for link up suspend");
		return -EINVAL;
	}

	hif_runtime_pm_set_state_suspending(scn);
	return 0;
}

/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: hif context
 *
 * Record the success.
 * adjust the runtime_pm state
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(scn);
	hif_log_runtime_suspend_success(scn);
}

/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: hif context
 *
 * update the runtime pm state.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_runtime_pm_set_state_resuming(scn);
}

/**
 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
 * @hif_ctx: hif context
 *
 * record the success.
 * adjust the runtime_pm state
 */
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_resume_success(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}

/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 * @hif_ctx: hif context
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
		return errno;
	}

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		goto bus_resume;
	}

	qdf_atomic_set(&sc->pm_dp_rx_busy, 0);

	return 0;

bus_resume:
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}

/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 *
 * ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
2893 * 2894 * fastpath only applicable to legacy copy engine 2895 */ 2896 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) 2897 { 2898 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2899 struct CE_state *ce_state; 2900 2901 if (!scn) 2902 return; 2903 2904 if (scn->fastpath_mode_on) { 2905 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 2906 return; 2907 2908 ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG]; 2909 qdf_spin_lock_bh(&ce_state->ce_index_lock); 2910 2911 /*war_ce_src_ring_write_idx_set */ 2912 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr, 2913 ce_state->src_ring->write_index); 2914 qdf_spin_unlock_bh(&ce_state->ce_index_lock); 2915 Q_TARGET_ACCESS_END(scn); 2916 } 2917 } 2918 2919 /** 2920 * hif_runtime_resume() - do the bus resume part of a runtime resume 2921 * 2922 * Return: 0 for success and non-zero error code for failure 2923 */ 2924 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx) 2925 { 2926 QDF_BUG(!hif_bus_resume_noirq(hif_ctx)); 2927 QDF_BUG(!hif_bus_resume(hif_ctx)); 2928 return 0; 2929 } 2930 #endif /* #ifdef FEATURE_RUNTIME_PM */ 2931 2932 #if CONFIG_PCIE_64BIT_MSI 2933 static void hif_free_msi_ctx(struct hif_softc *scn) 2934 { 2935 struct hif_pci_softc *sc = scn->hif_sc; 2936 struct hif_msi_info *info = &sc->msi_info; 2937 struct device *dev = scn->qdf_dev->dev; 2938 2939 OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma, 2940 OS_GET_DMA_MEM_CONTEXT(scn, dmacontext)); 2941 info->magic = NULL; 2942 info->magic_dma = 0; 2943 } 2944 #else 2945 static void hif_free_msi_ctx(struct hif_softc *scn) 2946 { 2947 } 2948 #endif 2949 2950 void hif_pci_disable_isr(struct hif_softc *scn) 2951 { 2952 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 2953 2954 hif_exec_kill(&scn->osc); 2955 hif_nointrs(scn); 2956 hif_free_msi_ctx(scn); 2957 /* Cancel the pending tasklet */ 2958 ce_tasklet_kill(scn); 2959 tasklet_kill(&sc->intr_tq); 2960 qdf_atomic_set(&scn->active_tasklet_cnt, 0); 2961 qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0); 
2962 } 2963 2964 /* Function to reset SoC */ 2965 void hif_pci_reset_soc(struct hif_softc *hif_sc) 2966 { 2967 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc); 2968 struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc); 2969 struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc); 2970 2971 #if defined(CPU_WARM_RESET_WAR) 2972 /* Currently CPU warm reset sequence is tested only for AR9888_REV2 2973 * Need to enable for AR9888_REV1 once CPU warm reset sequence is 2974 * verified for AR9888_REV1 2975 */ 2976 if (tgt_info->target_version == AR9888_REV2_VERSION) 2977 hif_pci_device_warm_reset(sc); 2978 else 2979 hif_pci_device_reset(sc); 2980 #else 2981 hif_pci_device_reset(sc); 2982 #endif 2983 } 2984 2985 #ifdef CONFIG_PCI_MSM 2986 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) 2987 { 2988 msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0); 2989 msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0); 2990 } 2991 #else 2992 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}; 2993 #endif 2994 2995 /** 2996 * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info 2997 * @sc: HIF PCIe Context 2998 * 2999 * API to log PCIe Config space and SOC info when SOC wakeup timeout happens 3000 * 3001 * Return: Failure to caller 3002 */ 3003 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc) 3004 { 3005 uint16_t val = 0; 3006 uint32_t bar = 0; 3007 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc); 3008 struct hif_softc *scn = HIF_GET_SOFTC(sc); 3009 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc); 3010 struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl); 3011 struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); 3012 A_target_id_t pci_addr = scn->mem; 3013 3014 HIF_ERROR("%s: keep_awake_count = %d", 3015 __func__, hif_state->keep_awake_count); 3016 3017 pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val); 3018 3019 HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val); 
3020 3021 pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val); 3022 3023 HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val); 3024 3025 pci_read_config_word(sc->pdev, PCI_COMMAND, &val); 3026 3027 HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val); 3028 3029 pci_read_config_word(sc->pdev, PCI_STATUS, &val); 3030 3031 HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val); 3032 3033 pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar); 3034 3035 HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar); 3036 3037 HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__, 3038 hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + 3039 PCIE_SOC_WAKE_ADDRESS)); 3040 3041 HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__, 3042 hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + 3043 RTC_STATE_ADDRESS)); 3044 3045 HIF_ERROR("%s:error, wakeup target", __func__); 3046 hif_msm_pcie_debug_info(sc); 3047 3048 if (!cfg->enable_self_recovery) 3049 QDF_BUG(0); 3050 3051 scn->recovery = true; 3052 3053 if (cbk->set_recovery_in_progress) 3054 cbk->set_recovery_in_progress(cbk->context, true); 3055 3056 pld_is_pci_link_down(sc->dev); 3057 return -EACCES; 3058 } 3059 3060 /* 3061 * For now, we use simple on-demand sleep/wake. 3062 * Some possible improvements: 3063 * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay 3064 * (or perhaps spin/delay for a short while, then convert to sleep/interrupt) 3065 * Careful, though, these functions may be used by 3066 * interrupt handlers ("atomic") 3067 * -Don't use host_reg_table for this code; instead use values directly 3068 * -Use a separate timer to track activity and allow Target to sleep only 3069 * if it hasn't done anything for a while; may even want to delay some 3070 * processing for a short while in order to "batch" (e.g.) transmit 3071 * requests with completion processing into "windows of up time". Costs 3072 * some performance, but improves power utilization. 
 * -On some platforms, it might be possible to eliminate explicit
 * sleep/wakeup. Instead, take a chance that each access works OK. If not,
 * recover from the failure by forcing the Target awake.
 * -Change keep_awake_count to an atomic_t in order to avoid spin lock
 * overhead in some cases. Perhaps this makes more sense when
 * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
 * disabled.
 * -It is possible to compile this code out and simply force the Target
 * to remain awake. That would yield optimal performance at the cost of
 * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
 *
 * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
 */
/**
 * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
 * @scn: hif_softc pointer.
 * @sleep_ok: allow the target to go back to sleep (decrement keep-awake count)
 * @wait_for_it: when waking, poll until the target is verified awake
 *
 * Reference-counted keep-awake management for the target SoC; forces the
 * SoC awake on the first waker and allows sleep when the count drops to 0.
 *
 * Return: 0 on success, -EACCES when recovery is active or the link is down
 */
int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
				      bool sleep_ok, bool wait_for_it)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;
	static int max_delay;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	static int debug;

	if (scn->recovery)
		return -EACCES;

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
		debug = true;
		QDF_ASSERT(0);
		return -EACCES;
	}

	if (debug) {
		wait_for_it = true;
		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
			  __func__);
		QDF_ASSERT(0);
	}

	if (sleep_ok) {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
		hif_state->keep_awake_count--;
		if (hif_state->keep_awake_count == 0) {
			/* Allow sleep */
			hif_state->verified_awake = false;
			hif_state->sleep_ticks = qdf_system_ticks();
		}
		if (hif_state->fake_sleep == false) {
			/* Set the Fake Sleep */
			hif_state->fake_sleep = true;

			/* Start the Sleep Timer */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
	} else {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);

		if (hif_state->fake_sleep) {
			hif_state->verified_awake = true;
		} else {
			if (hif_state->keep_awake_count == 0) {
				/* Force AWAKE */
				hif_write32_mb(sc, pci_addr +
					       PCIE_LOCAL_BASE_ADDRESS +
					       PCIE_SOC_WAKE_ADDRESS,
					       PCIE_SOC_WAKE_V_MASK);
			}
		}
		hif_state->keep_awake_count++;
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);

		if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8Ms */
			int tot_delay = 0;
			int curr_delay = 5;

			for (;; ) {
				if (hif_targ_is_awake(scn, pci_addr)) {
					hif_state->verified_awake = true;
					break;
				}
				if (!hif_pci_targ_is_present(scn, pci_addr))
					break;
				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
					return hif_log_soc_wakeup_timeout(sc);

				OS_DELAY(curr_delay);
				tot_delay += curr_delay;

				/* back off the poll interval, capped at 50us */
				if (curr_delay < 50)
					curr_delay += 5;
			}

			/*
			 * NB: If Target has to come out of Deep Sleep,
			 * this may take a few Msecs. Typically, though
			 * this delay should be <30us.
			 */
			if (tot_delay > max_delay)
				max_delay = tot_delay;
		}
	}

	if (debug && hif_state->verified_awake) {
		debug = 0;
		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
			  __func__,
			  hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
					PCIE_INTR_ENABLE_ADDRESS),
			  hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
					PCIE_INTR_CAUSE_ADDRESS),
			  hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
					CPU_INTR_ADDRESS),
			  hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
					PCIE_INTR_CLR_ADDRESS),
			  hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
					CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
	}

	return 0;
}

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* Read a target register and record the access in the global trace log */
uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
{
	uint32_t value;
	void *addr;

	addr = scn->mem + offset;
	value = hif_read32_mb(scn, addr);

	{
		unsigned long irq_flags;
		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;

		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
		pcie_access_log[idx].is_write = false;
		pcie_access_log[idx].addr = addr;
		pcie_access_log[idx].value = value;
		pcie_access_log_seqnum++;
		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
	}

	return value;
}

/* Write a target register and record the access in the global trace log */
void
hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
{
	void *addr;

	addr = scn->mem + (offset);
	hif_write32_mb(scn, addr, value);

	{
		unsigned long irq_flags;
		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;

		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
		pcie_access_log[idx].is_write = true;
		pcie_access_log[idx].addr = addr;
		pcie_access_log[idx].value = value;
		pcie_access_log_seqnum++;
		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
	}
}

/**
 * hif_target_dump_access_log() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
void hif_target_dump_access_log(void)
{
	int idx, len, start_idx, cur_idx;
	unsigned long irq_flags;

	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
	/* ring buffer: once it wraps, start from the oldest entry */
	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
		len = PCIE_ACCESS_LOG_NUM;
		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
	} else {
		len = pcie_access_log_seqnum;
		start_idx = 0;
	}

	for (idx = 0; idx < len; idx++) {
		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
			  __func__, idx,
			  pcie_access_log[cur_idx].seqnum,
			  pcie_access_log[cur_idx].is_write,
			  pcie_access_log[cur_idx].addr,
			  pcie_access_log[cur_idx].value);
	}

	pcie_access_log_seqnum = 0;
	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
}
#endif

#ifndef HIF_AHB
/* AHB stubs: must never be reached on a PCI-only build */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}

int hif_ahb_configure_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}
#endif

/* Per-CE MSI handler: dispatch straight to the CE's tasklet entry */
static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;

	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}

extern const char *ce_name[];

/* Look up the OS irq number assigned to a given copy engine */
static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
{
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	return pci_scn->ce_msi_irq_num[ce_id];
}

/* hif_srng_msi_irq_disable() - disable the irq for msi
 * @hif_sc: hif
context 3320 * @ce_id: which ce to disable copy complete interrupts for 3321 * 3322 * since MSI interrupts are not level based, the system can function 3323 * without disabling these interrupts. Interrupt mitigation can be 3324 * added here for better system performance. 3325 */ 3326 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id) 3327 { 3328 disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); 3329 } 3330 3331 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id) 3332 { 3333 enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); 3334 } 3335 3336 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id) 3337 { 3338 disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); 3339 } 3340 3341 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id) 3342 { 3343 enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); 3344 } 3345 3346 static int hif_ce_msi_configure_irq(struct hif_softc *scn) 3347 { 3348 int ret; 3349 int ce_id, irq; 3350 uint32_t msi_data_start; 3351 uint32_t msi_data_count; 3352 uint32_t msi_irq_start; 3353 struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn); 3354 struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn); 3355 3356 /* do wake irq assignment */ 3357 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE", 3358 &msi_data_count, &msi_data_start, 3359 &msi_irq_start); 3360 if (ret) 3361 return ret; 3362 3363 scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start); 3364 ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 3365 IRQF_NO_SUSPEND, "wlan_wake_irq", scn); 3366 if (ret) 3367 return ret; 3368 3369 /* do ce irq assignments */ 3370 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 3371 &msi_data_count, &msi_data_start, 3372 &msi_irq_start); 3373 if (ret) 3374 goto free_wake_irq; 3375 3376 if (ce_srng_based(scn)) { 3377 scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable; 3378 scn->bus_ops.hif_irq_enable 
= &hif_ce_srng_msi_irq_enable; 3379 } else { 3380 scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable; 3381 scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable; 3382 } 3383 3384 scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq; 3385 3386 /* needs to match the ce_id -> irq data mapping 3387 * used in the srng parameter configuration 3388 */ 3389 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 3390 unsigned int msi_data = (ce_id % msi_data_count) + 3391 msi_irq_start; 3392 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); 3393 HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)", 3394 __func__, ce_id, msi_data, irq, 3395 &ce_sc->tasklets[ce_id]); 3396 3397 /* implies the ce is also initialized */ 3398 if (!ce_sc->tasklets[ce_id].inited) 3399 continue; 3400 3401 pci_sc->ce_msi_irq_num[ce_id] = irq; 3402 ret = request_irq(irq, hif_ce_interrupt_handler, 3403 IRQF_SHARED, 3404 ce_name[ce_id], 3405 &ce_sc->tasklets[ce_id]); 3406 if (ret) 3407 goto free_irq; 3408 } 3409 3410 return ret; 3411 3412 free_irq: 3413 /* the request_irq for the last ce_id failed so skip it. 
*/ 3414 while (ce_id > 0 && ce_id < scn->ce_count) { 3415 unsigned int msi_data; 3416 3417 ce_id--; 3418 msi_data = (ce_id % msi_data_count) + msi_irq_start; 3419 irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); 3420 free_irq(irq, &ce_sc->tasklets[ce_id]); 3421 } 3422 3423 free_wake_irq: 3424 free_irq(scn->wake_irq, scn->qdf_dev->dev); 3425 scn->wake_irq = 0; 3426 3427 return ret; 3428 } 3429 3430 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group) 3431 { 3432 int i; 3433 3434 for (i = 0; i < hif_ext_group->numirq; i++) 3435 disable_irq_nosync(hif_ext_group->os_irq[i]); 3436 } 3437 3438 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group) 3439 { 3440 int i; 3441 3442 for (i = 0; i < hif_ext_group->numirq; i++) 3443 enable_irq(hif_ext_group->os_irq[i]); 3444 } 3445 3446 /** 3447 * hif_pci_get_irq_name() - get irqname 3448 * This function gives irqnumber to irqname 3449 * mapping. 3450 * 3451 * @irq_no: irq number 3452 * 3453 * Return: irq name 3454 */ 3455 const char *hif_pci_get_irq_name(int irq_no) 3456 { 3457 return "pci-dummy"; 3458 } 3459 3460 int hif_pci_configure_grp_irq(struct hif_softc *scn, 3461 struct hif_exec_context *hif_ext_group) 3462 { 3463 int ret = 0; 3464 int irq = 0; 3465 int j; 3466 3467 hif_ext_group->irq_enable = &hif_exec_grp_irq_enable; 3468 hif_ext_group->irq_disable = &hif_exec_grp_irq_disable; 3469 hif_ext_group->irq_name = &hif_pci_get_irq_name; 3470 hif_ext_group->work_complete = &hif_dummy_grp_done; 3471 3472 for (j = 0; j < hif_ext_group->numirq; j++) { 3473 irq = hif_ext_group->irq[j]; 3474 3475 hif_info("request_irq = %d for grp %d", 3476 irq, hif_ext_group->grp_id); 3477 ret = request_irq(irq, 3478 hif_ext_group_interrupt_handler, 3479 IRQF_SHARED | IRQF_NO_SUSPEND, 3480 "wlan_EXT_GRP", 3481 hif_ext_group); 3482 if (ret) { 3483 HIF_ERROR("%s: request_irq failed ret = %d", 3484 __func__, ret); 3485 return -EFAULT; 3486 } 3487 hif_ext_group->os_irq[j] = irq; 3488 } 3489 
hif_ext_group->irq_requested = true; 3490 return 0; 3491 } 3492 3493 /** 3494 * hif_configure_irq() - configure interrupt 3495 * 3496 * This function configures interrupt(s) 3497 * 3498 * @sc: PCIe control struct 3499 * @hif_hdl: struct HIF_CE_state 3500 * 3501 * Return: 0 - for success 3502 */ 3503 int hif_configure_irq(struct hif_softc *scn) 3504 { 3505 int ret = 0; 3506 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 3507 3508 HIF_TRACE("%s: E", __func__); 3509 3510 if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) { 3511 scn->request_irq_done = false; 3512 return 0; 3513 } 3514 3515 hif_init_reschedule_tasklet_work(sc); 3516 3517 ret = hif_ce_msi_configure_irq(scn); 3518 if (ret == 0) { 3519 goto end; 3520 } 3521 3522 switch (scn->target_info.target_type) { 3523 case TARGET_TYPE_IPQ4019: 3524 ret = hif_ahb_configure_legacy_irq(sc); 3525 break; 3526 case TARGET_TYPE_QCA8074: 3527 case TARGET_TYPE_QCA8074V2: 3528 case TARGET_TYPE_QCA6018: 3529 ret = hif_ahb_configure_irq(sc); 3530 break; 3531 default: 3532 ret = hif_pci_configure_legacy_irq(sc); 3533 break; 3534 } 3535 if (ret < 0) { 3536 HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d", 3537 __func__, ret); 3538 return ret; 3539 } 3540 end: 3541 scn->request_irq_done = true; 3542 return 0; 3543 } 3544 3545 /** 3546 * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0 3547 * @scn: hif control structure 3548 * 3549 * Sets IRQ bit in LF Timer Status Address to awake peregrine/swift 3550 * stuck at a polling loop in pcie_address_config in FW 3551 * 3552 * Return: none 3553 */ 3554 static void hif_trigger_timer_irq(struct hif_softc *scn) 3555 { 3556 int tmp; 3557 /* Trigger IRQ on Peregrine/Swift by setting 3558 * IRQ Bit of LF_TIMER 0 3559 */ 3560 tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS + 3561 SOC_LF_TIMER_STATUS0_ADDRESS)); 3562 /* Set Raw IRQ Bit */ 3563 tmp |= 1; 3564 /* SOC_LF_TIMER_STATUS0 */ 3565 hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS + 3566 
SOC_LF_TIMER_STATUS0_ADDRESS), tmp); 3567 } 3568 3569 /** 3570 * hif_target_sync() : ensure the target is ready 3571 * @scn: hif control structure 3572 * 3573 * Informs fw that we plan to use legacy interupts so that 3574 * it can begin booting. Ensures that the fw finishes booting 3575 * before continuing. Should be called before trying to write 3576 * to the targets other registers for the first time. 3577 * 3578 * Return: none 3579 */ 3580 static void hif_target_sync(struct hif_softc *scn) 3581 { 3582 hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS | 3583 PCIE_INTR_ENABLE_ADDRESS), 3584 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 3585 /* read to flush pcie write */ 3586 (void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS | 3587 PCIE_INTR_ENABLE_ADDRESS)); 3588 3589 hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS + 3590 PCIE_SOC_WAKE_ADDRESS, 3591 PCIE_SOC_WAKE_V_MASK); 3592 while (!hif_targ_is_awake(scn, scn->mem)) 3593 ; 3594 3595 if (HAS_FW_INDICATOR) { 3596 int wait_limit = 500; 3597 int fw_ind = 0; 3598 int retry_count = 0; 3599 uint32_t target_type = scn->target_info.target_type; 3600 fw_retry: 3601 HIF_TRACE("%s: Loop checking FW signal", __func__); 3602 while (1) { 3603 fw_ind = hif_read32_mb(scn, scn->mem + 3604 FW_INDICATOR_ADDRESS); 3605 if (fw_ind & FW_IND_INITIALIZED) 3606 break; 3607 if (wait_limit-- < 0) 3608 break; 3609 hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS | 3610 PCIE_INTR_ENABLE_ADDRESS), 3611 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 3612 /* read to flush pcie write */ 3613 (void)hif_read32_mb(scn, scn->mem + 3614 (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)); 3615 3616 qdf_mdelay(10); 3617 } 3618 if (wait_limit < 0) { 3619 if (target_type == TARGET_TYPE_AR9888 && 3620 retry_count++ < 2) { 3621 hif_trigger_timer_irq(scn); 3622 wait_limit = 500; 3623 goto fw_retry; 3624 } 3625 HIF_TRACE("%s: FW signal timed out", 3626 __func__); 3627 qdf_assert_always(0); 3628 } else { 3629 HIF_TRACE("%s: 
Got FW signal, retries = %x", 3630 __func__, 500-wait_limit); 3631 } 3632 } 3633 hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS + 3634 PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); 3635 } 3636 3637 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc, 3638 struct device *dev) 3639 { 3640 struct pld_soc_info info; 3641 3642 pld_get_soc_info(dev, &info); 3643 sc->mem = info.v_addr; 3644 sc->ce_sc.ol_sc.mem = info.v_addr; 3645 sc->ce_sc.ol_sc.mem_pa = info.p_addr; 3646 } 3647 3648 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc, 3649 struct device *dev) 3650 {} 3651 3652 static bool hif_is_pld_based_target(struct hif_pci_softc *sc, 3653 int device_id) 3654 { 3655 if (!pld_have_platform_driver_support(sc->dev)) 3656 return false; 3657 3658 switch (device_id) { 3659 case QCA6290_DEVICE_ID: 3660 case QCN9000_DEVICE_ID: 3661 case QCA6290_EMULATION_DEVICE_ID: 3662 case QCA6390_DEVICE_ID: 3663 case QCA6490_DEVICE_ID: 3664 case AR6320_DEVICE_ID: 3665 case QCN7605_DEVICE_ID: 3666 return true; 3667 } 3668 return false; 3669 } 3670 3671 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc, 3672 int device_id) 3673 { 3674 if (hif_is_pld_based_target(sc, device_id)) { 3675 sc->hif_enable_pci = hif_enable_pci_pld; 3676 sc->hif_pci_deinit = hif_pci_deinit_pld; 3677 sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld; 3678 } else { 3679 sc->hif_enable_pci = hif_enable_pci_nopld; 3680 sc->hif_pci_deinit = hif_pci_deinit_nopld; 3681 sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld; 3682 } 3683 } 3684 3685 #ifdef HIF_REG_WINDOW_SUPPORT 3686 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc, 3687 u32 target_type) 3688 { 3689 switch (target_type) { 3690 case TARGET_TYPE_QCN7605: 3691 sc->use_register_windowing = true; 3692 qdf_spinlock_create(&sc->register_access_lock); 3693 sc->register_window = 0; 3694 break; 3695 default: 3696 sc->use_register_windowing = false; 3697 } 3698 } 3699 #else 3700 static void 
hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc, 3701 u32 target_type) 3702 { 3703 sc->use_register_windowing = false; 3704 } 3705 #endif 3706 3707 /** 3708 * hif_enable_bus(): enable bus 3709 * 3710 * This function enables the bus 3711 * 3712 * @ol_sc: soft_sc struct 3713 * @dev: device pointer 3714 * @bdev: bus dev pointer 3715 * bid: bus id pointer 3716 * type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE 3717 * Return: QDF_STATUS 3718 */ 3719 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc, 3720 struct device *dev, void *bdev, 3721 const struct hif_bus_id *bid, 3722 enum hif_enable_type type) 3723 { 3724 int ret = 0; 3725 uint32_t hif_type, target_type; 3726 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc); 3727 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc); 3728 uint16_t revision_id = 0; 3729 int probe_again = 0; 3730 struct pci_dev *pdev = bdev; 3731 const struct pci_device_id *id = (const struct pci_device_id *)bid; 3732 struct hif_target_info *tgt_info; 3733 3734 if (!ol_sc) { 3735 HIF_ERROR("%s: hif_ctx is NULL", __func__); 3736 return QDF_STATUS_E_NOMEM; 3737 } 3738 3739 HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x", 3740 __func__, hif_get_conparam(ol_sc), id->device); 3741 3742 sc->pdev = pdev; 3743 sc->dev = &pdev->dev; 3744 sc->devid = id->device; 3745 sc->cacheline_sz = dma_get_cache_alignment(); 3746 tgt_info = hif_get_target_info_handle(hif_hdl); 3747 hif_pci_init_deinit_ops_attach(sc, id->device); 3748 sc->hif_pci_get_soc_info(sc, dev); 3749 again: 3750 ret = sc->hif_enable_pci(sc, pdev, id); 3751 if (ret < 0) { 3752 HIF_ERROR("%s: ERROR - hif_enable_pci error = %d", 3753 __func__, ret); 3754 goto err_enable_pci; 3755 } 3756 HIF_TRACE("%s: hif_enable_pci done", __func__); 3757 3758 /* Temporary FIX: disable ASPM on peregrine. 
3759 * Will be removed after the OTP is programmed 3760 */ 3761 hif_disable_power_gating(hif_hdl); 3762 3763 device_disable_async_suspend(&pdev->dev); 3764 pci_read_config_word(pdev, 0x08, &revision_id); 3765 3766 ret = hif_get_device_type(id->device, revision_id, 3767 &hif_type, &target_type); 3768 if (ret < 0) { 3769 HIF_ERROR("%s: invalid device id/revision_id", __func__); 3770 goto err_tgtstate; 3771 } 3772 HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x", 3773 __func__, hif_type, target_type); 3774 3775 hif_register_tbl_attach(ol_sc, hif_type); 3776 hif_target_register_tbl_attach(ol_sc, target_type); 3777 3778 hif_pci_init_reg_windowing_support(sc, target_type); 3779 3780 tgt_info->target_type = target_type; 3781 3782 if (ce_srng_based(ol_sc)) { 3783 HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__); 3784 } else { 3785 ret = hif_pci_probe_tgt_wakeup(sc); 3786 if (ret < 0) { 3787 HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d", 3788 __func__, ret); 3789 if (ret == -EAGAIN) 3790 probe_again++; 3791 goto err_tgtstate; 3792 } 3793 HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__); 3794 } 3795 3796 if (!ol_sc->mem_pa) { 3797 HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__); 3798 ret = -EIO; 3799 goto err_tgtstate; 3800 } 3801 3802 if (!ce_srng_based(ol_sc)) { 3803 hif_target_sync(ol_sc); 3804 3805 if (ADRASTEA_BU) 3806 hif_vote_link_up(hif_hdl); 3807 } 3808 3809 return 0; 3810 3811 err_tgtstate: 3812 hif_disable_pci(sc); 3813 sc->pci_enabled = false; 3814 HIF_ERROR("%s: error, hif_disable_pci done", __func__); 3815 return QDF_STATUS_E_ABORTED; 3816 3817 err_enable_pci: 3818 if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) { 3819 int delay_time; 3820 3821 HIF_INFO("%s: pci reprobe", __func__); 3822 /* 10, 40, 90, 100, 100, ... 
*/ 3823 delay_time = max(100, 10 * (probe_again * probe_again)); 3824 qdf_mdelay(delay_time); 3825 goto again; 3826 } 3827 return ret; 3828 } 3829 3830 /** 3831 * hif_pci_irq_enable() - ce_irq_enable 3832 * @scn: hif_softc 3833 * @ce_id: ce_id 3834 * 3835 * Return: void 3836 */ 3837 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id) 3838 { 3839 uint32_t tmp = 1 << ce_id; 3840 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 3841 3842 qdf_spin_lock_irqsave(&sc->irq_lock); 3843 scn->ce_irq_summary &= ~tmp; 3844 if (scn->ce_irq_summary == 0) { 3845 /* Enable Legacy PCI line interrupts */ 3846 if (LEGACY_INTERRUPTS(sc) && 3847 (scn->target_status != TARGET_STATUS_RESET) && 3848 (!qdf_atomic_read(&scn->link_suspended))) { 3849 3850 hif_write32_mb(scn, scn->mem + 3851 (SOC_CORE_BASE_ADDRESS | 3852 PCIE_INTR_ENABLE_ADDRESS), 3853 HOST_GROUP0_MASK); 3854 3855 hif_read32_mb(scn, scn->mem + 3856 (SOC_CORE_BASE_ADDRESS | 3857 PCIE_INTR_ENABLE_ADDRESS)); 3858 } 3859 } 3860 if (scn->hif_init_done == true) 3861 Q_TARGET_ACCESS_END(scn); 3862 qdf_spin_unlock_irqrestore(&sc->irq_lock); 3863 3864 /* check for missed firmware crash */ 3865 hif_fw_interrupt_handler(0, scn); 3866 } 3867 3868 /** 3869 * hif_pci_irq_disable() - ce_irq_disable 3870 * @scn: hif_softc 3871 * @ce_id: ce_id 3872 * 3873 * only applicable to legacy copy engine... 3874 * 3875 * Return: void 3876 */ 3877 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id) 3878 { 3879 /* For Rome only need to wake up target */ 3880 /* target access is maintained until interrupts are re-enabled */ 3881 Q_TARGET_ACCESS_BEGIN(scn); 3882 } 3883 3884 #ifdef FEATURE_RUNTIME_PM 3885 /** 3886 * hif_pm_runtime_get_sync() - do a get operation with sync resume 3887 * 3888 * A get operation will prevent a runtime suspend until a corresponding 3889 * put is done. 
Unlike hif_pm_runtime_get(), this API will do a sync 3890 * resume instead of requesting a resume if it is runtime PM suspended 3891 * so it can only be called in non-atomic context. 3892 * 3893 * @hif_ctx: pointer of HIF context 3894 * 3895 * Return: 0 if it is runtime PM resumed otherwise an error code. 3896 */ 3897 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx) 3898 { 3899 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 3900 int pm_state; 3901 int ret; 3902 3903 if (!sc) 3904 return -EINVAL; 3905 3906 pm_state = qdf_atomic_read(&sc->pm_state); 3907 if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED || 3908 pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) 3909 hif_info_high("Runtime PM resume is requested by %ps", 3910 (void *)_RET_IP_); 3911 3912 sc->pm_stats.runtime_get++; 3913 ret = pm_runtime_get_sync(sc->dev); 3914 3915 /* Get can return 1 if the device is already active, just return 3916 * success in that case. 3917 */ 3918 if (ret > 0) 3919 ret = 0; 3920 3921 if (ret) { 3922 sc->pm_stats.runtime_get_err++; 3923 hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d", 3924 qdf_atomic_read(&sc->pm_state), ret); 3925 hif_pm_runtime_put(hif_ctx); 3926 } 3927 3928 return ret; 3929 } 3930 3931 /** 3932 * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend 3933 * 3934 * This API will do a runtime put operation followed by a sync suspend if usage 3935 * count is 0 so it can only be called in non-atomic context. 
3936 * 3937 * @hif_ctx: pointer of HIF context 3938 * 3939 * Return: 0 for success otherwise an error code 3940 */ 3941 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx) 3942 { 3943 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 3944 int usage_count, pm_state; 3945 char *err = NULL; 3946 3947 if (!sc) 3948 return -EINVAL; 3949 3950 usage_count = atomic_read(&sc->dev->power.usage_count); 3951 if (usage_count == 1) { 3952 pm_state = qdf_atomic_read(&sc->pm_state); 3953 if (pm_state == HIF_PM_RUNTIME_STATE_NONE) 3954 err = "Ignore unexpected Put as runtime PM is disabled"; 3955 } else if (usage_count == 0) { 3956 err = "Put without a Get Operation"; 3957 } 3958 3959 if (err) { 3960 hif_pci_runtime_pm_warn(sc, err); 3961 return -EINVAL; 3962 } 3963 3964 sc->pm_stats.runtime_put++; 3965 return pm_runtime_put_sync_suspend(sc->dev); 3966 } 3967 3968 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx) 3969 { 3970 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 3971 int pm_state; 3972 3973 if (!sc) 3974 return -EINVAL; 3975 3976 pm_state = qdf_atomic_read(&sc->pm_state); 3977 if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED || 3978 pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) 3979 HIF_INFO("Runtime PM resume is requested by %ps", 3980 (void *)_RET_IP_); 3981 3982 sc->pm_stats.request_resume++; 3983 sc->pm_stats.last_resume_caller = (void *)_RET_IP_; 3984 3985 return hif_pm_request_resume(sc->dev); 3986 } 3987 3988 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) 3989 { 3990 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 3991 3992 if (!sc) 3993 return; 3994 3995 sc->pm_stats.last_busy_marker = (void *)_RET_IP_; 3996 sc->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs(); 3997 3998 return pm_runtime_mark_last_busy(sc->dev); 3999 } 4000 4001 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx) 4002 { 4003 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4004 4005 if (!sc) 
4006 return; 4007 4008 sc->pm_stats.runtime_get++; 4009 pm_runtime_get_noresume(sc->dev); 4010 } 4011 4012 /** 4013 * hif_pm_runtime_get() - do a get opperation on the device 4014 * 4015 * A get opperation will prevent a runtime suspend until a 4016 * corresponding put is done. This api should be used when sending 4017 * data. 4018 * 4019 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED, 4020 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!! 4021 * 4022 * return: success if the bus is up and a get has been issued 4023 * otherwise an error code. 4024 */ 4025 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx) 4026 { 4027 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 4028 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4029 int ret; 4030 int pm_state; 4031 4032 if (!scn) { 4033 hif_err("Could not do runtime get, scn is null"); 4034 return -EFAULT; 4035 } 4036 4037 pm_state = qdf_atomic_read(&sc->pm_state); 4038 4039 if (pm_state == HIF_PM_RUNTIME_STATE_ON || 4040 pm_state == HIF_PM_RUNTIME_STATE_NONE) { 4041 sc->pm_stats.runtime_get++; 4042 ret = __hif_pm_runtime_get(sc->dev); 4043 4044 /* Get can return 1 if the device is already active, just return 4045 * success in that case 4046 */ 4047 if (ret > 0) 4048 ret = 0; 4049 4050 if (ret) 4051 hif_pm_runtime_put(hif_ctx); 4052 4053 if (ret && ret != -EINPROGRESS) { 4054 sc->pm_stats.runtime_get_err++; 4055 hif_err("Runtime Get PM Error in pm_state:%d ret: %d", 4056 qdf_atomic_read(&sc->pm_state), ret); 4057 } 4058 4059 return ret; 4060 } 4061 4062 if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED || 4063 pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) { 4064 hif_info_high("Runtime PM resume is requested by %ps", 4065 (void *)_RET_IP_); 4066 ret = -EAGAIN; 4067 } else { 4068 ret = -EBUSY; 4069 } 4070 4071 sc->pm_stats.request_resume++; 4072 sc->pm_stats.last_resume_caller = (void *)_RET_IP_; 4073 hif_pm_request_resume(sc->dev); 4074 4075 return ret; 4076 } 4077 4078 /** 4079 * 
hif_pm_runtime_put() - do a put opperation on the device 4080 * 4081 * A put opperation will allow a runtime suspend after a corresponding 4082 * get was done. This api should be used when sending data. 4083 * 4084 * This api will return a failure if runtime pm is stopped 4085 * This api will return failure if it would decrement the usage count below 0. 4086 * 4087 * return: QDF_STATUS_SUCCESS if the put is performed 4088 */ 4089 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx) 4090 { 4091 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 4092 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4093 int pm_state, usage_count; 4094 char *error = NULL; 4095 4096 if (!scn) { 4097 HIF_ERROR("%s: Could not do runtime put, scn is null", 4098 __func__); 4099 return -EFAULT; 4100 } 4101 usage_count = atomic_read(&sc->dev->power.usage_count); 4102 4103 if (usage_count == 1) { 4104 pm_state = qdf_atomic_read(&sc->pm_state); 4105 4106 if (pm_state == HIF_PM_RUNTIME_STATE_NONE) 4107 error = "Ignoring unexpected put when runtime pm is disabled"; 4108 4109 } else if (usage_count == 0) { 4110 error = "PUT Without a Get Operation"; 4111 } 4112 4113 if (error) { 4114 hif_pci_runtime_pm_warn(sc, error); 4115 return -EINVAL; 4116 } 4117 4118 sc->pm_stats.runtime_put++; 4119 4120 hif_pm_runtime_mark_last_busy(hif_ctx); 4121 hif_pm_runtime_put_auto(sc->dev); 4122 4123 return 0; 4124 } 4125 4126 4127 /** 4128 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol 4129 * reason 4130 * @hif_sc: pci context 4131 * @lock: runtime_pm lock being acquired 4132 * 4133 * Return 0 if successful. 4134 */ 4135 static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc 4136 *hif_sc, struct hif_pm_runtime_lock *lock) 4137 { 4138 int ret = 0; 4139 4140 /* 4141 * We shouldn't be setting context->timeout to zero here when 4142 * context is active as we will have a case where Timeout API's 4143 * for the same context called back to back. 
4144 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm 4145 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend 4146 * API to ensure the timeout version is no more active and 4147 * list entry of this context will be deleted during allow suspend. 4148 */ 4149 if (lock->active) 4150 return 0; 4151 4152 ret = __hif_pm_runtime_get(hif_sc->dev); 4153 4154 /** 4155 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or 4156 * RPM_SUSPENDING. Any other negative value is an error. 4157 * We shouldn't be do runtime_put here as in later point allow 4158 * suspend gets called with the the context and there the usage count 4159 * is decremented, so suspend will be prevented. 4160 */ 4161 4162 if (ret < 0 && ret != -EINPROGRESS) { 4163 hif_sc->pm_stats.runtime_get_err++; 4164 hif_pci_runtime_pm_warn(hif_sc, 4165 "Prevent Suspend Runtime PM Error"); 4166 } 4167 4168 hif_sc->prevent_suspend_cnt++; 4169 4170 lock->active = true; 4171 4172 list_add_tail(&lock->list, &hif_sc->prevent_suspend_list); 4173 4174 hif_sc->pm_stats.prevent_suspend++; 4175 4176 HIF_ERROR("%s: in pm_state:%s ret: %d", __func__, 4177 hif_pm_runtime_state_to_string( 4178 qdf_atomic_read(&hif_sc->pm_state)), 4179 ret); 4180 4181 return ret; 4182 } 4183 4184 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc, 4185 struct hif_pm_runtime_lock *lock) 4186 { 4187 struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(hif_sc); 4188 int ret = 0; 4189 int usage_count; 4190 4191 if (hif_sc->prevent_suspend_cnt == 0) 4192 return ret; 4193 4194 if (!lock->active) 4195 return ret; 4196 4197 usage_count = atomic_read(&hif_sc->dev->power.usage_count); 4198 4199 /* 4200 * During Driver unload, platform driver increments the usage 4201 * count to prevent any runtime suspend getting called. 4202 * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the 4203 * usage_count should be one. 
Ideally this shouldn't happen as 4204 * context->active should be active for allow suspend to happen 4205 * Handling this case here to prevent any failures. 4206 */ 4207 if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE 4208 && usage_count == 1) || usage_count == 0) { 4209 hif_pci_runtime_pm_warn(hif_sc, 4210 "Allow without a prevent suspend"); 4211 return -EINVAL; 4212 } 4213 4214 list_del(&lock->list); 4215 4216 hif_sc->prevent_suspend_cnt--; 4217 4218 lock->active = false; 4219 lock->timeout = 0; 4220 4221 hif_pm_runtime_mark_last_busy(hif_ctx); 4222 ret = hif_pm_runtime_put_auto(hif_sc->dev); 4223 4224 HIF_ERROR("%s: in pm_state:%s ret: %d", __func__, 4225 hif_pm_runtime_state_to_string( 4226 qdf_atomic_read(&hif_sc->pm_state)), 4227 ret); 4228 4229 hif_sc->pm_stats.allow_suspend++; 4230 return ret; 4231 } 4232 4233 /** 4234 * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout 4235 * @data: calback data that is the pci context 4236 * 4237 * if runtime locks are acquired with a timeout, this function releases 4238 * the locks when the last runtime lock expires. 4239 * 4240 * dummy implementation until lock acquisition is implemented. 
4241 */ 4242 static void hif_pm_runtime_lock_timeout_fn(void *data) 4243 { 4244 struct hif_pci_softc *hif_sc = data; 4245 unsigned long timer_expires; 4246 struct hif_pm_runtime_lock *context, *temp; 4247 4248 spin_lock_bh(&hif_sc->runtime_lock); 4249 4250 timer_expires = hif_sc->runtime_timer_expires; 4251 4252 /* Make sure we are not called too early, this should take care of 4253 * following case 4254 * 4255 * CPU0 CPU1 (timeout function) 4256 * ---- ---------------------- 4257 * spin_lock_irq 4258 * timeout function called 4259 * 4260 * mod_timer() 4261 * 4262 * spin_unlock_irq 4263 * spin_lock_irq 4264 */ 4265 if (timer_expires > 0 && !time_after(timer_expires, jiffies)) { 4266 hif_sc->runtime_timer_expires = 0; 4267 list_for_each_entry_safe(context, temp, 4268 &hif_sc->prevent_suspend_list, list) { 4269 if (context->timeout) { 4270 __hif_pm_runtime_allow_suspend(hif_sc, context); 4271 hif_sc->pm_stats.allow_suspend_timeout++; 4272 } 4273 } 4274 } 4275 4276 spin_unlock_bh(&hif_sc->runtime_lock); 4277 } 4278 4279 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc, 4280 struct hif_pm_runtime_lock *data) 4281 { 4282 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc); 4283 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc); 4284 struct hif_pm_runtime_lock *context = data; 4285 4286 if (!sc->hif_config.enable_runtime_pm) 4287 return 0; 4288 4289 if (!context) 4290 return -EINVAL; 4291 4292 if (in_irq()) 4293 WARN_ON(1); 4294 4295 spin_lock_bh(&hif_sc->runtime_lock); 4296 context->timeout = 0; 4297 __hif_pm_runtime_prevent_suspend(hif_sc, context); 4298 spin_unlock_bh(&hif_sc->runtime_lock); 4299 4300 return 0; 4301 } 4302 4303 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc, 4304 struct hif_pm_runtime_lock *data) 4305 { 4306 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc); 4307 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc); 4308 struct hif_pm_runtime_lock *context = data; 4309 4310 if (!sc->hif_config.enable_runtime_pm) 4311 
return 0; 4312 4313 if (!context) 4314 return -EINVAL; 4315 4316 if (in_irq()) 4317 WARN_ON(1); 4318 4319 spin_lock_bh(&hif_sc->runtime_lock); 4320 4321 __hif_pm_runtime_allow_suspend(hif_sc, context); 4322 4323 /* The list can be empty as well in cases where 4324 * we have one context in the list and the allow 4325 * suspend came before the timer expires and we delete 4326 * context above from the list. 4327 * When list is empty prevent_suspend count will be zero. 4328 */ 4329 if (hif_sc->prevent_suspend_cnt == 0 && 4330 hif_sc->runtime_timer_expires > 0) { 4331 qdf_timer_free(&hif_sc->runtime_timer); 4332 hif_sc->runtime_timer_expires = 0; 4333 } 4334 4335 spin_unlock_bh(&hif_sc->runtime_lock); 4336 4337 return 0; 4338 } 4339 4340 /** 4341 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout 4342 * @ol_sc: HIF context 4343 * @lock: which lock is being acquired 4344 * @delay: Timeout in milliseconds 4345 * 4346 * Prevent runtime suspend with a timeout after which runtime suspend would be 4347 * allowed. This API uses a single timer to allow the suspend and timer is 4348 * modified if the timeout is changed before timer fires. 4349 * If the timeout is less than autosuspend_delay then use mark_last_busy instead 4350 * of starting the timer. 4351 * 4352 * It is wise to try not to use this API and correct the design if possible. 
4353 * 4354 * Return: 0 on success and negative error code on failure 4355 */ 4356 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc, 4357 struct hif_pm_runtime_lock *lock, unsigned int delay) 4358 { 4359 struct hif_softc *sc = HIF_GET_SOFTC(ol_sc); 4360 struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc); 4361 4362 int ret = 0; 4363 unsigned long expires; 4364 struct hif_pm_runtime_lock *context = lock; 4365 4366 if (hif_is_load_or_unload_in_progress(sc)) { 4367 HIF_ERROR("%s: Load/unload in progress, ignore!", 4368 __func__); 4369 return -EINVAL; 4370 } 4371 4372 if (hif_is_recovery_in_progress(sc)) { 4373 HIF_ERROR("%s: LOGP in progress, ignore!", __func__); 4374 return -EINVAL; 4375 } 4376 4377 if (!sc->hif_config.enable_runtime_pm) 4378 return 0; 4379 4380 if (!context) 4381 return -EINVAL; 4382 4383 if (in_irq()) 4384 WARN_ON(1); 4385 4386 /* 4387 * Don't use internal timer if the timeout is less than auto suspend 4388 * delay. 4389 */ 4390 if (delay <= hif_sc->dev->power.autosuspend_delay) { 4391 hif_pm_request_resume(hif_sc->dev); 4392 hif_pm_runtime_mark_last_busy(ol_sc); 4393 return ret; 4394 } 4395 4396 expires = jiffies + msecs_to_jiffies(delay); 4397 expires += !expires; 4398 4399 spin_lock_bh(&hif_sc->runtime_lock); 4400 4401 context->timeout = delay; 4402 ret = __hif_pm_runtime_prevent_suspend(hif_sc, context); 4403 hif_sc->pm_stats.prevent_suspend_timeout++; 4404 4405 /* Modify the timer only if new timeout is after already configured 4406 * timeout 4407 */ 4408 if (time_after(expires, hif_sc->runtime_timer_expires)) { 4409 qdf_timer_mod(&hif_sc->runtime_timer, delay); 4410 hif_sc->runtime_timer_expires = expires; 4411 } 4412 4413 spin_unlock_bh(&hif_sc->runtime_lock); 4414 4415 HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__, 4416 hif_pm_runtime_state_to_string( 4417 qdf_atomic_read(&hif_sc->pm_state)), 4418 delay, ret); 4419 4420 return ret; 4421 } 4422 4423 /** 4424 * hif_runtime_lock_init() - API to 
initialize Runtime PM context 4425 * @name: Context name 4426 * 4427 * This API initializes the Runtime PM context of the caller and 4428 * return the pointer. 4429 * 4430 * Return: None 4431 */ 4432 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name) 4433 { 4434 struct hif_pm_runtime_lock *context; 4435 4436 HIF_INFO("Initializing Runtime PM wakelock %s", name); 4437 4438 context = qdf_mem_malloc(sizeof(*context)); 4439 if (!context) 4440 return -ENOMEM; 4441 4442 context->name = name ? name : "Default"; 4443 lock->lock = context; 4444 4445 return 0; 4446 } 4447 4448 /** 4449 * hif_runtime_lock_deinit() - This API frees the runtime pm ctx 4450 * @data: Runtime PM context 4451 * 4452 * Return: void 4453 */ 4454 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx, 4455 struct hif_pm_runtime_lock *data) 4456 { 4457 struct hif_pm_runtime_lock *context = data; 4458 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4459 4460 if (!context) { 4461 HIF_ERROR("Runtime PM wakelock context is NULL"); 4462 return; 4463 } 4464 4465 HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name); 4466 4467 /* 4468 * Ensure to delete the context list entry and reduce the usage count 4469 * before freeing the context if context is active. 
4470 */ 4471 if (sc) { 4472 spin_lock_bh(&sc->runtime_lock); 4473 __hif_pm_runtime_allow_suspend(sc, context); 4474 spin_unlock_bh(&sc->runtime_lock); 4475 } 4476 4477 qdf_mem_free(context); 4478 } 4479 4480 /** 4481 * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended 4482 * @hif_ctx: HIF context 4483 * 4484 * Return: true for runtime suspended, otherwise false 4485 */ 4486 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx) 4487 { 4488 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4489 4490 return qdf_atomic_read(&sc->pm_state) == 4491 HIF_PM_RUNTIME_STATE_SUSPENDED; 4492 } 4493 4494 /** 4495 * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr 4496 * @hif_ctx: HIF context 4497 * 4498 * monitor_wake_intr variable can be used to indicate if driver expects wake 4499 * MSI for runtime PM 4500 * 4501 * Return: monitor_wake_intr variable 4502 */ 4503 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx) 4504 { 4505 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4506 4507 return qdf_atomic_read(&sc->monitor_wake_intr); 4508 } 4509 4510 /** 4511 * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr 4512 * @hif_ctx: HIF context 4513 * @val: value to set 4514 * 4515 * monitor_wake_intr variable can be used to indicate if driver expects wake 4516 * MSI for runtime PM 4517 * 4518 * Return: void 4519 */ 4520 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, 4521 int val) 4522 { 4523 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4524 4525 qdf_atomic_set(&sc->monitor_wake_intr, val); 4526 } 4527 4528 /** 4529 * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark my data path 4530 * @hif_ctx: HIF context 4531 * 4532 * Return: void 4533 */ 4534 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) 4535 { 4536 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4537 4538 if (!sc) 4539 return; 4540 4541 
qdf_atomic_set(&sc->pm_dp_rx_busy, 1); 4542 sc->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs(); 4543 4544 hif_pm_runtime_mark_last_busy(hif_ctx); 4545 } 4546 4547 /** 4548 * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy by dp rx 4549 * @hif_ctx: HIF context 4550 * 4551 * Return: dp rx busy set value 4552 */ 4553 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx) 4554 { 4555 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4556 4557 if (!sc) 4558 return 0; 4559 4560 return qdf_atomic_read(&sc->pm_dp_rx_busy); 4561 } 4562 4563 /** 4564 * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp 4565 * @hif_ctx: HIF context 4566 * 4567 * Return: timestamp of last mark busy by dp rx 4568 */ 4569 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx) 4570 { 4571 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); 4572 4573 if (!sc) 4574 return 0; 4575 4576 return sc->dp_last_busy_timestamp; 4577 } 4578 4579 #endif /* FEATURE_RUNTIME_PM */ 4580 4581 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id) 4582 { 4583 struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); 4584 4585 /* legacy case only has one irq */ 4586 return pci_scn->irq; 4587 } 4588 4589 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset) 4590 { 4591 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 4592 struct hif_target_info *tgt_info; 4593 4594 tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn)); 4595 4596 if (tgt_info->target_type == TARGET_TYPE_QCA6290 || 4597 tgt_info->target_type == TARGET_TYPE_QCA6390 || 4598 tgt_info->target_type == TARGET_TYPE_QCA6490 || 4599 tgt_info->target_type == TARGET_TYPE_QCA8074) { 4600 /* 4601 * Need to consider offset's memtype for QCA6290/QCA8074, 4602 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be 4603 * well initialized/defined. 
4604 */ 4605 return 0; 4606 } 4607 4608 if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) 4609 || (offset + sizeof(unsigned int) <= sc->mem_len)) { 4610 return 0; 4611 } 4612 4613 HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n", 4614 offset, (uint32_t)(offset + sizeof(unsigned int)), 4615 sc->mem_len); 4616 4617 return -EINVAL; 4618 } 4619 4620 /** 4621 * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver 4622 * @scn: hif context 4623 * 4624 * Return: true if soc needs driver bmi otherwise false 4625 */ 4626 bool hif_pci_needs_bmi(struct hif_softc *scn) 4627 { 4628 return !ce_srng_based(scn); 4629 } 4630