Excerpts from the MediaTek T7xx PCIe modem driver (t7xx_pci.c): deep-sleep locking, the mode sysfs attribute, and the system/runtime power-management paths, grouped by containing function. The listing is not contiguous; only selected lines from each function are shown.

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, Intel Corporation. */

#include <linux/dma-mapping.h>
/* in t7xx_mode_store() */
	return -ENODEV;
	mode = READ_ONCE(t7xx_dev->mode);
	return -EBUSY;
	WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);

/* in t7xx_mode_show() */
	return -ENODEV;
	mode = READ_ONCE(t7xx_dev->mode);

/* in t7xx_mode_update() */
	WRITE_ONCE(t7xx_dev->mode, mode);
	sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode");
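/*
 * A minimal sketch (not part of the excerpt): t7xx_mode_show()/t7xx_mode_store()
 * above are sysfs show/store callbacks for the "t7xx_mode" attribute that
 * sysfs_notify() pokes. Assuming the rest of the file uses the standard
 * DEVICE_ATTR_RW() + attribute-group wiring, the glue would look roughly like
 * this; the t7xx_mode_attr/t7xx_mode_attribute_group names are illustrative.
 */
static DEVICE_ATTR_RW(t7xx_mode);	/* binds t7xx_mode_show()/t7xx_mode_store() */

static struct attribute *t7xx_mode_attr[] = {
	&dev_attr_t7xx_mode.attr,
	NULL
};

static const struct attribute_group t7xx_mode_attribute_group = {
	.attrs = t7xx_mode_attr,
};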
/* in t7xx_wait_pm_config() */
	if (ret == -ETIMEDOUT)
		dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");
/* in t7xx_pci_pm_init() */
	struct pci_dev *pdev = t7xx_dev->pdev;

	INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
	mutex_init(&t7xx_dev->md_pm_entity_mtx);
	spin_lock_init(&t7xx_dev->md_pm_lock);
	init_completion(&t7xx_dev->sleep_lock_acquire);
	init_completion(&t7xx_dev->pm_sr_ack);
	init_completion(&t7xx_dev->init_done);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

	device_init_wakeup(&pdev->dev, true);
	dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags | ...);
	pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
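/*
 * A minimal sketch (not part of the excerpt): with autosuspend configured as
 * above, a data-path user would normally bracket hardware access with
 * runtime-PM references as below. Requires <linux/pm_runtime.h>; the function
 * name t7xx_example_hw_io() is illustrative.
 */
static int t7xx_example_hw_io(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* resume the device and take a usage ref */
	if (ret < 0)
		return ret;

	/* ... access the hardware here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the ref; device may autosuspend later */
	return 0;
}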
/* in t7xx_pci_pm_init_late() */
	/* Enable the PCIe resource lock only after MD deep sleep is done */
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

	pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
	pm_runtime_allow(&t7xx_dev->pdev->dev);
	pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
	complete_all(&t7xx_dev->init_done);
/* in t7xx_pci_pm_reinit() */
	/* The device is kept in FSM re-init flow ... */
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

	pm_runtime_get_noresume(&t7xx_dev->pdev->dev);

/* in t7xx_pci_pm_exp_detected() */
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
/* in t7xx_pci_pm_entity_register() */
	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->id == pm_entity->id) {
			mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
			return -EEXIST;

	list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

/* in t7xx_pci_pm_entity_unregister() */
	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->id == pm_entity->id) {
			list_del(&pm_entity->entity);
			mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

	return -ENXIO;
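/*
 * A minimal sketch (not part of the excerpt): how a consumer would register a
 * PM entity with the list handled above. The callback signatures follow the
 * calls visible in __t7xx_pci_pm_suspend()/__t7xx_pci_pm_resume(); the struct
 * name (md_pm_entity), the ID constant and the example_* names are assumptions.
 */
static int example_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	/* Quiesce this entity's queues/DMA before the link is suspended. */
	return 0;
}

static int example_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	/* Bring the entity back up once the link has resumed. */
	return 0;
}

static struct md_pm_entity example_entity = {
	.id	 = PM_ENTITY_ID_CTRL1,	/* duplicate IDs are rejected with -EEXIST */
	.suspend = example_suspend,
	.resume	 = example_resume,
};

static int example_register(struct t7xx_pci_dev *t7xx_dev, void *ctx)
{
	example_entity.entity_param = ctx;
	return t7xx_pci_pm_entity_register(t7xx_dev, &example_entity);
}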
/* in t7xx_pci_sleep_disable_complete() */
	struct device *dev = &t7xx_dev->pdev->dev;

	ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire, ...);
/**
 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 *
 * Lock the deep sleep capability; note that the device can still enter the deep sleep
 * state while it is in D0 from the host's point of view.
 *
 * If the device is in deep sleep, wake it up and disable the deep sleep capability.
 */

/* in t7xx_pci_disable_sleep() */
	spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
	t7xx_dev->sleep_disable_count++;
	if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)

	if (t7xx_dev->sleep_disable_count == 1) {
		reinit_completion(&t7xx_dev->sleep_lock_acquire);

	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);

	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
	complete_all(&t7xx_dev->sleep_lock_acquire);
/**
 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 *
 * After enabling deep sleep, the device can enter the deep sleep state again.
 */

/* in t7xx_pci_enable_sleep() */
	spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
	t7xx_dev->sleep_disable_count--;
	if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)

	if (t7xx_dev->sleep_disable_count == 0)

	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
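/*
 * A minimal sketch (not part of the excerpt): the usage pattern implied by the
 * kernel-doc above is to take the deep-sleep lock, wait for the device to
 * actually leave deep sleep, do the hardware access, then release the lock.
 * The caller name is illustrative, and t7xx_pci_sleep_disable_complete() is
 * assumed to return nonzero when the lock was acquired before its timeout.
 */
static int example_doorbell_write(struct t7xx_pci_dev *t7xx_dev)
{
	int ret = 0;

	t7xx_pci_disable_sleep(t7xx_dev);		/* bump sleep_disable_count */
	if (!t7xx_pci_sleep_disable_complete(t7xx_dev)) {
		ret = -ETIMEDOUT;			/* device never left deep sleep */
		goto exit;
	}

	/* ... safe to ring doorbells / touch DMA registers here ... */

exit:
	t7xx_pci_enable_sleep(t7xx_dev);		/* count drops; deep sleep re-armed at zero */
	return ret;
}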
/* in t7xx_send_pm_request() */
	reinit_completion(&t7xx_dev->pm_sr_ack);

	wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack, ...);
		return -ETIMEDOUT;
/* in __t7xx_pci_pm_suspend() */
	if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT ||
	    READ_ONCE(t7xx_dev->mode) != T7XX_READY) {
		dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
		return -EFAULT;

	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
	t7xx_dev->rgu_pci_irq_en = false;

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (!entity->suspend)

		ret = entity->suspend(t7xx_dev, entity->entity_param);
			entity_id = entity->id;
			dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);

		dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);

		dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->suspend_late)
			entity->suspend_late(t7xx_dev, entity->entity_param);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity_id == entity->id)

		if (entity->resume)
			entity->resume(t7xx_dev, entity->entity_param);

	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
/* in t7xx_pcie_interrupt_reinit() */
	/* ... following function will re-enable PCIe interrupts. */

/* in t7xx_pcie_reinit() */
	ret = pcim_enable_device(t7xx_dev->pdev);
/* in t7xx_send_fsm_command() */
	struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret = -EINVAL;

		t7xx_dev->rgu_pci_irq_en = true;

/* in t7xx_pci_reprobe_early() */
	enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);

	pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
/* in __t7xx_pci_pm_resume() */
	if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {

		atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
		t7xx_dev->rgu_pci_irq_en = true;

		atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->resume_early)
			entity->resume_early(t7xx_dev, entity->entity_param);

		dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);

		dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->resume) {
			ret = entity->resume(t7xx_dev, entity->entity_param);
				dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
					entity->id, ret);

	t7xx_dev->rgu_pci_irq_en = true;

	pm_runtime_mark_last_busy(&pdev->dev);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
/* in t7xx_pci_pm_prepare() */
	if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
		dev_warn(dev, "Not ready for system sleep.\n");
		return -ETIMEDOUT;
/* in t7xx_request_irq() */
		if (!t7xx_dev->intr_handler[i])

		irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
					   dev_driver_string(&pdev->dev), i);
			ret = -ENOMEM;

		ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
					   t7xx_dev->intr_thread[i], 0, irq_descr,
					   t7xx_dev->callback_param[i]);
			dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);

	while (i--) {
		if (!t7xx_dev->intr_handler[i])

		free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
/* in t7xx_setup_msix() */
	struct pci_dev *pdev = t7xx_dev->pdev;

	/* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */
		dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
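/*
 * A minimal sketch (not part of the excerpt): the comment above implies that a
 * power-of-2 number of MSI-X vectors is allocated even though only six are
 * used. With the generic PCI IRQ API that allocation would look roughly like
 * this; the T7XX_MSIX_VEC_COUNT macro is illustrative.
 */
#define T7XX_MSIX_VEC_COUNT	8	/* next power of two >= 6 */

	ret = pci_alloc_irq_vectors(pdev, T7XX_MSIX_VEC_COUNT, T7XX_MSIX_VEC_COUNT,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
		return ret;
	}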
/* in t7xx_interrupt_init() */
	if (!t7xx_dev->pdev->msix_cap)
		return -EINVAL;

/* in t7xx_pci_infracfg_ao_calc() */
	t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
					       INFRACFG_AO_DEV_CHIP -
					       t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
/* in t7xx_pci_probe() */
	t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
		return -ENOMEM;

	t7xx_dev->pdev = pdev;

		dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
		return -ENOMEM;

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);

	t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];

	ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj, ...);

	sysfs_remove_group(&t7xx_dev->pdev->dev.kobj, ...);
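/*
 * A minimal sketch (not part of the excerpt): the two separate
 * dma_set_mask()/dma_set_coherent_mask() calls in the probe fragment above can
 * equivalently be collapsed into the combined helper from <linux/dma-mapping.h>,
 * which sets both masks and fails if either cannot be satisfied.
 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "Could not set 64-bit DMA masks: %d\n", ret);
		return ret;
	}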
/* in t7xx_pci_remove() */
	sysfs_remove_group(&t7xx_dev->pdev->dev.kobj, ...);

		if (!t7xx_dev->intr_handler[i])

		free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);

	pci_free_irq_vectors(t7xx_dev->pdev);