Lines Matching +full:os +full:- +full:initiated
1 // SPDX-License-Identifier: MIT
32 * System Level Suspend (S-States) - In general this is OS-initiated suspend
37 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
54 * Runtime PM - This infrastructure provided by the Linux kernel allows the
57 * (PC-states), and/or other low-level power states. The Xe PM component provides
66 * sysfs, debugfs, dma-buf sharing, GPU execution.
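As a rough illustration of how these layers hook together, here is a minimal sketch (the example_* names are illustrative, and the assumption that drvdata points at the xe_device is made only for this sketch) of how a PCI driver's dev_pm_ops could route the system-sleep callbacks to helpers such as xe_pm_suspend()/xe_pm_resume(), which appear further down in this listing:

        #include <linux/device.h>
        #include <linux/pm.h>
        #include "xe_pm.h"

        /* Sketch only: forward system-sleep callbacks to the xe PM helpers. */
        static int example_pm_suspend(struct device *dev)
        {
                struct xe_device *xe = dev_get_drvdata(dev);    /* assumption for this sketch */

                return xe_pm_suspend(xe);       /* S-state entry: S3 / S2idle */
        }

        static int example_pm_resume(struct device *dev)
        {
                struct xe_device *xe = dev_get_drvdata(dev);    /* assumption for this sketch */

                return xe_pm_resume(xe);
        }

        static const struct dev_pm_ops example_pm_ops = {
                SET_SYSTEM_SLEEP_PM_OPS(example_pm_suspend, example_pm_resume)
                /* Runtime PM (D-states / D3cold) would be wired up here as
                 * well, e.g. with SET_RUNTIME_PM_OPS(). */
        };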
83 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
91 return !xe->d3cold.capable && !xe->info.has_sriov; in xe_rpm_reclaim_safe()
109 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
120 drm_dbg(&xe->drm, "Suspending device\n"); in xe_pm_suspend()
145 drm_dbg(&xe->drm, "Device suspended\n"); in xe_pm_suspend()
148 drm_dbg(&xe->drm, "Device suspend failed %d\n", err); in xe_pm_suspend()
153 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
165 drm_dbg(&xe->drm, "Resuming device\n"); in xe_pm_resume()
196 drm_dbg(&xe->drm, "Device resumed\n"); in xe_pm_resume()
199 drm_dbg(&xe->drm, "Device resume failed %d\n", err); in xe_pm_resume()
205 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_pm_pci_d3cold_capable()
214 drm_dbg(&xe->drm, "d3cold: PME# not supported\n"); in xe_pm_pci_d3cold_capable()
220 drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n"); in xe_pm_pci_d3cold_capable()
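The two debug messages above correspond to the two prerequisites for D3cold. Below is a minimal sketch of such a capability probe built on the generic PCI helpers pcie_find_root_port(), pci_pme_capable() and pci_pr3_present(); it illustrates the checks, not necessarily this driver's exact code:

        #include <linux/pci.h>

        /* Sketch: D3cold needs PME# wake capability and an ACPI _PR3 power
         * resource on the root port above the device. */
        static bool example_d3cold_capable(struct pci_dev *pdev)
        {
                struct pci_dev *root = pcie_find_root_port(pdev);

                if (!root)
                        return false;

                if (!pci_pme_capable(root, PCI_D3cold))
                        return false;   /* "d3cold: PME# not supported" */

                if (!pci_pr3_present(root))
                        return false;   /* "d3cold: ACPI _PR3 not present" */

                return true;
        }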
229 struct device *dev = xe->drm.dev; in xe_pm_runtime_init()
254 INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list); in xe_pm_init_early()
256 err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock); in xe_pm_init_early()
260 err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock); in xe_pm_init_early()
268 * xe_pm_init - Initialize Xe Power Management
283 xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe); in xe_pm_init()
285 if (xe->d3cold.capable) { in xe_pm_init()
301 * xe_pm_runtime_fini - Finalize Runtime PM
306 struct device *dev = xe->drm.dev; in xe_pm_runtime_fini()
315 WRITE_ONCE(xe->pm_callback_task, task); in xe_pm_write_callback_task()
319 * the extent that something else re-uses the task written in in xe_pm_write_callback_task()
330 return READ_ONCE(xe->pm_callback_task); in xe_pm_read_callback_task()
334 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
339 * It can only be used as a best-effort (non-reliable) assertion, to ensure that we are not in
346 return pm_runtime_suspended(xe->drm.dev); in xe_pm_runtime_suspended()
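Given that caveat, the check is best treated as a debug aid rather than control flow; a small illustrative use:

        #include <drm/drm_print.h>
        #include "xe_pm.h"

        /* Sketch: best-effort assertion only; never gate behaviour on it,
         * since the runtime PM state may change right after the check. */
        static void example_touch_hw(struct xe_device *xe)
        {
                drm_WARN_ON(&xe->drm, xe_pm_runtime_suspended(xe));

                /* ... MMIO or VRAM access that requires the device to be awake ... */
        }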
350 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
393 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_pm_runtime_suspend()
395 &xe->mem_access.vram_userfault.list, vram_userfault_link) in xe_pm_runtime_suspend()
397 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_pm_runtime_suspend()
401 if (xe->d3cold.allowed) { in xe_pm_runtime_suspend()
415 if (xe->d3cold.allowed) in xe_pm_runtime_suspend()
426 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
443 if (xe->d3cold.allowed) { in xe_pm_runtime_resume()
466 if (xe->d3cold.allowed) { in xe_pm_runtime_resume()
481 * sensitive to ever see the 0 -> 1 transition with the caller's locks
486 * non-debug builds). Lockdep then only needs to see the
487 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
488 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
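The annotation pattern that comment describes can be sketched as follows (names are illustrative, and a single map is used here for brevity): a static lockdep map is fake-acquired and released on every "get", so lockdep records the (caller's locks) -> resume-path ordering even when the device is already awake and no real resume runs.

        #include <linux/lockdep.h>
        #include <linux/pm_runtime.h>

        #ifdef CONFIG_LOCKDEP
        static struct lockdep_map example_rpm_map = {
                .name = "example_rpm_map",
        };
        #endif

        static void example_get_sync(struct device *dev)
        {
                /* Fake acquire/release: records the ordering
                 * (caller's locks) -> example_rpm_map -> (resume-path locks)
                 * for lockdep even when no resume actually happens. */
                lock_map_acquire(&example_rpm_map);
                lock_map_release(&example_rpm_map);

                pm_runtime_get_sync(dev);
        }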
521 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
527 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_get()
533 pm_runtime_resume(xe->drm.dev); in xe_pm_runtime_get()
537 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
544 pm_runtime_put_noidle(xe->drm.dev); in xe_pm_runtime_put()
546 pm_runtime_mark_last_busy(xe->drm.dev); in xe_pm_runtime_put()
547 pm_runtime_put(xe->drm.dev); in xe_pm_runtime_put()
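Taken together, the get/put pair above forms the basic bracket that keeps the device awake across a hardware access; an illustrative caller:

        #include "xe_pm.h"

        /* Sketch: hold a runtime PM reference for the duration of the access. */
        static void example_do_work(struct xe_device *xe)
        {
                xe_pm_runtime_get(xe);          /* resumes synchronously if needed */

                /* ... register access, job submission, etc. ... */

                xe_pm_runtime_put(xe);          /* mark last-busy and drop the reference */
        }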
552 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
562 return -ELOOP; in xe_pm_runtime_get_ioctl()
565 return pm_runtime_get_sync(xe->drm.dev); in xe_pm_runtime_get_ioctl()
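Because the ioctl variant refuses to recurse when called from the PM callback task (the -ELOOP return above) and is built on pm_runtime_get_sync(), which keeps the usage count even on failure, callers drop the reference on both paths; a hedged sketch of an ioctl-style body:

        #include "xe_pm.h"

        /* Sketch: the usage count is held even when _get_ioctl() fails, so the
         * put is done on the error path as well. */
        static int example_ioctl_body(struct xe_device *xe)
        {
                int ret;

                ret = xe_pm_runtime_get_ioctl(xe);
                if (ret < 0) {
                        xe_pm_runtime_put(xe);
                        return ret;
                }

                /* ... validate arguments, touch hardware, copy results back ... */

                xe_pm_runtime_put(xe);
                return 0;
        }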
569 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
577 return pm_runtime_get_if_active(xe->drm.dev) > 0; in xe_pm_runtime_get_if_active()
581 * xe_pm_runtime_get_if_in_use - Get a new reference if the device is active and a reference is already held
591 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_get_if_in_use()
595 return pm_runtime_get_if_in_use(xe->drm.dev) > 0; in xe_pm_runtime_get_if_in_use()
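Both conditional getters share the same usage pattern: take a reference only if the device is already awake, and otherwise skip the (optional) work; for example:

        #include "xe_pm.h"

        /* Sketch: opportunistic work that is only worth doing while the device
         * is awake; if it is suspended there is nothing to flush. */
        static void example_flush_if_awake(struct xe_device *xe)
        {
                if (!xe_pm_runtime_get_if_active(xe))
                        return;

                /* ... touch hardware ... */

                xe_pm_runtime_put(xe);
        }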
605 struct device *dev = xe->drm.dev; in xe_pm_suspending_or_resuming()
607 return dev->power.runtime_status == RPM_SUSPENDING || in xe_pm_suspending_or_resuming()
608 dev->power.runtime_status == RPM_RESUMING; in xe_pm_suspending_or_resuming()
615 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
619 * protected by outer-bound callers of `xe_pm_runtime_get`.
631 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_get_noresume()
632 drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe), in xe_pm_runtime_get_noresume()
638 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
647 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_resume_and_get()
652 return pm_runtime_resume_and_get(xe->drm.dev) >= 0; in xe_pm_runtime_resume_and_get()
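When the caller does want to wake the device but must handle the case where resume fails, the combined helper is checked before any hardware access:

        #include "xe_pm.h"

        /* Sketch: resume (if needed), and proceed only when that succeeded. */
        static int example_wake_and_work(struct xe_device *xe)
        {
                if (!xe_pm_runtime_resume_and_get(xe))
                        return -EAGAIN;         /* error code chosen for the sketch only */

                /* ... hardware access ... */

                xe_pm_runtime_put(xe);
                return 0;
        }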
656 * xe_pm_assert_unbounded_bridge - Disable PM when the parent PCIe bridge is unbound (no driver)
661 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_pm_assert_unbounded_bridge()
667 if (!bridge->driver) { in xe_pm_assert_unbounded_bridge()
668 drm_warn(&xe->drm, "unbound parent pci bridge, device won't support any PM.\n"); in xe_pm_assert_unbounded_bridge()
669 device_set_pm_not_required(&pdev->dev); in xe_pm_assert_unbounded_bridge()
674 * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
687 man = ttm_manager_type(&xe->ttm, i); in xe_pm_set_vram_threshold()
689 vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024); in xe_pm_set_vram_threshold()
692 drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb); in xe_pm_set_vram_threshold()
695 return -EINVAL; in xe_pm_set_vram_threshold()
697 mutex_lock(&xe->d3cold.lock); in xe_pm_set_vram_threshold()
698 xe->d3cold.vram_threshold = threshold; in xe_pm_set_vram_threshold()
699 mutex_unlock(&xe->d3cold.lock); in xe_pm_set_vram_threshold()
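The threshold is expressed in MiB of VRAM and is validated against the total VRAM size (the -EINVAL path above). A hedged sketch of a caller that applies a user-supplied value, e.g. from a sysfs store handler:

        #include <linux/kstrtox.h>
        #include "xe_pm.h"

        /* Sketch: parse a user-provided value (MiB) and let
         * xe_pm_set_vram_threshold() reject anything above the total VRAM size. */
        static int example_apply_threshold(struct xe_device *xe, const char *buf)
        {
                u32 threshold_mb;
                int ret;

                ret = kstrtou32(buf, 0, &threshold_mb);
                if (ret)
                        return ret;

                return xe_pm_set_vram_threshold(xe, threshold_mb);
        }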
705 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
718 if (!xe->d3cold.capable) { in xe_pm_d3cold_allowed_toggle()
719 xe->d3cold.allowed = false; in xe_pm_d3cold_allowed_toggle()
724 man = ttm_manager_type(&xe->ttm, i); in xe_pm_d3cold_allowed_toggle()
731 mutex_lock(&xe->d3cold.lock); in xe_pm_d3cold_allowed_toggle()
733 if (total_vram_used_mb < xe->d3cold.vram_threshold) in xe_pm_d3cold_allowed_toggle()
734 xe->d3cold.allowed = true; in xe_pm_d3cold_allowed_toggle()
736 xe->d3cold.allowed = false; in xe_pm_d3cold_allowed_toggle()
738 mutex_unlock(&xe->d3cold.lock); in xe_pm_d3cold_allowed_toggle()
740 drm_dbg(&xe->drm, in xe_pm_d3cold_allowed_toggle()
741 "d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed)); in xe_pm_d3cold_allowed_toggle()
745 * xe_pm_module_init() - Perform xe_pm specific module initialization.