
/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * ...
 * to perform the transition from D3hot to D3cold. Xe may disallow this
 * ...
 * (PC-states), and/or other low level power states. Xe PM component provides
 * ...
 * Also, Xe PM provides get and put functions that Xe driver will use to
 * ...
 */
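The get and put entry points mentioned above bracket any code that touches the hardware. A minimal sketch of that usage pattern, assuming a hypothetical caller name (example_hw_access) and eliding the actual hardware access:

/* Hedged sketch of the get/put bracket the DOC text describes;
 * example_hw_access() is a hypothetical caller, not part of the driver. */
static void example_hw_access(struct xe_device *xe)
{
        xe_pm_runtime_get(xe);          /* wake the device if needed */

        /* ... safe to touch registers / submit work here ... */

        xe_pm_runtime_put(xe);          /* drop the ref; device may autosuspend */
}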
/**
 * ...
 * @xe: The xe device.
 * ...
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable && !xe->info.has_sriov;
}
static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 ...);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 ...);
}
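The elided ternary arguments presumably select between two static lockdep maps, one for the reclaim-safe case and one for the D3cold-capable case. A sketch of how such pseudo-lock maps are declared, assuming names along these lines (the actual identifiers in xe_pm.c may differ):

#include <linux/lockdep.h>

/* Assumed declarations: two pseudo-locks so lockdep can model blocking on
 * the runtime PM callbacks. */
static struct lockdep_map xe_pm_runtime_d3cold_map = {
        .name = "xe_rpm_d3cold_map"
};

static struct lockdep_map xe_pm_runtime_nod3cold_map = {
        .name = "xe_rpm_nod3cold_map"
};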
/**
 * ...
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	...
	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	for_each_gt(gt, xe, id)
		...

	xe_display_pm_suspend(xe);
	...
	err = xe_bo_evict_all(xe);
	...
	for_each_gt(gt, xe, id) {
		...
		xe_display_pm_resume(xe);
		...
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;
	...
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	...
}
/**
 * ...
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	...
	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		...

	err = xe_pcode_ready(xe, true);
	...
	xe_display_pm_resume_early(xe);
	...
	err = xe_bo_restore_kernel(xe);
	...
	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		...

	xe_display_pm_resume(xe);

	err = xe_bo_restore_user(xe);
	...
	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
	...
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	...
}
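Neither helper is invoked directly by the PM core; they are reached through the PCI driver's struct dev_pm_ops. A hedged sketch of that wiring, with hypothetical wrapper names (the real glue lives elsewhere, e.g. xe_pci.c, and may differ; pdev_to_xe_device() is assumed from the driver's PCI layer):

/* Hypothetical wrappers translating PM-core callbacks to the helpers above. */
static int xe_pci_suspend(struct device *dev)
{
        return xe_pm_suspend(pdev_to_xe_device(to_pci_dev(dev)));
}

static int xe_pci_resume(struct device *dev)
{
        return xe_pm_resume(pdev_to_xe_device(to_pci_dev(dev)));
}

static const struct dev_pm_ops xe_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
};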
static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	...
	drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
	...
	drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
	...
}
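Judging from the two debug messages, the elided checks test whether the root port can signal PME# from D3cold and whether ACPI exposes a _PR3 power resource, both prerequisites for removing power entirely. A sketch using the standard PCI helpers; d3cold_root_port_ok() is a hypothetical refactoring of the elided body:

static bool d3cold_root_port_ok(struct xe_device *xe, struct pci_dev *pdev)
{
        struct pci_dev *root_pdev = pcie_find_root_port(pdev);

        if (!root_pdev)
                return false;

        /* D3cold needs PME# wakeup capability from the root port... */
        if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
                drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
                return false;
        }

        /* ...and an ACPI _PR3 power resource on it to remove power. */
        if (!pci_pr3_present(root_pdev)) {
                drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
                return false;
        }

        return true;
}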
static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	...
	if (IS_DGFX(xe))
		...
}
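The elided body presumably performs the usual runtime PM enabling sequence; the IS_DGFX() branch likely adjusts behavior for discrete cards. A sketch under those assumptions (the autosuspend delay value is illustrative):

        /* Assumed shape of the elided setup: opt in to autosuspend and
         * return the initial usage reference to the PM core. */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 1000);    /* ms, illustrative */
        pm_runtime_set_active(dev);
        pm_runtime_allow(dev);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put(dev);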
int xe_pm_init_early(struct xe_device *xe)
{
	...
	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	...
	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	...
	return 0;
}
/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 * ...
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	...
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		...
		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
		...
	}

	xe_pm_runtime_init(xe);

	return 0;
}
/**
 * ...
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	...
}
static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);
	...
}

/**
 * ...
 */
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	...
	return READ_ONCE(xe->pm_callback_task);
}
/**
 * ...
 * @xe: xe device instance
 * ...
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}
/**
 * ...
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	...
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	...
	xe_pm_write_callback_task(xe, current);
	...
	xe_rpm_lockmap_acquire(xe);
	...
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(...,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		...
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		...
	}

	for_each_gt(gt, xe, id) {
		...
	}

	xe_irq_suspend(xe);

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);
	...
	xe_display_pm_runtime_resume(xe);
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	...
}
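The truncated list walk above presumably releases the mmap offsets of buffer objects that have live VRAM userfault mappings, so the next CPU fault must take a fresh runtime PM reference. A hedged reconstruction (the cursor names bo/on and the callee are assumptions from context):

        struct xe_bo *bo, *on;

        mutex_lock(&xe->mem_access.vram_userfault.lock);
        list_for_each_entry_safe(bo, on,
                                 &xe->mem_access.vram_userfault.list,
                                 vram_userfault_link)
                xe_bo_runtime_pm_release_mmap_offset(bo);
        mutex_unlock(&xe->mem_access.vram_userfault.lock);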
/**
 * ...
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	...
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	...
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		...
		xe_display_pm_resume_early(xe);
		...
		err = xe_bo_restore_kernel(xe);
		...
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		...

	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_user(xe);
		...
	}
	...
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	...
}
/*
 * ...
 * sensitive to ever see the 0 -> 1 transition with the caller's locks
 * ...
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}
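Acquiring and immediately releasing the map records in lockdep that the current context may block on the runtime PM callbacks. An illustration of the inversion this catches (the mutex and call sites are hypothetical):

/*
 * Thread A (hypothetical):            Runtime suspend callback:
 *   mutex_lock(&m);                     xe_rpm_lockmap_acquire(xe);
 *   xe_pm_runtime_get(xe);              mutex_lock(&m);   <- inversion
 *     -> xe_rpm_might_enter_cb(xe);
 *
 * lockdep records "m -> rpm-map" in thread A and "rpm-map -> m" in the
 * callback, and reports the potential deadlock even if it never fires.
 */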
/**
 * ...
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}
/**
 * ...
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}
/**
 * ...
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}
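A plausible call site is the ioctl entry path. Note that pm_runtime_get_sync() takes the usage reference even when it fails, so the caller must put it on both paths. A hedged sketch modeled on common DRM practice (the wrapper itself is illustrative):

static long example_drm_ioctl(struct file *file, unsigned int cmd,
                              unsigned long arg)
{
        struct drm_file *file_priv = file->private_data;
        struct xe_device *xe = to_xe_device(file_priv->minor->dev);
        long ret;

        ret = xe_pm_runtime_get_ioctl(xe);
        if (ret >= 0)
                ret = drm_ioctl(file, cmd, arg);
        xe_pm_runtime_put(xe);  /* the reference is held even on failure */

        return ret;
}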
/**
 * ...
 * @xe: xe device instance
 * ...
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}
/**
 * ...
 * @xe: xe device instance
 * ...
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		...
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}
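The two helpers differ in what they accept: pm_runtime_get_if_active() takes a reference whenever the device is RPM_ACTIVE, while pm_runtime_get_if_in_use() additionally requires the usage count to already be nonzero. Both are for contexts that must never trigger a resume. A usage sketch (the caller is hypothetical):

/* Hypothetical caller that only has work to do while the device is awake. */
static void example_flush_if_awake(struct xe_device *xe)
{
        if (!xe_pm_runtime_get_if_active(xe))
                return;         /* suspended or in transition: nothing to do */

        /* ... device is guaranteed powered here ... */

        xe_pm_runtime_put(xe);
}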
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
	...
	struct device *dev = xe->drm.dev;
	...
}
/**
 * ...
 * @xe: xe device instance
 * ...
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	...
	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 ...);
	}
}
/**
 * ...
 * @xe: xe device instance
 * ...
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		...
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}
/**
 * ...
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	...
	drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM\n");
	...
}
/**
 * ...
 * @xe: xe device instance
 * ...
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	...
	u32 vram_total_mb = 0;
	...
	man = ttm_manager_type(&xe->ttm, i);
	...
	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);
	...
	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}
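The threshold is user-tunable; the d3cold-capable branch of xe_pm_init() registers sysfs before setting the default. A hedged sketch of a store-side caller (the attribute handler name is hypothetical):

/* Hypothetical sysfs store handler forwarding a MiB value from userspace. */
static ssize_t vram_d3cold_threshold_store(struct device *dev,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
{
        struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
        u32 threshold;
        int ret;

        ret = kstrtou32(buf, 0, &threshold);
        if (ret)
                return ret;

        ret = xe_pm_set_vram_threshold(xe, threshold);

        return ret ?: count;
}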
/**
 * ...
 * @xe: xe device instance
 * ...
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	...
	u32 total_vram_used_mb = 0;
	...
	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}
	...
	man = ttm_manager_type(&xe->ttm, i);
	...
	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);

	drm_dbg(&xe->drm,
		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
/**
 * ...
 * Return: 0 on success. Currently doesn't fail.
 */
int xe_pm_module_init(void)
{
	...
	return 0;
}