Lines Matching +full:power +full:- +full:domain

1 /* SPDX-License-Identifier: MIT */
33 for_each_if(test_bit((__domain), (__power_well)->domains.bits))
37 for_each_if(test_bit((__domain), (__power_well)->domains.bits))
40 intel_display_power_domain_str(enum intel_display_power_domain domain) in intel_display_power_domain_str() argument
42 switch (domain) { in intel_display_power_domain_str()
196 MISSING_CASE(domain); in intel_display_power_domain_str()
202 enum intel_display_power_domain domain) in __intel_display_power_is_enabled() argument
207 if (pm_runtime_suspended(dev_priv->drm.dev)) in __intel_display_power_is_enabled()
212 for_each_power_domain_well_reverse(dev_priv, power_well, domain) { in __intel_display_power_is_enabled()
226 * intel_display_power_is_enabled - check for a power domain
228 * @domain: power domain to check
230 * This function can be used to check the hw power domain state. It is mostly
232 * upon explicit power domain reference counting to ensure that the hardware
236 * threads can't disable the power well while the caller tries to read a few
240 * True when the power domain is enabled, false otherwise.
243 enum intel_display_power_domain domain) in intel_display_power_is_enabled() argument
248 power_domains = &dev_priv->display.power.domains; in intel_display_power_is_enabled()
250 mutex_lock(&power_domains->lock); in intel_display_power_is_enabled()
251 ret = __intel_display_power_is_enabled(dev_priv, domain); in intel_display_power_is_enabled()
252 mutex_unlock(&power_domains->lock); in intel_display_power_is_enabled()
261 struct i915_power_domains *power_domains = &i915->display.power.domains; in sanitize_target_dc_state()
270 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { in sanitize_target_dc_state()
274 if (power_domains->allowed_dc_mask & target_dc_state) in sanitize_target_dc_state()
284 * intel_display_power_set_target_dc_state - Set target dc state.
288 * This function sets the "DC off" power well target_dc_state,
289 * based upon this target_dc_state, "DC off" power well will
297 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_display_power_set_target_dc_state()
299 mutex_lock(&power_domains->lock); in intel_display_power_set_target_dc_state()
302 if (drm_WARN_ON(&dev_priv->drm, !power_well)) in intel_display_power_set_target_dc_state()
307 if (state == power_domains->target_dc_state) in intel_display_power_set_target_dc_state()
312 * If DC off power well is disabled, need to enable and disable the in intel_display_power_set_target_dc_state()
313 * DC off power well to effect target DC state. in intel_display_power_set_target_dc_state()
318 power_domains->target_dc_state = state; in intel_display_power_set_target_dc_state()
324 mutex_unlock(&power_domains->lock); in intel_display_power_set_target_dc_state()
330 bitmap_or(mask->bits, in __async_put_domains_mask()
331 power_domains->async_put_domains[0].bits, in __async_put_domains_mask()
332 power_domains->async_put_domains[1].bits, in __async_put_domains_mask()
343 display.power.domains); in assert_async_put_domain_masks_disjoint()
345 return !drm_WARN_ON(&i915->drm, in assert_async_put_domain_masks_disjoint()
346 bitmap_intersects(power_domains->async_put_domains[0].bits, in assert_async_put_domain_masks_disjoint()
347 power_domains->async_put_domains[1].bits, in assert_async_put_domain_masks_disjoint()
356 display.power.domains); in __async_put_domains_state_ok()
358 enum intel_display_power_domain domain; in __async_put_domains_state_ok() local
363 err |= drm_WARN_ON(&i915->drm, in __async_put_domains_state_ok()
364 !!power_domains->async_put_wakeref != in __async_put_domains_state_ok()
367 for_each_power_domain(domain, &async_put_mask) in __async_put_domains_state_ok()
368 err |= drm_WARN_ON(&i915->drm, in __async_put_domains_state_ok()
369 power_domains->domain_use_count[domain] != 1); in __async_put_domains_state_ok()
379 display.power.domains); in print_power_domains()
380 enum intel_display_power_domain domain; in print_power_domains() local
382 drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM)); in print_power_domains()
383 for_each_power_domain(domain, mask) in print_power_domains()
384 drm_dbg(&i915->drm, "%s use_count %d\n", in print_power_domains()
385 intel_display_power_domain_str(domain), in print_power_domains()
386 power_domains->domain_use_count[domain]); in print_power_domains()
394 display.power.domains); in print_async_put_domains_state()
396 drm_dbg(&i915->drm, "async_put_wakeref: %s\n", in print_async_put_domains_state()
397 str_yes_no(power_domains->async_put_wakeref)); in print_async_put_domains_state()
400 &power_domains->async_put_domains[0]); in print_async_put_domains_state()
402 &power_domains->async_put_domains[1]); in print_async_put_domains_state()
437 enum intel_display_power_domain domain) in async_put_domains_clear_domain() argument
441 clear_bit(domain, power_domains->async_put_domains[0].bits); in async_put_domains_clear_domain()
442 clear_bit(domain, power_domains->async_put_domains[1].bits); in async_put_domains_clear_domain()
449 cancel_delayed_work_sync(&power_domains->async_put_work); in cancel_async_put_work()
451 cancel_delayed_work(&power_domains->async_put_work); in cancel_async_put_work()
453 power_domains->async_put_next_delay = 0; in cancel_async_put_work()
458 enum intel_display_power_domain domain) in intel_display_power_grab_async_put_ref() argument
460 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_display_power_grab_async_put_ref()
465 if (!test_bit(domain, async_put_mask.bits)) in intel_display_power_grab_async_put_ref()
468 async_put_domains_clear_domain(power_domains, domain); in intel_display_power_grab_async_put_ref()
477 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, in intel_display_power_grab_async_put_ref()
478 fetch_and_zero(&power_domains->async_put_wakeref)); in intel_display_power_grab_async_put_ref()
487 enum intel_display_power_domain domain) in __intel_display_power_get_domain() argument
489 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in __intel_display_power_get_domain()
492 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) in __intel_display_power_get_domain()
495 for_each_power_domain_well(dev_priv, power_well, domain) in __intel_display_power_get_domain()
498 power_domains->domain_use_count[domain]++; in __intel_display_power_get_domain()
502 * intel_display_power_get - grab a power domain reference
504 * @domain: power domain to reference
506 * This function grabs a power domain reference for @domain and ensures that the
507 * power domain and all its parents are powered up. Therefore users should only
508 * grab a reference to the innermost power domain they need.
510 * Any power domain reference obtained by this function must have a symmetric
514 enum intel_display_power_domain domain) in intel_display_power_get() argument
516 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_display_power_get()
517 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); in intel_display_power_get()
519 mutex_lock(&power_domains->lock); in intel_display_power_get()
520 __intel_display_power_get_domain(dev_priv, domain); in intel_display_power_get()
521 mutex_unlock(&power_domains->lock); in intel_display_power_get()
527 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
529 * @domain: power domain to reference
531 * This function grabs a power domain reference for @domain and ensures that the
532 * power domain and all its parents are powered up. Therefore users should only
533 * grab a reference to the innermost power domain they need.
535 * Any power domain reference obtained by this function must have a symmetric
540 enum intel_display_power_domain domain) in intel_display_power_get_if_enabled() argument
542 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_display_power_get_if_enabled()
546 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); in intel_display_power_get_if_enabled()
550 mutex_lock(&power_domains->lock); in intel_display_power_get_if_enabled()
552 if (__intel_display_power_is_enabled(dev_priv, domain)) { in intel_display_power_get_if_enabled()
553 __intel_display_power_get_domain(dev_priv, domain); in intel_display_power_get_if_enabled()
559 mutex_unlock(&power_domains->lock); in intel_display_power_get_if_enabled()
562 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); in intel_display_power_get_if_enabled()
571 enum intel_display_power_domain domain) in __intel_display_power_put_domain() argument
575 const char *name = intel_display_power_domain_str(domain); in __intel_display_power_put_domain()
578 power_domains = &dev_priv->display.power.domains; in __intel_display_power_put_domain()
580 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], in __intel_display_power_put_domain()
581 "Use count on domain %s is already zero\n", in __intel_display_power_put_domain()
584 drm_WARN(&dev_priv->drm, in __intel_display_power_put_domain()
585 test_bit(domain, async_put_mask.bits), in __intel_display_power_put_domain()
586 "Async disabling of domain %s is pending\n", in __intel_display_power_put_domain()
589 power_domains->domain_use_count[domain]--; in __intel_display_power_put_domain()
591 for_each_power_domain_well_reverse(dev_priv, power_well, domain) in __intel_display_power_put_domain()
596 enum intel_display_power_domain domain) in __intel_display_power_put() argument
598 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in __intel_display_power_put()
600 mutex_lock(&power_domains->lock); in __intel_display_power_put()
601 __intel_display_power_put_domain(dev_priv, domain); in __intel_display_power_put()
602 mutex_unlock(&power_domains->lock); in __intel_display_power_put()
612 display.power.domains); in queue_async_put_domains_work()
613 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); in queue_async_put_domains_work()
614 power_domains->async_put_wakeref = wakeref; in queue_async_put_domains_work()
615 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, in queue_async_put_domains_work()
616 &power_domains->async_put_work, in queue_async_put_domains_work()
626 display.power.domains); in release_async_put_domains()
627 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; in release_async_put_domains()
628 enum intel_display_power_domain domain; in release_async_put_domains() local
633 for_each_power_domain(domain, mask) { in release_async_put_domains()
635 async_put_domains_clear_domain(power_domains, domain); in release_async_put_domains()
636 __intel_display_power_put_domain(dev_priv, domain); in release_async_put_domains()
647 display.power.domains.async_put_work.work); in intel_display_power_put_async_work()
648 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_display_power_put_async_work()
649 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; in intel_display_power_put_async_work()
653 mutex_lock(&power_domains->lock); in intel_display_power_put_async_work()
656 * Bail out if all the domain refs pending to be released were grabbed in intel_display_power_put_async_work()
659 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); in intel_display_power_put_async_work()
664 &power_domains->async_put_domains[0]); in intel_display_power_put_async_work()
668 * since here we released the corresponding async-put reference. in intel_display_power_put_async_work()
673 if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) { in intel_display_power_put_async_work()
674 bitmap_copy(power_domains->async_put_domains[0].bits, in intel_display_power_put_async_work()
675 power_domains->async_put_domains[1].bits, in intel_display_power_put_async_work()
677 bitmap_zero(power_domains->async_put_domains[1].bits, in intel_display_power_put_async_work()
681 power_domains->async_put_next_delay); in intel_display_power_put_async_work()
682 power_domains->async_put_next_delay = 0; in intel_display_power_put_async_work()
688 mutex_unlock(&power_domains->lock); in intel_display_power_put_async_work()
697 * __intel_display_power_put_async - release a power domain reference asynchronously
699 * @domain: power domain to reference
701 * @delay_ms: delay of powering down the power domain
703 * This function drops the power domain reference obtained by
704 * intel_display_power_get*() and schedules a work to power down the
706 * The power down is delayed by @delay_ms if this is >= 0, or by a default
710 enum intel_display_power_domain domain, in __intel_display_power_put_async() argument
714 struct i915_power_domains *power_domains = &i915->display.power.domains; in __intel_display_power_put_async()
715 struct intel_runtime_pm *rpm = &i915->runtime_pm; in __intel_display_power_put_async()
720 mutex_lock(&power_domains->lock); in __intel_display_power_put_async()
722 if (power_domains->domain_use_count[domain] > 1) { in __intel_display_power_put_async()
723 __intel_display_power_put_domain(i915, domain); in __intel_display_power_put_async()
728 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); in __intel_display_power_put_async()
731 if (power_domains->async_put_wakeref) { in __intel_display_power_put_async()
732 set_bit(domain, power_domains->async_put_domains[1].bits); in __intel_display_power_put_async()
733 power_domains->async_put_next_delay = max(power_domains->async_put_next_delay, in __intel_display_power_put_async()
736 set_bit(domain, power_domains->async_put_domains[0].bits); in __intel_display_power_put_async()
745 mutex_unlock(&power_domains->lock); in __intel_display_power_put_async()
754 * intel_display_power_flush_work - flushes the async display power disabling work
759 * corresponding power domains.
767 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_display_power_flush_work()
771 mutex_lock(&power_domains->lock); in intel_display_power_flush_work()
773 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); in intel_display_power_flush_work()
784 mutex_unlock(&power_domains->lock); in intel_display_power_flush_work()
787 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); in intel_display_power_flush_work()
791 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
800 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_display_power_flush_work_sync()
807 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); in intel_display_power_flush_work_sync()
812 * intel_display_power_put - release a power domain reference
814 * @domain: power domain to reference
817 * This function drops the power domain reference obtained by
818 * intel_display_power_get() and might power down the corresponding hardware
822 enum intel_display_power_domain domain, in intel_display_power_put() argument
825 __intel_display_power_put(dev_priv, domain); in intel_display_power_put()
826 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); in intel_display_power_put()
830 * intel_display_power_put_unchecked - release an unchecked power domain reference
832 * @domain: power domain to reference
834 * This function drops the power domain reference obtained by
835 * intel_display_power_get() and might power down the corresponding hardware
838 * This function is only for the power domain code's internal use to suppress wakeref
843 enum intel_display_power_domain domain) in intel_display_power_put_unchecked() argument
845 __intel_display_power_put(dev_priv, domain); in intel_display_power_put_unchecked()
846 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); in intel_display_power_put_unchecked()
853 enum intel_display_power_domain domain) in intel_display_power_get_in_set() argument
857 drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); in intel_display_power_get_in_set()
859 wf = intel_display_power_get(i915, domain); in intel_display_power_get_in_set()
861 power_domain_set->wakerefs[domain] = wf; in intel_display_power_get_in_set()
863 set_bit(domain, power_domain_set->mask.bits); in intel_display_power_get_in_set()
869 enum intel_display_power_domain domain) in intel_display_power_get_in_set_if_enabled() argument
873 drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); in intel_display_power_get_in_set_if_enabled()
875 wf = intel_display_power_get_if_enabled(i915, domain); in intel_display_power_get_in_set_if_enabled()
880 power_domain_set->wakerefs[domain] = wf; in intel_display_power_get_in_set_if_enabled()
882 set_bit(domain, power_domain_set->mask.bits); in intel_display_power_get_in_set_if_enabled()
892 enum intel_display_power_domain domain; in intel_display_power_put_mask_in_set() local
894 drm_WARN_ON(&i915->drm, in intel_display_power_put_mask_in_set()
895 !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM)); in intel_display_power_put_mask_in_set()
897 for_each_power_domain(domain, mask) { in intel_display_power_put_mask_in_set()
898 intel_wakeref_t __maybe_unused wf = -1; in intel_display_power_put_mask_in_set()
901 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); in intel_display_power_put_mask_in_set()
903 intel_display_power_put(i915, domain, wf); in intel_display_power_put_mask_in_set()
904 clear_bit(domain, power_domain_set->mask.bits); in intel_display_power_put_mask_in_set()
952 if (!dev_priv->display.params.disable_power_well) in get_allowed_dc_mask()
957 } else if (enable_dc == -1) { in get_allowed_dc_mask()
960 drm_dbg_kms(&dev_priv->drm, in get_allowed_dc_mask()
961 "Adjusting requested max DC state (%d->%d)\n", in get_allowed_dc_mask()
965 drm_err(&dev_priv->drm, in get_allowed_dc_mask()
985 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); in get_allowed_dc_mask()
991 * intel_power_domains_init - initializes the power domain structures
994 * Initializes the power domain structures for @dev_priv depending upon the
999 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_power_domains_init()
1001 dev_priv->display.params.disable_power_well = in intel_power_domains_init()
1003 dev_priv->display.params.disable_power_well); in intel_power_domains_init()
1004 power_domains->allowed_dc_mask = in intel_power_domains_init()
1005 get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc); in intel_power_domains_init()
1007 power_domains->target_dc_state = in intel_power_domains_init()
1010 mutex_init(&power_domains->lock); in intel_power_domains_init()
1012 INIT_DELAYED_WORK(&power_domains->async_put_work, in intel_power_domains_init()
1019 * intel_power_domains_cleanup - clean up power domains resources
1026 intel_display_power_map_cleanup(&dev_priv->display.power.domains); in intel_power_domains_cleanup()
1031 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_power_domains_sync_hw()
1034 mutex_lock(&power_domains->lock); in intel_power_domains_sync_hw()
1037 mutex_unlock(&power_domains->lock); in intel_power_domains_sync_hw()
1052 drm_WARN(&dev_priv->drm, enable != state, in gen9_dbuf_slice_set()
1053 "DBuf slice %d power %s timeout!\n", in gen9_dbuf_slice_set()
1060 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in gen9_dbuf_slices_update()
1061 u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask; in gen9_dbuf_slices_update()
1064 drm_WARN(&dev_priv->drm, req_slices & ~slice_mask, in gen9_dbuf_slices_update()
1068 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n", in gen9_dbuf_slices_update()
1078 mutex_lock(&power_domains->lock); in gen9_dbuf_slices_update()
1083 dev_priv->display.dbuf.enabled_slices = req_slices; in gen9_dbuf_slices_update()
1085 mutex_unlock(&power_domains->lock); in gen9_dbuf_slices_update()
1092 dev_priv->display.dbuf.enabled_slices = in gen9_dbuf_enable()
1095 slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices; in gen9_dbuf_enable()
1101 * Just power up at least 1 slice, we will in gen9_dbuf_enable()
1130 unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask; in icl_mbus_init()
1148 * we don't have to program other instance-0 registers like BW_BUDDY. in icl_mbus_init()
1168 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); in hsw_assert_cdclk()
1171 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); in hsw_assert_cdclk()
1174 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); in hsw_assert_cdclk()
1181 for_each_intel_crtc(&dev_priv->drm, crtc) in assert_can_disable_lcpll()
1182 I915_STATE_WARN(dev_priv, crtc->active, in assert_can_disable_lcpll()
1184 pipe_name(crtc->pipe)); in assert_can_disable_lcpll()
1187 "Display power well on\n"); in assert_can_disable_lcpll()
1199 "Panel power on\n"); in assert_can_disable_lcpll()
1220 * gen-specific and since we only disable LCPLL after we fully disable in assert_can_disable_lcpll()
1238 if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val)) in hsw_write_dcomp()
1239 drm_dbg_kms(&dev_priv->drm, in hsw_write_dcomp()
1249 * - Sequence for display software to disable LCPLL
1250 * - Sequence for display software to allow package C8+
1270 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); in hsw_disable_lcpll()
1280 drm_err(&dev_priv->drm, "LCPLL still locked\n"); in hsw_disable_lcpll()
1289 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); in hsw_disable_lcpll()
1298 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
1315 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); in hsw_restore_lcpll()
1333 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); in hsw_restore_lcpll()
1340 drm_err(&dev_priv->drm, in hsw_restore_lcpll()
1344 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); in hsw_restore_lcpll()
1347 intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); in hsw_restore_lcpll()
1356 * The requirements for PC8+ are that all the outputs are disabled, the power
1360 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
1375 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); in hsw_enable_pc8()
1387 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); in hsw_disable_pc8()
1419 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in skl_display_core_init()
1431 mutex_lock(&power_domains->lock); in skl_display_core_init()
1439 mutex_unlock(&power_domains->lock); in skl_display_core_init()
1451 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in skl_display_core_uninit()
1467 mutex_lock(&power_domains->lock); in skl_display_core_uninit()
1470 * BSpec says to keep the MISC IO power well enabled here, only in skl_display_core_uninit()
1471 * remove our request for power well 1. in skl_display_core_uninit()
1472 * Note that even though the driver's request is removed power well 1 in skl_display_core_uninit()
1478 mutex_unlock(&power_domains->lock); in skl_display_core_uninit()
1485 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in bxt_display_core_init()
1502 mutex_lock(&power_domains->lock); in bxt_display_core_init()
1507 mutex_unlock(&power_domains->lock); in bxt_display_core_init()
1519 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in bxt_display_core_uninit()
1536 * Note that even though the driver's request is removed power well 1 in bxt_display_core_uninit()
1539 mutex_lock(&power_domains->lock); in bxt_display_core_uninit()
1544 mutex_unlock(&power_domains->lock); in bxt_display_core_uninit()
1581 enum intel_dram_type type = dev_priv->dram_info.type; in tgl_bw_buddy_init()
1582 u8 num_channels = dev_priv->dram_info.num_channels; in tgl_bw_buddy_init()
1584 unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask; in tgl_bw_buddy_init()
1604 drm_dbg(&dev_priv->drm, in tgl_bw_buddy_init()
1614 /* Wa_22010178259:tgl,dg1,rkl,adl-s */ in tgl_bw_buddy_init()
1626 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in icl_display_core_init()
1631 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ in icl_display_core_init()
1647 * 3. Enable Power Well 1 (PG1). in icl_display_core_init()
1648 * The AUX IO power wells will be enabled on demand. in icl_display_core_init()
1650 mutex_lock(&power_domains->lock); in icl_display_core_init()
1653 mutex_unlock(&power_domains->lock); in icl_display_core_init()
1686 /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */ in icl_display_core_init()
1707 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in icl_display_core_uninit()
1716 /* 1. Disable all display engine functions -> already done */ in icl_display_core_uninit()
1729 * 4. Disable Power Well 1 (PG1). in icl_display_core_uninit()
1730 * The AUX IO power wells are toggled on demand, so they are already in icl_display_core_uninit()
1733 mutex_lock(&power_domains->lock); in icl_display_core_uninit()
1736 mutex_unlock(&power_domains->lock); in icl_display_core_uninit()
1753 * power well state and lane status to reconstruct the in chv_phy_control_init()
1756 dev_priv->display.power.chv_phy_control = in chv_phy_control_init()
1765 * with all power down bits cleared to match the state we in chv_phy_control_init()
1778 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1781 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1788 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1791 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1794 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); in chv_phy_control_init()
1796 dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false; in chv_phy_control_init()
1798 dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true; in chv_phy_control_init()
1810 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1813 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1816 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); in chv_phy_control_init()
1818 dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false; in chv_phy_control_init()
1820 dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true; in chv_phy_control_init()
1823 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", in chv_phy_control_init()
1824 dev_priv->display.power.chv_phy_control); in chv_phy_control_init()
1842 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n"); in vlv_cmnlane_wa()
1849 * Need to assert and de-assert PHY SB reset by gating the in vlv_cmnlane_wa()
1850 * common lane power, then un-gating it. in vlv_cmnlane_wa()
1870 drm_WARN(&dev_priv->drm, in assert_ved_power_gated()
1872 "VED not power gated\n"); in assert_ved_power_gated()
1883 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) && in assert_isp_power_gated()
1885 "ISP not power gated\n"); in assert_isp_power_gated()
1891 * intel_power_domains_init_hw - initialize hardware power domain state
1895 * This function initializes the hardware power domain state and enables all
1896 * power wells belonging to the INIT power domain. Power wells in other
1897 * domains (and not in the INIT domain) are referenced or disabled by
1899 * power well must match its HW enabled state, see
1902 * It will return with power domains disabled (to be enabled later by
1908 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_init_hw()
1910 power_domains->initializing = true; in intel_power_domains_init_hw()
1919 mutex_lock(&power_domains->lock); in intel_power_domains_init_hw()
1921 mutex_unlock(&power_domains->lock); in intel_power_domains_init_hw()
1924 mutex_lock(&power_domains->lock); in intel_power_domains_init_hw()
1926 mutex_unlock(&power_domains->lock); in intel_power_domains_init_hw()
1937 * Keep all power wells enabled for any dependent HW access during in intel_power_domains_init_hw()
1942 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); in intel_power_domains_init_hw()
1943 power_domains->init_wakeref = in intel_power_domains_init_hw()
1946 /* Disable power support if the user asked so. */ in intel_power_domains_init_hw()
1947 if (!i915->display.params.disable_power_well) { in intel_power_domains_init_hw()
1948 drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); in intel_power_domains_init_hw()
1949 i915->display.power.domains.disable_wakeref = intel_display_power_get(i915, in intel_power_domains_init_hw()
1954 power_domains->initializing = false; in intel_power_domains_init_hw()
1958 * intel_power_domains_driver_remove - deinitialize hw power domain state
1961 * De-initializes the display power domain HW state. It also ensures that the
1964 * It must be called with power domains already disabled (after a call to
1971 fetch_and_zero(&i915->display.power.domains.init_wakeref); in intel_power_domains_driver_remove()
1973 /* Remove the refcount we took to keep power well support disabled. */ in intel_power_domains_driver_remove()
1974 if (!i915->display.params.disable_power_well) in intel_power_domains_driver_remove()
1976 fetch_and_zero(&i915->display.power.domains.disable_wakeref)); in intel_power_domains_driver_remove()
1982 /* Keep the power well enabled, but cancel its rpm wakeref. */ in intel_power_domains_driver_remove()
1983 intel_runtime_pm_put(&i915->runtime_pm, wakeref); in intel_power_domains_driver_remove()
1987 * intel_power_domains_sanitize_state - sanitize power domains state
1990 * Sanitize the power domains state during driver loading and system resume.
1991 * The function will disable all display power wells that BIOS has enabled
1992 * without a user for it (any user for a power well has taken a reference
1998 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_sanitize_state()
2001 mutex_lock(&power_domains->lock); in intel_power_domains_sanitize_state()
2004 if (power_well->desc->always_on || power_well->count || in intel_power_domains_sanitize_state()
2008 drm_dbg_kms(&i915->drm, in intel_power_domains_sanitize_state()
2009 "BIOS left unused %s power well enabled, disabling it\n", in intel_power_domains_sanitize_state()
2014 mutex_unlock(&power_domains->lock); in intel_power_domains_sanitize_state()
2018 * intel_power_domains_enable - enable toggling of display power wells
2021 * Enable the ondemand enabling/disabling of the display power wells. Note that
2022 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
2025 * of these functions is to keep the rest of power wells enabled until the end
2026 * of display HW readout (which will acquire the power references reflecting
2032 fetch_and_zero(&i915->display.power.domains.init_wakeref); in intel_power_domains_enable()
2039 * intel_power_domains_disable - disable toggling of display power wells
2042 * Disable the ondemand enabling/disabling of the display power wells. See
2043 * intel_power_domains_enable() for which power wells this call controls.
2047 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_disable()
2049 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); in intel_power_domains_disable()
2050 power_domains->init_wakeref = in intel_power_domains_disable()
2057 * intel_power_domains_suspend - suspend power domain state
2061 * This function prepares the hardware power domain state before entering
2064 * It must be called with power domains already disabled (after a call to
2069 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_suspend()
2071 fetch_and_zero(&power_domains->init_wakeref); in intel_power_domains_suspend()
2076 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 in intel_power_domains_suspend()
2077 * support don't manually deinit the power domains. This also means the in intel_power_domains_suspend()
2078 * DMC firmware will stay active, it will power down any HW in intel_power_domains_suspend()
2079 * resources as required and also enable deeper system power states in intel_power_domains_suspend()
2082 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle && in intel_power_domains_suspend()
2090 * Even if power well support was disabled we still want to disable in intel_power_domains_suspend()
2091 * power wells if power domains must be deinitialized for suspend. in intel_power_domains_suspend()
2093 if (!i915->display.params.disable_power_well) in intel_power_domains_suspend()
2095 fetch_and_zero(&i915->display.power.domains.disable_wakeref)); in intel_power_domains_suspend()
2107 power_domains->display_core_suspended = true; in intel_power_domains_suspend()
2111 * intel_power_domains_resume - resume power domain state
2114 * This function resume the hardware power domain state during system resume.
2116 * It will return with power domain support disabled (to be enabled later by
2122 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_resume()
2124 if (power_domains->display_core_suspended) { in intel_power_domains_resume()
2126 power_domains->display_core_suspended = false; in intel_power_domains_resume()
2128 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); in intel_power_domains_resume()
2129 power_domains->init_wakeref = in intel_power_domains_resume()
2140 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_dump_info()
2144 enum intel_display_power_domain domain; in intel_power_domains_dump_info() local
2146 drm_dbg(&i915->drm, "%-25s %d\n", in intel_power_domains_dump_info()
2149 for_each_power_domain(domain, intel_power_well_domains(power_well)) in intel_power_domains_dump_info()
2150 drm_dbg(&i915->drm, " %-23s %d\n", in intel_power_domains_dump_info()
2151 intel_display_power_domain_str(domain), in intel_power_domains_dump_info()
2152 power_domains->domain_use_count[domain]); in intel_power_domains_dump_info()
2157 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2160 * Verify if the reference count of each power well matches its HW enabled
2163 * acquiring reference counts for any power wells in use and disabling the
2168 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_verify_state()
2172 mutex_lock(&power_domains->lock); in intel_power_domains_verify_state()
2178 enum intel_display_power_domain domain; in intel_power_domains_verify_state() local
2186 drm_err(&i915->drm, in intel_power_domains_verify_state()
2187 "power well %s state mismatch (refcount %d/enabled %d)", in intel_power_domains_verify_state()
2192 for_each_power_domain(domain, intel_power_well_domains(power_well)) in intel_power_domains_verify_state()
2193 domains_count += power_domains->domain_use_count[domain]; in intel_power_domains_verify_state()
2196 drm_err(&i915->drm, in intel_power_domains_verify_state()
2197 "power well %s refcount/domain refcount mismatch " in intel_power_domains_verify_state()
2215 mutex_unlock(&power_domains->lock); in intel_power_domains_verify_state()
2270 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_display_power_resume()
2276 if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6) in intel_display_power_resume()
2278 else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) in intel_display_power_resume()
2285 (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) in intel_display_power_resume()
2294 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_display_power_debug()
2297 mutex_lock(&power_domains->lock); in intel_display_power_debug()
2299 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); in intel_display_power_debug()
2300 for (i = 0; i < power_domains->power_well_count; i++) { in intel_display_power_debug()
2304 power_well = &power_domains->power_wells[i]; in intel_display_power_debug()
2305 seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well), in intel_display_power_debug()
2309 seq_printf(m, " %-23s %d\n", in intel_display_power_debug()
2311 power_domains->domain_use_count[power_domain]); in intel_display_power_debug()
2314 mutex_unlock(&power_domains->lock); in intel_display_power_debug()
2478 if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID)) in intel_display_power_ddi_io_domain()
2481 return domains->ddi_io + (int)(port - domains->port_start); in intel_display_power_ddi_io_domain()
2489 if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID)) in intel_display_power_ddi_lanes_domain()
2492 return domains->ddi_lanes + (int)(port - domains->port_start); in intel_display_power_ddi_lanes_domain()
2515 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID)) in intel_display_power_aux_io_domain()
2518 return domains->aux_io + (int)(aux_ch - domains->aux_ch_start); in intel_display_power_aux_io_domain()
2526 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)) in intel_display_power_legacy_aux_domain()
2529 return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); in intel_display_power_legacy_aux_domain()
2537 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID)) in intel_display_power_tbt_aux_domain()
2540 return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start); in intel_display_power_tbt_aux_domain()