Lines Matching +full:power +full:- +full:domains
1 /* SPDX-License-Identifier: MIT */
33 for_each_if(test_bit((__domain), (__power_well)->domains.bits))
37 for_each_if(test_bit((__domain), (__power_well)->domains.bits))
202 * __intel_display_power_is_enabled - unlocked check for a power domain
204 * @domain: power domain to check
211 * True when the power domain is enabled, false otherwise.
219 if (pm_runtime_suspended(dev_priv->drm.dev)) in __intel_display_power_is_enabled()
238 * intel_display_power_is_enabled - check for a power domain
240 * @domain: power domain to check
242 * This function can be used to check the hw power domain state. It is mostly
244 * upon explicit power domain reference counting to ensure that the hardware
248 * threads can't disable the power well while the caller tries to read a few
252 * True when the power domain is enabled, false otherwise.
260 power_domains = &dev_priv->display.power.domains; in intel_display_power_is_enabled()
262 mutex_lock(&power_domains->lock); in intel_display_power_is_enabled()
264 mutex_unlock(&power_domains->lock); in intel_display_power_is_enabled()
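As a usage illustration of the check above, a hedged sketch only (assuming the driver's own headers such as intel_display_power.h; the domain and the surrounding function are invented for the example). Callers are expected to hold their own power reference so the result cannot change under them:

static void example_pipe_readout(struct drm_i915_private *i915)
{
	/* Caller already holds a reference that keeps the state stable. */
	if (!intel_display_power_is_enabled(i915, POWER_DOMAIN_PIPE_A))
		return; /* power well is off, skip the register readout */

	/* ... read pipe A registers here ... */
}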
273 struct i915_power_domains *power_domains = &i915->display.power.domains; in sanitize_target_dc_state()
282 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { in sanitize_target_dc_state()
286 if (power_domains->allowed_dc_mask & target_dc_state) in sanitize_target_dc_state()
296 * intel_display_power_set_target_dc_state - Set target dc state.
300 * This function sets the "DC off" power well target_dc_state,
301 * based upon this target_dc_state, the "DC off" power well will
309 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_display_power_set_target_dc_state()
311 mutex_lock(&power_domains->lock); in intel_display_power_set_target_dc_state()
314 if (drm_WARN_ON(&dev_priv->drm, !power_well)) in intel_display_power_set_target_dc_state()
319 if (state == power_domains->target_dc_state) in intel_display_power_set_target_dc_state()
324 * If the DC off power well is disabled, it needs to be enabled and in intel_display_power_set_target_dc_state()
325 * then disabled again for the target DC state to take effect. in intel_display_power_set_target_dc_state()
330 power_domains->target_dc_state = state; in intel_display_power_set_target_dc_state()
336 mutex_unlock(&power_domains->lock); in intel_display_power_set_target_dc_state()
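A hedged sketch of how the target DC state is requested; the DC_STATE_EN_* values are the plain DC-state masks from the driver's headers, and the specific call sites shown are assumptions, not taken from this listing:

static void example_dc_state_request(struct drm_i915_private *i915)
{
	/* Ask for DC3CO as the deepest state while e.g. PSR2 is active;
	 * sanitize_target_dc_state() falls back to an allowed state if needed. */
	intel_display_power_set_target_dc_state(i915, DC_STATE_EN_DC3CO);

	/* ... later, restore the default target ... */
	intel_display_power_set_target_dc_state(i915, DC_STATE_EN_UPTO_DC6);
}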
342 bitmap_or(mask->bits, in __async_put_domains_mask()
343 power_domains->async_put_domains[0].bits, in __async_put_domains_mask()
344 power_domains->async_put_domains[1].bits, in __async_put_domains_mask()
355 display.power.domains); in assert_async_put_domain_masks_disjoint()
357 return !drm_WARN_ON(&i915->drm, in assert_async_put_domain_masks_disjoint()
358 bitmap_intersects(power_domains->async_put_domains[0].bits, in assert_async_put_domain_masks_disjoint()
359 power_domains->async_put_domains[1].bits, in assert_async_put_domain_masks_disjoint()
368 display.power.domains); in __async_put_domains_state_ok()
375 err |= drm_WARN_ON(&i915->drm, in __async_put_domains_state_ok()
376 !!power_domains->async_put_wakeref != in __async_put_domains_state_ok()
380 err |= drm_WARN_ON(&i915->drm, in __async_put_domains_state_ok()
381 power_domains->domain_use_count[domain] != 1); in __async_put_domains_state_ok()
391 display.power.domains); in print_power_domains()
394 drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM)); in print_power_domains()
396 drm_dbg(&i915->drm, "%s use_count %d\n", in print_power_domains()
398 power_domains->domain_use_count[domain]); in print_power_domains()
406 display.power.domains); in print_async_put_domains_state()
408 drm_dbg(&i915->drm, "async_put_wakeref: %s\n", in print_async_put_domains_state()
409 str_yes_no(power_domains->async_put_wakeref)); in print_async_put_domains_state()
412 &power_domains->async_put_domains[0]); in print_async_put_domains_state()
414 &power_domains->async_put_domains[1]); in print_async_put_domains_state()
453 clear_bit(domain, power_domains->async_put_domains[0].bits); in async_put_domains_clear_domain()
454 clear_bit(domain, power_domains->async_put_domains[1].bits); in async_put_domains_clear_domain()
461 cancel_delayed_work_sync(&power_domains->async_put_work); in cancel_async_put_work()
463 cancel_delayed_work(&power_domains->async_put_work); in cancel_async_put_work()
465 power_domains->async_put_next_delay = 0; in cancel_async_put_work()
472 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_display_power_grab_async_put_ref()
489 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, in intel_display_power_grab_async_put_ref()
490 fetch_and_zero(&power_domains->async_put_wakeref)); in intel_display_power_grab_async_put_ref()
501 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in __intel_display_power_get_domain()
510 power_domains->domain_use_count[domain]++; in __intel_display_power_get_domain()
514 * intel_display_power_get - grab a power domain reference
516 * @domain: power domain to reference
518 * This function grabs a power domain reference for @domain and ensures that the
519 * power domain and all its parents are powered up. Therefore users should only
520 * grab a reference to the innermost power domain they need.
522 * Any power domain reference obtained by this function must have a symmetric
528 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_display_power_get()
529 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); in intel_display_power_get()
531 mutex_lock(&power_domains->lock); in intel_display_power_get()
533 mutex_unlock(&power_domains->lock); in intel_display_power_get()
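The symmetric get/put pairing required by the kernel-doc above looks roughly like this (sketch only; the AUX domain is an arbitrary example and i915_drv.h/intel_display_power.h are assumed to be included):

static void example_aux_access(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/* Powers up the domain (and its parents) and holds a runtime PM ref. */
	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);

	/* ... access hardware that needs AUX A powered ... */

	/* Symmetric release; may allow the power well to go down again. */
	intel_display_power_put(i915, POWER_DOMAIN_AUX_A, wakeref);
}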
539 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
541 * @domain: power domain to reference
543 * This function grabs a power domain reference for @domain and ensures that the
544 * power domain and all its parents are powered up. Therefore users should only
545 * grab a reference to the innermost power domain they need.
547 * Any power domain reference obtained by this function must have a symmetric
554 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_display_power_get_if_enabled()
558 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); in intel_display_power_get_if_enabled()
562 mutex_lock(&power_domains->lock); in intel_display_power_get_if_enabled()
571 mutex_unlock(&power_domains->lock); in intel_display_power_get_if_enabled()
574 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); in intel_display_power_get_if_enabled()
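A sketch of the conditional-get pattern: the returned wakeref is zero when the domain (or the device) is not powered, so the caller backs off instead of waking the hardware. The function name and domain below are illustrative assumptions:

static bool example_read_if_powered(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_PIPE_A);
	if (!wakeref)
		return false; /* domain is off; don't power it up just to read */

	/* ... sample registers behind the already-enabled power well ... */

	intel_display_power_put(i915, POWER_DOMAIN_PIPE_A, wakeref);
	return true;
}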
590 power_domains = &dev_priv->display.power.domains; in __intel_display_power_put_domain()
592 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], in __intel_display_power_put_domain()
596 drm_WARN(&dev_priv->drm, in __intel_display_power_put_domain()
601 power_domains->domain_use_count[domain]--; in __intel_display_power_put_domain()
610 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in __intel_display_power_put()
612 mutex_lock(&power_domains->lock); in __intel_display_power_put()
614 mutex_unlock(&power_domains->lock); in __intel_display_power_put()
624 display.power.domains); in queue_async_put_domains_work()
625 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); in queue_async_put_domains_work()
626 power_domains->async_put_wakeref = wakeref; in queue_async_put_domains_work()
627 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, in queue_async_put_domains_work()
628 &power_domains->async_put_work, in queue_async_put_domains_work()
638 display.power.domains); in release_async_put_domains()
639 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; in release_async_put_domains()
646 * power well disabling. in release_async_put_domains()
665 display.power.domains.async_put_work.work); in intel_display_power_put_async_work()
666 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_display_power_put_async_work()
667 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; in intel_display_power_put_async_work()
671 mutex_lock(&power_domains->lock); in intel_display_power_put_async_work()
677 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); in intel_display_power_put_async_work()
682 &power_domains->async_put_domains[0]); in intel_display_power_put_async_work()
684 /* Requeue the work if more domains were async put meanwhile. */ in intel_display_power_put_async_work()
685 if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) { in intel_display_power_put_async_work()
686 bitmap_copy(power_domains->async_put_domains[0].bits, in intel_display_power_put_async_work()
687 power_domains->async_put_domains[1].bits, in intel_display_power_put_async_work()
689 bitmap_zero(power_domains->async_put_domains[1].bits, in intel_display_power_put_async_work()
693 power_domains->async_put_next_delay); in intel_display_power_put_async_work()
694 power_domains->async_put_next_delay = 0; in intel_display_power_put_async_work()
698 * since here we released the corresponding async-put reference. in intel_display_power_put_async_work()
706 mutex_unlock(&power_domains->lock); in intel_display_power_put_async_work()
715 * __intel_display_power_put_async - release a power domain reference asynchronously
717 * @domain: power domain to reference
719 * @delay_ms: delay of powering down the power domain
721 * This function drops the power domain reference obtained by
722 * intel_display_power_get*() and schedules a work to power down the
724 * The power down is delayed by @delay_ms if this is >= 0, or by a default
732 struct i915_power_domains *power_domains = &i915->display.power.domains; in __intel_display_power_put_async()
733 struct intel_runtime_pm *rpm = &i915->runtime_pm; in __intel_display_power_put_async()
738 mutex_lock(&power_domains->lock); in __intel_display_power_put_async()
740 if (power_domains->domain_use_count[domain] > 1) { in __intel_display_power_put_async()
746 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); in __intel_display_power_put_async()
749 if (power_domains->async_put_wakeref) { in __intel_display_power_put_async()
750 set_bit(domain, power_domains->async_put_domains[1].bits); in __intel_display_power_put_async()
751 power_domains->async_put_next_delay = max(power_domains->async_put_next_delay, in __intel_display_power_put_async()
754 set_bit(domain, power_domains->async_put_domains[0].bits); in __intel_display_power_put_async()
763 mutex_unlock(&power_domains->lock); in __intel_display_power_put_async()
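Callers normally reach this through the intel_display_power_put_async()/intel_display_power_put_async_delay() wrappers in intel_display_power.h; a hedged sketch (domain and usage chosen only for illustration):

static void example_async_release(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
	/* ... short hardware access ... */

	/* Drop the reference now; the delayed work powers the well down
	 * later, avoiding an off/on cycle if the domain is taken again soon. */
	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
}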
772 * intel_display_power_flush_work - flushes the async display power disabling work
777 * corresponding power domains.
785 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_display_power_flush_work()
789 mutex_lock(&power_domains->lock); in intel_display_power_flush_work()
791 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); in intel_display_power_flush_work()
802 mutex_unlock(&power_domains->lock); in intel_display_power_flush_work()
805 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); in intel_display_power_flush_work()
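Where the pending asynchronous power-down must be resolved before continuing (for instance before tearing down state protected by those domains), the work is flushed first; a minimal sketch, with i915 standing for the caller's struct drm_i915_private:

	/* Release any reference still parked with the async-put work so the
	 * affected power wells can reach their final state now. */
	intel_display_power_flush_work(i915);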
809 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
818 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_display_power_flush_work_sync()
825 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); in intel_display_power_flush_work_sync()
830 * intel_display_power_put - release a power domain reference
832 * @domain: power domain to put a reference for
835 * This function drops the power domain reference obtained by
836 * intel_display_power_get() and might power down the corresponding hardware
844 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); in intel_display_power_put()
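For short critical sections the header also offers a with_intel_display_power() scope helper pairing the get with an asynchronous put; a sketch, assuming the macro as currently defined in intel_display_power.h:

static void example_scoped_access(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_display_power(i915, POWER_DOMAIN_AUX_A, wakeref) {
		/* The domain is powered inside this block; the reference is
		 * dropped (asynchronously) when the block is left. */
	}
}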
848 * intel_display_power_put_unchecked - release an unchecked power domain reference
850 * @domain: power domain to put a reference for
852 * This function drops the power domain reference obtained by
853 * intel_display_power_get() and might power down the corresponding hardware
856 * This function is only for the power domain code's internal use to suppress wakeref
864 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); in intel_display_power_put_unchecked()
875 drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); in intel_display_power_get_in_set()
879 power_domain_set->wakerefs[domain] = wf; in intel_display_power_get_in_set()
881 set_bit(domain, power_domain_set->mask.bits); in intel_display_power_get_in_set()
891 drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); in intel_display_power_get_in_set_if_enabled()
898 power_domain_set->wakerefs[domain] = wf; in intel_display_power_get_in_set_if_enabled()
900 set_bit(domain, power_domain_set->mask.bits); in intel_display_power_get_in_set_if_enabled()
912 drm_WARN_ON(&i915->drm, in intel_display_power_put_mask_in_set()
913 !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM)); in intel_display_power_put_mask_in_set()
916 intel_wakeref_t __maybe_unused wf = -1; in intel_display_power_put_mask_in_set()
919 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); in intel_display_power_put_mask_in_set()
922 clear_bit(domain, power_domain_set->mask.bits); in intel_display_power_put_mask_in_set()
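The *_in_set() helpers above track a whole group of domains (and their wakerefs) behind one intel_display_power_domain_set; a hedged sketch of the pattern, with illustrative domains:

static void example_domain_set(struct drm_i915_private *i915)
{
	struct intel_display_power_domain_set power_domain_set = {};

	intel_display_power_get_in_set(i915, &power_domain_set, POWER_DOMAIN_PIPE_A);
	intel_display_power_get_in_set(i915, &power_domain_set, POWER_DOMAIN_AUX_A);

	/* ... program hardware that needs both domains powered ... */

	/* Drops every reference recorded in the set's mask. */
	intel_display_power_put_all_in_set(i915, &power_domain_set);
}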
970 if (!dev_priv->display.params.disable_power_well) in get_allowed_dc_mask()
975 } else if (enable_dc == -1) { in get_allowed_dc_mask()
978 drm_dbg_kms(&dev_priv->drm, in get_allowed_dc_mask()
979 "Adjusting requested max DC state (%d->%d)\n", in get_allowed_dc_mask()
983 drm_err(&dev_priv->drm, in get_allowed_dc_mask()
1003 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); in get_allowed_dc_mask()
1009 * intel_power_domains_init - initializes the power domain structures
1012 * Initializes the power domain structures for @dev_priv depending upon the
1017 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_power_domains_init()
1019 dev_priv->display.params.disable_power_well = in intel_power_domains_init()
1021 dev_priv->display.params.disable_power_well); in intel_power_domains_init()
1022 power_domains->allowed_dc_mask = in intel_power_domains_init()
1023 get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc); in intel_power_domains_init()
1025 power_domains->target_dc_state = in intel_power_domains_init()
1028 mutex_init(&power_domains->lock); in intel_power_domains_init()
1030 INIT_DELAYED_WORK(&power_domains->async_put_work, in intel_power_domains_init()
1037 * intel_power_domains_cleanup - clean up power domains resources
1044 intel_display_power_map_cleanup(&dev_priv->display.power.domains); in intel_power_domains_cleanup()
1049 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in intel_power_domains_sync_hw()
1052 mutex_lock(&power_domains->lock); in intel_power_domains_sync_hw()
1055 mutex_unlock(&power_domains->lock); in intel_power_domains_sync_hw()
1070 drm_WARN(&dev_priv->drm, enable != state, in gen9_dbuf_slice_set()
1071 "DBuf slice %d power %s timeout!\n", in gen9_dbuf_slice_set()
1078 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in gen9_dbuf_slices_update()
1079 u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask; in gen9_dbuf_slices_update()
1082 drm_WARN(&dev_priv->drm, req_slices & ~slice_mask, in gen9_dbuf_slices_update()
1086 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n", in gen9_dbuf_slices_update()
1096 mutex_lock(&power_domains->lock); in gen9_dbuf_slices_update()
1101 dev_priv->display.dbuf.enabled_slices = req_slices; in gen9_dbuf_slices_update()
1103 mutex_unlock(&power_domains->lock); in gen9_dbuf_slices_update()
1110 dev_priv->display.dbuf.enabled_slices = in gen9_dbuf_enable()
1113 slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices; in gen9_dbuf_enable()
1119 * Just power up at least 1 slice, we will in gen9_dbuf_enable()
1148 unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask; in icl_mbus_init()
1166 * we don't have to program other instance-0 registers like BW_BUDDY. in icl_mbus_init()
1186 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); in hsw_assert_cdclk()
1189 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); in hsw_assert_cdclk()
1192 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); in hsw_assert_cdclk()
1199 for_each_intel_crtc(&dev_priv->drm, crtc) in assert_can_disable_lcpll()
1200 I915_STATE_WARN(dev_priv, crtc->active, in assert_can_disable_lcpll()
1202 pipe_name(crtc->pipe)); in assert_can_disable_lcpll()
1205 "Display power well on\n"); in assert_can_disable_lcpll()
1217 "Panel power on\n"); in assert_can_disable_lcpll()
1238 * gen-specific and since we only disable LCPLL after we fully disable in assert_can_disable_lcpll()
1256 if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val)) in hsw_write_dcomp()
1257 drm_dbg_kms(&dev_priv->drm, in hsw_write_dcomp()
1267 * - Sequence for display software to disable LCPLL
1268 * - Sequence for display software to allow package C8+
1288 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); in hsw_disable_lcpll()
1298 drm_err(&dev_priv->drm, "LCPLL still locked\n"); in hsw_disable_lcpll()
1307 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); in hsw_disable_lcpll()
1316 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
1333 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); in hsw_restore_lcpll()
1351 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); in hsw_restore_lcpll()
1358 drm_err(&dev_priv->drm, in hsw_restore_lcpll()
1362 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); in hsw_restore_lcpll()
1365 intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); in hsw_restore_lcpll()
1374 * The requirements for PC8+ are that all the outputs are disabled, the power
1378 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
1393 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); in hsw_enable_pc8()
1405 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); in hsw_disable_pc8()
1437 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in skl_display_core_init()
1449 mutex_lock(&power_domains->lock); in skl_display_core_init()
1457 mutex_unlock(&power_domains->lock); in skl_display_core_init()
1469 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in skl_display_core_uninit()
1485 mutex_lock(&power_domains->lock); in skl_display_core_uninit()
1488 * BSpec says to keep the MISC IO power well enabled here, only in skl_display_core_uninit()
1489 * remove our request for power well 1. in skl_display_core_uninit()
1490 * Note that even though the driver's request is removed power well 1 in skl_display_core_uninit()
1496 mutex_unlock(&power_domains->lock); in skl_display_core_uninit()
1503 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in bxt_display_core_init()
1520 mutex_lock(&power_domains->lock); in bxt_display_core_init()
1525 mutex_unlock(&power_domains->lock); in bxt_display_core_init()
1537 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in bxt_display_core_uninit()
1554 * Note that even though the driver's request is removed power well 1 in bxt_display_core_uninit()
1557 mutex_lock(&power_domains->lock); in bxt_display_core_uninit()
1562 mutex_unlock(&power_domains->lock); in bxt_display_core_uninit()
1599 enum intel_dram_type type = dev_priv->dram_info.type; in tgl_bw_buddy_init()
1600 u8 num_channels = dev_priv->dram_info.num_channels; in tgl_bw_buddy_init()
1602 unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask; in tgl_bw_buddy_init()
1622 drm_dbg(&dev_priv->drm, in tgl_bw_buddy_init()
1632 /* Wa_22010178259:tgl,dg1,rkl,adl-s */ in tgl_bw_buddy_init()
1644 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in icl_display_core_init()
1649 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ in icl_display_core_init()
1665 * 3. Enable Power Well 1 (PG1). in icl_display_core_init()
1666 * The AUX IO power wells will be enabled on demand. in icl_display_core_init()
1668 mutex_lock(&power_domains->lock); in icl_display_core_init()
1671 mutex_unlock(&power_domains->lock); in icl_display_core_init()
1700 /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */ in icl_display_core_init()
1713 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; in icl_display_core_uninit()
1722 /* 1. Disable all display engine functions -> already done */ in icl_display_core_uninit()
1735 * 4. Disable Power Well 1 (PG1). in icl_display_core_uninit()
1736 * The AUX IO power wells are toggled on demand, so they are already in icl_display_core_uninit()
1739 mutex_lock(&power_domains->lock); in icl_display_core_uninit()
1742 mutex_unlock(&power_domains->lock); in icl_display_core_uninit()
1759 * power well state and lane status to reconstruct the in chv_phy_control_init()
1762 dev_priv->display.power.chv_phy_control = in chv_phy_control_init()
1771 * with all power down bits cleared to match the state we in chv_phy_control_init()
1784 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1787 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1794 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1797 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1800 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); in chv_phy_control_init()
1802 dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false; in chv_phy_control_init()
1804 dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true; in chv_phy_control_init()
1816 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1819 dev_priv->display.power.chv_phy_control |= in chv_phy_control_init()
1822 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); in chv_phy_control_init()
1824 dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false; in chv_phy_control_init()
1826 dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true; in chv_phy_control_init()
1829 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", in chv_phy_control_init()
1830 dev_priv->display.power.chv_phy_control); in chv_phy_control_init()
1848 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n"); in vlv_cmnlane_wa()
1855 * Need to assert and de-assert PHY SB reset by gating the in vlv_cmnlane_wa()
1856 * common lane power, then un-gating it. in vlv_cmnlane_wa()
1876 drm_WARN(&dev_priv->drm, in assert_ved_power_gated()
1878 "VED not power gated\n"); in assert_ved_power_gated()
1889 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) && in assert_isp_power_gated()
1891 "ISP not power gated\n"); in assert_isp_power_gated()
1897 * intel_power_domains_init_hw - initialize hardware power domain state
1901 * This function initializes the hardware power domain state and enables all
1902 * power wells belonging to the INIT power domain. Power wells in other
1903 * domains (and not in the INIT domain) are referenced or disabled by
1905 * power well must match its HW enabled state, see
1908 * It will return with power domains disabled (to be enabled later by
1914 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_init_hw()
1916 power_domains->initializing = true; in intel_power_domains_init_hw()
1925 mutex_lock(&power_domains->lock); in intel_power_domains_init_hw()
1927 mutex_unlock(&power_domains->lock); in intel_power_domains_init_hw()
1930 mutex_lock(&power_domains->lock); in intel_power_domains_init_hw()
1932 mutex_unlock(&power_domains->lock); in intel_power_domains_init_hw()
1943 * Keep all power wells enabled for any dependent HW access during in intel_power_domains_init_hw()
1948 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); in intel_power_domains_init_hw()
1949 power_domains->init_wakeref = in intel_power_domains_init_hw()
1952 /* Disable power support if the user asked so. */ in intel_power_domains_init_hw()
1953 if (!i915->display.params.disable_power_well) { in intel_power_domains_init_hw()
1954 drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); in intel_power_domains_init_hw()
1955 i915->display.power.domains.disable_wakeref = intel_display_power_get(i915, in intel_power_domains_init_hw()
1960 power_domains->initializing = false; in intel_power_domains_init_hw()
1964 * intel_power_domains_driver_remove - deinitialize hw power domain state
1967 * De-initializes the display power domain HW state. It also ensures that the
1970 * It must be called with power domains already disabled (after a call to
1977 fetch_and_zero(&i915->display.power.domains.init_wakeref); in intel_power_domains_driver_remove()
1979 /* Remove the refcount we took to keep power well support disabled. */ in intel_power_domains_driver_remove()
1980 if (!i915->display.params.disable_power_well) in intel_power_domains_driver_remove()
1982 fetch_and_zero(&i915->display.power.domains.disable_wakeref)); in intel_power_domains_driver_remove()
1988 /* Keep the power well enabled, but cancel its rpm wakeref. */ in intel_power_domains_driver_remove()
1989 intel_runtime_pm_put(&i915->runtime_pm, wakeref); in intel_power_domains_driver_remove()
1993 * intel_power_domains_sanitize_state - sanitize power domains state
1996 * Sanitize the power domains state during driver loading and system resume.
1997 * The function will disable all display power wells that BIOS has enabled
1998 * without a user for them (any user for a power well has taken a reference
2004 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_sanitize_state()
2007 mutex_lock(&power_domains->lock); in intel_power_domains_sanitize_state()
2010 if (power_well->desc->always_on || power_well->count || in intel_power_domains_sanitize_state()
2014 drm_dbg_kms(&i915->drm, in intel_power_domains_sanitize_state()
2015 "BIOS left unused %s power well enabled, disabling it\n", in intel_power_domains_sanitize_state()
2020 mutex_unlock(&power_domains->lock); in intel_power_domains_sanitize_state()
2024 * intel_power_domains_enable - enable toggling of display power wells
2027 * Enable the on-demand enabling/disabling of the display power wells. Note that
2028 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
2031 * of this function is to keep the rest of the power wells enabled until the end
2032 * of display HW readout (which will acquire the power references reflecting
2038 fetch_and_zero(&i915->display.power.domains.init_wakeref); in intel_power_domains_enable()
2045 * intel_power_domains_disable - disable toggling of display power wells
2048 * Disable the on-demand enabling/disabling of the display power wells. See
2049 * intel_power_domains_enable() for which power wells this call controls.
2053 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_disable()
2055 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); in intel_power_domains_disable()
2056 power_domains->init_wakeref = in intel_power_domains_disable()
2063 * intel_power_domains_suspend - suspend power domain state
2067 * This function prepares the hardware power domain state before entering
2070 * It must be called with power domains already disabled (after a call to
2075 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_suspend()
2077 fetch_and_zero(&power_domains->init_wakeref); in intel_power_domains_suspend()
2082 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 in intel_power_domains_suspend()
2083 * support don't manually deinit the power domains. This also means the in intel_power_domains_suspend()
2084 * DMC firmware will stay active, it will power down any HW in intel_power_domains_suspend()
2085 * resources as required and also enable deeper system power states in intel_power_domains_suspend()
2088 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle && in intel_power_domains_suspend()
2096 * Even if power well support was disabled we still want to disable in intel_power_domains_suspend()
2097 * power wells if power domains must be deinitialized for suspend. in intel_power_domains_suspend()
2099 if (!i915->display.params.disable_power_well) in intel_power_domains_suspend()
2101 fetch_and_zero(&i915->display.power.domains.disable_wakeref)); in intel_power_domains_suspend()
2113 power_domains->display_core_suspended = true; in intel_power_domains_suspend()
2117 * intel_power_domains_resume - resume power domain state
2120 * This function resumes the hardware power domain state during system resume.
2122 * It will return with power domain support disabled (to be enabled later by
2128 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_resume()
2130 if (power_domains->display_core_suspended) { in intel_power_domains_resume()
2132 power_domains->display_core_suspended = false; in intel_power_domains_resume()
2134 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); in intel_power_domains_resume()
2135 power_domains->init_wakeref = in intel_power_domains_resume()
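Read together, the init_hw/enable/disable/suspend/resume entry points above imply roughly the following driver-level ordering (a hedged outline in comment form, not code copied from the driver):

/*
 * Driver load:
 *   intel_power_domains_init_hw(i915, false);   all wells on, init ref held
 *   ... display HW state readout takes its own references ...
 *   intel_power_domains_enable(i915);           drop the init ref, allow toggling
 *
 * System suspend:
 *   intel_power_domains_disable(i915);          re-take the init ref
 *   intel_power_domains_suspend(i915, s2idle);
 *
 * System resume mirrors this via intel_power_domains_resume() followed by
 * intel_power_domains_enable() once readout is done.
 */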
2146 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_dump_info()
2152 drm_dbg(&i915->drm, "%-25s %d\n", in intel_power_domains_dump_info()
2156 drm_dbg(&i915->drm, " %-23s %d\n", in intel_power_domains_dump_info()
2158 power_domains->domain_use_count[domain]); in intel_power_domains_dump_info()
2163 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2166 * Verify if the reference count of each power well matches its HW enabled
2167 * state and the total refcount of the domains it belongs to. This must be
2169 * acquiring reference counts for any power wells in use and disabling the
2174 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_power_domains_verify_state()
2178 mutex_lock(&power_domains->lock); in intel_power_domains_verify_state()
2192 drm_err(&i915->drm, in intel_power_domains_verify_state()
2193 "power well %s state mismatch (refcount %d/enabled %d)", in intel_power_domains_verify_state()
2199 domains_count += power_domains->domain_use_count[domain]; in intel_power_domains_verify_state()
2202 drm_err(&i915->drm, in intel_power_domains_verify_state()
2203 "power well %s refcount/domain refcount mismatch " in intel_power_domains_verify_state()
2204 "(refcount %d/domains refcount %d)\n", in intel_power_domains_verify_state()
2221 mutex_unlock(&power_domains->lock); in intel_power_domains_verify_state()
2276 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_display_power_resume()
2282 if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6) in intel_display_power_resume()
2284 else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) in intel_display_power_resume()
2291 (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) in intel_display_power_resume()
2300 struct i915_power_domains *power_domains = &i915->display.power.domains; in intel_display_power_debug()
2303 mutex_lock(&power_domains->lock); in intel_display_power_debug()
2305 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); in intel_display_power_debug()
2306 for (i = 0; i < power_domains->power_well_count; i++) { in intel_display_power_debug()
2310 power_well = &power_domains->power_wells[i]; in intel_display_power_debug()
2311 seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well), in intel_display_power_debug()
2315 seq_printf(m, " %-23s %d\n", in intel_display_power_debug()
2317 power_domains->domain_use_count[power_domain]); in intel_display_power_debug()
2320 mutex_unlock(&power_domains->lock); in intel_display_power_debug()
2446 const struct intel_ddi_port_domains **domains, in intel_port_domains_for_platform() argument
2450 *domains = d13_port_domains; in intel_port_domains_for_platform()
2453 *domains = d12_port_domains; in intel_port_domains_for_platform()
2456 *domains = d11_port_domains; in intel_port_domains_for_platform()
2459 *domains = i9xx_port_domains; in intel_port_domains_for_platform()
2467 const struct intel_ddi_port_domains *domains; in intel_port_domains_for_port() local
2471 intel_port_domains_for_platform(i915, &domains, &domains_size); in intel_port_domains_for_port()
2473 if (port >= domains[i].port_start && port <= domains[i].port_end) in intel_port_domains_for_port()
2474 return &domains[i]; in intel_port_domains_for_port()
2482 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); in intel_display_power_ddi_io_domain() local
2484 if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID)) in intel_display_power_ddi_io_domain()
2487 return domains->ddi_io + (int)(port - domains->port_start); in intel_display_power_ddi_io_domain()
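The lookups above resolve a concrete port to a domain by table range plus offset: each intel_ddi_port_domains entry covers [port_start, port_end] and stores the first domain of each kind, so the result is base + (port - port_start). A hedged example (platform and enum values assumed for illustration):

static void example_port_lookup(struct drm_i915_private *i915)
{
	/* On a platform whose table starts the DDI IO range at port A,
	 * port B resolves to the domain right after the base, i.e.
	 * ddi_io + (PORT_B - PORT_A). */
	enum intel_display_power_domain d =
		intel_display_power_ddi_io_domain(i915, PORT_B);

	(void)d;
}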
2493 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); in intel_display_power_ddi_lanes_domain() local
2495 if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID)) in intel_display_power_ddi_lanes_domain()
2498 return domains->ddi_lanes + (int)(port - domains->port_start); in intel_display_power_ddi_lanes_domain()
2504 const struct intel_ddi_port_domains *domains; in intel_port_domains_for_aux_ch() local
2508 intel_port_domains_for_platform(i915, &domains, &domains_size); in intel_port_domains_for_aux_ch()
2510 if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end) in intel_port_domains_for_aux_ch()
2511 return &domains[i]; in intel_port_domains_for_aux_ch()
2519 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); in intel_display_power_aux_io_domain() local
2521 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID)) in intel_display_power_aux_io_domain()
2524 return domains->aux_io + (int)(aux_ch - domains->aux_ch_start); in intel_display_power_aux_io_domain()
2530 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); in intel_display_power_legacy_aux_domain() local
2532 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)) in intel_display_power_legacy_aux_domain()
2535 return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); in intel_display_power_legacy_aux_domain()
2541 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); in intel_display_power_tbt_aux_domain() local
2543 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID)) in intel_display_power_tbt_aux_domain()
2546 return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start); in intel_display_power_tbt_aux_domain()