Lines matching "ecx" and "2000" in intel_idle.c

1 // SPDX-License-Identifier: GPL-2.0-only
3 * intel_idle.c - native hardware idle loop for modern Intel processors
5 * Copyright (c) 2013 - 2020, Intel Corporation.
23 * for preventing entry into deep C-states
25 * CPU will flush caches as needed when entering a C-state via MWAIT
33 * ACPI has a .suspend hack to turn off deep C-states during suspend
39 /* un-comment DEBUG to enable pr_debug() statements */
55 #include <asm/intel-family.h>
57 #include <asm/spec-ctrl.h>
67 static int max_cstate = CPUIDLE_STATE_MAX - 1;
87 * Hardware C-state auto-demotion may not always be optimal.
102 * Enable interrupts before entering the C-state. On some platforms and for
103 * some C-states, this may measurably decrease interrupt latency.
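Building on that comment: a minimal sketch of how an interrupt-enabled entry point can wrap the common __intel_idle() helper shown further down in this listing. Passing irqoff=false is an assumption on my part, since the break-on-interrupt MWAIT extension is redundant once interrupts are already enabled.

static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
                                    struct cpuidle_driver *drv, int index)
{
        int ret;

        /* Enable interrupts before MWAIT to shave interrupt latency. */
        raw_local_irq_enable();
        ret = __intel_idle(dev, drv, index, false);     /* irqoff=false: assumption */
        raw_local_irq_disable();

        return ret;
}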
119 * Initialize large xstate for the C6-state entrance.
124 * MWAIT takes an 8-bit "hint" in EAX "suggesting"
125 * the C-state (top nibble) and sub-state (bottom nibble)
137 struct cpuidle_state *state = &drv->states[index]; in __intel_idle()
138 unsigned long eax = flg2MWAIT(state->flags); in __intel_idle()
139 unsigned long ecx = 1*irqoff; /* break on interrupt flag */ in __intel_idle() local
141 mwait_idle_with_hints(eax, ecx); in __intel_idle()
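The hint lives in the top byte of each cpuidle state's flags word; the conversion helpers used throughout this listing look essentially like this (a sketch of the MWAIT2flg()/flg2MWAIT() pair):

#define MWAIT2flg(eax)          ((eax & 0xFF) << 24)
#define flg2MWAIT(flags)        (((flags) >> 24) & 0xFF)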
147 * intel_idle - Ask the processor to enter the given idle state.
156 * enable one-shot tick broadcasting for the target CPU before executing MWAIT.
198 * intel_idle_s2idle - Ask the processor to enter the given idle state.
206 * Invoked as a suspend-to-idle callback routine with frozen user space, frozen
212 unsigned long ecx = 1; /* break on interrupt flag */ in intel_idle_s2idle() local
213 struct cpuidle_state *state = &drv->states[index]; in intel_idle_s2idle()
214 unsigned long eax = flg2MWAIT(state->flags); in intel_idle_s2idle()
216 if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) in intel_idle_s2idle()
219 mwait_idle_with_hints(eax, ecx); in intel_idle_s2idle()
869 .target_residency = 2000,
1191 .exit_latency = 2000,
1192 .target_residency = 2000,
1563 unsigned long eax = flg2MWAIT(state->flags); in intel_idle_state_needs_timer_stop()
1569 * Switch over to one-shot tick broadcast if the target C-state in intel_idle_state_needs_timer_stop()
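Taken together with the flg2MWAIT() line above, the check plausibly reduces to: no broadcast is needed when the APIC timer is always running (ARAT); otherwise the tick must be stopped for any hint deeper than C1. A sketch under those assumptions:

static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
{
        unsigned long eax = flg2MWAIT(state->flags);

        /* An always-running APIC timer never needs the broadcast. */
        if (boot_cpu_has(X86_FEATURE_ARAT))
                return false;

        /* Hint 0x00 is C1; a nonzero C-state nibble means deeper than C1. */
        return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
}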
1589 * intel_idle_cst_usable - Check if the _CST information can be used.
1591 * Check if all of the C-states listed by _CST in the max_cstate range are
1604 if (cx->entry_method != ACPI_CSTATE_FFH) in intel_idle_cst_usable()
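In other words, the _CST table is only usable if every listed state can be entered through the FFH (MWAIT) method. A sketch of that walk, assuming the driver-local acpi_state_table referenced elsewhere in this listing:

static bool __init intel_idle_cst_usable(void)
{
        int cstate, limit;

        limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
                      acpi_state_table.count);

        for (cstate = 1; cstate < limit; cstate++) {
                struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];

                /* Only FFH (MWAIT-based) entries are usable by this driver. */
                if (cx->entry_method != ACPI_CSTATE_FFH)
                        return false;
        }

        return true;
}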
1626 if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table)) in intel_idle_acpi_cst_extract()
1657 if (intel_idle_max_cstate_reached(cstate - 1)) in intel_idle_init_cstates_acpi()
1662 state = &drv->states[drv->state_count++]; in intel_idle_init_cstates_acpi()
1664 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); in intel_idle_init_cstates_acpi()
1665 strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); in intel_idle_init_cstates_acpi()
1666 state->exit_latency = cx->latency; in intel_idle_init_cstates_acpi()
1668 * For C1-type C-states use the same number for both the exit in intel_idle_init_cstates_acpi()
1670 * C1 in the majority of the static C-states tables above. in intel_idle_init_cstates_acpi()
1671 * For the other types of C-states, however, set the target in intel_idle_init_cstates_acpi()
1673 * a reasonable balance between energy-efficiency and in intel_idle_init_cstates_acpi()
1676 state->target_residency = cx->latency; in intel_idle_init_cstates_acpi()
1677 if (cx->type > ACPI_STATE_C1) in intel_idle_init_cstates_acpi()
1678 state->target_residency *= 3; in intel_idle_init_cstates_acpi()
1680 state->flags = MWAIT2flg(cx->address); in intel_idle_init_cstates_acpi()
1681 if (cx->type > ACPI_STATE_C2) in intel_idle_init_cstates_acpi()
1682 state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; in intel_idle_init_cstates_acpi()
1685 state->flags |= CPUIDLE_FLAG_OFF; in intel_idle_init_cstates_acpi()
1688 state->flags |= CPUIDLE_FLAG_TIMER_STOP; in intel_idle_init_cstates_acpi()
1690 state->enter = intel_idle; in intel_idle_init_cstates_acpi()
1691 state->enter_s2idle = intel_idle_s2idle; in intel_idle_init_cstates_acpi()
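As a worked example of that heuristic: a _CST entry of type C1 reporting 2 us of latency is registered with both exit_latency and target_residency of 2 us, while a C2-or-deeper entry reporting 100 us gets exit_latency 100 us and target_residency 300 us (the latency figures here are illustrative only).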
1700 * If there are no _CST C-states, do not disable any C-states by in intel_idle_off_by_default()
1726 * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
1728 * Tune IVT multi-socket targets.
1733 /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */ in ivt_idle_state_table_update()
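A sketch of that selection, assuming ivt_cstates_4s and ivt_cstates_8s are the alternative tables the comment refers to:

static void __init ivt_idle_state_table_update(void)
{
        /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
        int cpu, package_num, num_sockets = 1;

        for_each_online_cpu(cpu) {
                package_num = topology_physical_package_id(cpu);
                if (package_num + 1 > num_sockets) {
                        num_sockets = package_num + 1;

                        if (num_sockets > 4) {
                                cpuidle_state_table = ivt_cstates_8s;
                                return;
                        }
                }
        }

        if (num_sockets > 2)
                cpuidle_state_table = ivt_cstates_4s;

        /* else, 1 and 2 socket systems keep the default ivt_cstates table */
}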
1755 * irtl_2_usec - IRTL to microseconds conversion.
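The Interrupt Response Time Limit (IRTL) MSRs pack a 10-bit time value with a 3-bit unit selector; a sketch of the conversion to microseconds (the unit table is my reading of the register layout, so treat it as an assumption):

static const unsigned long long irtl_ns_units[] __initconst = {
        1, 32, 1024, 32768, 1048576, 33554432, 0, 0
};

static unsigned long long __init irtl_2_usec(unsigned long long irtl)
{
        unsigned long long ns;

        if (!irtl)
                return 0;

        /* Bits 12:10 select the time unit, bits 9:0 are the multiplier. */
        ns = irtl_ns_units[(irtl >> 10) & 0x7];

        return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
}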
1776 * bxt_idle_state_table_update - Fix up the Broxton idle states table.
1824 * sklh_idle_state_table_update - Fix up the Sky Lake idle states table.
1826 * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled.
1831 unsigned int eax, ebx, ecx, edx; in sklh_idle_state_table_update() local
1844 /* PC10 is not enabled in PKG C-state limit */ in sklh_idle_state_table_update()
1848 ecx = 0; in sklh_idle_state_table_update()
1849 cpuid(7, &eax, &ebx, &ecx, &edx); in sklh_idle_state_table_update()
1861 skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */ in sklh_idle_state_table_update()
1862 skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */ in sklh_idle_state_table_update()
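The SGX side of that condition comes from CPUID leaf 7 (EBX bit 2) plus the firmware feature-control MSR; a standalone sketch, where sklh_sgx_enabled() is a hypothetical helper rather than a function in the driver:

static bool __init sklh_sgx_enabled(void)
{
        unsigned int eax, ebx, ecx = 0, edx;
        unsigned long long msr;

        cpuid(7, &eax, &ebx, &ecx, &edx);

        /* CPUID.(EAX=7,ECX=0):EBX bit 2 advertises SGX. */
        if (!(ebx & BIT(2)))
                return false;

        /* SGX must also be enabled by firmware in MSR_IA32_FEAT_CTL. */
        rdmsrl(MSR_IA32_FEAT_CTL, msr);
        return !!(msr & BIT(18));
}

When this returns true, the update bails out early and C8/C9 stay registered; otherwise they are flagged CPUIDLE_FLAG_UNUSABLE as the two lines above show.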
1866 * skx_idle_state_table_update - Adjust the Sky Lake/Cascade Lake
1876 * 000b: C0/C1 (no package C-state support) in skx_idle_state_table_update()
1878 * 010b: C6 (non-retention) in skx_idle_state_table_update()
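A sketch of how that limit is read and acted on; the SKX_CC6_* constants are hypothetical placeholders for the core-C6-only figures in the driver's table:

static void __init skx_idle_state_table_update(void)
{
        unsigned long long msr;

        rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);

        /* The low 3 bits encode the deepest package C-state allowed. */
        if ((msr & 0x7) < 2) {
                /*
                 * PC6 is disabled in the BIOS: fall back to core-C6-only
                 * timings for the C6 entry.
                 */
                skx_cstates[2].exit_latency = SKX_CC6_EXIT_LATENCY_US;
                skx_cstates[2].target_residency = SKX_CC6_TARGET_RESIDENCY_US;
        }
}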
1896 * adl_idle_state_table_update - Adjust AlderLake idle states table.
1915 * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table.
1922 * By default, the C6 state assumes the worst-case scenario of package in spr_idle_state_table_update()
1941 /* Ignore the C-state if there are NO sub-states in CPUID for it. */ in intel_idle_verify_cstate()
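CPUID leaf 5 reports, in EDX (captured as mwait_substates at init time), a 4-bit sub-state count per C-state; a sketch of the verification that comment describes:

static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
        /* Hint 0x00 is C1, whose sub-state count sits in the second EDX nibble. */
        unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
        unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
                                        MWAIT_SUBSTATE_MASK;

        /* Ignore the C-state if there are NO sub-states in CPUID for it. */
        if (num_substates == 0)
                return false;

        return true;
}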
1953 if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) { in state_update_enter_method()
1958 WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IBRS); in state_update_enter_method()
1959 WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE); in state_update_enter_method()
1960 state->enter = intel_idle_xstate; in state_update_enter_method()
1965 ((state->flags & CPUIDLE_FLAG_IBRS) || ibrs_off)) { in state_update_enter_method()
1967 * IBRS mitigation requires that C-states are entered in state_update_enter_method()
1970 if (ibrs_off && (state->flags & CPUIDLE_FLAG_IRQ_ENABLE)) in state_update_enter_method()
1971 state->flags &= ~CPUIDLE_FLAG_IRQ_ENABLE; in state_update_enter_method()
1972 WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE); in state_update_enter_method()
1973 state->enter = intel_idle_ibrs; in state_update_enter_method()
1977 if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) { in state_update_enter_method()
1978 state->enter = intel_idle_irq; in state_update_enter_method()
1984 state->enter = intel_idle_irq; in state_update_enter_method()
2040 drv->states[drv->state_count] = cpuidle_state_table[cstate]; in intel_idle_init_cstates_icpu()
2041 state = &drv->states[drv->state_count]; in intel_idle_init_cstates_icpu()
2046 if ((disabled_states_mask & BIT(drv->state_count)) || in intel_idle_init_cstates_icpu()
2047 ((icpu->use_acpi || force_use_acpi) && in intel_idle_init_cstates_icpu()
2049 !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE))) in intel_idle_init_cstates_icpu()
2050 state->flags |= CPUIDLE_FLAG_OFF; in intel_idle_init_cstates_icpu()
2053 state->flags |= CPUIDLE_FLAG_TIMER_STOP; in intel_idle_init_cstates_icpu()
2055 drv->state_count++; in intel_idle_init_cstates_icpu()
2058 if (icpu->byt_auto_demotion_disable_flag) { in intel_idle_init_cstates_icpu()
2065 * intel_idle_cpuidle_driver_init - Create the list of available idle states.
2073 drv->states[0].flags |= CPUIDLE_FLAG_OFF; in intel_idle_cpuidle_driver_init()
2075 drv->state_count = 1; in intel_idle_cpuidle_driver_init()
2111 * intel_idle_cpu_init - Register the target CPU with the cpuidle core.
2122 dev->cpu = cpu; in intel_idle_cpu_init()
2126 return -EIO; in intel_idle_cpu_init()
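Registration itself goes through the generic cpuidle device API; a trimmed sketch of the function around that -EIO return (the MSR tweaks for auto-demotion and C1E promotion done by the full function are omitted):

static int intel_idle_cpu_init(unsigned int cpu)
{
        struct cpuidle_device *dev;

        dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
        dev->cpu = cpu;

        if (cpuidle_register_device(dev)) {
                pr_debug("cpuidle_register_device %d failed!\n", cpu);
                return -EIO;
        }

        return 0;
}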
2153 if (!dev->registered) in intel_idle_cpu_online()
2160 * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
2173 unsigned int eax, ebx, ecx; in intel_idle_init() local
2178 return -ENODEV; in intel_idle_init()
2182 return -EPERM; in intel_idle_init()
2189 return -ENODEV; in intel_idle_init()
2194 return -ENODEV; in intel_idle_init()
2198 return -ENODEV; in intel_idle_init()
2200 cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); in intel_idle_init()
2202 if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || in intel_idle_init()
2203 !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || in intel_idle_init()
2205 return -ENODEV; in intel_idle_init()
2209 icpu = (const struct idle_cpu *)id->driver_data; in intel_idle_init()
2211 cpuidle_state_table = icpu->state_table; in intel_idle_init()
2212 auto_demotion_disable_flags = icpu->auto_demotion_disable_flags; in intel_idle_init()
2213 if (icpu->disable_promotion_to_c1e) in intel_idle_init()
2215 if (icpu->use_acpi || force_use_acpi) in intel_idle_init()
2218 return -ENODEV; in intel_idle_init()
2226 return -ENOMEM; in intel_idle_init()
2234 drv ? drv->name : "none"); in intel_idle_init()
2244 boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1"); in intel_idle_init()
2260 * support "intel_idle.max_cstate=..." at boot and also a read-only export of
2261 * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
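For example, booting with intel_idle.max_cstate=0 disables the driver altogether (hence the -EPERM path above), while intel_idle.max_cstate=1 caps every CPU at C1; the value in effect is readable afterwards from /sys/module/intel_idle/parameters/max_cstate.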
2274 * Some platforms come with mutually exclusive C-states, so that if one is
2275 * enabled, the other C-states must not be used. Example: C1 and C1E on
2277 * preferred C-states among the groups of mutually exclusive C-states - the
2278 * selected C-states will be registered, the other C-states from the mutually
2280 * exclusive C-states, this parameter has no effect.
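As a usage illustration only: a boot argument of the form intel_idle.preferred_cstates=<mask> selects which member of a mutually exclusive group gets registered; the bit-to-state mapping is defined by the per-platform table fixups, so any particular mask value here would be an assumption rather than a documented guarantee.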
2285 * Debugging option that forces the driver to enter all C-states with
2286 * interrupts enabled. Does not apply to C-states with