// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005 Intel Corporation
 * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 * 	- Added _PDC for SMP C-states on Intel CPUs
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <acpi/processor.h>
#include <asm/cpu_device_id.h>
#include <asm/cpuid.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

/*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
 * - When cache is not shared among all CPUs, we flush cache
 *   before entering C3.
 * - When cache is shared among all CPUs, we use bm_check
 *   mechanism as in UP case
 *
 * This routine is called only after all the CPUs are online
 */
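/*
 * bm_check == 1 means the idle code relies on the BM_STS bus-master status
 * bit instead of flushing caches (WBINVD) before C3; bm_control == 1 means
 * bus-master arbitration is disabled via ARB_DISABLE around C3 entry.
 */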
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
					unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	flags->bm_check = 0;
	if (num_online_cpus() == 1)
		flags->bm_check = 1;
	else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Today all MP CPUs that support C3 share cache.
		 * And caches should not be flushed by software while
		 * entering C3 type state.
		 */
		flags->bm_check = 1;
	}

	/*
	 * On all recent Intel platforms, ARB_DISABLE is a nop.
	 * So, set bm_control to zero to indicate that ARB_DISABLE
	 * is not required while entering C3 type state.
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    (c->x86 > 15 || (c->x86_vfm >= INTEL_CORE2_MEROM && c->x86_vfm <= INTEL_FAM6_LAST)))
		flags->bm_control = 0;

	if (c->x86_vendor == X86_VENDOR_CENTAUR) {
		if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
		    c->x86_stepping >= 0x0e)) {
			/*
			 * For all recent Centaur CPUs, the ucode will make sure that each
			 * core can keep cache coherence with each other while entering C3
			 * type state. So, set bm_check to 1 to indicate that the kernel
			 * doesn't need to execute a cache flush operation (WBINVD) when
			 * entering C3 type state.
			 */
			flags->bm_check = 1;
			/*
			 * For all recent Centaur platforms, ARB_DISABLE is a nop.
			 * Set bm_control to zero to indicate that ARB_DISABLE is
			 * not required while entering C3 type state.
			 */
			flags->bm_control = 0;
		}
	}

	if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
		/*
		 * All Zhaoxin CPUs that support C3 share cache.
		 * And caches should not be flushed by software while
		 * entering C3 type state.
		 */
		flags->bm_check = 1;
		/*
		 * On all recent Zhaoxin platforms, ARB_DISABLE is a nop.
		 * So, set bm_control to zero to indicate that ARB_DISABLE
		 * is not required while entering C3 type state.
		 */
		flags->bm_control = 0;
	}
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) {
		/*
		 * For all AMD Zen or newer CPUs that support C3, caches
		 * should not be flushed by software while entering C3
		 * type state. Set bm_check to 1 so that the kernel doesn't
		 * need to execute a cache flush operation (WBINVD).
		 */
		flags->bm_check = 1;
		/*
		 * In current AMD C state implementation ARB_DIS is no longer
		 * used. So set bm_control to zero to indicate ARB_DIS is not
		 * required while entering C3 type state.
		 */
		flags->bm_control = 0;
	}
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);

/* The code below handles cstate entry with monitor-mwait pair on Intel */

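/*
 * Per-CPU cache of the MWAIT hint (EAX) and extensions (ECX) used to enter
 * each ACPI C-state, indexed by cx->index and filled in by
 * acpi_processor_ffh_cstate_probe().
 */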
struct cstate_entry {
	struct {
		unsigned int eax;
		unsigned int ecx;
	} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */

static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

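/*
 * _CST FFH GAS bit_offset value indicating that the C-state is entered with
 * a native MWAIT hint rather than HLT; see the "Intel Processor
 * Vendor-Specific ACPI Interface Specification" referenced below.
 */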
#define NATIVE_CSTATE_BEYOND_HALT	(2)

static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
{
	struct acpi_processor_cx *cx = _cx;
	long retval;
	unsigned int eax, ebx, ecx, edx;
	unsigned int edx_part;
	unsigned int cstate_type; /* C-state type and not ACPI C-state type */
	unsigned int num_cstate_subtype;

	cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);

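	/*
	 * cx->address holds the raw MWAIT hint from the _CST FFH register:
	 * bits [7:4] select the hardware C-state, and the corresponding
	 * 4-bit field of CPUID leaf 5 EDX reports how many MWAIT sub-states
	 * the hardware implements for that C-state.
	 */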
	/* Check whether this particular cx_type (in CST) is supported or not */
	cstate_type = (((cx->address >> MWAIT_SUBSTATE_SIZE) &
			MWAIT_CSTATE_MASK) + 1) & MWAIT_CSTATE_MASK;
	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

	retval = 0;
	/* If the HW does not support any sub-states in this C-state */
	if (num_cstate_subtype == 0) {
		pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
				cx->address, edx_part);
		retval = -1;
		goto out;
	}

	/* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
		retval = -1;
		goto out;
	}

	if (!mwait_supported[cstate_type]) {
		mwait_supported[cstate_type] = 1;
		printk(KERN_DEBUG
			"Monitor-Mwait will be used to enter C-%d state\n",
			cx->type);
	}
	snprintf(cx->desc,
			ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
			cx->address);
out:
	return retval;
}

int acpi_processor_ffh_cstate_probe(unsigned int cpu,
		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
	struct cstate_entry *percpu_entry;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	long retval;

	if (!cpu_cstate_entry || c->cpuid_level < CPUID_LEAF_MWAIT)
		return -1;

	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
		return -1;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	percpu_entry->states[cx->index].eax = 0;
	percpu_entry->states[cx->index].ecx = 0;

	/* Make sure the CPUID-based probe runs on the target CPU */
	retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
			     false);
	if (retval == 0) {
		/* Use the hint in CST */
		percpu_entry->states[cx->index].eax = cx->address;
		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
	}

	/*
	 * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
	 * then we should skip checking BM_STS for this C-state.
	 * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
		cx->bm_sts_skip = 1;

	return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);

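/*
 * Park an offlined CPU in MWAIT using the hint cached for this C-state at
 * probe time; this does not return.
 */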
void __noreturn acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	mwait_play_dead(percpu_entry->states[cx->index].eax);
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_play_dead);

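/*
 * Idle entry: issue MONITOR/MWAIT with the EAX hint and ECX extensions
 * cached for this C-state by acpi_processor_ffh_cstate_probe().
 */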
void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
			      percpu_entry->states[cx->index].ecx);
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);

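/*
 * Allocate the per-CPU MWAIT hint cache on vendors known to support FFH
 * C-state entry (Intel, AMD, Hygon).
 */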
static int __init ffh_cstate_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL &&
	    c->x86_vendor != X86_VENDOR_AMD &&
	    c->x86_vendor != X86_VENDOR_HYGON)
		return -1;

	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
	return 0;
}

static void __exit ffh_cstate_exit(void)
{
	free_percpu(cpu_cstate_entry);
	cpu_cstate_entry = NULL;
}

arch_initcall(ffh_cstate_init);
__exitcall(ffh_cstate_exit);