// SPDX-License-Identifier: GPL-2.0-only
/*
 * cppc.c: CPPC Interface for x86
 * Copyright (c) 2016, Intel Corporation.
 */

#include <linux/bitfield.h>

#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/topology.h>

#define CPPC_HIGHEST_PERF_PERFORMANCE	196
#define CPPC_HIGHEST_PERF_PREFCORE	166

enum amd_pref_core {
	AMD_PREF_CORE_UNKNOWN = 0,
	AMD_PREF_CORE_SUPPORTED,
	AMD_PREF_CORE_UNSUPPORTED,
};
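/*
 * Cached result of the preferred-core probe; set once by
 * amd_detect_prefcore(). boost_numerator caches the highest perf value
 * sampled during that probe, for use when preferred cores are unsupported.
 */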
static enum amd_pref_core amd_pref_core_detected;
static u64 boost_numerator;

/* Refer to drivers/acpi/cppc_acpi.c for the description of functions */

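/*
 * CPPC is treated as usable on AMD/Hygon family 0x19 (models 0x00-0x0f and
 * 0x20-0x2f) and family 0x17 (models 0x30-0x7f) regardless of whether the
 * CPPC CPUID feature flag is advertised; all other models rely on
 * X86_FEATURE_CPPC.
 */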
bool cpc_supported_by_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_HYGON:
		if (boot_cpu_data.x86 == 0x19 && ((boot_cpu_data.x86_model <= 0x0f) ||
		    (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
			return true;
		else if (boot_cpu_data.x86 == 0x17 &&
			 boot_cpu_data.x86_model >= 0x30 && boot_cpu_data.x86_model <= 0x7f)
			return true;
		return boot_cpu_has(X86_FEATURE_CPPC);
	}
	return false;
}

bool cpc_ffh_supported(void)
{
	return true;
}

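/*
 * FFH reads go straight to the MSR named by reg->address; the field is then
 * isolated with a mask built from bit_offset/bit_width. For example, with
 * bit_offset = 8 and bit_width = 8 the mask is GENMASK_ULL(15, 8), so a raw
 * MSR value of 0xABCD yields (0xABCD & 0xFF00) >> 8 = 0xAB.
 */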
int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	int err;

	err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
	if (!err) {
		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				       reg->bit_offset);

		*val &= mask;
		*val >>= reg->bit_offset;
	}
	return err;
}

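/*
 * FFH writes are a read-modify-write: the MSR is read, the target field is
 * cleared, the new value is shifted into place, and the merged result is
 * written back so the bits outside the field are preserved.
 */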
int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	u64 rd_val;
	int err;

	err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
	if (!err) {
		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				       reg->bit_offset);

		val <<= reg->bit_offset;
		val &= mask;
		rd_val &= ~mask;
		rd_val |= val;
		err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val);
	}
	return err;
}

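/*
 * The ratio passed to freq_invariance_set_perf_ratio() is the midpoint
 * between the boost-to-nominal ratio and 1, expressed in
 * SCHED_CAPACITY_SCALE (1024) units. As an illustration with made-up values,
 * numerator = 166 and nominal_perf = 100 give
 * ((166 * 1024 / 100) + 1024) >> 1 = 1361, i.e. roughly 1.33 * scale.
 */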
static void amd_set_max_freq_ratio(void)
{
	struct cppc_perf_caps perf_caps;
	u64 numerator, nominal_perf;
	u64 perf_ratio;
	int rc;

	rc = cppc_get_perf_caps(0, &perf_caps);
	if (rc) {
		pr_warn("Could not retrieve perf counters (%d)\n", rc);
		return;
	}

	rc = amd_get_boost_ratio_numerator(0, &numerator);
	if (rc) {
		pr_warn("Could not retrieve highest performance (%d)\n", rc);
		return;
	}
	nominal_perf = perf_caps.nominal_perf;

	if (!nominal_perf) {
		pr_warn("Could not retrieve nominal performance\n");
		return;
	}

	/* midpoint between max_boost and max_P */
	perf_ratio = (div_u64(numerator * SCHED_CAPACITY_SCALE, nominal_perf) + SCHED_CAPACITY_SCALE) >> 1;

	freq_invariance_set_perf_ratio(perf_ratio, false);
}

static DEFINE_MUTEX(freq_invariance_lock);

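/*
 * Compute the ratio only once: the mutex serializes concurrent callers and
 * init_done makes later calls a no-op. Only AMD systems with APERF/MPERF
 * are considered.
 */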
static inline void init_freq_invariance_cppc(void)
{
	static bool init_done;

	if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
		return;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	mutex_lock(&freq_invariance_lock);
	if (!init_done)
		amd_set_max_freq_ratio();
	init_done = true;
	mutex_unlock(&freq_invariance_lock);
}

void acpi_processor_init_invariance_cppc(void)
{
	init_freq_invariance_cppc();
}

/*
 * Get the highest performance register value.
 * @cpu: CPU from which to get highest performance.
 * @highest_perf: Return address for highest performance value.
 *
 * Return: 0 for success, negative error code otherwise.
 */
int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
{
	u64 val;
	int ret;

	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
		if (ret)
			goto out;

		val = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, val);
	} else {
		ret = cppc_get_highest_perf(cpu, &val);
		if (ret)
			goto out;
	}

	WRITE_ONCE(*highest_perf, (u32)val);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);

/**
 * amd_detect_prefcore: Detect if CPUs in the system support preferred cores
 * @detected: Output variable for the result of the detection.
 *
 * Determine whether CPUs in the system support preferred cores. On systems
 * that support preferred cores, different highest perf values will be found
 * on different cores. On other systems, the highest perf value will be the
 * same on all cores.
 *
 * The result of the detection will be stored in the 'detected' parameter.
 *
 * Return: 0 for success, negative error code otherwise
 */
int amd_detect_prefcore(bool *detected)
{
	int cpu, count = 0;
	u64 highest_perf[2] = {0};

	if (WARN_ON(!detected))
		return -EINVAL;

	switch (amd_pref_core_detected) {
	case AMD_PREF_CORE_SUPPORTED:
		*detected = true;
		return 0;
	case AMD_PREF_CORE_UNSUPPORTED:
		*detected = false;
		return 0;
	default:
		break;
	}

	for_each_present_cpu(cpu) {
		u32 tmp;
		int ret;

		ret = amd_get_highest_perf(cpu, &tmp);
		if (ret)
			return ret;

		if (!count || (count == 1 && tmp != highest_perf[0]))
			highest_perf[count++] = tmp;

		if (count == 2)
			break;
	}

	*detected = (count == 2);
	boost_numerator = highest_perf[0];

	amd_pref_core_detected = *detected ? AMD_PREF_CORE_SUPPORTED :
					     AMD_PREF_CORE_UNSUPPORTED;

	pr_debug("AMD CPPC preferred core is %ssupported (highest perf: 0x%llx)\n",
		 *detected ? "" : "un", highest_perf[0]);

	return 0;
}
EXPORT_SYMBOL_GPL(amd_detect_prefcore);
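/*
 * Usage sketch (illustrative only, not a caller in this file): a cpufreq
 * driver could probe preferred-core support like this, enabling ranking
 * hints only when detection succeeds:
 *
 *	bool prefcore;
 *
 *	if (!amd_detect_prefcore(&prefcore) && prefcore)
 *		enable_itmt_ranking();	// hypothetical helper
 */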

/**
 * amd_get_boost_ratio_numerator: Get the numerator to use for boost ratio calculation
 * @cpu: CPU to get numerator for.
 * @numerator: Output variable for numerator.
 *
 * Determine the numerator to use for calculating the boost ratio on
 * a CPU. On systems that support preferred cores, this will be a hardcoded
 * value. On other systems this will be the highest performance register value.
 *
 * If the system boots with amd-pstate enabled but preferred cores disabled,
 * the correct boost numerator is still returned to match hardware capabilities
 * even though the preferred-core scheduling hints are not enabled.
 *
 * Return: 0 for success, negative error code otherwise.
 */
int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
{
	enum x86_topology_cpu_type core_type = get_topology_cpu_type(&cpu_data(cpu));
	bool prefcore;
	int ret;
	u32 tmp;

	ret = amd_detect_prefcore(&prefcore);
	if (ret)
		return ret;

	/* without preferred cores, return the highest perf register value */
	if (!prefcore) {
		*numerator = boost_numerator;
		return 0;
	}

	/*
	 * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
	 * the highest performance level is set to 196.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=218759
	 */
	if (cpu_feature_enabled(X86_FEATURE_ZEN4)) {
		switch (boot_cpu_data.x86_model) {
		case 0x70 ... 0x7f:
			*numerator = CPPC_HIGHEST_PERF_PERFORMANCE;
			return 0;
		default:
			break;
		}
	}

	/* detect if running on heterogeneous design */
	if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES)) {
		switch (core_type) {
		case TOPO_CPU_TYPE_UNKNOWN:
			pr_warn("Undefined core type found for cpu %d\n", cpu);
			break;
		case TOPO_CPU_TYPE_PERFORMANCE:
			/* use the max scale for performance cores */
			*numerator = CPPC_HIGHEST_PERF_PERFORMANCE;
			return 0;
		case TOPO_CPU_TYPE_EFFICIENCY:
			/* use the highest perf value for efficiency cores */
			ret = amd_get_highest_perf(cpu, &tmp);
			if (ret)
				return ret;
			*numerator = tmp;
			return 0;
		}
	}

	*numerator = CPPC_HIGHEST_PERF_PREFCORE;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_get_boost_ratio_numerator);
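/*
 * Illustration (assumption, not an API defined here): consumers such as the
 * amd-pstate driver typically scale this numerator against nominal_perf,
 * along the lines of div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf).
 * See amd_set_max_freq_ratio() above for the midpoint variant used for
 * frequency invariance.
 */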