// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pgtable.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/semaphore.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpuhotplug.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>
#include <asm/cpu_device_id.h>
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/resctrl.h>
#include <asm/numa.h>
#include <asm/thermal.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

enum split_lock_detect_state {
	sld_off = 0,
	sld_warn,
	sld_fatal,
	sld_ratelimit,
};

/*
 * Default to sld_off because most systems do not support split lock detection.
 * sld_state_setup() will switch this to sld_warn on systems that support
 * split lock/bus lock detect, unless there is a command line override.
 */
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
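/*
 * Boot-time copy of MSR_TEST_CTRL with the split-lock-detect bit clear,
 * cached by __split_lock_setup(); sld_update_msr() builds the per-CPU
 * MSR value from it.
 */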
static u64 msr_test_ctrl_cache __ro_after_init;

/*
 * With a name like MSR_TEST_CTL it should go without saying, but don't touch
 * MSR_TEST_CTL unless the CPU is one of the whitelisted models.  Writing it
 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
 */
static bool cpu_model_supports_sld __ro_after_init;

/*
 * Processors that have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own caches. However, there
 * exist CPU models in which having conflicting memory types still leads to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_model) {
	case INTEL_FAM6_CORE_YONAH:
	case INTEL_FAM6_CORE2_MEROM:
	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * cpu model and family comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u8 model;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE,		0x0B,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x09,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
	{ INTEL_FAM6_BROADWELL,		0x04,	0x28 },
	{ INTEL_FAM6_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_FAM6_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_FAM6_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_FAM6_HASWELL_L,		0x01,	0x21 },
	{ INTEL_FAM6_HASWELL_G,		0x01,	0x18 },
	{ INTEL_FAM6_HASWELL,		0x03,	0x23 },
	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
};

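/*
 * A CPU is considered affected when its model and stepping match an entry
 * above and its loaded microcode revision is at or below the last known
 * bad revision listed for that entry.
 */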
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	if (c->x86 != 6)
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_model == spectre_bad_microcodes[i].model &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}

#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128	0

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
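
/*
 * Sketch of the MSR_IA32_TME_ACTIVATE fields consumed by the accessors
 * above (only the bits this file uses are shown):
 *
 *   bit  0     - locked
 *   bit  1     - TME enabled
 *   bits 7:4   - policy (0 == AES-XTS-128)
 *   bits 35:32 - number of KeyID bits for MKTME
 *   bits 63:48 - supported MKTME crypto algorithms (bit 0 == AES-XTS-128)
 */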

/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED			0
#define MKTME_DISABLED			1
#define MKTME_UNINITIALIZED		2
static int mktme_status = MKTME_UNINITIALIZED;

static void detect_tme_early(struct cpuinfo_x86 *c)
{
	u64 tme_activate, tme_policy, tme_crypto_algs;
	int keyid_bits = 0, nr_keyids = 0;
	static u64 tme_activate_cpu0 = 0;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (mktme_status != MKTME_UNINITIALIZED) {
		if (tme_activate != tme_activate_cpu0) {
			/* Broken BIOS? */
			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
			pr_err_once("x86/tme: MKTME is not usable\n");
			mktme_status = MKTME_DISABLED;

			/* Proceed. We may need to exclude bits from x86_phys_bits. */
		}
	} else {
		tme_activate_cpu0 = tme_activate;
	}

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		mktme_status = MKTME_DISABLED;
		return;
	}

	if (mktme_status != MKTME_UNINITIALIZED)
		goto detect_keyid_bits;

	pr_info("x86/tme: enabled by BIOS\n");

	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);

	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
				tme_crypto_algs);
		mktme_status = MKTME_DISABLED;
	}
detect_keyid_bits:
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	nr_keyids = (1UL << keyid_bits) - 1;
	if (nr_keyids) {
		pr_info_once("x86/mktme: enabled by BIOS\n");
		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
	} else {
		pr_info_once("x86/mktme: disabled by BIOS\n");
	}

	if (mktme_status == MKTME_UNINITIALIZED) {
		/* MKTME is usable */
		mktme_status = MKTME_ENABLED;
	}

	/*
	 * KeyID bits effectively lower the number of physical address
	 * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
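	/*
	 * For example (illustrative numbers): with keyid_bits == 6, a CPU
	 * that enumerates 46 physical address bits is left with 40 bits of
	 * real physical address space.
	 */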
	c->x86_phys_bits -= keyid_bits;
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have a TSC which doesn't stop across S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case INTEL_FAM6_ATOM_SALTWELL_MID:
		case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		case INTEL_FAM6_ATOM_SILVERMONT_MID:
		case INTEL_FAM6_ATOM_AIRMONT_NP:
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Get the number of SMT siblings early from the extended topology
	 * leaf, if available. Otherwise try the legacy SMT detection.
	 */
	if (detect_extended_topology_early(c) < 0)
		detect_ht_early(c);

	/*
	 * Adjust the number of physical bits early because it affects the
	 * valid bits of the MTRR mask registers.
	 */
	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme_early(c);
}

static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Bail out on the boot CPU; only secondary CPUs need this check */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
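	/* 0x633 is family 6, model 3, stepping 3 in the packed (family << 8 | model << 4 | stepping) encoding below */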
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
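	/* 0x520 is family 5, model 2 (P54C) in the same packed encoding; the stepping is checked separately */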
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}

static void split_lock_init(void);
static void bus_lock_init(void);

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * Use the legacy cpuid leaves 0x1 and 0x4 for topology
		 * detection.
		 */
		detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* CPUID.0AH:EAX - bits 7:0: PMU version, bits 15:8: number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
		((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	init_intel_misc_features(c);

	split_lock_init();
	bus_lock_init();

	intel_init_thermal(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512kb. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

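/*
 * The high nibble of these constants groups the TLB class: 0x0x is
 * instruction, 0x1x data, 0x2x first-level data (DATA0) and 0x4x a shared
 * second-level TLB (STLB); the low nibble selects the page sizes covered.
 */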
static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);

#undef pr_fmt
#define pr_fmt(fmt) "x86/split lock detection: " fmt

static const struct {
	const char			*option;
	enum split_lock_detect_state	state;
} sld_options[] __initconst = {
	{ "off",	sld_off   },
	{ "warn",	sld_warn  },
	{ "fatal",	sld_fatal },
	{ "ratelimit:", sld_ratelimit },
};

static struct ratelimit_state bld_ratelimit;

static unsigned int sysctl_sld_mitigate = 1;
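/*
 * buslock_sem serializes the "only one core at a time may run with split
 * lock detection disabled" mitigation; see split_lock_warn().
 */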
static DEFINE_SEMAPHORE(buslock_sem, 1);

#ifdef CONFIG_PROC_SYSCTL
static struct ctl_table sld_sysctls[] = {
	{
		.procname       = "split_lock_mitigate",
		.data           = &sysctl_sld_mitigate,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = SYSCTL_ONE,
	},
};

static int __init sld_mitigate_sysctl_init(void)
{
	register_sysctl_init("kernel", sld_sysctls);
	return 0;
}

late_initcall(sld_mitigate_sysctl_init);
#endif

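/*
 * Match a "split_lock_detect=" cmdline token against an sld_options[]
 * entry. For the "ratelimit:" option the numeric suffix is parsed here
 * as well, e.g. "split_lock_detect=ratelimit:10" allows 10 bus locks/sec.
 */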
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt), ratelimit;

	if (strncmp(arg, opt, len))
		return false;

	/*
	 * Min ratelimit is 1 bus lock/sec.
	 * Max ratelimit is 1000 bus locks/sec.
	 */
	if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 &&
	    ratelimit > 0 && ratelimit <= 1000) {
		ratelimit_state_init(&bld_ratelimit, HZ, ratelimit);
		ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE);
		return true;
	}

	return len == arglen;
}

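/*
 * Probe MSR_TEST_CTRL by trying to set or clear the split-lock-detect
 * bit and reading the value back; returns false if the MSR is not
 * present or the bit did not stick.
 */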
static bool split_lock_verify_msr(bool on)
{
	u64 ctrl, tmp;

	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
		return false;
	if (on)
		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	else
		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
		return false;
	rdmsrl(MSR_TEST_CTRL, tmp);
	return ctrl == tmp;
}

static void __init sld_state_setup(void)
{
	enum split_lock_detect_state state = sld_warn;
	char arg[20];
	int i, ret;

	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
				  arg, sizeof(arg));
	if (ret >= 0) {
		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
			if (match_option(arg, ret, sld_options[i].option)) {
				state = sld_options[i].state;
				break;
			}
		}
	}
	sld_state = state;
}

static void __init __split_lock_setup(void)
{
	if (!split_lock_verify_msr(false)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	if (!split_lock_verify_msr(true)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	/* Restore the MSR to its cached value. */
	wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}

/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{
	u64 test_ctrl_val = msr_test_ctrl_cache;

	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}

static void split_lock_init(void)
{
	/*
	 * In sld_ratelimit mode, bus lock rate limiting is handled via
	 * the #DB trap, so #AC for split lock is left disabled here.
	 */
	if (sld_state == sld_ratelimit) {
		split_lock_verify_msr(false);
		return;
	}

	if (cpu_model_supports_sld)
		split_lock_verify_msr(sld_state != sld_off);
}

static void __split_lock_reenable_unlock(struct work_struct *work)
{
	sld_update_msr(true);
	up(&buslock_sem);
}

static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);

static void __split_lock_reenable(struct work_struct *work)
{
	sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);

/*
 * If a CPU goes offline with pending delayed work to re-enable split lock
 * detection then the delayed work will be executed on some other CPU. That
 * handles releasing the buslock_sem, but because it executes on a
 * different CPU probably won't re-enable split lock detection. This is a
 * problem on HT systems since the sibling CPU on the same core may then be
 * left running with split lock detection disabled.
 *
 * Unconditionally re-enable detection here.
 */
static int splitlock_cpu_offline(unsigned int cpu)
{
	sld_update_msr(true);

	return 0;
}

static void split_lock_warn(unsigned long ip)
{
	struct delayed_work *work;
	int cpu;

	if (!current->reported_split_lock)
		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, ip);
	current->reported_split_lock = 1;

	if (sysctl_sld_mitigate) {
		/*
		 * Misery factor #1:
		 * sleep 10ms before trying to execute the split lock again.
		 */
		if (msleep_interruptible(10) > 0)
			return;
		/*
		 * Misery factor #2:
		 * only allow one core at a time to run with split lock
		 * detection disabled.
		 */
		if (down_interruptible(&buslock_sem) == -EINTR)
			return;
		work = &sl_reenable_unlock;
	} else {
		work = &sl_reenable;
	}

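	/*
	 * Queue the delayed work on this CPU so that the MSR write that
	 * re-enables detection (~2 jiffies from now) happens on the same
	 * core that is being disabled below.
	 */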
	cpu = get_cpu();
	schedule_delayed_work_on(cpu, work, 2);

	/* Disable split lock detection on this CPU to make progress */
	sld_update_msr(false);
	put_cpu();
}

bool handle_guest_split_lock(unsigned long ip)
{
	if (sld_state == sld_warn) {
		split_lock_warn(ip);
		return true;
	}

	pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
		     current->comm, current->pid,
		     sld_state == sld_fatal ? "fatal" : "bogus", ip);

	current->thread.error_code = 0;
	current->thread.trap_nr = X86_TRAP_AC;
	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
	return false;
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);

static void bus_lock_init(void)
{
	u64 val;

	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, val);

	if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    (sld_state == sld_warn || sld_state == sld_fatal)) ||
	    sld_state == sld_off) {
		/*
		 * Warn and fatal are handled by #AC for split lock if #AC for
		 * split lock is supported.
		 */
		val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
	} else {
		val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
	}

	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
}

bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
		return false;
	split_lock_warn(regs->ip);
	return true;
}

void handle_bus_lock(struct pt_regs *regs)
{
	switch (sld_state) {
	case sld_off:
		break;
	case sld_ratelimit:
		/* Enforce no more than bld_ratelimit bus locks/sec. */
		while (!__ratelimit(&bld_ratelimit))
			msleep(20);
		/* Warn on the bus lock. */
		fallthrough;
	case sld_warn:
		pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, regs->ip);
		break;
	case sld_fatal:
		force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
		break;
	}
}

/*
 * CPU models that are known to have the per-core split-lock detection
 * feature even though they do not enumerate IA32_CORE_CAPABILITIES.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,	0),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,	0),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,	0),
	{}
};

static void __init split_lock_setup(struct cpuinfo_x86 *c)
{
	const struct x86_cpu_id *m;
	u64 ia32_core_caps;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	/* Check for CPUs that have support but do not enumerate it: */
	m = x86_match_cpu(split_lock_cpu_ids);
	if (m)
		goto supported;

	if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
		return;

	/*
	 * Not all bits in MSR_IA32_CORE_CAPS are architectural, but
	 * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is.  All CPUs that set
	 * it have split lock detection.
	 */
	rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
		goto supported;

	/* CPU is not in the model list and does not have the MSR bit: */
	return;

supported:
	cpu_model_supports_sld = true;
	__split_lock_setup();
}

static void sld_state_show(void)
{
	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
		return;

	switch (sld_state) {
	case sld_off:
		pr_info("disabled\n");
		break;
	case sld_warn:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
			if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					      "x86/splitlock", NULL, splitlock_cpu_offline) < 0)
				pr_warn("No splitlock CPU offline handler\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: warning on user-space bus_locks\n");
		}
		break;
	case sld_fatal:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n",
				boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ?
				" from non-WB" : "");
		}
		break;
	case sld_ratelimit:
		if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
			pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst);
		break;
	}
}

void __init sld_setup(struct cpuinfo_x86 *c)
{
	split_lock_setup(c);
	sld_state_setup();
	sld_state_show();
}

#define X86_HYBRID_CPU_TYPE_ID_SHIFT	24

/**
 * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
 *
 * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in
 * a hybrid processor. If the processor is not hybrid, returns 0.
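 *
 * Known type values in CPUID.1AH:EAX[31:24] are 0x20 (Atom) and
 * 0x40 (Core).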
 */
u8 get_this_hybrid_cpu_type(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		return 0;

	return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
}